Cargo fmt libtest
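This commit applies `cargo fmt` (rustfmt) to libtest, so every hunk below is a formatting-only rewrite: multi-line argument lists are collapsed or re-wrapped, imports are sorted alphabetically, single-expression blocks are flattened, and redundant blank lines are dropped. As an illustrative sketch only (the helper function and its arguments below are made up, not part of the commit), the first hunk's rewrite looks like this on a standalone function:

// Illustrative sketch only -- `describe` and its parameters are hypothetical,
// shown to demonstrate the kind of change rustfmt makes throughout this commit.
fn describe(ty: &str, name: &str, evt: &str, extras: &str) -> String {
    // Before formatting, each argument sat on its own line:
    //     format!(
    //         r#"{{ "type": "{}", "name": "{}", "event": "{}", {} }}"#,
    //         ty,
    //         name,
    //         evt,
    //         extras
    //     )
    // After `cargo fmt`, the arguments are packed onto one line when they fit:
    format!(
        r#"{{ "type": "{}", "name": "{}", "event": "{}", {} }}"#,
        ty, name, evt, extras
    )
}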
@@ -36,17 +36,12 @@ impl<T: Write> JsonFormatter<T> {
         if let Some(extras) = extra {
             self.write_message(&*format!(
                 r#"{{ "type": "{}", "name": "{}", "event": "{}", {} }}"#,
-                ty,
-                name,
-                evt,
-                extras
+                ty, name, evt, extras
             ))
         } else {
             self.write_message(&*format!(
                 r#"{{ "type": "{}", "name": "{}", "event": "{}" }}"#,
-                ty,
-                name,
-                evt
+                ty, name, evt
             ))
         }
     }
@@ -89,14 +84,12 @@ impl<T: Write> OutputFormatter for JsonFormatter<T> {
                 self.write_event("test", desc.name.as_slice(), "failed", extra_data)
             }

-            TrFailedMsg(ref m) => {
-                self.write_event(
-                    "test",
-                    desc.name.as_slice(),
-                    "failed",
-                    Some(format!(r#""message": "{}""#, EscapedString(m))),
-                )
-            }
+            TrFailedMsg(ref m) => self.write_event(
+                "test",
+                desc.name.as_slice(),
+                "failed",
+                Some(format!(r#""message": "{}""#, EscapedString(m))),
+            ),

             TrIgnored => self.write_event("test", desc.name.as_slice(), "ignored", None),

@@ -116,13 +109,10 @@ impl<T: Write> OutputFormatter for JsonFormatter<T> {

         let line = format!(
             "{{ \"type\": \"bench\", \
              \"name\": \"{}\", \
              \"median\": {}, \
              \"deviation\": {}{} }}",
-            desc.name,
-            median,
-            deviation,
-            mbps
+            desc.name, median, deviation, mbps
         );

         self.write_message(&*line)
@@ -138,16 +128,15 @@ impl<T: Write> OutputFormatter for JsonFormatter<T> {
     }

     fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
-
         self.write_message(&*format!(
             "{{ \"type\": \"suite\", \
              \"event\": \"{}\", \
              \"passed\": {}, \
              \"failed\": {}, \
              \"allowed_fail\": {}, \
              \"ignored\": {}, \
              \"measured\": {}, \
              \"filtered_out\": \"{}\" }}",
             if state.failed == 0 { "ok" } else { "failed" },
             state.passed,
             state.failed + state.allowed_fail,
@@ -196,8 +196,7 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {

         self.write_plain(&format!(
             "test {} has been running for over {} seconds\n",
-            desc.name,
-            TEST_WARN_TIMEOUT_S
+            desc.name, TEST_WARN_TIMEOUT_S
         ))
     }

@@ -232,11 +231,7 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {
         } else {
             format!(
                 ". {} passed; {} failed; {} ignored; {} measured; {} filtered out\n\n",
-                state.passed,
-                state.failed,
-                state.ignored,
-                state.measured,
-                state.filtered_out
+                state.passed, state.failed, state.ignored, state.measured, state.filtered_out
             )
         };

@@ -195,8 +195,7 @@ impl<T: Write> OutputFormatter for TerseFormatter<T> {
     fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
         self.write_plain(&format!(
             "test {} has been running for over {} seconds\n",
-            desc.name,
-            TEST_WARN_TIMEOUT_S
+            desc.name, TEST_WARN_TIMEOUT_S
         ))
     }

@@ -231,11 +230,7 @@ impl<T: Write> OutputFormatter for TerseFormatter<T> {
         } else {
             format!(
                 ". {} passed; {} failed; {} ignored; {} measured; {} filtered out\n\n",
-                state.passed,
-                state.failed,
-                state.ignored,
-                state.measured,
-                state.filtered_out
+                state.passed, state.failed, state.ignored, state.measured, state.filtered_out
             )
         };

@@ -30,10 +30,8 @@
 #![unstable(feature = "test", issue = "27812")]
 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
        html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
-       html_root_url = "https://doc.rust-lang.org/nightly/",
-       test(attr(deny(warnings))))]
+       html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
 #![deny(warnings)]

 #![feature(asm)]
 #![feature(fnbox)]
 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
@@ -43,10 +41,10 @@
 #![feature(termination_trait_lib)]

 extern crate getopts;
-extern crate term;
 #[cfg(any(unix, target_os = "cloudabi"))]
 extern crate libc;
 extern crate panic_unwind;
+extern crate term;

 pub use self::TestFn::*;
 pub use self::ColorConfig::*;
@@ -72,7 +70,7 @@ use std::process::Termination;
 use std::sync::mpsc::{channel, Sender};
 use std::sync::{Arc, Mutex};
 use std::thread;
-use std::time::{Instant, Duration};
+use std::time::{Duration, Instant};
 use std::borrow::Cow;
 use std::process;

@@ -81,16 +79,16 @@ const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in qu

 // to be used by rustc to compile tests in libtest
 pub mod test {
-    pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
-             TrFailedMsg, TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName,
-             DynTestName, DynTestFn, assert_test_result, run_test, test_main, test_main_static,
-             filter_tests, parse_opts, StaticBenchFn, ShouldPanic, Options};
+    pub use {assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
+             Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, ShouldPanic,
+             StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
+             TestOpts, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
 }

 pub mod stats;
 mod formatters;

-use formatters::{OutputFormatter, PrettyFormatter, TerseFormatter, JsonFormatter};
+use formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};

 // The name of a test. By convention this follows the rules for rust
 // paths; i.e. it should be a series of identifiers separated by double
@@ -255,7 +253,9 @@ pub struct Options {

 impl Options {
     pub fn new() -> Options {
-        Options { display_output: false }
+        Options {
+            display_output: false,
+        }
     }

     pub fn display_output(mut self, display_output: bool) -> Options {
@@ -272,7 +272,7 @@ pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
         Some(Err(msg)) => {
             eprintln!("error: {}", msg);
             process::exit(101);
-        },
+        }
         None => return,
     };

@@ -289,7 +289,7 @@ pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
             Err(e) => {
                 eprintln!("error: io error when listing tests: {:?}", e);
                 process::exit(101);
-            },
+            }
         }
     }
 }
@@ -306,18 +306,14 @@ pub fn test_main_static(tests: &[TestDescAndFn]) {
     let owned_tests = tests
         .iter()
         .map(|t| match t.testfn {
-            StaticTestFn(f) => {
-                TestDescAndFn {
-                    testfn: StaticTestFn(f),
-                    desc: t.desc.clone(),
-                }
-            }
-            StaticBenchFn(f) => {
-                TestDescAndFn {
-                    testfn: StaticBenchFn(f),
-                    desc: t.desc.clone(),
-                }
-            }
+            StaticTestFn(f) => TestDescAndFn {
+                testfn: StaticTestFn(f),
+                desc: t.desc.clone(),
+            },
+            StaticBenchFn(f) => TestDescAndFn {
+                testfn: StaticBenchFn(f),
+                desc: t.desc.clone(),
+            },
             _ => panic!("non-static tests passed to test::test_main_static"),
         })
         .collect();
@@ -397,34 +393,34 @@ fn optgroups() -> getopts::Options {
             "",
             "logfile",
             "Write logs to the specified file instead \
              of stdout",
             "PATH",
         )
         .optflag(
             "",
             "nocapture",
             "don't capture stdout/stderr of each \
              task, allow printing directly",
         )
         .optopt(
             "",
             "test-threads",
             "Number of threads used for running tests \
              in parallel",
             "n_threads",
         )
         .optmulti(
             "",
             "skip",
             "Skip tests whose names contain FILTER (this flag can \
              be used multiple times)",
             "FILTER",
         )
         .optflag(
             "q",
             "quiet",
             "Display one character per test instead of one line. \
              Alias to --format=terse",
         )
         .optflag(
             "",
@@ -516,8 +512,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
     if let Some(opt) = matches.opt_str("Z") {
         if !is_nightly() {
             return Some(Err(
-                "the option `Z` is only accepted on the nightly compiler"
-                    .into(),
+                "the option `Z` is only accepted on the nightly compiler".into(),
             ));
         }

@@ -562,19 +557,17 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
     }

     let test_threads = match matches.opt_str("test-threads") {
-        Some(n_str) => {
-            match n_str.parse::<usize>() {
-                Ok(0) => return Some(Err(format!("argument for --test-threads must not be 0"))),
-                Ok(n) => Some(n),
-                Err(e) => {
-                    return Some(Err(format!(
-                        "argument for --test-threads must be a number > 0 \
-                         (error: {})",
-                        e
-                    )))
-                }
-            }
-        }
+        Some(n_str) => match n_str.parse::<usize>() {
+            Ok(0) => return Some(Err(format!("argument for --test-threads must not be 0"))),
+            Ok(n) => Some(n),
+            Err(e) => {
+                return Some(Err(format!(
+                    "argument for --test-threads must be a number > 0 \
+                     (error: {})",
+                    e
+                )))
+            }
+        },
         None => None,
     };

@@ -586,7 +579,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
         Some(v) => {
             return Some(Err(format!(
                 "argument for --color must be auto, always, or never (was \
                  {})",
                 v
             )))
         }
@@ -599,8 +592,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
         Some("json") => {
             if !allow_unstable {
                 return Some(Err(
-                    "The \"json\" format is only accepted on the nightly compiler"
-                        .into(),
+                    "The \"json\" format is only accepted on the nightly compiler".into(),
                 ));
             }
             OutputFormat::Json
@@ -609,7 +601,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
         Some(v) => {
             return Some(Err(format!(
                 "argument for --format must be pretty, terse, or json (was \
                  {})",
                 v
             )))
         }
@@ -811,8 +803,7 @@ pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Res
                 ntest += 1;
                 "test"
             }
-            StaticBenchFn(..) |
-            DynBenchFn(..) => {
+            StaticBenchFn(..) | DynBenchFn(..) => {
                 nbench += 1;
                 "benchmark"
             }
@@ -834,7 +825,8 @@ pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Res
             writeln!(output, "")?;
         }

-        writeln!(output,
+        writeln!(
+            output,
             "{}, {}",
             plural(ntest, "test"),
             plural(nbench, "benchmark")
@@ -851,7 +843,6 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Resu
         st: &mut ConsoleTestState,
         out: &mut OutputFormatter,
     ) -> io::Result<()> {
-
         match (*event).clone() {
             TeFiltered(ref filtered_tests) => {
                 st.total = filtered_tests.len();
@@ -989,8 +980,7 @@ fn use_color(opts: &TestOpts) -> bool {
     }
 }

-#[cfg(any(target_os = "cloudabi",
-          target_os = "redox",
+#[cfg(any(target_os = "cloudabi", target_os = "redox",
           all(target_arch = "wasm32", not(target_os = "emscripten"))))]
 fn stdout_isatty() -> bool {
     // FIXME: Implement isatty on Redox
@@ -1089,10 +1079,12 @@ where
     let now = Instant::now();
     let timed_out = running_tests
         .iter()
-        .filter_map(|(desc, timeout)| if &now >= timeout {
-            Some(desc.clone())
-        } else {
-            None
+        .filter_map(|(desc, timeout)| {
+            if &now >= timeout {
+                Some(desc.clone())
+            } else {
+                None
+            }
         })
         .collect();
     for test in &timed_out {
@@ -1174,12 +1166,10 @@ fn get_concurrency() -> usize {
             let opt_n: Option<usize> = s.parse().ok();
             match opt_n {
                 Some(n) if n > 0 => n,
-                _ => {
-                    panic!(
-                        "RUST_TEST_THREADS is `{}`, should be a positive integer.",
-                        s
-                    )
-                }
+                _ => panic!(
+                    "RUST_TEST_THREADS is `{}`, should be a positive integer.",
+                    s
+                ),
             }
         }
         Err(..) => num_cpus(),
@@ -1223,20 +1213,15 @@ fn get_concurrency() -> usize {
     1
 }

-#[cfg(any(target_os = "android",
-          target_os = "cloudabi",
-          target_os = "emscripten",
-          target_os = "fuchsia",
-          target_os = "ios",
-          target_os = "linux",
-          target_os = "macos",
-          target_os = "solaris"))]
+#[cfg(any(target_os = "android", target_os = "cloudabi", target_os = "emscripten",
+          target_os = "fuchsia", target_os = "ios", target_os = "linux",
+          target_os = "macos", target_os = "solaris"))]
 fn num_cpus() -> usize {
     unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
 }

 #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "bitrig",
           target_os = "netbsd"))]
 fn num_cpus() -> usize {
     use std::ptr;

@@ -1308,26 +1293,28 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA
     // Remove tests that don't match the test filter
     filtered = match opts.filter {
         None => filtered,
-        Some(ref filter) => {
-            filtered
-                .into_iter()
-                .filter(|test| if opts.filter_exact {
-                    test.desc.name.as_slice() == &filter[..]
-                } else {
-                    test.desc.name.as_slice().contains(&filter[..])
-                })
-                .collect()
-        }
+        Some(ref filter) => filtered
+            .into_iter()
+            .filter(|test| {
+                if opts.filter_exact {
+                    test.desc.name.as_slice() == &filter[..]
+                } else {
+                    test.desc.name.as_slice().contains(&filter[..])
+                }
+            })
+            .collect(),
     };

     // Skip tests that match any of the skip filters
     filtered = filtered
         .into_iter()
         .filter(|t| {
-            !opts.skip.iter().any(|sf| if opts.filter_exact {
-                t.desc.name.as_slice() == &sf[..]
-            } else {
-                t.desc.name.as_slice().contains(&sf[..])
+            !opts.skip.iter().any(|sf| {
+                if opts.filter_exact {
+                    t.desc.name.as_slice() == &sf[..]
+                } else {
+                    t.desc.name.as_slice().contains(&sf[..])
+                }
             })
         })
         .collect();
@@ -1354,31 +1341,23 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA
     };

     // Sort the tests alphabetically
-    filtered.sort_by(|t1, t2| {
-        t1.desc.name.as_slice().cmp(t2.desc.name.as_slice())
-    });
+    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));

     filtered
 }

 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
     // convert benchmarks to tests, if we're not benchmarking them
-    tests.into_iter().map(|x| {
-        let testfn = match x.testfn {
-            DynBenchFn(bench) => {
-                DynTestFn(Box::new(move || {
-                    bench::run_once(|b| {
-                        __rust_begin_short_backtrace(|| bench.run(b))
-                    })
-                }))
-            }
-            StaticBenchFn(benchfn) => {
-                DynTestFn(Box::new(move || {
-                    bench::run_once(|b| {
-                        __rust_begin_short_backtrace(|| benchfn(b))
-                    })
-                }))
-            }
+    tests
+        .into_iter()
+        .map(|x| {
+            let testfn = match x.testfn {
+                DynBenchFn(bench) => DynTestFn(Box::new(move || {
+                    bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
+                })),
+                StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
+                    bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
+                })),
             f => f,
         };
         TestDescAndFn {
@@ -1395,22 +1374,22 @@ pub fn run_test(
     test: TestDescAndFn,
     monitor_ch: Sender<MonitorMsg>,
 ) {
-
     let TestDescAndFn { desc, testfn } = test;

-    let ignore_because_panic_abort = cfg!(target_arch = "wasm32") &&
-        !cfg!(target_os = "emscripten") &&
-        desc.should_panic != ShouldPanic::No;
+    let ignore_because_panic_abort = cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten")
+        && desc.should_panic != ShouldPanic::No;

     if force_ignore || desc.ignore || ignore_because_panic_abort {
         monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
         return;
     }

-    fn run_test_inner(desc: TestDesc,
-                      monitor_ch: Sender<MonitorMsg>,
-                      nocapture: bool,
-                      testfn: Box<FnBox() + Send>) {
+    fn run_test_inner(
+        desc: TestDesc,
+        monitor_ch: Sender<MonitorMsg>,
+        nocapture: bool,
+        testfn: Box<FnBox() + Send>,
+    ) {
         // Buffer for capturing standard I/O
         let data = Arc::new(Mutex::new(Vec::new()));
         let data2 = data.clone();
@@ -1440,7 +1419,6 @@ pub fn run_test(
             .unwrap();
     };

-
     // If the platform is single-threaded we're just going to run
     // the test synchronously, regardless of the concurrency
     // level.
@@ -1455,27 +1433,25 @@ pub fn run_test(

     match testfn {
         DynBenchFn(bencher) => {
-            ::bench::benchmark(desc,
-                               monitor_ch,
-                               opts.nocapture,
-                               |harness| bencher.run(harness));
+            ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
+                bencher.run(harness)
+            });
         }
         StaticBenchFn(benchfn) => {
-            ::bench::benchmark(desc,
-                               monitor_ch,
-                               opts.nocapture,
-                               |harness| (benchfn.clone())(harness));
+            ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
+                (benchfn.clone())(harness)
+            });
         }
         DynTestFn(f) => {
-            let cb = move || {
-                __rust_begin_short_backtrace(f)
-            };
+            let cb = move || __rust_begin_short_backtrace(f);
             run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
         }
-        StaticTestFn(f) => {
-            run_test_inner(desc, monitor_ch, opts.nocapture,
-                           Box::new(move || __rust_begin_short_backtrace(f)))
-        }
+        StaticTestFn(f) => run_test_inner(
+            desc,
+            monitor_ch,
+            opts.nocapture,
+            Box::new(move || __rust_begin_short_backtrace(f)),
+        ),
     }
 }

@@ -1487,8 +1463,7 @@ fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {

 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
     match (&desc.should_panic, task_result) {
-        (&ShouldPanic::No, Ok(())) |
-        (&ShouldPanic::Yes, Err(_)) => TrOk,
+        (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
         (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
             if err.downcast_ref::<String>()
                 .map(|e| &**e)
@@ -1545,7 +1520,6 @@ impl MetricMap {
     }
 }

-
 // Benchmarking

 /// A function that is opaque to the optimizer, to allow benchmarks to
@@ -1566,7 +1540,6 @@ pub fn black_box<T>(dummy: T) -> T {
     dummy
 }

-
 impl Bencher {
     /// Callback for benchmark functions to run in their body.
     pub fn iter<T, F>(&mut self, mut inner: F)
@@ -1605,7 +1578,6 @@ where
         return ns_from_dur(start.elapsed());
     }

-
 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
 where
     F: FnMut() -> T,
@@ -1649,8 +1621,8 @@ where

         // If we've run for 100ms and seem to have converged to a
         // stable median.
-        if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 &&
-            summ.median - summ5.median < summ5.median_abs_dev
+        if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0
+            && summ.median - summ5.median < summ5.median_abs_dev
         {
             return summ5;
         }
@@ -1680,7 +1652,7 @@ pub mod bench {
     use std::io;
     use std::sync::{Arc, Mutex};
     use stats;
-    use super::{Bencher, BenchSamples, BenchMode, Sink, MonitorMsg, TestDesc, Sender, TestResult};
+    use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};

     pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
     where
@@ -1711,7 +1683,8 @@ pub mod bench {
             io::set_panic(panicio);
         };

-        let test_result = match result { //bs.bench(f) {
+        let test_result = match result {
+            //bs.bench(f) {
             Ok(Some(ns_iter_summ)) => {
                 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
                 let mb_s = bs.bytes * 1000 / ns_iter;
@@ -1732,9 +1705,7 @@ pub mod bench {
                 };
                 TestResult::TrBench(bs)
             }
-            Err(_) => {
-                TestResult::TrFailed
-            }
+            Err(_) => TestResult::TrFailed,
         };

         let stdout = data.lock().unwrap().to_vec();
@@ -1756,9 +1727,9 @@ pub mod bench {

 #[cfg(test)]
 mod tests {
-    use test::{TrFailed, TrFailedMsg, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc,
-               TestDescAndFn, TestOpts, run_test, MetricMap, StaticTestName, DynTestName,
-               DynTestFn, ShouldPanic};
+    use test::{filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, ShouldPanic,
+               StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
+               TrIgnored, TrOk};
     use std::sync::mpsc::channel;
     use bench;
     use Bencher;
@@ -1904,25 +1875,26 @@ mod tests {
         opts.run_tests = true;
         opts.run_ignored = true;

-        let tests =
-            vec![TestDescAndFn {
+        let tests = vec![
+            TestDescAndFn {
                 desc: TestDesc {
                     name: StaticTestName("1"),
                     ignore: true,
                     should_panic: ShouldPanic::No,
                     allow_fail: false,
                 },
                 testfn: DynTestFn(Box::new(move || {})),
             },
             TestDescAndFn {
                 desc: TestDesc {
                     name: StaticTestName("2"),
                     ignore: false,
                     should_panic: ShouldPanic::No,
                     allow_fail: false,
                 },
                 testfn: DynTestFn(Box::new(move || {})),
-            }];
+            },
+        ];
         let filtered = filter_tests(&opts, tests);

         assert_eq!(filtered.len(), 1);
@@ -1935,17 +1907,16 @@ mod tests {
     fn tests() -> Vec<TestDescAndFn> {
         vec!["base", "base::test", "base::test1", "base::test2"]
             .into_iter()
-            .map(|name| {
-                TestDescAndFn {
-                    desc: TestDesc {
-                        name: StaticTestName(name),
-                        ignore: false,
-                        should_panic: ShouldPanic::No,
-                        allow_fail: false,
-                    },
-                    testfn: DynTestFn(Box::new(move || {}))
-                }
-            }).collect()
+            .map(|name| TestDescAndFn {
+                desc: TestDesc {
+                    name: StaticTestName(name),
+                    ignore: false,
+                    should_panic: ShouldPanic::No,
+                    allow_fail: false,
+                },
+                testfn: DynTestFn(Box::new(move || {})),
+            })
+            .collect()
     }

     let substr = filter_tests(
@@ -2127,10 +2098,7 @@ mod tests {
             allow_fail: false,
         };

-        ::bench::benchmark(desc,
-                           tx,
-                           true,
-                           f);
+        ::bench::benchmark(desc, tx, true, f);
         rx.recv().unwrap();
     }

@@ -2149,10 +2117,7 @@ mod tests {
             allow_fail: false,
         };

-        ::bench::benchmark(desc,
-                           tx,
-                           true,
-                           f);
+        ::bench::benchmark(desc, tx, true, f);
         rx.recv().unwrap();
     }
 }
@@ -279,7 +279,6 @@ impl Stats for [f64] {
     }
 }

-
 // Helper function: extract a value representing the `pct` percentile of a sorted sample-set, using
 // linear interpolation. If samples are not sorted, return nonsensical value.
 fn percentile_of_sorted(sorted_samples: &[f64], pct: f64) -> f64 {
@@ -304,7 +303,6 @@ fn percentile_of_sorted(sorted_samples: &[f64], pct: f64) -> f64 {
     lo + (hi - lo) * d
 }

-
 /// Winsorize a set of samples, replacing values above the `100-pct` percentile
 /// and below the `pct` percentile with those percentiles themselves. This is a
 /// way of minimizing the effect of outliers, at the cost of biasing the sample.
@@ -338,15 +336,18 @@ mod tests {
     use std::io;

     macro_rules! assert_approx_eq {
-        ($a:expr, $b:expr) => ({
+        ($a: expr, $b: expr) => {{
             let (a, b) = (&$a, &$b);
-            assert!((*a - *b).abs() < 1.0e-6,
-                    "{} is not approximately equal to {}", *a, *b);
-        })
+            assert!(
+                (*a - *b).abs() < 1.0e-6,
+                "{} is not approximately equal to {}",
+                *a,
+                *b
+            );
+        }};
     }

     fn check(samples: &[f64], summ: &Summary) {
-
         let summ2 = Summary::new(samples);

         let mut w = io::sink();
@@ -911,14 +912,18 @@ mod bench {

     #[bench]
     pub fn sum_three_items(b: &mut Bencher) {
-        b.iter(|| { [1e20f64, 1.5f64, -1e20f64].sum(); })
+        b.iter(|| {
+            [1e20f64, 1.5f64, -1e20f64].sum();
+        })
     }
     #[bench]
     pub fn sum_many_f64(b: &mut Bencher) {
         let nums = [-1e30f64, 1e60, 1e30, 1.0, -1e60];
         let v = (0..500).map(|i| nums[i % 5]).collect::<Vec<_>>();

-        b.iter(|| { v.sum(); })
+        b.iter(|| {
+            v.sum();
+        })
     }

     #[bench]