
Commit 57bfb80

Auto merge of #65503 - popzxc:refactor-libtest, r=wesleywiser
Refactor libtest

## Short overview
`libtest` got refactored and split into smaller modules.

## Description
The `libtest` module is already pretty big and hard to understand. Everything is mixed up: CLI handling, console output, test execution, etc. This PR splits `libtest` into smaller, logically consistent modules, makes big functions smaller and more readable, and adds more comments, so `libtest` will be easier to understand and maintain.

Although there are a lot of changes, all of the refactoring is "soft", meaning that no public interfaces were affected and nothing should be broken. Thus this PR is (or at least should be) completely backward-compatible.

r? @wesleywiser
cc @Centril
2 parents 50ffa79 + ae04dc8 commit 57bfb80

21 files changed (+2123 / -1793 lines)
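For orientation before the diff: the `use super::{...}` block at the top of the new `bench.rs` below references several of the sibling modules created by the split. The sketch that follows is inferred only from those imports; the inline stubs are there so it compiles standalone, and the comments are guesses from the imported item names, not an authoritative map of the crate.

```rust
// Sketch of the libtest module split as suggested by bench.rs's imports below.
// Only the modules referenced from bench.rs are shown.
mod bench {}       // Bencher, BenchSamples, benchmark(), run_once()
mod event {}       // CompletedTest and other run events
mod helpers {}     // small utilities, e.g. helpers::sink::Sink for output capture
mod options {}     // run configuration such as BenchMode
mod test_result {} // TestResult
mod types {}       // core descriptions such as TestDesc
```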

src/libtest/bench.rs (new file, +258 lines)
```rust
//! Benchmarking module.
pub use std::hint::black_box;

use super::{
    event::CompletedTest,
    helpers::sink::Sink,
    options::BenchMode,
    types::TestDesc,
    test_result::TestResult,
    Sender,
};

use crate::stats;
use std::time::{Duration, Instant};
use std::cmp;
use std::io;
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::{Arc, Mutex};

/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
#[derive(Clone)]
pub struct Bencher {
    mode: BenchMode,
    summary: Option<stats::Summary>,
    pub bytes: u64,
}

impl Bencher {
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T, F>(&mut self, mut inner: F)
    where
        F: FnMut() -> T,
    {
        if self.mode == BenchMode::Single {
            ns_iter_inner(&mut inner, 1);
            return;
        }

        self.summary = Some(iter(&mut inner));
    }

    pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
    where
        F: FnMut(&mut Bencher),
    {
        f(self);
        return self.summary;
    }
}

#[derive(Debug, Clone, PartialEq)]
pub struct BenchSamples {
    pub ns_iter_summ: stats::Summary,
    pub mb_s: usize,
}

pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    use std::fmt::Write;
    let mut output = String::new();

    let median = bs.ns_iter_summ.median as usize;
    let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;

    output
        .write_fmt(format_args!(
            "{:>11} ns/iter (+/- {})",
            fmt_thousands_sep(median, ','),
            fmt_thousands_sep(deviation, ',')
        ))
        .unwrap();
    if bs.mb_s != 0 {
        output
            .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
            .unwrap();
    }
    output
}

// Format a number with thousands separators
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    use std::fmt::Write;
    let mut output = String::new();
    let mut trailing = false;
    for &pow in &[9, 6, 3, 0] {
        let base = 10_usize.pow(pow);
        if pow == 0 || trailing || n / base != 0 {
            if !trailing {
                output.write_fmt(format_args!("{}", n / base)).unwrap();
            } else {
                output.write_fmt(format_args!("{:03}", n / base)).unwrap();
            }
            if pow != 0 {
                output.push(sep);
            }
            trailing = true;
        }
        n %= base;
    }

    output
}

fn ns_from_dur(dur: Duration) -> u64 {
    dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
}

fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
where
    F: FnMut() -> T,
{
    let start = Instant::now();
    for _ in 0..k {
        black_box(inner());
    }
    return ns_from_dur(start.elapsed());
}

pub fn iter<T, F>(inner: &mut F) -> stats::Summary
where
    F: FnMut() -> T,
{
    // Initial bench run to get ballpark figure.
    let ns_single = ns_iter_inner(inner, 1);

    // Try to estimate iter count for 1ms falling back to 1m
    // iterations if first run took < 1ns.
    let ns_target_total = 1_000_000; // 1ms
    let mut n = ns_target_total / cmp::max(1, ns_single);

    // if the first run took more than 1ms we don't want to just
    // be left doing 0 iterations on every loop. The unfortunate
    // side effect of not being able to do as many runs is
    // automatically handled by the statistical analysis below
    // (i.e., larger error bars).
    n = cmp::max(1, n);

    let mut total_run = Duration::new(0, 0);
    let samples: &mut [f64] = &mut [0.0_f64; 50];
    loop {
        let loop_start = Instant::now();

        for p in &mut *samples {
            *p = ns_iter_inner(inner, n) as f64 / n as f64;
        }

        stats::winsorize(samples, 5.0);
        let summ = stats::Summary::new(samples);

        for p in &mut *samples {
            let ns = ns_iter_inner(inner, 5 * n);
            *p = ns as f64 / (5 * n) as f64;
        }

        stats::winsorize(samples, 5.0);
        let summ5 = stats::Summary::new(samples);

        let loop_run = loop_start.elapsed();

        // If we've run for 100ms and seem to have converged to a
        // stable median.
        if loop_run > Duration::from_millis(100)
            && summ.median_abs_dev_pct < 1.0
            && summ.median - summ5.median < summ5.median_abs_dev
        {
            return summ5;
        }

        total_run = total_run + loop_run;
        // Longest we ever run for is 3s.
        if total_run > Duration::from_secs(3) {
            return summ5;
        }

        // If we overflow here just return the results so far. We check a
        // multiplier of 10 because we're about to multiply by 2 and the
        // next iteration of the loop will also multiply by 5 (to calculate
        // the summ5 result)
        n = match n.checked_mul(10) {
            Some(_) => n * 2,
            None => {
                return summ5;
            }
        };
    }
}

pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<CompletedTest>, nocapture: bool, f: F)
where
    F: FnMut(&mut Bencher),
{
    let mut bs = Bencher {
        mode: BenchMode::Auto,
        summary: None,
        bytes: 0,
    };

    let data = Arc::new(Mutex::new(Vec::new()));
    let oldio = if !nocapture {
        Some((
            io::set_print(Some(Sink::new_boxed(&data))),
            io::set_panic(Some(Sink::new_boxed(&data))),
        ))
    } else {
        None
    };

    let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));

    if let Some((printio, panicio)) = oldio {
        io::set_print(printio);
        io::set_panic(panicio);
    }

    let test_result = match result {
        //bs.bench(f) {
        Ok(Some(ns_iter_summ)) => {
            let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
            let mb_s = bs.bytes * 1000 / ns_iter;

            let bs = BenchSamples {
                ns_iter_summ,
                mb_s: mb_s as usize,
            };
            TestResult::TrBench(bs)
        }
        Ok(None) => {
            // iter not called, so no data.
            // FIXME: error in this case?
            let samples: &mut [f64] = &mut [0.0_f64; 1];
            let bs = BenchSamples {
                ns_iter_summ: stats::Summary::new(samples),
                mb_s: 0,
            };
            TestResult::TrBench(bs)
        }
        Err(_) => TestResult::TrFailed,
    };

    let stdout = data.lock().unwrap().to_vec();
    let message = CompletedTest::new(desc, test_result, None, stdout);
    monitor_ch.send(message).unwrap();
}

pub fn run_once<F>(f: F)
where
    F: FnMut(&mut Bencher),
{
    let mut bs = Bencher {
        mode: BenchMode::Single,
        summary: None,
        bytes: 0,
    };
    bs.bench(f);
}
```
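To connect the pieces above: a `#[bench]` function receives the `Bencher` and hands `iter` the closure to measure, with set-up kept outside the closure, as the doc comment on `Bencher` describes. A minimal usage sketch, assuming a nightly toolchain with the unstable `test` feature; the benchmark body and names (`bench_sum`, `data`) are illustrative, not taken from this commit:

```rust
#![feature(test)] // Bencher is exposed through the unstable `test` crate on nightly
extern crate test;

use test::Bencher;

#[bench]
fn bench_sum(b: &mut Bencher) {
    // Set-up runs once, outside the measured closure.
    let data: Vec<u64> = (0..1_000).collect();
    // Setting `bytes` is what makes fmt_bench_samples append the "= N MB/s" part.
    b.bytes = (data.len() * std::mem::size_of::<u64>()) as u64;
    // The closure is what ns_iter_inner times repeatedly; its return value is
    // passed through black_box so the work is not optimized away.
    b.iter(|| data.iter().sum::<u64>());
}
```

In bench mode, `benchmark` drives this with `BenchMode::Auto` and reports a line of the shape `{median} ns/iter (+/- {deviation})`, plus `= {mb_s} MB/s` when `bytes` is non-zero, exactly as `fmt_bench_samples` builds it. `run_once`, by contrast, puts the `Bencher` into `BenchMode::Single`, so `iter` executes the closure exactly once instead of sampling.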
