use std::io::{BufRead, BufReader, Read, Write};
use std::process::ChildStdout;
use std::time::Duration;

use termcolor::{Color, ColorSpec, WriteColor};

use crate::core::builder::Builder;
use crate::utils::exec::BootstrapCommand;

const TERSE_TESTS_PER_LINE: usize = 88;

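/// Appends `-Z unstable-options --format json` after a `--` separator (adding the
/// separator if it is missing) so the test binary emits machine-readable output, then
/// runs the tests via [`try_run_tests`].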
pub(crate) fn add_flags_and_try_run_tests(
    builder: &Builder<'_>,
    cmd: &mut BootstrapCommand,
) -> bool {
    if !cmd.get_args().any(|arg| arg == "--") {
        cmd.arg("--");
    }
    cmd.args(["-Z", "unstable-options", "--format", "json"]);

    try_run_tests(builder, cmd, false)
}

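/// Runs the test command and returns whether it succeeded. On failure this either exits
/// immediately (when fail-fast is enabled) or records the command as a delayed failure.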
pub(crate) fn try_run_tests(
    builder: &Builder<'_>,
    cmd: &mut BootstrapCommand,
    stream: bool,
) -> bool {
    if run_tests(builder, cmd, stream) {
        return true;
    }

    if builder.fail_fast {
        crate::exit!(1);
    }

    builder.config.exec_ctx().add_to_delay_failure(format!("{cmd:?}"));

    false
}

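/// Spawns the test command with its stdout captured and renders the output, either by
/// streaming it verbatim or by rendering the JSON messages in human-readable form.
/// Returns whether the command exited successfully.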
fn run_tests(builder: &Builder<'_>, cmd: &mut BootstrapCommand, stream: bool) -> bool {
    builder.verbose(|| println!("running: {cmd:?}"));

    let Some(mut streaming_command) = cmd.stream_capture_stdout(&builder.config.exec_ctx) else {
        return true;
    };

    let renderer = Renderer::new(streaming_command.stdout.take().unwrap(), builder);
    if stream {
        renderer.stream_all();
    } else {
        renderer.render_all();
    }

    let status = streaming_command.wait().unwrap();
    if !status.success() && builder.is_verbose() {
        println!(
            "\n\ncommand did not execute successfully: {cmd:?}\n\
             expected success, got: {status}",
        );
    }

    status.success()
}

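/// Parses the JSON output of the test runner and renders it in a human-readable form,
/// keeping track of failures, benchmark results, and progress counters along the way.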
struct Renderer<'a> {
    stdout: BufReader<ChildStdout>,
    failures: Vec<TestOutcome>,
    benches: Vec<BenchOutcome>,
    builder: &'a Builder<'a>,
    tests_count: Option<usize>,
    executed_tests: usize,
    up_to_date_tests: usize,
    ignored_tests: usize,
    terse_tests_in_line: usize,
    ci_latest_logged_percentage: f64,
}

impl<'a> Renderer<'a> {
    fn new(stdout: ChildStdout, builder: &'a Builder<'a>) -> Self {
        Self {
            stdout: BufReader::new(stdout),
            benches: Vec::new(),
            failures: Vec::new(),
            builder,
            tests_count: None,
            executed_tests: 0,
            up_to_date_tests: 0,
            ignored_tests: 0,
            terse_tests_in_line: 0,
            ci_latest_logged_percentage: 0.0,
        }
    }

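    /// Reads the runner's stdout line by line, rendering every line that parses as a JSON
    /// [`Message`] and forwarding any other line to our stdout unchanged.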
    fn render_all(mut self) {
        let mut line = Vec::new();
        loop {
            line.clear();
            match self.stdout.read_until(b'\n', &mut line) {
                Ok(_) => {}
                Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
                Err(err) => panic!("failed to read output of test runner: {err}"),
            }
            if line.is_empty() {
                break;
            }

            match serde_json::from_slice(&line) {
                Ok(parsed) => self.render_message(parsed),
                Err(_err) => {
                    let mut stdout = std::io::stdout();
                    stdout.write_all(&line).unwrap();
                    let _ = stdout.flush();
                }
            }
        }

        if self.up_to_date_tests > 0 {
            let n = self.up_to_date_tests;
            let s = if n > 1 { "s" } else { "" };
            println!("help: ignored {n} up-to-date test{s}; use `--force-rerun` to prevent this\n");
        }
    }

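    /// Forwards the runner's stdout to our stdout byte by byte, without interpreting it.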
    fn stream_all(mut self) {
        let mut buffer = [0; 1];
        loop {
            match self.stdout.read(&mut buffer) {
                Ok(0) => break,
                Ok(_) => {
                    let mut stdout = std::io::stdout();
                    stdout.write_all(&buffer).unwrap();
                    let _ = stdout.flush();
                }
                Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
                Err(err) => panic!("failed to read output of test runner: {err}"),
            }
        }
    }

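    /// Records a single test result in the progress counters (and, if enabled, in the
    /// build metrics), then renders it in verbose, CI, or terse style depending on the
    /// builder's configuration.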
    fn render_test_outcome(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        self.executed_tests += 1;

        if let Outcome::Ignored { reason } = outcome {
            self.ignored_tests += 1;
            if reason == Some("up-to-date") {
                self.up_to_date_tests += 1;
            }
        }

        #[cfg(feature = "build-metrics")]
        self.builder.metrics.record_test(
            &test.name,
            match outcome {
                Outcome::Ok | Outcome::BenchOk => build_helper::metrics::TestOutcome::Passed,
                Outcome::Failed => build_helper::metrics::TestOutcome::Failed,
                Outcome::Ignored { reason } => build_helper::metrics::TestOutcome::Ignored {
                    ignore_reason: reason.map(|s| s.to_string()),
                },
            },
            self.builder,
        );

        if self.builder.config.verbose_tests {
            self.render_test_outcome_verbose(outcome, test);
        } else if self.builder.config.is_running_on_ci {
            self.render_test_outcome_ci(outcome, test);
        } else {
            self.render_test_outcome_terse(outcome, test);
        }
    }

    fn render_test_outcome_verbose(&self, outcome: Outcome<'_>, test: &TestOutcome) {
        print!("test {} ... ", test.name);
        self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap();
        if let Some(exec_time) = test.exec_time {
            print!(" ({exec_time:.2?})");
        }
        println!();
    }

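    /// Renders a compact one-character marker per test (failures also print the test
    /// name), appending an `executed/total` counter and wrapping to a new line every
    /// `TERSE_TESTS_PER_LINE` results.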
    fn render_test_outcome_terse(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        if self.terse_tests_in_line != 0
            && self.terse_tests_in_line.is_multiple_of(TERSE_TESTS_PER_LINE)
        {
            if let Some(total) = self.tests_count {
                let total = total.to_string();
                let executed = format!("{:>width$}", self.executed_tests - 1, width = total.len());
                print!(" {executed}/{total}");
            }
            println!();
            self.terse_tests_in_line = 0;
        }

        self.terse_tests_in_line += 1;
        self.builder.colored_stdout(|stdout| outcome.write_short(stdout, &test.name)).unwrap();
        let _ = std::io::stdout().flush();
    }

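    /// Renders output suited to CI logs: a progress summary roughly every 10% of executed
    /// tests, plus a line for every failed test.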
    fn render_test_outcome_ci(&mut self, outcome: Outcome<'_>, test: &TestOutcome) {
        if let Some(total) = self.tests_count {
            let percent = self.executed_tests as f64 / total as f64;

            if self.ci_latest_logged_percentage + 0.10 < percent {
                let total = total.to_string();
                let executed = format!("{:>width$}", self.executed_tests, width = total.len());
                let pretty_percent = format!("{:.0}%", percent * 100.0);
                let passed_tests = self.executed_tests - (self.failures.len() + self.ignored_tests);
                println!(
                    "{:<4} -- {executed}/{total}, {:>total_indent$} passed, {} failed, {} ignored",
                    pretty_percent,
                    passed_tests,
                    self.failures.len(),
                    self.ignored_tests,
                    total_indent = total.len()
                );
                self.ci_latest_logged_percentage += 0.10;
            }
        }

        self.builder.colored_stdout(|stdout| outcome.write_ci(stdout, &test.name)).unwrap();
        let _ = std::io::stdout().flush();
    }

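    /// Renders the end-of-suite summary: the output of every failed test, the list of
    /// failure names, the benchmark results, and the final result counts.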
    fn render_suite_outcome(&self, outcome: Outcome<'_>, suite: &SuiteOutcome) {
        if !self.builder.config.verbose_tests {
            println!();
        }

        if !self.failures.is_empty() {
            println!("\nfailures:\n");
            for failure in &self.failures {
                if failure.stdout.is_some() || failure.message.is_some() {
                    println!("---- {} stdout ----", failure.name);
                    if let Some(stdout) = &failure.stdout {
                        println!("{stdout}");
                    }
                    if let Some(message) = &failure.message {
                        println!("NOTE: {message}");
                    }
                }
            }

            println!("\nfailures:");
            for failure in &self.failures {
                println!(" {}", failure.name);
            }
        }

        if !self.benches.is_empty() {
            println!("\nbenchmarks:");

            let mut rows = Vec::new();
            for bench in &self.benches {
                rows.push((
                    &bench.name,
                    format!("{:.2?}ns/iter", bench.median),
                    format!("+/- {:.2?}", bench.deviation),
                ));
            }

            let max_0 = rows.iter().map(|r| r.0.len()).max().unwrap_or(0);
            let max_1 = rows.iter().map(|r| r.1.len()).max().unwrap_or(0);
            let max_2 = rows.iter().map(|r| r.2.len()).max().unwrap_or(0);
            for row in &rows {
                println!(" {:<max_0$} {:>max_1$} {:>max_2$}", row.0, row.1, row.2);
            }
        }

        print!("\ntest result: ");
        self.builder.colored_stdout(|stdout| outcome.write_long(stdout)).unwrap();
        println!(
            ". {} passed; {} failed; {} ignored; {} measured; {} filtered out{time}\n",
            suite.passed,
            suite.failed,
            suite.ignored,
            suite.measured,
            suite.filtered_out,
            time = match suite.exec_time {
                Some(t) => format!("; finished in {:.2?}", Duration::from_secs_f64(t)),
                None => String::new(),
            }
        );
    }

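    /// Dispatches a single deserialized [`Message`]: resets the per-suite state when a
    /// suite starts, and routes test, bench, and suite results to the rendering methods
    /// above.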
    fn render_message(&mut self, message: Message) {
        match message {
            Message::Suite(SuiteMessage::Started { test_count }) => {
                println!("\nrunning {test_count} tests");
                self.benches = vec![];
                self.failures = vec![];
                self.ignored_tests = 0;
                self.executed_tests = 0;
                self.terse_tests_in_line = 0;
                self.tests_count = Some(test_count);
            }
            Message::Suite(SuiteMessage::Ok(outcome)) => {
                self.render_suite_outcome(Outcome::Ok, &outcome);
            }
            Message::Suite(SuiteMessage::Failed(outcome)) => {
                self.render_suite_outcome(Outcome::Failed, &outcome);
            }
            Message::Bench(outcome) => {
                let fake_test_outcome = TestOutcome {
                    name: outcome.name.clone(),
                    exec_time: None,
                    stdout: None,
                    message: None,
                };
                self.render_test_outcome(Outcome::BenchOk, &fake_test_outcome);
                self.benches.push(outcome);
            }
            Message::Test(TestMessage::Ok(outcome)) => {
                self.render_test_outcome(Outcome::Ok, &outcome);
            }
            Message::Test(TestMessage::Ignored(outcome)) => {
                self.render_test_outcome(
                    Outcome::Ignored { reason: outcome.message.as_deref() },
                    &outcome,
                );
            }
            Message::Test(TestMessage::Failed(outcome)) => {
                self.render_test_outcome(Outcome::Failed, &outcome);
                self.failures.push(outcome);
            }
            Message::Test(TestMessage::Timeout { name }) => {
                println!("test {name} has been running for a long time");
            }
            Message::Test(TestMessage::Started) => {}
        }
    }
}

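/// The outcome of a single test (or of a whole suite), used to choose the text and color
/// emitted when rendering it.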
enum Outcome<'a> {
    Ok,
    BenchOk,
    Failed,
    Ignored { reason: Option<&'a str> },
}

impl Outcome<'_> {
    fn write_short(&self, writer: &mut dyn WriteColor, name: &str) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
                write!(writer, ".")?;
            }
            Outcome::BenchOk => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Cyan)))?;
                write!(writer, "b")?;
            }
            Outcome::Failed => {
                writeln!(writer)?;
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                writeln!(writer, "{name} ... F")?;
            }
            Outcome::Ignored { .. } => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?;
                write!(writer, "i")?;
            }
        }
        writer.reset()
    }

    fn write_long(&self, writer: &mut dyn WriteColor) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
                write!(writer, "ok")?;
            }
            Outcome::BenchOk => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Cyan)))?;
                write!(writer, "benchmarked")?;
            }
            Outcome::Failed => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                write!(writer, "FAILED")?;
            }
            Outcome::Ignored { reason } => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))?;
                write!(writer, "ignored")?;
                if let Some(reason) = reason {
                    write!(writer, ", {reason}")?;
                }
            }
        }
        writer.reset()
    }

    fn write_ci(&self, writer: &mut dyn WriteColor, name: &str) -> Result<(), std::io::Error> {
        match self {
            Outcome::Ok | Outcome::BenchOk | Outcome::Ignored { .. } => {}
            Outcome::Failed => {
                writer.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
                writeln!(writer, " {name} ... FAILED")?;
            }
        }
        writer.reset()
    }
}

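/// A top-level JSON message emitted by the test runner, with the `type` field selecting
/// between suite, test, and bench events.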
#[derive(serde_derive::Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
enum Message {
    Suite(SuiteMessage),
    Test(TestMessage),
    Bench(BenchOutcome),
}

#[derive(serde_derive::Deserialize)]
#[serde(tag = "event", rename_all = "snake_case")]
enum SuiteMessage {
    Ok(SuiteOutcome),
    Failed(SuiteOutcome),
    Started { test_count: usize },
}

#[derive(serde_derive::Deserialize)]
struct SuiteOutcome {
    passed: usize,
    failed: usize,
    ignored: usize,
    measured: usize,
    filtered_out: usize,
    exec_time: Option<f64>,
}

#[derive(serde_derive::Deserialize)]
#[serde(tag = "event", rename_all = "snake_case")]
enum TestMessage {
    Ok(TestOutcome),
    Failed(TestOutcome),
    Ignored(TestOutcome),
    Timeout { name: String },
    Started,
}

#[derive(serde_derive::Deserialize)]
struct BenchOutcome {
    name: String,
    median: f64,
    deviation: f64,
}

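/// The per-test payload of a test event: the test name plus optional execution time,
/// captured stdout, and message (for example the ignore reason).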
#[derive(serde_derive::Deserialize)]
struct TestOutcome {
    name: String,
    exec_time: Option<f64>,
    stdout: Option<String>,
    message: Option<String>,
}