Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 5786233

Browse files
authored Mar 17, 2025
Rollup merge of #138531 - Kobzol:test-diff-try-build, r=marcoieni
Store test diffs in job summaries and improve analysis formatting This PR stores the test diffs that we already have in the post-merge workflow also into individual job summaries. This makes it easier to compare test (and later also other) diffs per job, which will be especially useful for try jobs, so that we can actually see the test diffs *before* we merge a given PR. As a drive-by, I also made a bunch of cleanups in `citool` and in the formatting of the summary and post-merge analyses. These changes are split into self-contained commits. The analysis can be tested locally with the following command: ```bash $ curl https://ci-artifacts.rust-lang.org/rustc-builds/<current-sha>/metrics-<job-name>.json > metrics.json $ cargo run --manifest-path src/ci/citool/Cargo.toml postprocess-metrics metrics.json --job-name <job-name> --parent <parent-sha> > out.md ``` For example, for [this PR](#138523): ```bash $ curl https://ci-artifacts.rust-lang.org/rustc-builds/282865097d138c7f0f7a7566db5b761312dd145c/metrics-aarch64-gnu.json > metrics.json $ cargo run --manifest-path src/ci/citool/Cargo.toml postprocess-metrics metrics.json --job-name aarch64-gnu --parent d9e5539 > out.md ``` Best reviewed commit by commit. r? `@marcoieni` try-job: aarch64-gnu try-job: dist-x86_64-linux
2 parents 01062ba + c9d3147 commit 5786233

File tree

6 files changed

+532
-472
lines changed

6 files changed

+532
-472
lines changed
 

‎.github/workflows/ci.yml

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -239,16 +239,31 @@ jobs:
239239
if: github.event_name == 'push' || env.DEPLOY == '1' || env.DEPLOY_ALT == '1'
240240

241241
- name: postprocess metrics into the summary
242+
# This step is not critical, and if some I/O problem happens, we don't want
243+
# to cancel the build.
244+
continue-on-error: true
242245
run: |
243246
if [ -f build/metrics.json ]; then
244-
./build/citool/debug/citool postprocess-metrics build/metrics.json ${GITHUB_STEP_SUMMARY}
247+
METRICS=build/metrics.json
245248
elif [ -f obj/build/metrics.json ]; then
246-
./build/citool/debug/citool postprocess-metrics obj/build/metrics.json ${GITHUB_STEP_SUMMARY}
249+
METRICS=obj/build/metrics.json
247250
else
248251
echo "No metrics.json found"
252+
exit 0
249253
fi
250254
255+
# Get closest bors merge commit
256+
PARENT_COMMIT=`git rev-list --author='bors <bors@rust-lang.org>' -n1 --first-parent HEAD^1`
257+
258+
./build/citool/debug/citool postprocess-metrics \
259+
--job-name ${CI_JOB_NAME} \
260+
--parent ${PARENT_COMMIT} \
261+
${METRICS} >> ${GITHUB_STEP_SUMMARY}
262+
251263
- name: upload job metrics to DataDog
264+
# This step is not critical, and if some I/O problem happens, we don't want
265+
# to cancel the build.
266+
continue-on-error: true
252267
if: needs.calculate_matrix.outputs.run_type != 'pr'
253268
env:
254269
DATADOG_API_KEY: ${{ secrets.DATADOG_API_KEY }}

‎src/ci/citool/src/analysis.rs

Lines changed: 362 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,362 @@
1+
use std::collections::{BTreeMap, HashMap, HashSet};
2+
3+
use build_helper::metrics::{
4+
BuildStep, JsonRoot, TestOutcome, TestSuite, TestSuiteMetadata, format_build_steps,
5+
};
6+
7+
use crate::metrics;
8+
use crate::metrics::{JobMetrics, JobName, get_test_suites};
9+
use crate::utils::{output_details, pluralize};
10+
11+
pub fn output_bootstrap_stats(metrics: &JsonRoot) {
12+
if !metrics.invocations.is_empty() {
13+
println!("# Bootstrap steps");
14+
record_bootstrap_step_durations(&metrics);
15+
record_test_suites(&metrics);
16+
}
17+
}
18+
19+
fn record_bootstrap_step_durations(metrics: &JsonRoot) {
20+
for invocation in &metrics.invocations {
21+
let step = BuildStep::from_invocation(invocation);
22+
let table = format_build_steps(&step);
23+
eprintln!("Step `{}`\n{table}\n", invocation.cmdline);
24+
output_details(&invocation.cmdline, || {
25+
println!("<pre><code>{table}</code></pre>");
26+
});
27+
}
28+
eprintln!("Recorded {} bootstrap invocation(s)", metrics.invocations.len());
29+
}
30+
31+
fn record_test_suites(metrics: &JsonRoot) {
32+
let suites = metrics::get_test_suites(&metrics);
33+
34+
if !suites.is_empty() {
35+
let aggregated = aggregate_test_suites(&suites);
36+
let table = render_table(aggregated);
37+
println!("\n# Test results\n");
38+
println!("{table}");
39+
} else {
40+
eprintln!("No test suites found in metrics");
41+
}
42+
}
43+
44+
fn render_table(suites: BTreeMap<String, TestSuiteRecord>) -> String {
45+
use std::fmt::Write;
46+
47+
let mut table = "| Test suite | Passed ✅ | Ignored 🚫 | Failed ❌ |\n".to_string();
48+
writeln!(table, "|:------|------:|------:|------:|").unwrap();
49+
50+
fn compute_pct(value: f64, total: f64) -> f64 {
51+
if total == 0.0 { 0.0 } else { value / total }
52+
}
53+
54+
fn write_row(
55+
buffer: &mut String,
56+
name: &str,
57+
record: &TestSuiteRecord,
58+
surround: &str,
59+
) -> std::fmt::Result {
60+
let TestSuiteRecord { passed, ignored, failed } = record;
61+
let total = (record.passed + record.ignored + record.failed) as f64;
62+
let passed_pct = compute_pct(*passed as f64, total) * 100.0;
63+
let ignored_pct = compute_pct(*ignored as f64, total) * 100.0;
64+
let failed_pct = compute_pct(*failed as f64, total) * 100.0;
65+
66+
write!(buffer, "| {surround}{name}{surround} |")?;
67+
write!(buffer, " {surround}{passed} ({passed_pct:.0}%){surround} |")?;
68+
write!(buffer, " {surround}{ignored} ({ignored_pct:.0}%){surround} |")?;
69+
writeln!(buffer, " {surround}{failed} ({failed_pct:.0}%){surround} |")?;
70+
71+
Ok(())
72+
}
73+
74+
let mut total = TestSuiteRecord::default();
75+
for (name, record) in suites {
76+
write_row(&mut table, &name, &record, "").unwrap();
77+
total.passed += record.passed;
78+
total.ignored += record.ignored;
79+
total.failed += record.failed;
80+
}
81+
write_row(&mut table, "Total", &total, "**").unwrap();
82+
table
83+
}
84+
85+
/// Computes a post merge CI analysis report of test differences
86+
/// between the `parent` and `current` commits.
87+
pub fn output_test_diffs(job_metrics: HashMap<JobName, JobMetrics>) {
88+
let aggregated_test_diffs = aggregate_test_diffs(&job_metrics);
89+
report_test_diffs(aggregated_test_diffs);
90+
}
91+
92+
/// Per-suite counters of test outcomes.
#[derive(Default)]
struct TestSuiteRecord {
    // Number of tests that passed.
    passed: u64,
    // Number of tests that were ignored.
    ignored: u64,
    // Number of tests that failed.
    failed: u64,
}
98+
99+
fn test_metadata_name(metadata: &TestSuiteMetadata) -> String {
100+
match metadata {
101+
TestSuiteMetadata::CargoPackage { crates, stage, .. } => {
102+
format!("{} (stage {stage})", crates.join(", "))
103+
}
104+
TestSuiteMetadata::Compiletest { suite, stage, .. } => {
105+
format!("{suite} (stage {stage})")
106+
}
107+
}
108+
}
109+
110+
fn aggregate_test_suites(suites: &[&TestSuite]) -> BTreeMap<String, TestSuiteRecord> {
111+
let mut records: BTreeMap<String, TestSuiteRecord> = BTreeMap::new();
112+
for suite in suites {
113+
let name = test_metadata_name(&suite.metadata);
114+
let record = records.entry(name).or_default();
115+
for test in &suite.tests {
116+
match test.outcome {
117+
TestOutcome::Passed => {
118+
record.passed += 1;
119+
}
120+
TestOutcome::Failed => {
121+
record.failed += 1;
122+
}
123+
TestOutcome::Ignored { .. } => {
124+
record.ignored += 1;
125+
}
126+
}
127+
}
128+
}
129+
records
130+
}
131+
132+
/// Represents a difference in the outcome of tests between a base and a current commit.
133+
/// Maps test diffs to jobs that contained them.
134+
#[derive(Debug)]
135+
struct AggregatedTestDiffs {
136+
diffs: HashMap<TestDiff, Vec<JobName>>,
137+
}
138+
139+
fn aggregate_test_diffs(jobs: &HashMap<JobName, JobMetrics>) -> AggregatedTestDiffs {
140+
let mut diffs: HashMap<TestDiff, Vec<JobName>> = HashMap::new();
141+
142+
// Aggregate test suites
143+
for (name, metrics) in jobs {
144+
if let Some(parent) = &metrics.parent {
145+
let tests_parent = aggregate_tests(parent);
146+
let tests_current = aggregate_tests(&metrics.current);
147+
for diff in calculate_test_diffs(tests_parent, tests_current) {
148+
diffs.entry(diff).or_default().push(name.to_string());
149+
}
150+
}
151+
}
152+
153+
AggregatedTestDiffs { diffs }
154+
}
155+
156+
#[derive(Eq, PartialEq, Hash, Debug)]
157+
enum TestOutcomeDiff {
158+
ChangeOutcome { before: TestOutcome, after: TestOutcome },
159+
Missing { before: TestOutcome },
160+
Added(TestOutcome),
161+
}
162+
163+
#[derive(Eq, PartialEq, Hash, Debug)]
164+
struct TestDiff {
165+
test: Test,
166+
diff: TestOutcomeDiff,
167+
}
168+
169+
fn calculate_test_diffs(parent: TestSuiteData, current: TestSuiteData) -> HashSet<TestDiff> {
170+
let mut diffs = HashSet::new();
171+
for (test, outcome) in &current.tests {
172+
match parent.tests.get(test) {
173+
Some(before) => {
174+
if before != outcome {
175+
diffs.insert(TestDiff {
176+
test: test.clone(),
177+
diff: TestOutcomeDiff::ChangeOutcome {
178+
before: before.clone(),
179+
after: outcome.clone(),
180+
},
181+
});
182+
}
183+
}
184+
None => {
185+
diffs.insert(TestDiff {
186+
test: test.clone(),
187+
diff: TestOutcomeDiff::Added(outcome.clone()),
188+
});
189+
}
190+
}
191+
}
192+
for (test, outcome) in &parent.tests {
193+
if !current.tests.contains_key(test) {
194+
diffs.insert(TestDiff {
195+
test: test.clone(),
196+
diff: TestOutcomeDiff::Missing { before: outcome.clone() },
197+
});
198+
}
199+
}
200+
201+
diffs
202+
}
203+
204+
/// Aggregates test suite executions from all bootstrap invocations in a given CI job.
205+
#[derive(Default)]
206+
struct TestSuiteData {
207+
tests: HashMap<Test, TestOutcome>,
208+
}
209+
210+
#[derive(Hash, PartialEq, Eq, Debug, Clone)]
211+
struct Test {
212+
name: String,
213+
is_doctest: bool,
214+
}
215+
216+
/// Extracts all tests from the passed metrics and map them to their outcomes.
217+
fn aggregate_tests(metrics: &JsonRoot) -> TestSuiteData {
218+
let mut tests = HashMap::new();
219+
let test_suites = get_test_suites(&metrics);
220+
for suite in test_suites {
221+
for test in &suite.tests {
222+
// Poor man's detection of doctests based on the "(line XYZ)" suffix
223+
let is_doctest = matches!(suite.metadata, TestSuiteMetadata::CargoPackage { .. })
224+
&& test.name.contains("(line");
225+
let test_entry = Test { name: generate_test_name(&test.name, &suite), is_doctest };
226+
tests.insert(test_entry, test.outcome.clone());
227+
}
228+
}
229+
TestSuiteData { tests }
230+
}
231+
232+
/// Normalizes Windows-style path delimiters to Unix-style paths
233+
/// and adds suite metadata to the test name.
234+
fn generate_test_name(name: &str, suite: &TestSuite) -> String {
235+
let name = name.replace('\\', "/");
236+
let stage = match suite.metadata {
237+
TestSuiteMetadata::CargoPackage { stage, .. } => stage,
238+
TestSuiteMetadata::Compiletest { stage, .. } => stage,
239+
};
240+
241+
format!("{name} (stage {stage})")
242+
}
243+
244+
/// Prints test changes in Markdown format to stdout.
245+
fn report_test_diffs(diff: AggregatedTestDiffs) {
246+
println!("# Test differences");
247+
if diff.diffs.is_empty() {
248+
println!("No test diffs found");
249+
return;
250+
}
251+
252+
fn format_outcome(outcome: &TestOutcome) -> String {
253+
match outcome {
254+
TestOutcome::Passed => "pass".to_string(),
255+
TestOutcome::Failed => "fail".to_string(),
256+
TestOutcome::Ignored { ignore_reason } => {
257+
let reason = match ignore_reason {
258+
Some(reason) => format!(" ({reason})"),
259+
None => String::new(),
260+
};
261+
format!("ignore{reason}")
262+
}
263+
}
264+
}
265+
266+
fn format_diff(diff: &TestOutcomeDiff) -> String {
267+
match diff {
268+
TestOutcomeDiff::ChangeOutcome { before, after } => {
269+
format!("{} -> {}", format_outcome(before), format_outcome(after))
270+
}
271+
TestOutcomeDiff::Missing { before } => {
272+
format!("{} -> [missing]", format_outcome(before))
273+
}
274+
TestOutcomeDiff::Added(outcome) => {
275+
format!("[missing] -> {}", format_outcome(outcome))
276+
}
277+
}
278+
}
279+
280+
fn format_job_group(group: u64) -> String {
281+
format!("**J{group}**")
282+
}
283+
284+
// It would be quite noisy to repeat the jobs that contained the test changes after/next to
285+
// every test diff. At the same time, grouping the test diffs by
286+
// [unique set of jobs that contained them] also doesn't work well, because the test diffs
287+
// would have to be duplicated several times.
288+
// Instead, we create a set of unique job groups, and then print a job group after each test.
289+
// We then print the job groups at the end, as a sort of index.
290+
let mut grouped_diffs: Vec<(&TestDiff, u64)> = vec![];
291+
let mut job_list_to_group: HashMap<&[JobName], u64> = HashMap::new();
292+
let mut job_index: Vec<&[JobName]> = vec![];
293+
294+
let original_diff_count = diff.diffs.len();
295+
let diffs = diff
296+
.diffs
297+
.into_iter()
298+
.filter(|(diff, _)| !diff.test.is_doctest)
299+
.map(|(diff, mut jobs)| {
300+
jobs.sort();
301+
(diff, jobs)
302+
})
303+
.collect::<Vec<_>>();
304+
let doctest_count = original_diff_count.saturating_sub(diffs.len());
305+
306+
let max_diff_count = 100;
307+
for (diff, jobs) in diffs.iter().take(max_diff_count) {
308+
let jobs = &*jobs;
309+
let job_group = match job_list_to_group.get(jobs.as_slice()) {
310+
Some(id) => *id,
311+
None => {
312+
let id = job_index.len() as u64;
313+
job_index.push(jobs);
314+
job_list_to_group.insert(jobs, id);
315+
id
316+
}
317+
};
318+
grouped_diffs.push((diff, job_group));
319+
}
320+
321+
// Sort diffs by job group and test name
322+
grouped_diffs.sort_by(|(d1, g1), (d2, g2)| g1.cmp(&g2).then(d1.test.name.cmp(&d2.test.name)));
323+
324+
output_details(
325+
&format!("Show {} test {}\n", original_diff_count, pluralize("diff", original_diff_count)),
326+
|| {
327+
for (diff, job_group) in grouped_diffs {
328+
println!(
329+
"- `{}`: {} ({})",
330+
diff.test.name,
331+
format_diff(&diff.diff),
332+
format_job_group(job_group)
333+
);
334+
}
335+
336+
let extra_diffs = diffs.len().saturating_sub(max_diff_count);
337+
if extra_diffs > 0 {
338+
println!(
339+
"\n(and {extra_diffs} additional {})",
340+
pluralize("test diff", extra_diffs)
341+
);
342+
}
343+
344+
if doctest_count > 0 {
345+
println!(
346+
"\nAdditionally, {doctest_count} doctest {} were found. These are ignored, as they are noisy.",
347+
pluralize("diff", doctest_count)
348+
);
349+
}
350+
351+
// Now print the job group index
352+
println!("\n**Job group index**\n");
353+
for (group, jobs) in job_index.into_iter().enumerate() {
354+
println!(
355+
"- {}: {}",
356+
format_job_group(group as u64),
357+
jobs.iter().map(|j| format!("`{j}`")).collect::<Vec<_>>().join(", ")
358+
);
359+
}
360+
},
361+
);
362+
}

‎src/ci/citool/src/main.rs

Lines changed: 70 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,25 +1,26 @@
1+
mod analysis;
12
mod cpu_usage;
23
mod datadog;
34
mod jobs;
4-
mod merge_report;
55
mod metrics;
66
mod utils;
77

8-
use std::collections::BTreeMap;
8+
use std::collections::{BTreeMap, HashMap};
99
use std::path::{Path, PathBuf};
1010
use std::process::Command;
1111

12+
use analysis::output_bootstrap_stats;
1213
use anyhow::Context;
1314
use clap::Parser;
1415
use jobs::JobDatabase;
1516
use serde_yaml::Value;
1617

18+
use crate::analysis::output_test_diffs;
1719
use crate::cpu_usage::load_cpu_usage;
1820
use crate::datadog::upload_datadog_metric;
1921
use crate::jobs::RunType;
20-
use crate::merge_report::post_merge_report;
21-
use crate::metrics::postprocess_metrics;
22-
use crate::utils::load_env_var;
22+
use crate::metrics::{JobMetrics, download_auto_job_metrics, download_job_metrics, load_metrics};
23+
use crate::utils::{load_env_var, output_details};
2324

2425
const CI_DIRECTORY: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/..");
2526
const DOCKER_DIRECTORY: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/../docker");
@@ -137,6 +138,56 @@ fn upload_ci_metrics(cpu_usage_csv: &Path) -> anyhow::Result<()> {
137138
Ok(())
138139
}
139140

141+
fn postprocess_metrics(
142+
metrics_path: PathBuf,
143+
parent: Option<String>,
144+
job_name: Option<String>,
145+
) -> anyhow::Result<()> {
146+
let metrics = load_metrics(&metrics_path)?;
147+
output_bootstrap_stats(&metrics);
148+
149+
let (Some(parent), Some(job_name)) = (parent, job_name) else {
150+
return Ok(());
151+
};
152+
153+
// This command is executed also on PR builds, which might not have parent metrics
154+
// available, because some PR jobs don't run on auto builds, and PR jobs do not upload metrics
155+
// due to missing permissions.
156+
// To avoid having to detect if this is a PR job, and to avoid having failed steps in PR jobs,
157+
// we simply print an error if the parent metrics were not found, but otherwise exit
158+
// successfully.
159+
match download_job_metrics(&job_name, &parent).context("cannot download parent metrics") {
160+
Ok(parent_metrics) => {
161+
let job_metrics = HashMap::from([(
162+
job_name,
163+
JobMetrics { parent: Some(parent_metrics), current: metrics },
164+
)]);
165+
output_test_diffs(job_metrics);
166+
}
167+
Err(error) => {
168+
eprintln!("Metrics for job `{job_name}` and commit `{parent}` not found: {error:?}");
169+
}
170+
}
171+
172+
Ok(())
173+
}
174+
175+
fn post_merge_report(db: JobDatabase, current: String, parent: String) -> anyhow::Result<()> {
176+
let metrics = download_auto_job_metrics(&db, &parent, &current)?;
177+
178+
output_details("What is this?", || {
179+
println!(
180+
r#"This is an experimental post-merge analysis report that shows differences in
181+
test outcomes between the merged PR and its parent PR."#
182+
);
183+
});
184+
185+
println!("\nComparing {parent} (parent) -> {current} (this PR)\n");
186+
output_test_diffs(metrics);
187+
188+
Ok(())
189+
}
190+
140191
#[derive(clap::Parser)]
141192
enum Args {
142193
/// Calculate a list of jobs that should be executed on CI.
@@ -154,13 +205,19 @@ enum Args {
154205
#[clap(long = "type", default_value = "auto")]
155206
job_type: JobType,
156207
},
157-
/// Postprocess the metrics.json file generated by bootstrap.
208+
/// Postprocess the metrics.json file generated by bootstrap and output
209+
/// various statistics.
210+
/// If `--parent` and `--job-name` are provided, also display a diff
211+
/// against previous metrics that are downloaded from CI.
158212
PostprocessMetrics {
159213
/// Path to the metrics.json file
160214
metrics_path: PathBuf,
161-
/// Path to a file where the postprocessed metrics summary will be stored.
162-
/// Usually, this will be GITHUB_STEP_SUMMARY on CI.
163-
summary_path: PathBuf,
215+
/// A parent SHA against which to compare.
216+
#[clap(long, requires("job_name"))]
217+
parent: Option<String>,
218+
/// The name of the current job.
219+
#[clap(long, requires("parent"))]
220+
job_name: Option<String>,
164221
},
165222
/// Upload CI metrics to Datadog.
166223
UploadBuildMetrics {
@@ -211,11 +268,11 @@ fn main() -> anyhow::Result<()> {
211268
Args::UploadBuildMetrics { cpu_usage_csv } => {
212269
upload_ci_metrics(&cpu_usage_csv)?;
213270
}
214-
Args::PostprocessMetrics { metrics_path, summary_path } => {
215-
postprocess_metrics(&metrics_path, &summary_path)?;
271+
Args::PostprocessMetrics { metrics_path, parent, job_name } => {
272+
postprocess_metrics(metrics_path, parent, job_name)?;
216273
}
217-
Args::PostMergeReport { current: commit, parent } => {
218-
post_merge_report(load_db(default_jobs_file)?, parent, commit)?;
274+
Args::PostMergeReport { current, parent } => {
275+
post_merge_report(load_db(default_jobs_file)?, current, parent)?;
219276
}
220277
}
221278

‎src/ci/citool/src/merge_report.rs

Lines changed: 0 additions & 318 deletions
This file was deleted.

‎src/ci/citool/src/metrics.rs

Lines changed: 63 additions & 139 deletions
Original file line numberDiff line numberDiff line change
@@ -1,146 +1,12 @@
1-
use std::collections::BTreeMap;
2-
use std::fs::File;
3-
use std::io::Write;
1+
use std::collections::HashMap;
42
use std::path::Path;
53

64
use anyhow::Context;
7-
use build_helper::metrics::{
8-
BuildStep, JsonNode, JsonRoot, TestOutcome, TestSuite, TestSuiteMetadata, format_build_steps,
9-
};
5+
use build_helper::metrics::{JsonNode, JsonRoot, TestSuite};
106

11-
pub fn postprocess_metrics(metrics_path: &Path, summary_path: &Path) -> anyhow::Result<()> {
12-
let metrics = load_metrics(metrics_path)?;
7+
use crate::jobs::JobDatabase;
138

14-
let mut file = File::options()
15-
.append(true)
16-
.create(true)
17-
.open(summary_path)
18-
.with_context(|| format!("Cannot open summary file at {summary_path:?}"))?;
19-
20-
if !metrics.invocations.is_empty() {
21-
writeln!(file, "# Bootstrap steps")?;
22-
record_bootstrap_step_durations(&metrics, &mut file)?;
23-
record_test_suites(&metrics, &mut file)?;
24-
}
25-
26-
Ok(())
27-
}
28-
29-
fn record_bootstrap_step_durations(metrics: &JsonRoot, file: &mut File) -> anyhow::Result<()> {
30-
for invocation in &metrics.invocations {
31-
let step = BuildStep::from_invocation(invocation);
32-
let table = format_build_steps(&step);
33-
eprintln!("Step `{}`\n{table}\n", invocation.cmdline);
34-
writeln!(
35-
file,
36-
r"<details>
37-
<summary>{}</summary>
38-
<pre><code>{table}</code></pre>
39-
</details>
40-
",
41-
invocation.cmdline
42-
)?;
43-
}
44-
eprintln!("Recorded {} bootstrap invocation(s)", metrics.invocations.len());
45-
46-
Ok(())
47-
}
48-
49-
fn record_test_suites(metrics: &JsonRoot, file: &mut File) -> anyhow::Result<()> {
50-
let suites = get_test_suites(&metrics);
51-
52-
if !suites.is_empty() {
53-
let aggregated = aggregate_test_suites(&suites);
54-
let table = render_table(aggregated);
55-
writeln!(file, "\n# Test results\n")?;
56-
writeln!(file, "{table}")?;
57-
} else {
58-
eprintln!("No test suites found in metrics");
59-
}
60-
61-
Ok(())
62-
}
63-
64-
fn render_table(suites: BTreeMap<String, TestSuiteRecord>) -> String {
65-
use std::fmt::Write;
66-
67-
let mut table = "| Test suite | Passed ✅ | Ignored 🚫 | Failed ❌ |\n".to_string();
68-
writeln!(table, "|:------|------:|------:|------:|").unwrap();
69-
70-
fn compute_pct(value: f64, total: f64) -> f64 {
71-
if total == 0.0 { 0.0 } else { value / total }
72-
}
73-
74-
fn write_row(
75-
buffer: &mut String,
76-
name: &str,
77-
record: &TestSuiteRecord,
78-
surround: &str,
79-
) -> std::fmt::Result {
80-
let TestSuiteRecord { passed, ignored, failed } = record;
81-
let total = (record.passed + record.ignored + record.failed) as f64;
82-
let passed_pct = compute_pct(*passed as f64, total) * 100.0;
83-
let ignored_pct = compute_pct(*ignored as f64, total) * 100.0;
84-
let failed_pct = compute_pct(*failed as f64, total) * 100.0;
85-
86-
write!(buffer, "| {surround}{name}{surround} |")?;
87-
write!(buffer, " {surround}{passed} ({passed_pct:.0}%){surround} |")?;
88-
write!(buffer, " {surround}{ignored} ({ignored_pct:.0}%){surround} |")?;
89-
writeln!(buffer, " {surround}{failed} ({failed_pct:.0}%){surround} |")?;
90-
91-
Ok(())
92-
}
93-
94-
let mut total = TestSuiteRecord::default();
95-
for (name, record) in suites {
96-
write_row(&mut table, &name, &record, "").unwrap();
97-
total.passed += record.passed;
98-
total.ignored += record.ignored;
99-
total.failed += record.failed;
100-
}
101-
write_row(&mut table, "Total", &total, "**").unwrap();
102-
table
103-
}
104-
105-
#[derive(Default)]
106-
struct TestSuiteRecord {
107-
passed: u64,
108-
ignored: u64,
109-
failed: u64,
110-
}
111-
112-
fn test_metadata_name(metadata: &TestSuiteMetadata) -> String {
113-
match metadata {
114-
TestSuiteMetadata::CargoPackage { crates, stage, .. } => {
115-
format!("{} (stage {stage})", crates.join(", "))
116-
}
117-
TestSuiteMetadata::Compiletest { suite, stage, .. } => {
118-
format!("{suite} (stage {stage})")
119-
}
120-
}
121-
}
122-
123-
fn aggregate_test_suites(suites: &[&TestSuite]) -> BTreeMap<String, TestSuiteRecord> {
124-
let mut records: BTreeMap<String, TestSuiteRecord> = BTreeMap::new();
125-
for suite in suites {
126-
let name = test_metadata_name(&suite.metadata);
127-
let record = records.entry(name).or_default();
128-
for test in &suite.tests {
129-
match test.outcome {
130-
TestOutcome::Passed => {
131-
record.passed += 1;
132-
}
133-
TestOutcome::Failed => {
134-
record.failed += 1;
135-
}
136-
TestOutcome::Ignored { .. } => {
137-
record.ignored += 1;
138-
}
139-
}
140-
}
141-
}
142-
records
143-
}
9+
pub type JobName = String;
14410

14511
pub fn get_test_suites(metrics: &JsonRoot) -> Vec<&TestSuite> {
14612
fn visit_test_suites<'a>(nodes: &'a [JsonNode], suites: &mut Vec<&'a TestSuite>) {
@@ -163,10 +29,68 @@ pub fn get_test_suites(metrics: &JsonRoot) -> Vec<&TestSuite> {
16329
suites
16430
}
16531

166-
fn load_metrics(path: &Path) -> anyhow::Result<JsonRoot> {
32+
pub fn load_metrics(path: &Path) -> anyhow::Result<JsonRoot> {
16733
let metrics = std::fs::read_to_string(path)
16834
.with_context(|| format!("Cannot read JSON metrics from {path:?}"))?;
16935
let metrics: JsonRoot = serde_json::from_str(&metrics)
17036
.with_context(|| format!("Cannot deserialize JSON metrics from {path:?}"))?;
17137
Ok(metrics)
17238
}
39+
40+
pub struct JobMetrics {
41+
pub parent: Option<JsonRoot>,
42+
pub current: JsonRoot,
43+
}
44+
45+
/// Download before/after metrics for all auto jobs in the job database.
46+
/// `parent` and `current` should be commit SHAs.
47+
pub fn download_auto_job_metrics(
48+
job_db: &JobDatabase,
49+
parent: &str,
50+
current: &str,
51+
) -> anyhow::Result<HashMap<JobName, JobMetrics>> {
52+
let mut jobs = HashMap::default();
53+
54+
for job in &job_db.auto_jobs {
55+
eprintln!("Downloading metrics of job {}", job.name);
56+
let metrics_parent = match download_job_metrics(&job.name, parent) {
57+
Ok(metrics) => Some(metrics),
58+
Err(error) => {
59+
eprintln!(
60+
r#"Did not find metrics for job `{}` at `{parent}`: {error:?}.
61+
Maybe it was newly added?"#,
62+
job.name
63+
);
64+
None
65+
}
66+
};
67+
let metrics_current = download_job_metrics(&job.name, current)?;
68+
jobs.insert(
69+
job.name.clone(),
70+
JobMetrics { parent: metrics_parent, current: metrics_current },
71+
);
72+
}
73+
Ok(jobs)
74+
}
75+
76+
/// Downloads the metrics file of job `job_name` at commit `sha` from CI artifacts.
pub fn download_job_metrics(job_name: &str, sha: &str) -> anyhow::Result<JsonRoot> {
    let url = get_metrics_url(job_name, sha);
    let mut response = ureq::get(&url).call()?;
    if !response.status().is_success() {
        // Include the response body in the error to aid debugging.
        return Err(anyhow::anyhow!(
            "Cannot fetch metrics from {url}: {}\n{}",
            response.status(),
            response.body_mut().read_to_string()?
        ));
    }
    response
        .body_mut()
        .read_json()
        .with_context(|| anyhow::anyhow!("cannot deserialize metrics from {url}"))
}
92+
93+
/// Builds the CI artifacts URL of the metrics file for the given job and commit.
fn get_metrics_url(job_name: &str, sha: &str) -> String {
    // Jobs whose name ends with "-alt" live under the "rustc-builds-alt" prefix.
    let suffix = if job_name.ends_with("-alt") { "-alt" } else { "" };
    format!("https://ci-artifacts.rust-lang.org/rustc-builds{suffix}/{sha}/metrics-{job_name}.json")
}

‎src/ci/citool/src/utils.rs

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,3 +9,23 @@ pub fn load_env_var(name: &str) -> anyhow::Result<String> {
99
pub fn read_to_string<P: AsRef<Path>>(path: P) -> anyhow::Result<String> {
1010
std::fs::read_to_string(&path).with_context(|| format!("Cannot read file {:?}", path.as_ref()))
1111
}
12+
13+
/// Returns `text` unchanged for a count of one; otherwise appends an "s".
pub fn pluralize(text: &str, count: usize) -> String {
    match count {
        1 => text.to_string(),
        _ => format!("{text}s"),
    }
}
16+
17+
/// Outputs an HTML `<details>` section with the provided summary to stdout.
/// Whatever `func` prints ends up inside the collapsible section.
pub fn output_details<F>(summary: &str, func: F)
where
    F: FnOnce(),
{
    println!(
        r"<details>
<summary>{summary}</summary>

"
    );
    func();
    println!("</details>\n");
}

0 commit comments

Comments
 (0)
Please sign in to comment.