Commit 08530d3 (committed Mar 8, 2025)

Move coroutine layout logic to rustc_abi

1 parent: b8a2170

3 files changed: +373 -334 lines

compiler/rustc_abi/src/layout.rs

Lines changed: 30 additions & 0 deletions

@@ -4,6 +4,7 @@ use std::{cmp, iter};
 
 use rustc_hashes::Hash64;
 use rustc_index::Idx;
+use rustc_index::bit_set::BitMatrix;
 use tracing::debug;
 
 use crate::{

@@ -12,6 +13,7 @@ use crate::{
     Variants, WrappingRange,
 };
 
+mod coroutine;
 mod simple;
 
 #[cfg(feature = "nightly")]

@@ -200,6 +202,34 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         })
     }
 
+    /// Compute the layout for a coroutine.
+    ///
+    /// This uses dedicated code instead of [`Self::layout_of_struct_or_enum`], as coroutine
+    /// fields may be shared between multiple variants (see the [`coroutine`] module for details).
+    pub fn coroutine<
+        'a,
+        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
+        VariantIdx: Idx,
+        FieldIdx: Idx,
+        LocalIdx: Idx,
+    >(
+        &self,
+        local_layouts: &IndexSlice<LocalIdx, F>,
+        prefix_layouts: IndexVec<FieldIdx, F>,
+        variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
+        storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
+        tag_to_layout: impl Fn(Scalar) -> F,
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        coroutine::layout(
+            self,
+            local_layouts,
+            prefix_layouts,
+            variant_fields,
+            storage_conflicts,
+            tag_to_layout,
+        )
+    }
+
     pub fn univariant<
         'a,
         FieldIdx: Idx,
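
For orientation, the call shape of the new entry point, condensed from the
updated call site in compiler/rustc_ty_utils/src/layout.rs later in this
commit (the caller's `cx`, `tcx`, `local_layouts`, `prefix_layouts`, and
`info` are assumed in scope; error handling and the randomization-seed
post-processing are elided here):

let layout = cx.calc.coroutine(
    &local_layouts,
    prefix_layouts,
    &info.variant_fields,
    &info.storage_conflicts,
    |tag| TyAndLayout {
        ty: tag.primitive().to_ty(tcx),
        layout: tcx.mk_layout(LayoutData::scalar(cx, tag)),
    },
);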

compiler/rustc_abi/src/layout/coroutine.rs (new file)

Lines changed: 320 additions & 0 deletions

//! Coroutine layout logic.
//!
//! When laying out coroutines, we divide our saved local fields into two
//! categories: overlap-eligible and overlap-ineligible.
//!
//! Those fields which are ineligible for overlap go in a "prefix" at the
//! beginning of the layout, and always have space reserved for them.
//!
//! Overlap-eligible fields are only assigned to one variant, so we lay
//! those fields out for each variant and put them right after the
//! prefix.
//!
//! Finally, in the layout details, we point to the fields from the
//! variants they are assigned to. It is possible for some fields to be
//! included in multiple variants. No field ever "moves around" in the
//! layout; its offset is always the same.
//!
//! Also included in the layout are the upvars and the discriminant.
//! These are included as fields on the "outer" layout; they are not part
//! of any variant.
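//!
//! Schematically, with one upvar, a saved local `a` that is live across two
//! suspension points (hence ineligible and promoted), and locals `b` and `c`
//! that are each live in a single, distinct variant (hence eligible), the
//! resulting layout looks roughly like this (illustrative, not normative):
//!
//!     prefix:    [ upvar | tag | a ]   // identical offsets in every variant
//!     variant 0: [ ...prefix... | b ]
//!     variant 1: [ ...prefix... | c ]  // `c` may reuse `b`'s offset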

use std::iter;

use rustc_index::bit_set::{BitMatrix, DenseBitSet};
use rustc_index::{Idx, IndexSlice, IndexVec};
use tracing::{debug, trace};

use crate::{
    BackendRepr, FieldsShape, HasDataLayout, Integer, LayoutData, Primitive, ReprOptions, Scalar,
    StructKind, TagEncoding, Variants, WrappingRange,
};

/// Overlap eligibility and variant assignment for each CoroutineSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility<VariantIdx, FieldIdx> {
    Unassigned,
    Assigned(VariantIdx),
    Ineligible(Option<FieldIdx>),
}

/// Compute the eligibility and assignment of each local.
fn coroutine_saved_local_eligibility<VariantIdx: Idx, FieldIdx: Idx, LocalIdx: Idx>(
    nb_locals: usize,
    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
) -> (DenseBitSet<LocalIdx>, IndexVec<LocalIdx, SavedLocalEligibility<VariantIdx, FieldIdx>>) {
    use SavedLocalEligibility::*;

    let mut assignments: IndexVec<LocalIdx, _> = IndexVec::from_elem_n(Unassigned, nb_locals);

    // The saved locals not eligible for overlap. These will get
    // "promoted" to the prefix of our coroutine.
    let mut ineligible_locals = DenseBitSet::new_empty(nb_locals);

    // Figure out which of our saved locals are fields in only
    // one variant. The rest are deemed ineligible for overlap.
    for (variant_index, fields) in variant_fields.iter_enumerated() {
        for local in fields {
            match assignments[*local] {
                Unassigned => {
                    assignments[*local] = Assigned(variant_index);
                }
                Assigned(idx) => {
                    // We've already seen this local at another suspension
                    // point, so it is no longer a candidate.
                    trace!(
                        "removing local {:?} in >1 variant ({:?}, {:?})",
                        local, variant_index, idx
                    );
                    ineligible_locals.insert(*local);
                    assignments[*local] = Ineligible(None);
                }
                Ineligible(_) => {}
            }
        }
    }

    // Next, check every pair of eligible locals to see if they
    // conflict.
    for local_a in storage_conflicts.rows() {
        let conflicts_a = storage_conflicts.count(local_a);
        if ineligible_locals.contains(local_a) {
            continue;
        }

        for local_b in storage_conflicts.iter(local_a) {
            // local_a and local_b are storage live at the same time, therefore they
            // cannot overlap in the coroutine layout. The only way to guarantee
            // this is if they are in the same variant, or one is ineligible
            // (which means it is stored in every variant).
            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
                continue;
            }

            // If they conflict, we will choose one to make ineligible.
            // This is not always optimal; it's just a greedy heuristic that
            // seems to produce good results most of the time.
            let conflicts_b = storage_conflicts.count(local_b);
            let (remove, other) =
                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
            ineligible_locals.insert(remove);
            assignments[remove] = Ineligible(None);
            trace!("removing local {:?} due to conflict with {:?}", remove, other);
        }
    }

    // Count the number of variants in use. If only one of them, then it is
    // impossible to overlap any locals in our layout. In this case it's
    // always better to make the remaining locals ineligible, so we can
    // lay them out with the other locals in the prefix and eliminate
    // unnecessary padding bytes.
    {
        let mut used_variants = DenseBitSet::new_empty(variant_fields.len());
        for assignment in &assignments {
            if let Assigned(idx) = assignment {
                used_variants.insert(*idx);
            }
        }
        if used_variants.count() < 2 {
            for assignment in assignments.iter_mut() {
                *assignment = Ineligible(None);
            }
            ineligible_locals.insert_all();
        }
    }

    // Write down the order of our locals that will be promoted to the prefix.
    {
        for (idx, local) in ineligible_locals.iter().enumerate() {
            assignments[local] = Ineligible(Some(FieldIdx::new(idx)));
        }
    }
    debug!("coroutine saved local assignments: {:?}", assignments);

    (ineligible_locals, assignments)
}

/// Compute the full coroutine layout.
pub(super) fn layout<
    'a,
    F: core::ops::Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + core::fmt::Debug + Copy,
    VariantIdx: Idx,
    FieldIdx: Idx,
    LocalIdx: Idx,
>(
    calc: &super::LayoutCalculator<impl HasDataLayout>,
    local_layouts: &IndexSlice<LocalIdx, F>,
    mut prefix_layouts: IndexVec<FieldIdx, F>,
    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
    tag_to_layout: impl Fn(Scalar) -> F,
) -> super::LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
    use SavedLocalEligibility::*;

    let (ineligible_locals, assignments) =
        coroutine_saved_local_eligibility(local_layouts.len(), variant_fields, storage_conflicts);

    // Build a prefix layout, including "promoting" all ineligible
    // locals as part of the prefix. We compute the layout of all of
    // these fields at once to get optimal packing.
    let tag_index = prefix_layouts.len();

    // `variant_fields` already accounts for the reserved variants, so no need to add them.
    let max_discr = (variant_fields.len() - 1) as u128;
    let discr_int = Integer::fit_unsigned(max_discr);
    let tag = Scalar::Initialized {
        value: Primitive::Int(discr_int, /* signed = */ false),
        valid_range: WrappingRange { start: 0, end: max_discr },
    };

    let promoted_layouts = ineligible_locals.iter().map(|local| local_layouts[local]);
    prefix_layouts.push(tag_to_layout(tag));
    prefix_layouts.extend(promoted_layouts);
    let prefix =
        calc.univariant(&prefix_layouts, &ReprOptions::default(), StructKind::AlwaysSized)?;

    let (prefix_size, prefix_align) = (prefix.size, prefix.align);

    // Split the prefix layout into the "outer" fields (upvars and
    // discriminant) and the "promoted" fields. Promoted fields will
    // get included in each variant that requested them in
    // CoroutineLayout.
    debug!("prefix = {:#?}", prefix);
    let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
        FieldsShape::Arbitrary { mut offsets, memory_index } => {
            let mut inverse_memory_index = memory_index.invert_bijective_mapping();

            // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
            // "outer" and "promoted" fields respectively.
            let b_start = FieldIdx::new(tag_index + 1);
            let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index()));
            let offsets_a = offsets;

            // Disentangle the "a" and "b" components of `inverse_memory_index`
            // by preserving the order but keeping only one disjoint "half" each.
            // FIXME(eddyb) build a better abstraction for permutations, if possible.
            let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
                .iter()
                .filter_map(|&i| i.index().checked_sub(b_start.index()).map(FieldIdx::new))
                .collect();
            inverse_memory_index.raw.retain(|&i| i.index() < b_start.index());
            let inverse_memory_index_a = inverse_memory_index;

            // Since `inverse_memory_index_{a,b}` each only refer to their
            // respective fields, they can be safely inverted
            let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
            let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();

            let outer_fields =
                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
            (outer_fields, offsets_b, memory_index_b)
        }
        _ => unreachable!(),
    };

    let mut size = prefix.size;
    let mut align = prefix.align;
    let variants = variant_fields
        .iter_enumerated()
        .map(|(index, variant_fields)| {
            // Only include overlap-eligible fields when we compute our variant layout.
            let variant_only_tys = variant_fields
                .iter()
                .filter(|local| match assignments[**local] {
                    Unassigned => unreachable!(),
                    Assigned(v) if v == index => true,
                    Assigned(_) => unreachable!("assignment does not match variant"),
                    Ineligible(_) => false,
                })
                .map(|local| local_layouts[*local]);

            let mut variant = calc.univariant(
                &variant_only_tys.collect::<IndexVec<_, _>>(),
                &ReprOptions::default(),
                StructKind::Prefixed(prefix_size, prefix_align.abi),
            )?;
            variant.variants = Variants::Single { index };

            let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
                unreachable!();
            };

            // Now, stitch the promoted and variant-only fields back together in
            // the order they are mentioned by our CoroutineLayout.
            // Because we only use some subset (that can differ between variants)
            // of the promoted fields, we can't just pick those elements of the
            // `promoted_memory_index` (as we'd end up with gaps).
            // So instead, we build an "inverse memory_index", as if all of the
            // promoted fields were being used, but leave the elements not in the
            // subset as `invalid_field_idx`, which we can filter out later to
            // obtain a valid (bijective) mapping.
            let invalid_field_idx = promoted_memory_index.len() + memory_index.len();
            let mut combined_inverse_memory_index =
                IndexVec::from_elem_n(FieldIdx::new(invalid_field_idx), invalid_field_idx);

            let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
            let combined_offsets = variant_fields
                .iter_enumerated()
                .map(|(i, local)| {
                    let (offset, memory_index) = match assignments[*local] {
                        Unassigned => unreachable!(),
                        Assigned(_) => {
                            let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
                            (offset, promoted_memory_index.len() as u32 + memory_index)
                        }
                        Ineligible(field_idx) => {
                            let field_idx = field_idx.unwrap();
                            (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                        }
                    };
                    combined_inverse_memory_index[memory_index] = i;
                    offset
                })
                .collect();

            // Remove the unused slots and invert the mapping to obtain the
            // combined `memory_index` (also see previous comment).
            combined_inverse_memory_index.raw.retain(|&i| i.index() != invalid_field_idx);
            let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();

            variant.fields = FieldsShape::Arbitrary {
                offsets: combined_offsets,
                memory_index: combined_memory_index,
            };

            size = size.max(variant.size);
            align = align.max(variant.align);
            Ok(variant)
        })
        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

    size = size.align_to(align.abi);

    let uninhabited = prefix.uninhabited || variants.iter().all(|v| v.is_uninhabited());
    let abi = BackendRepr::Memory { sized: true };

    Ok(LayoutData {
        variants: Variants::Multiple {
            tag,
            tag_encoding: TagEncoding::Direct,
            tag_field: tag_index,
            variants,
        },
        fields: outer_fields,
        backend_repr: abi,
        // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
        // self-referentiality), getting the discriminant can cause aliasing violations.
        // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
        // would do the same for us here.
        // See <https://github.com/rust-lang/rust/issues/63818>, <https://github.com/rust-lang/miri/issues/3780>.
        // FIXME: Remove when <https://github.com/rust-lang/rust/issues/125735> is implemented and aliased coroutine fields are wrapped in `UnsafePinned`.
        largest_niche: None,
        uninhabited,
        size,
        align,
        max_repr_align: None,
        unadjusted_abi_align: align.abi,
        randomization_seed: Default::default(),
    })
}
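
The greedy demotion in `coroutine_saved_local_eligibility` above is the heart
of the eligibility pass. A minimal standalone sketch of the same rule, using
plain slices instead of rustc_index's `BitMatrix`/`DenseBitSet` (all names and
types here are illustrative, not the compiler's):

/// Greedy demotion sketch: `conflicts[a][b]` means locals `a` and `b` are
/// storage-live at the same time; `assignments[i]` is `Some(variant)` for an
/// eligible local, or `None` once the local is demoted to the prefix.
fn resolve_conflicts(conflicts: &[Vec<bool>], assignments: &mut [Option<usize>]) {
    let n = conflicts.len();
    let count = |a: usize| conflicts[a].iter().filter(|&&c| c).count();
    for a in 0..n {
        if assignments[a].is_none() {
            continue; // already ineligible; it lives in the prefix anyway
        }
        for b in 0..n {
            // A conflict is harmless if the partner is already in the prefix
            // or both locals sit in the same variant (distinct offsets there).
            if !conflicts[a][b] || assignments[b].is_none() || assignments[a] == assignments[b] {
                continue;
            }
            // Demote whichever side conflicts with more locals overall; a
            // greedy heuristic, as the comment in the real code says.
            let remove = if count(a) > count(b) { a } else { b };
            assignments[remove] = None;
        }
    }
}

For example, if local 0 conflicts with locals 1 and 2 (all three assigned to
different variants, with 1 and 2 not conflicting with each other), only local
0 is demoted: it has the higher conflict count in both comparisons, so locals
1 and 2 stay overlap-eligible.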

compiler/rustc_ty_utils/src/layout.rs

Lines changed: 23 additions & 334 deletions

@@ -1,17 +1,13 @@
-use std::fmt::Debug;
-use std::iter;
-
 use hir::def_id::DefId;
 use rustc_abi::Integer::{I8, I32};
 use rustc_abi::Primitive::{self, Float, Int, Pointer};
 use rustc_abi::{
-    AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Integer,
-    Layout, LayoutCalculator, LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size,
-    StructKind, TagEncoding, VariantIdx, Variants, WrappingRange,
+    AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Layout,
+    LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding,
+    VariantIdx, Variants, WrappingRange,
 };
 use rustc_hashes::Hash64;
-use rustc_index::bit_set::{BitMatrix, DenseBitSet};
-use rustc_index::{Idx, IndexSlice, IndexVec};
+use rustc_index::IndexVec;
 use rustc_middle::bug;
 use rustc_middle::query::Providers;
 use rustc_middle::ty::layout::{

@@ -23,7 +19,7 @@ use rustc_middle::ty::{
 };
 use rustc_session::{DataTypeKind, FieldInfo, FieldKind, SizeKind, VariantInfo};
 use rustc_span::{Symbol, sym};
-use tracing::{debug, instrument, trace};
+use tracing::{debug, instrument};
 use {rustc_abi as abi, rustc_hir as hir};
 
 use crate::errors::{NonPrimitiveSimdType, OversizedSimdType, ZeroLengthSimdType};

@@ -403,23 +399,24 @@ fn layout_of_uncached<'tcx>(
                 .map(|ty| cx.layout_of(ty))
                 .try_collect::<IndexVec<_, _>>()?;
 
-            let layout = coroutine_layout(
-                &cx.calc,
-                &local_layouts,
-                prefix_layouts,
-                &info.variant_fields,
-                &info.storage_conflicts,
-                |tag| TyAndLayout {
-                    ty: tag.primitive().to_ty(tcx),
-                    layout: tcx.mk_layout(LayoutData::scalar(cx, tag)),
-                },
-            )
-            .map(|mut layout| {
-                // this is similar to how ReprOptions populates its field_shuffle_seed
-                layout.randomization_seed = tcx.def_path_hash(def_id).0.to_smaller_hash();
-                debug!("coroutine layout ({:?}): {:#?}", ty, layout);
-                layout
-            });
+            let layout = cx
+                .calc
+                .coroutine(
+                    &local_layouts,
+                    prefix_layouts,
+                    &info.variant_fields,
+                    &info.storage_conflicts,
+                    |tag| TyAndLayout {
+                        ty: tag.primitive().to_ty(tcx),
+                        layout: tcx.mk_layout(LayoutData::scalar(cx, tag)),
+                    },
+                )
+                .map(|mut layout| {
+                    // this is similar to how ReprOptions populates its field_shuffle_seed
+                    layout.randomization_seed = tcx.def_path_hash(def_id).0.to_smaller_hash();
+                    debug!("coroutine layout ({:?}): {:#?}", ty, layout);
+                    layout
+                });
             map_layout(layout)?
         }
 
@@ -614,314 +611,6 @@ fn layout_of_uncached<'tcx>(
     })
 }
 
-/// Overlap eligibility and variant assignment for each CoroutineSavedLocal.
-#[derive(Clone, Debug, PartialEq)]
-enum SavedLocalEligibility<VariantIdx, FieldIdx> {
-    Unassigned,
-    Assigned(VariantIdx),
-    Ineligible(Option<FieldIdx>),
-}
-
-// When laying out coroutines, we divide our saved local fields into two
-// categories: overlap-eligible and overlap-ineligible.
-//
-// Those fields which are ineligible for overlap go in a "prefix" at the
-// beginning of the layout, and always have space reserved for them.
-//
-// Overlap-eligible fields are only assigned to one variant, so we lay
-// those fields out for each variant and put them right after the
-// prefix.
-//
-// Finally, in the layout details, we point to the fields from the
-// variants they are assigned to. It is possible for some fields to be
-// included in multiple variants. No field ever "moves around" in the
-// layout; its offset is always the same.
-//
-// Also included in the layout are the upvars and the discriminant.
-// These are included as fields on the "outer" layout; they are not part
-// of any variant.
-
-/// Compute the eligibility and assignment of each local.
-fn coroutine_saved_local_eligibility<VariantIdx: Idx, FieldIdx: Idx, LocalIdx: Idx>(
-    nb_locals: usize,
-    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
-    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
-) -> (DenseBitSet<LocalIdx>, IndexVec<LocalIdx, SavedLocalEligibility<VariantIdx, FieldIdx>>) {
-    use SavedLocalEligibility::*;
-
-    let mut assignments: IndexVec<LocalIdx, _> = IndexVec::from_elem_n(Unassigned, nb_locals);
-
-    // The saved locals not eligible for overlap. These will get
-    // "promoted" to the prefix of our coroutine.
-    let mut ineligible_locals = DenseBitSet::new_empty(nb_locals);
-
-    // Figure out which of our saved locals are fields in only
-    // one variant. The rest are deemed ineligible for overlap.
-    for (variant_index, fields) in variant_fields.iter_enumerated() {
-        for local in fields {
-            match assignments[*local] {
-                Unassigned => {
-                    assignments[*local] = Assigned(variant_index);
-                }
-                Assigned(idx) => {
-                    // We've already seen this local at another suspension
-                    // point, so it is no longer a candidate.
-                    trace!(
-                        "removing local {:?} in >1 variant ({:?}, {:?})",
-                        local, variant_index, idx
-                    );
-                    ineligible_locals.insert(*local);
-                    assignments[*local] = Ineligible(None);
-                }
-                Ineligible(_) => {}
-            }
-        }
-    }
-
-    // Next, check every pair of eligible locals to see if they
-    // conflict.
-    for local_a in storage_conflicts.rows() {
-        let conflicts_a = storage_conflicts.count(local_a);
-        if ineligible_locals.contains(local_a) {
-            continue;
-        }
-
-        for local_b in storage_conflicts.iter(local_a) {
-            // local_a and local_b are storage live at the same time, therefore they
-            // cannot overlap in the coroutine layout. The only way to guarantee
-            // this is if they are in the same variant, or one is ineligible
-            // (which means it is stored in every variant).
-            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
-                continue;
-            }
-
-            // If they conflict, we will choose one to make ineligible.
-            // This is not always optimal; it's just a greedy heuristic that
-            // seems to produce good results most of the time.
-            let conflicts_b = storage_conflicts.count(local_b);
-            let (remove, other) =
-                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
-            ineligible_locals.insert(remove);
-            assignments[remove] = Ineligible(None);
-            trace!("removing local {:?} due to conflict with {:?}", remove, other);
-        }
-    }
-
-    // Count the number of variants in use. If only one of them, then it is
-    // impossible to overlap any locals in our layout. In this case it's
-    // always better to make the remaining locals ineligible, so we can
-    // lay them out with the other locals in the prefix and eliminate
-    // unnecessary padding bytes.
-    {
-        let mut used_variants = DenseBitSet::new_empty(variant_fields.len());
-        for assignment in &assignments {
-            if let Assigned(idx) = assignment {
-                used_variants.insert(*idx);
-            }
-        }
-        if used_variants.count() < 2 {
-            for assignment in assignments.iter_mut() {
-                *assignment = Ineligible(None);
-            }
-            ineligible_locals.insert_all();
-        }
-    }
-
-    // Write down the order of our locals that will be promoted to the prefix.
-    {
-        for (idx, local) in ineligible_locals.iter().enumerate() {
-            assignments[local] = Ineligible(Some(FieldIdx::new(idx)));
-        }
-    }
-    debug!("coroutine saved local assignments: {:?}", assignments);
-
-    (ineligible_locals, assignments)
-}
-
-/// Compute the full coroutine layout.
-fn coroutine_layout<
-    'a,
-    F: core::ops::Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + core::fmt::Debug + Copy,
-    VariantIdx: Idx,
-    FieldIdx: Idx,
-    LocalIdx: Idx,
->(
-    calc: &LayoutCalculator<impl HasDataLayout>,
-    local_layouts: &IndexSlice<LocalIdx, F>,
-    mut prefix_layouts: IndexVec<FieldIdx, F>,
-    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
-    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
-    tag_to_layout: impl Fn(Scalar) -> F,
-) -> Result<LayoutData<FieldIdx, VariantIdx>, LayoutCalculatorError<F>> {
-    use SavedLocalEligibility::*;
-
-    let (ineligible_locals, assignments) =
-        coroutine_saved_local_eligibility(local_layouts.len(), variant_fields, storage_conflicts);
-
-    // Build a prefix layout, including "promoting" all ineligible
-    // locals as part of the prefix. We compute the layout of all of
-    // these fields at once to get optimal packing.
-    let tag_index = prefix_layouts.len();
-
-    // `variant_fields` already accounts for the reserved variants, so no need to add them.
-    let max_discr = (variant_fields.len() - 1) as u128;
-    let discr_int = Integer::fit_unsigned(max_discr);
-    let tag = Scalar::Initialized {
-        value: Primitive::Int(discr_int, /* signed = */ false),
-        valid_range: WrappingRange { start: 0, end: max_discr },
-    };
-
-    let promoted_layouts = ineligible_locals.iter().map(|local| local_layouts[local]);
-    prefix_layouts.push(tag_to_layout(tag));
-    prefix_layouts.extend(promoted_layouts);
-    let prefix =
-        calc.univariant(&prefix_layouts, &ReprOptions::default(), StructKind::AlwaysSized)?;
-
-    let (prefix_size, prefix_align) = (prefix.size, prefix.align);
-
-    // Split the prefix layout into the "outer" fields (upvars and
-    // discriminant) and the "promoted" fields. Promoted fields will
-    // get included in each variant that requested them in
-    // CoroutineLayout.
-    debug!("prefix = {:#?}", prefix);
-    let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
-        FieldsShape::Arbitrary { mut offsets, memory_index } => {
-            let mut inverse_memory_index = memory_index.invert_bijective_mapping();
-
-            // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
-            // "outer" and "promoted" fields respectively.
-            let b_start = FieldIdx::new(tag_index + 1);
-            let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index()));
-            let offsets_a = offsets;
-
-            // Disentangle the "a" and "b" components of `inverse_memory_index`
-            // by preserving the order but keeping only one disjoint "half" each.
-            // FIXME(eddyb) build a better abstraction for permutations, if possible.
-            let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
-                .iter()
-                .filter_map(|&i| i.index().checked_sub(b_start.index()).map(FieldIdx::new))
-                .collect();
-            inverse_memory_index.raw.retain(|&i| i.index() < b_start.index());
-            let inverse_memory_index_a = inverse_memory_index;
-
-            // Since `inverse_memory_index_{a,b}` each only refer to their
-            // respective fields, they can be safely inverted
-            let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
-            let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();
-
-            let outer_fields =
-                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
-            (outer_fields, offsets_b, memory_index_b)
-        }
-        _ => unreachable!(),
-    };
-
-    let mut size = prefix.size;
-    let mut align = prefix.align;
-    let variants = variant_fields
-        .iter_enumerated()
-        .map(|(index, variant_fields)| {
-            // Only include overlap-eligible fields when we compute our variant layout.
-            let variant_only_tys = variant_fields
-                .iter()
-                .filter(|local| match assignments[**local] {
-                    Unassigned => unreachable!(),
-                    Assigned(v) if v == index => true,
-                    Assigned(_) => unreachable!("assignment does not match variant"),
-                    Ineligible(_) => false,
-                })
-                .map(|local| local_layouts[*local]);
-
-            let mut variant = calc.univariant(
-                &variant_only_tys.collect::<IndexVec<_, _>>(),
-                &ReprOptions::default(),
-                StructKind::Prefixed(prefix_size, prefix_align.abi),
-            )?;
-            variant.variants = Variants::Single { index };
-
-            let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
-                unreachable!();
-            };
-
-            // Now, stitch the promoted and variant-only fields back together in
-            // the order they are mentioned by our CoroutineLayout.
-            // Because we only use some subset (that can differ between variants)
-            // of the promoted fields, we can't just pick those elements of the
-            // `promoted_memory_index` (as we'd end up with gaps).
-            // So instead, we build an "inverse memory_index", as if all of the
-            // promoted fields were being used, but leave the elements not in the
-            // subset as `invalid_field_idx`, which we can filter out later to
-            // obtain a valid (bijective) mapping.
-            let invalid_field_idx = promoted_memory_index.len() + memory_index.len();
-            let mut combined_inverse_memory_index =
-                IndexVec::from_elem_n(FieldIdx::new(invalid_field_idx), invalid_field_idx);
-
-            let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
-            let combined_offsets = variant_fields
-                .iter_enumerated()
-                .map(|(i, local)| {
-                    let (offset, memory_index) = match assignments[*local] {
-                        Unassigned => unreachable!(),
-                        Assigned(_) => {
-                            let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
-                            (offset, promoted_memory_index.len() as u32 + memory_index)
-                        }
-                        Ineligible(field_idx) => {
-                            let field_idx = field_idx.unwrap();
-                            (promoted_offsets[field_idx], promoted_memory_index[field_idx])
-                        }
-                    };
-                    combined_inverse_memory_index[memory_index] = i;
-                    offset
-                })
-                .collect();
-
-            // Remove the unused slots and invert the mapping to obtain the
-            // combined `memory_index` (also see previous comment).
-            combined_inverse_memory_index.raw.retain(|&i| i.index() != invalid_field_idx);
-            let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();
-
-            variant.fields = FieldsShape::Arbitrary {
-                offsets: combined_offsets,
-                memory_index: combined_memory_index,
-            };
-
-            size = size.max(variant.size);
-            align = align.max(variant.align);
-            Ok(variant)
-        })
-        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
-    size = size.align_to(align.abi);
-
-    let uninhabited = prefix.uninhabited || variants.iter().all(|v| v.is_uninhabited());
-    let abi = BackendRepr::Memory { sized: true };
-
-    Ok(LayoutData {
-        variants: Variants::Multiple {
-            tag,
-            tag_encoding: TagEncoding::Direct,
-            tag_field: tag_index,
-            variants,
-        },
-        fields: outer_fields,
-        backend_repr: abi,
-        // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
-        // self-referentiality), getting the discriminant can cause aliasing violations.
-        // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
-        // would do the same for us here.
-        // See <https://github.com/rust-lang/rust/issues/63818>, <https://github.com/rust-lang/miri/issues/3780>.
-        // FIXME: Remove when <https://github.com/rust-lang/rust/issues/125735> is implemented and aliased coroutine fields are wrapped in `UnsafePinned`.
-        largest_niche: None,
-        uninhabited,
-        size,
-        align,
-        max_repr_align: None,
-        unadjusted_abi_align: align.abi,
-        randomization_seed: Default::default(),
-    })
-}
-
 fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx>, layout: TyAndLayout<'tcx>) {
     // Ignore layouts that are done with non-empty environments or
    // non-monomorphic layouts, as the user only wants to see the stuff
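
Both the new module and the code it replaces lean on `invert_bijective_mapping`
to flip between a field-to-memory-slot permutation (`memory_index`) and its
memory-slot-to-field inverse. A self-contained sketch of that operation on
plain `usize` slices (illustrative; the real method is defined on rustc_index's
`IndexVec`):

/// Invert a permutation: `map[i]` is the memory slot of field `i`; the result
/// says, for each memory slot, which field occupies it.
fn invert_bijective_mapping(map: &[usize]) -> Vec<usize> {
    let mut inverse = vec![0; map.len()];
    for (field, &slot) in map.iter().enumerate() {
        inverse[slot] = field;
    }
    inverse
}

fn main() {
    // Fields 0, 1, 2 land in memory slots 2, 0, 1, so slot 0 holds field 1,
    // slot 1 holds field 2, and slot 2 holds field 0.
    assert_eq!(invert_bijective_mapping(&[2, 0, 1]), vec![1, 2, 0]);
}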
