
Commit bfa1a62

authored Mar 9, 2025
Rollup merge of #138158 - moulins:move-layout-to-rustc_abi, r=workingjubilee
Move more layout logic to `rustc_abi`

Move all `LayoutData`-constructing code to `rustc_abi`:
- Infallible operations get a new `LayoutData` constructor method;
- Fallible ones get a new method on `LayoutCalculator`.
2 parents 48caf81 + 08530d3 commit bfa1a62
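
As a rough sketch of the API split described in the commit message (the caller code below is hypothetical — `cx`, `calc`, the scalars and the concrete index types are assumptions, not part of this diff): infallible constructions become associated functions on `LayoutData`, while fallible ones stay on `LayoutCalculator` and surface `LayoutCalculatorError` through a `Result`.

    // Hypothetical caller, assuming `cx: impl HasDataLayout` and `calc: LayoutCalculator<_>`.
    // Infallible: plain constructors on `LayoutData`.
    let never = LayoutData::<FieldIdx, VariantIdx>::never_type(&cx);
    let wide_ptr = LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&cx, data_ptr, metadata);

    // Fallible: methods on `LayoutCalculator`, e.g. array layouts that can hit `SizeOverflow`.
    let array = calc.array_like::<FieldIdx, VariantIdx, ()>(&element_layout, Some(len))?;
    let slice = calc.array_like::<FieldIdx, VariantIdx, ()>(&element_layout, None)?;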

File tree

13 files changed: +754, -750 lines
 

‎compiler/rustc_abi/src/layout.rs

Lines changed: 133 additions & 59 deletions
@@ -4,6 +4,7 @@ use std::{cmp, iter};
 
 use rustc_hashes::Hash64;
 use rustc_index::Idx;
+use rustc_index::bit_set::BitMatrix;
 use tracing::debug;
 
 use crate::{
@@ -12,6 +13,9 @@ use crate::{
     Variants, WrappingRange,
 };
 
+mod coroutine;
+mod simple;
+
 #[cfg(feature = "nightly")]
 mod ty;
 
@@ -60,31 +64,44 @@ pub enum LayoutCalculatorError<F> {
 
     /// The fields or variants have irreconcilable reprs
     ReprConflict,
+
+    /// The length of an SIMD type is zero
+    ZeroLengthSimdType,
+
+    /// The length of an SIMD type exceeds the maximum number of lanes
+    OversizedSimdType { max_lanes: u64 },
+
+    /// An element type of an SIMD type isn't a primitive
+    NonPrimitiveSimdType(F),
 }
 
 impl<F> LayoutCalculatorError<F> {
     pub fn without_payload(&self) -> LayoutCalculatorError<()> {
-        match self {
-            LayoutCalculatorError::UnexpectedUnsized(_) => {
-                LayoutCalculatorError::UnexpectedUnsized(())
-            }
-            LayoutCalculatorError::SizeOverflow => LayoutCalculatorError::SizeOverflow,
-            LayoutCalculatorError::EmptyUnion => LayoutCalculatorError::EmptyUnion,
-            LayoutCalculatorError::ReprConflict => LayoutCalculatorError::ReprConflict,
+        use LayoutCalculatorError::*;
+        match *self {
+            UnexpectedUnsized(_) => UnexpectedUnsized(()),
+            SizeOverflow => SizeOverflow,
+            EmptyUnion => EmptyUnion,
+            ReprConflict => ReprConflict,
+            ZeroLengthSimdType => ZeroLengthSimdType,
+            OversizedSimdType { max_lanes } => OversizedSimdType { max_lanes },
+            NonPrimitiveSimdType(_) => NonPrimitiveSimdType(()),
        }
    }
 
     /// Format an untranslated diagnostic for this type
     ///
     /// Intended for use by rust-analyzer, as neither it nor `rustc_abi` depend on fluent infra.
     pub fn fallback_fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use LayoutCalculatorError::*;
         f.write_str(match self {
-            LayoutCalculatorError::UnexpectedUnsized(_) => {
-                "an unsized type was found where a sized type was expected"
+            UnexpectedUnsized(_) => "an unsized type was found where a sized type was expected",
+            SizeOverflow => "size overflow",
+            EmptyUnion => "type is a union with no fields",
+            ReprConflict => "type has an invalid repr",
+            ZeroLengthSimdType | OversizedSimdType { .. } | NonPrimitiveSimdType(_) => {
+                "invalid simd type definition"
             }
-            LayoutCalculatorError::SizeOverflow => "size overflow",
-            LayoutCalculatorError::EmptyUnion => "type is a union with no fields",
-            LayoutCalculatorError::ReprConflict => "type has an invalid repr",
         })
     }
 }
@@ -102,41 +119,115 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         Self { cx }
     }
 
-    pub fn scalar_pair<FieldIdx: Idx, VariantIdx: Idx>(
+    pub fn array_like<FieldIdx: Idx, VariantIdx: Idx, F>(
         &self,
-        a: Scalar,
-        b: Scalar,
-    ) -> LayoutData<FieldIdx, VariantIdx> {
-        let dl = self.cx.data_layout();
-        let b_align = b.align(dl);
-        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
-        let b_offset = a.size(dl).align_to(b_align.abi);
-        let size = (b_offset + b.size(dl)).align_to(align.abi);
+        element: &LayoutData<FieldIdx, VariantIdx>,
+        count_if_sized: Option<u64>, // None for slices
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        let count = count_if_sized.unwrap_or(0);
+        let size =
+            element.size.checked_mul(count, &self.cx).ok_or(LayoutCalculatorError::SizeOverflow)?;
 
-        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
-        // returns the last maximum.
-        let largest_niche = Niche::from_scalar(dl, b_offset, b)
-            .into_iter()
-            .chain(Niche::from_scalar(dl, Size::ZERO, a))
-            .max_by_key(|niche| niche.available(dl));
+        Ok(LayoutData {
+            variants: Variants::Single { index: VariantIdx::new(0) },
+            fields: FieldsShape::Array { stride: element.size, count },
+            backend_repr: BackendRepr::Memory { sized: count_if_sized.is_some() },
+            largest_niche: element.largest_niche.filter(|_| count != 0),
+            uninhabited: element.uninhabited && count != 0,
+            align: element.align,
+            size,
+            max_repr_align: None,
+            unadjusted_abi_align: element.align.abi,
+            randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)),
+        })
+    }
 
-        let combined_seed = a.size(&self.cx).bytes().wrapping_add(b.size(&self.cx).bytes());
+    pub fn simd_type<
+        FieldIdx: Idx,
+        VariantIdx: Idx,
+        F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug,
+    >(
+        &self,
+        element: F,
+        count: u64,
+        repr_packed: bool,
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        let elt = element.as_ref();
+        if count == 0 {
+            return Err(LayoutCalculatorError::ZeroLengthSimdType);
+        } else if count > crate::MAX_SIMD_LANES {
+            return Err(LayoutCalculatorError::OversizedSimdType {
+                max_lanes: crate::MAX_SIMD_LANES,
+            });
+        }
 
-        LayoutData {
+        let BackendRepr::Scalar(e_repr) = elt.backend_repr else {
+            return Err(LayoutCalculatorError::NonPrimitiveSimdType(element));
+        };
+
+        // Compute the size and alignment of the vector
+        let dl = self.cx.data_layout();
+        let size =
+            elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?;
+        let (repr, align) = if repr_packed && !count.is_power_of_two() {
+            // Non-power-of-two vectors have padding up to the next power-of-two.
+            // If we're a packed repr, remove the padding while keeping the alignment as close
+            // to a vector as possible.
+            (
+                BackendRepr::Memory { sized: true },
+                AbiAndPrefAlign {
+                    abi: Align::max_aligned_factor(size),
+                    pref: dl.llvmlike_vector_align(size).pref,
+                },
+            )
+        } else {
+            (BackendRepr::SimdVector { element: e_repr, count }, dl.llvmlike_vector_align(size))
+        };
+        let size = size.align_to(align.abi);
+
+        Ok(LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Arbitrary {
-                offsets: [Size::ZERO, b_offset].into(),
-                memory_index: [0, 1].into(),
+                offsets: [Size::ZERO].into(),
+                memory_index: [0].into(),
             },
-            backend_repr: BackendRepr::ScalarPair(a, b),
-            largest_niche,
+            backend_repr: repr,
+            largest_niche: elt.largest_niche,
            uninhabited: false,
-            align,
             size,
+            align,
             max_repr_align: None,
-            unadjusted_abi_align: align.abi,
-            randomization_seed: Hash64::new(combined_seed),
-        }
+            unadjusted_abi_align: elt.align.abi,
+            randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)),
+        })
+    }
+
+    /// Compute the layout for a coroutine.
+    ///
+    /// This uses dedicated code instead of [`Self::layout_of_struct_or_enum`], as coroutine
+    /// fields may be shared between multiple variants (see the [`coroutine`] module for details).
+    pub fn coroutine<
+        'a,
+        F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy,
+        VariantIdx: Idx,
+        FieldIdx: Idx,
+        LocalIdx: Idx,
+    >(
+        &self,
+        local_layouts: &IndexSlice<LocalIdx, F>,
+        prefix_layouts: IndexVec<FieldIdx, F>,
+        variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
+        storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
+        tag_to_layout: impl Fn(Scalar) -> F,
+    ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+        coroutine::layout(
+            self,
+            local_layouts,
+            prefix_layouts,
+            variant_fields,
+            storage_conflicts,
+            tag_to_layout,
+        )
     }
 
     pub fn univariant<
@@ -214,25 +305,6 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         layout
     }
 
-    pub fn layout_of_never_type<FieldIdx: Idx, VariantIdx: Idx>(
-        &self,
-    ) -> LayoutData<FieldIdx, VariantIdx> {
-        let dl = self.cx.data_layout();
-        // This is also used for uninhabited enums, so we use `Variants::Empty`.
-        LayoutData {
-            variants: Variants::Empty,
-            fields: FieldsShape::Primitive,
-            backend_repr: BackendRepr::Memory { sized: true },
-            largest_niche: None,
-            uninhabited: true,
-            align: dl.i8_align,
-            size: Size::ZERO,
-            max_repr_align: None,
-            unadjusted_abi_align: dl.i8_align.abi,
-            randomization_seed: Hash64::ZERO,
-        }
-    }
-
     pub fn layout_of_struct_or_enum<
         'a,
         FieldIdx: Idx,
@@ -260,7 +332,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             Some(present_first) => present_first,
             // Uninhabited because it has no variants, or only absent ones.
             None if is_enum => {
-                return Ok(self.layout_of_never_type());
+                return Ok(LayoutData::never_type(&self.cx));
             }
             // If it's a struct, still compute a layout so that we can still compute the
             // field offsets.
@@ -949,7 +1021,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 // Common prim might be uninit.
                 Scalar::Union { value: prim }
             };
-            let pair = self.scalar_pair::<FieldIdx, VariantIdx>(tag, prim_scalar);
+            let pair =
+                LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, tag, prim_scalar);
             let pair_offsets = match pair.fields {
                 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                     assert_eq!(memory_index.raw, [0, 1]);
@@ -1341,7 +1414,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 } else {
                     ((j, b), (i, a))
                 };
-                let pair = self.scalar_pair::<FieldIdx, VariantIdx>(a, b);
+                let pair =
+                    LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, a, b);
                 let pair_offsets = match pair.fields {
                     FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                         assert_eq!(memory_index.raw, [0, 1]);
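
For the new fallible SIMD path above, a hedged sketch of how a caller might drive `simd_type` and react to its new error variants (`use_layout`, `report`, `element`, and `lanes` are hypothetical placeholders, not part of this diff):

    match calc.simd_type(element, lanes, /* repr_packed */ false) {
        Ok(layout) => use_layout(layout),
        Err(LayoutCalculatorError::ZeroLengthSimdType) => report("SIMD type has zero lanes"),
        Err(LayoutCalculatorError::OversizedSimdType { max_lanes }) => {
            report(&format!("SIMD type exceeds {max_lanes} lanes"))
        }
        Err(LayoutCalculatorError::NonPrimitiveSimdType(elt)) => {
            report(&format!("SIMD element {elt:?} is not a primitive scalar"))
        }
        Err(_) => report("other layout error"),
    }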

compiler/rustc_abi/src/layout/coroutine.rs (new file)

Lines changed: 320 additions & 0 deletions
@@ -0,0 +1,320 @@
+//! Coroutine layout logic.
+//!
+//! When laying out coroutines, we divide our saved local fields into two
+//! categories: overlap-eligible and overlap-ineligible.
+//!
+//! Those fields which are ineligible for overlap go in a "prefix" at the
+//! beginning of the layout, and always have space reserved for them.
+//!
+//! Overlap-eligible fields are only assigned to one variant, so we lay
+//! those fields out for each variant and put them right after the
+//! prefix.
+//!
+//! Finally, in the layout details, we point to the fields from the
+//! variants they are assigned to. It is possible for some fields to be
+//! included in multiple variants. No field ever "moves around" in the
+//! layout; its offset is always the same.
+//!
+//! Also included in the layout are the upvars and the discriminant.
+//! These are included as fields on the "outer" layout; they are not part
+//! of any variant.
+
+use std::iter;
+
+use rustc_index::bit_set::{BitMatrix, DenseBitSet};
+use rustc_index::{Idx, IndexSlice, IndexVec};
+use tracing::{debug, trace};
+
+use crate::{
+    BackendRepr, FieldsShape, HasDataLayout, Integer, LayoutData, Primitive, ReprOptions, Scalar,
+    StructKind, TagEncoding, Variants, WrappingRange,
+};
+
+/// Overlap eligibility and variant assignment for each CoroutineSavedLocal.
+#[derive(Clone, Debug, PartialEq)]
+enum SavedLocalEligibility<VariantIdx, FieldIdx> {
+    Unassigned,
+    Assigned(VariantIdx),
+    Ineligible(Option<FieldIdx>),
+}
+
+/// Compute the eligibility and assignment of each local.
+fn coroutine_saved_local_eligibility<VariantIdx: Idx, FieldIdx: Idx, LocalIdx: Idx>(
+    nb_locals: usize,
+    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
+    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
+) -> (DenseBitSet<LocalIdx>, IndexVec<LocalIdx, SavedLocalEligibility<VariantIdx, FieldIdx>>) {
+    use SavedLocalEligibility::*;
+
+    let mut assignments: IndexVec<LocalIdx, _> = IndexVec::from_elem_n(Unassigned, nb_locals);
+
+    // The saved locals not eligible for overlap. These will get
+    // "promoted" to the prefix of our coroutine.
+    let mut ineligible_locals = DenseBitSet::new_empty(nb_locals);
+
+    // Figure out which of our saved locals are fields in only
+    // one variant. The rest are deemed ineligible for overlap.
+    for (variant_index, fields) in variant_fields.iter_enumerated() {
+        for local in fields {
+            match assignments[*local] {
+                Unassigned => {
+                    assignments[*local] = Assigned(variant_index);
+                }
+                Assigned(idx) => {
+                    // We've already seen this local at another suspension
+                    // point, so it is no longer a candidate.
+                    trace!(
+                        "removing local {:?} in >1 variant ({:?}, {:?})",
+                        local, variant_index, idx
+                    );
+                    ineligible_locals.insert(*local);
+                    assignments[*local] = Ineligible(None);
+                }
+                Ineligible(_) => {}
+            }
+        }
+    }
+
+    // Next, check every pair of eligible locals to see if they
+    // conflict.
+    for local_a in storage_conflicts.rows() {
+        let conflicts_a = storage_conflicts.count(local_a);
+        if ineligible_locals.contains(local_a) {
+            continue;
+        }
+
+        for local_b in storage_conflicts.iter(local_a) {
+            // local_a and local_b are storage live at the same time, therefore they
+            // cannot overlap in the coroutine layout. The only way to guarantee
+            // this is if they are in the same variant, or one is ineligible
+            // (which means it is stored in every variant).
+            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
+                continue;
+            }
+
+            // If they conflict, we will choose one to make ineligible.
+            // This is not always optimal; it's just a greedy heuristic that
+            // seems to produce good results most of the time.
+            let conflicts_b = storage_conflicts.count(local_b);
+            let (remove, other) =
+                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
+            ineligible_locals.insert(remove);
+            assignments[remove] = Ineligible(None);
+            trace!("removing local {:?} due to conflict with {:?}", remove, other);
+        }
+    }
+
+    // Count the number of variants in use. If only one of them, then it is
+    // impossible to overlap any locals in our layout. In this case it's
+    // always better to make the remaining locals ineligible, so we can
+    // lay them out with the other locals in the prefix and eliminate
+    // unnecessary padding bytes.
+    {
+        let mut used_variants = DenseBitSet::new_empty(variant_fields.len());
+        for assignment in &assignments {
+            if let Assigned(idx) = assignment {
+                used_variants.insert(*idx);
+            }
+        }
+        if used_variants.count() < 2 {
+            for assignment in assignments.iter_mut() {
+                *assignment = Ineligible(None);
+            }
+            ineligible_locals.insert_all();
+        }
+    }
+
+    // Write down the order of our locals that will be promoted to the prefix.
+    {
+        for (idx, local) in ineligible_locals.iter().enumerate() {
+            assignments[local] = Ineligible(Some(FieldIdx::new(idx)));
+        }
+    }
+    debug!("coroutine saved local assignments: {:?}", assignments);
+
+    (ineligible_locals, assignments)
+}
+
+/// Compute the full coroutine layout.
+pub(super) fn layout<
+    'a,
+    F: core::ops::Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + core::fmt::Debug + Copy,
+    VariantIdx: Idx,
+    FieldIdx: Idx,
+    LocalIdx: Idx,
+>(
+    calc: &super::LayoutCalculator<impl HasDataLayout>,
+    local_layouts: &IndexSlice<LocalIdx, F>,
+    mut prefix_layouts: IndexVec<FieldIdx, F>,
+    variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>,
+    storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>,
+    tag_to_layout: impl Fn(Scalar) -> F,
+) -> super::LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
+    use SavedLocalEligibility::*;
+
+    let (ineligible_locals, assignments) =
+        coroutine_saved_local_eligibility(local_layouts.len(), variant_fields, storage_conflicts);
+
+    // Build a prefix layout, including "promoting" all ineligible
+    // locals as part of the prefix. We compute the layout of all of
+    // these fields at once to get optimal packing.
+    let tag_index = prefix_layouts.len();
+
+    // `variant_fields` already accounts for the reserved variants, so no need to add them.
+    let max_discr = (variant_fields.len() - 1) as u128;
+    let discr_int = Integer::fit_unsigned(max_discr);
+    let tag = Scalar::Initialized {
+        value: Primitive::Int(discr_int, /* signed = */ false),
+        valid_range: WrappingRange { start: 0, end: max_discr },
+    };
+
+    let promoted_layouts = ineligible_locals.iter().map(|local| local_layouts[local]);
+    prefix_layouts.push(tag_to_layout(tag));
+    prefix_layouts.extend(promoted_layouts);
+    let prefix =
+        calc.univariant(&prefix_layouts, &ReprOptions::default(), StructKind::AlwaysSized)?;
+
+    let (prefix_size, prefix_align) = (prefix.size, prefix.align);
+
+    // Split the prefix layout into the "outer" fields (upvars and
+    // discriminant) and the "promoted" fields. Promoted fields will
+    // get included in each variant that requested them in
+    // CoroutineLayout.
+    debug!("prefix = {:#?}", prefix);
+    let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
+        FieldsShape::Arbitrary { mut offsets, memory_index } => {
+            let mut inverse_memory_index = memory_index.invert_bijective_mapping();
+
+            // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
+            // "outer" and "promoted" fields respectively.
+            let b_start = FieldIdx::new(tag_index + 1);
+            let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index()));
+            let offsets_a = offsets;
+
+            // Disentangle the "a" and "b" components of `inverse_memory_index`
+            // by preserving the order but keeping only one disjoint "half" each.
+            // FIXME(eddyb) build a better abstraction for permutations, if possible.
+            let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
+                .iter()
+                .filter_map(|&i| i.index().checked_sub(b_start.index()).map(FieldIdx::new))
+                .collect();
+            inverse_memory_index.raw.retain(|&i| i.index() < b_start.index());
+            let inverse_memory_index_a = inverse_memory_index;
+
+            // Since `inverse_memory_index_{a,b}` each only refer to their
+            // respective fields, they can be safely inverted
+            let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
+            let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();
+
+            let outer_fields =
+                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
+            (outer_fields, offsets_b, memory_index_b)
+        }
+        _ => unreachable!(),
+    };
+
+    let mut size = prefix.size;
+    let mut align = prefix.align;
+    let variants = variant_fields
+        .iter_enumerated()
+        .map(|(index, variant_fields)| {
+            // Only include overlap-eligible fields when we compute our variant layout.
+            let variant_only_tys = variant_fields
+                .iter()
+                .filter(|local| match assignments[**local] {
+                    Unassigned => unreachable!(),
+                    Assigned(v) if v == index => true,
+                    Assigned(_) => unreachable!("assignment does not match variant"),
+                    Ineligible(_) => false,
+                })
+                .map(|local| local_layouts[*local]);
+
+            let mut variant = calc.univariant(
+                &variant_only_tys.collect::<IndexVec<_, _>>(),
+                &ReprOptions::default(),
+                StructKind::Prefixed(prefix_size, prefix_align.abi),
+            )?;
+            variant.variants = Variants::Single { index };
+
+            let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
+                unreachable!();
+            };
+
+            // Now, stitch the promoted and variant-only fields back together in
+            // the order they are mentioned by our CoroutineLayout.
+            // Because we only use some subset (that can differ between variants)
+            // of the promoted fields, we can't just pick those elements of the
+            // `promoted_memory_index` (as we'd end up with gaps).
+            // So instead, we build an "inverse memory_index", as if all of the
+            // promoted fields were being used, but leave the elements not in the
+            // subset as `invalid_field_idx`, which we can filter out later to
+            // obtain a valid (bijective) mapping.
+            let invalid_field_idx = promoted_memory_index.len() + memory_index.len();
+            let mut combined_inverse_memory_index =
+                IndexVec::from_elem_n(FieldIdx::new(invalid_field_idx), invalid_field_idx);
+
+            let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
+            let combined_offsets = variant_fields
+                .iter_enumerated()
+                .map(|(i, local)| {
+                    let (offset, memory_index) = match assignments[*local] {
+                        Unassigned => unreachable!(),
+                        Assigned(_) => {
+                            let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
+                            (offset, promoted_memory_index.len() as u32 + memory_index)
+                        }
+                        Ineligible(field_idx) => {
+                            let field_idx = field_idx.unwrap();
+                            (promoted_offsets[field_idx], promoted_memory_index[field_idx])
+                        }
+                    };
+                    combined_inverse_memory_index[memory_index] = i;
+                    offset
+                })
+                .collect();
+
+            // Remove the unused slots and invert the mapping to obtain the
+            // combined `memory_index` (also see previous comment).
+            combined_inverse_memory_index.raw.retain(|&i| i.index() != invalid_field_idx);
+            let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();
+
+            variant.fields = FieldsShape::Arbitrary {
+                offsets: combined_offsets,
+                memory_index: combined_memory_index,
+            };
+
+            size = size.max(variant.size);
+            align = align.max(variant.align);
+            Ok(variant)
+        })
+        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+    size = size.align_to(align.abi);
+
+    let uninhabited = prefix.uninhabited || variants.iter().all(|v| v.is_uninhabited());
+    let abi = BackendRepr::Memory { sized: true };
+
+    Ok(LayoutData {
+        variants: Variants::Multiple {
+            tag,
+            tag_encoding: TagEncoding::Direct,
+            tag_field: tag_index,
+            variants,
+        },
+        fields: outer_fields,
+        backend_repr: abi,
+        // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
+        // self-referentiality), getting the discriminant can cause aliasing violations.
+        // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
+        // would do the same for us here.
+        // See <https://github.com/rust-lang/rust/issues/63818>, <https://github.com/rust-lang/miri/issues/3780>.
+        // FIXME: Remove when <https://github.com/rust-lang/rust/issues/125735> is implemented and aliased coroutine fields are wrapped in `UnsafePinned`.
+        largest_niche: None,
+        uninhabited,
+        size,
+        align,
+        max_repr_align: None,
+        unadjusted_abi_align: align.abi,
+        randomization_seed: Default::default(),
+    })
+}
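
To make the greedy heuristic above concrete, here is a small, self-contained sketch of the two eligibility passes using plain `std` types instead of `rustc_index`'s `IndexVec`/`DenseBitSet`/`BitMatrix` (locals and variants are plain `usize` indices, and the demotion tie-break is simplified as noted in the comments):

    use std::collections::HashSet;

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Eligibility {
        Unassigned,
        Assigned(usize), // variant index
        Ineligible,      // stored in the common prefix
    }

    /// Simplified analogue of `coroutine_saved_local_eligibility`:
    /// `variant_fields[v]` lists the saved locals used by variant `v`, and
    /// `conflicts` holds pairs of locals that are storage-live at the same time.
    fn eligibility(
        nb_locals: usize,
        variant_fields: &[Vec<usize>],
        conflicts: &HashSet<(usize, usize)>,
    ) -> Vec<Eligibility> {
        use Eligibility::*;
        let mut assign = vec![Unassigned; nb_locals];

        // Pass 1: a local used by more than one variant must live in the prefix.
        for (variant, fields) in variant_fields.iter().enumerate() {
            for &local in fields {
                assign[local] = match assign[local] {
                    Unassigned => Assigned(variant),
                    _ => Ineligible,
                };
            }
        }

        // Pass 2: two locals that are live at the same time cannot share storage
        // unless they end up in the same variant; greedily demote one of them.
        // (The real code demotes whichever local has more conflicts overall.)
        for &(a, b) in conflicts {
            if assign[a] != Ineligible && assign[b] != Ineligible && assign[a] != assign[b] {
                assign[b] = Ineligible;
            }
        }
        assign
    }

    fn main() {
        // Local 0 is used by both variants -> prefix; locals 1 and 2 may overlap.
        let variants = vec![vec![0, 1], vec![0, 2]];
        let conflicts = HashSet::from([(0, 1), (0, 2)]);
        println!("{:?}", eligibility(3, &variants, &conflicts));
    }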

compiler/rustc_abi/src/layout/simple.rs (new file)

Lines changed: 148 additions & 0 deletions
@@ -0,0 +1,148 @@
+use std::num::NonZero;
+
+use rustc_hashes::Hash64;
+use rustc_index::{Idx, IndexVec};
+
+use crate::{
+    BackendRepr, FieldsShape, HasDataLayout, LayoutData, Niche, Primitive, Scalar, Size, Variants,
+};
+
+/// "Simple" layout constructors that cannot fail.
+impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
+    pub fn unit<C: HasDataLayout>(cx: &C, sized: bool) -> Self {
+        let dl = cx.data_layout();
+        LayoutData {
+            variants: Variants::Single { index: VariantIdx::new(0) },
+            fields: FieldsShape::Arbitrary {
+                offsets: IndexVec::new(),
+                memory_index: IndexVec::new(),
+            },
+            backend_repr: BackendRepr::Memory { sized },
+            largest_niche: None,
+            uninhabited: false,
+            align: dl.i8_align,
+            size: Size::ZERO,
+            max_repr_align: None,
+            unadjusted_abi_align: dl.i8_align.abi,
+            randomization_seed: Hash64::new(0),
+        }
+    }
+
+    pub fn never_type<C: HasDataLayout>(cx: &C) -> Self {
+        let dl = cx.data_layout();
+        // This is also used for uninhabited enums, so we use `Variants::Empty`.
+        LayoutData {
+            variants: Variants::Empty,
+            fields: FieldsShape::Primitive,
+            backend_repr: BackendRepr::Memory { sized: true },
+            largest_niche: None,
+            uninhabited: true,
+            align: dl.i8_align,
+            size: Size::ZERO,
+            max_repr_align: None,
+            unadjusted_abi_align: dl.i8_align.abi,
+            randomization_seed: Hash64::ZERO,
+        }
+    }
+
+    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
+        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
+        let size = scalar.size(cx);
+        let align = scalar.align(cx);
+
+        let range = scalar.valid_range(cx);
+
+        // All primitive types for which we don't have subtype coercions should get a distinct seed,
+        // so that types wrapping them can use randomization to arrive at distinct layouts.
+        //
+        // Some type information is already lost at this point, so as an approximation we derive
+        // the seed from what remains. For example on 64-bit targets usize and u64 can no longer
+        // be distinguished.
+        let randomization_seed = size
+            .bytes()
+            .wrapping_add(
+                match scalar.primitive() {
+                    Primitive::Int(_, true) => 1,
+                    Primitive::Int(_, false) => 2,
+                    Primitive::Float(_) => 3,
+                    Primitive::Pointer(_) => 4,
+                } << 32,
+            )
+            // distinguishes references from pointers
+            .wrapping_add((range.start as u64).rotate_right(16))
+            // distinguishes char from u32 and bool from u8
+            .wrapping_add((range.end as u64).rotate_right(16));
+
+        LayoutData {
+            variants: Variants::Single { index: VariantIdx::new(0) },
+            fields: FieldsShape::Primitive,
+            backend_repr: BackendRepr::Scalar(scalar),
+            largest_niche,
+            uninhabited: false,
+            size,
+            align,
+            max_repr_align: None,
+            unadjusted_abi_align: align.abi,
+            randomization_seed: Hash64::new(randomization_seed),
+        }
+    }
+
+    pub fn scalar_pair<C: HasDataLayout>(cx: &C, a: Scalar, b: Scalar) -> Self {
+        let dl = cx.data_layout();
+        let b_align = b.align(dl);
+        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
+        let b_offset = a.size(dl).align_to(b_align.abi);
+        let size = (b_offset + b.size(dl)).align_to(align.abi);
+
+        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
+        // returns the last maximum.
+        let largest_niche = Niche::from_scalar(dl, b_offset, b)
+            .into_iter()
+            .chain(Niche::from_scalar(dl, Size::ZERO, a))
+            .max_by_key(|niche| niche.available(dl));
+
+        let combined_seed = a.size(dl).bytes().wrapping_add(b.size(dl).bytes());
+
+        LayoutData {
+            variants: Variants::Single { index: VariantIdx::new(0) },
+            fields: FieldsShape::Arbitrary {
+                offsets: [Size::ZERO, b_offset].into(),
+                memory_index: [0, 1].into(),
+            },
+            backend_repr: BackendRepr::ScalarPair(a, b),
+            largest_niche,
+            uninhabited: false,
+            align,
+            size,
+            max_repr_align: None,
+            unadjusted_abi_align: align.abi,
+            randomization_seed: Hash64::new(combined_seed),
+        }
+    }
+
+    /// Returns a dummy layout for an uninhabited variant.
+    ///
+    /// Uninhabited variants get pruned as part of the layout calculation,
+    /// so this can be used after the fact to reconstitute a layout.
+    pub fn uninhabited_variant<C: HasDataLayout>(cx: &C, index: VariantIdx, fields: usize) -> Self {
+        let dl = cx.data_layout();
+        LayoutData {
+            variants: Variants::Single { index },
+            fields: match NonZero::new(fields) {
+                Some(fields) => FieldsShape::Union(fields),
+                None => FieldsShape::Arbitrary {
+                    offsets: IndexVec::new(),
+                    memory_index: IndexVec::new(),
+                },
+            },
+            backend_repr: BackendRepr::Memory { sized: true },
+            largest_niche: None,
+            uninhabited: true,
+            align: dl.i8_align,
+            size: Size::ZERO,
+            max_repr_align: None,
+            unadjusted_abi_align: dl.i8_align.abi,
+            randomization_seed: Hash64::ZERO,
+        }
+    }
+}
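
The randomization seed in `scalar` above is plain wrapping integer arithmetic; the following self-contained sketch replays it for `u32` versus `char` (both 4-byte unsigned-integer scalars that differ only in their valid range, as in the code above) to show how the range terms keep their seeds apart. The primitive tag `2` corresponds to the `Primitive::Int(_, false)` arm of the match above; everything else here is illustrative.

    /// Mirror of the seed computation in `LayoutData::scalar`, with the scalar reduced to
    /// (size in bytes, primitive tag, valid-range start, valid-range end).
    fn scalar_seed(size_bytes: u64, primitive_tag: u64, range_start: u64, range_end: u64) -> u64 {
        size_bytes
            .wrapping_add(primitive_tag << 32)
            // distinguishes references from pointers
            .wrapping_add(range_start.rotate_right(16))
            // distinguishes char from u32 and bool from u8
            .wrapping_add(range_end.rotate_right(16))
    }

    fn main() {
        // u32: full 32-bit range.
        let u32_seed = scalar_seed(4, 2, 0, u32::MAX as u64);
        // char: same size and primitive, but valid range 0..=0x10FFFF.
        let char_seed = scalar_seed(4, 2, 0, 0x10FFFF);
        assert_ne!(u32_seed, char_seed);
        println!("u32 seed: {u32_seed:#x}, char seed: {char_seed:#x}");
    }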

‎compiler/rustc_abi/src/layout/ty.rs

Lines changed: 6 additions & 0 deletions
@@ -150,6 +150,12 @@ impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
     }
 }
 
+impl<'a, Ty> AsRef<LayoutData<FieldIdx, VariantIdx>> for TyAndLayout<'a, Ty> {
+    fn as_ref(&self) -> &LayoutData<FieldIdx, VariantIdx> {
+        &*self.layout.0.0
+    }
+}
+
 /// Trait that needs to be implemented by the higher-level type representation
 /// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
 pub trait TyAbiInterface<'a, C>: Sized + std::fmt::Debug {
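
This `AsRef` impl is what lets a `TyAndLayout` be passed straight into the new `LayoutCalculator` entry points whose element parameter is bounded by `AsRef<LayoutData<FieldIdx, VariantIdx>>`, such as `simd_type` earlier in this commit. A hedged, hypothetical call site:

    // `e_ly: TyAndLayout<'_, Ty>` satisfies the
    // `F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug` bound of `simd_type`.
    let vector_layout = calc.simd_type(e_ly, lanes, repr_packed)?;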

‎compiler/rustc_abi/src/lib.rs

Lines changed: 7 additions & 42 deletions
@@ -204,6 +204,13 @@ impl ReprOptions {
     }
 }
 
+/// The maximum supported number of lanes in a SIMD vector.
+///
+/// This value is selected based on backend support:
+/// * LLVM does not appear to have a vector width limit.
+/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
+pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
+
 /// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
 /// for a target, which contains everything needed to compute layouts.
 #[derive(Debug, PartialEq, Eq)]
@@ -1743,48 +1750,6 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     pub fn is_uninhabited(&self) -> bool {
         self.uninhabited
     }
-
-    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
-        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
-        let size = scalar.size(cx);
-        let align = scalar.align(cx);
-
-        let range = scalar.valid_range(cx);
-
-        // All primitive types for which we don't have subtype coercions should get a distinct seed,
-        // so that types wrapping them can use randomization to arrive at distinct layouts.
-        //
-        // Some type information is already lost at this point, so as an approximation we derive
-        // the seed from what remains. For example on 64-bit targets usize and u64 can no longer
-        // be distinguished.
-        let randomization_seed = size
-            .bytes()
-            .wrapping_add(
-                match scalar.primitive() {
-                    Primitive::Int(_, true) => 1,
-                    Primitive::Int(_, false) => 2,
-                    Primitive::Float(_) => 3,
-                    Primitive::Pointer(_) => 4,
-                } << 32,
-            )
-            // distinguishes references from pointers
-            .wrapping_add((range.start as u64).rotate_right(16))
-            // distinguishes char from u32 and bool from u8
-            .wrapping_add((range.end as u64).rotate_right(16));
-
-        LayoutData {
-            variants: Variants::Single { index: VariantIdx::new(0) },
-            fields: FieldsShape::Primitive,
-            backend_repr: BackendRepr::Scalar(scalar),
-            largest_niche,
-            uninhabited: false,
-            size,
-            align,
-            max_repr_align: None,
-            unadjusted_abi_align: align.abi,
-            randomization_seed: Hash64::new(randomization_seed),
-        }
-    }
 }
 
 impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
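
A quick, illustrative sanity check on the constant: `1 << 0xF` is 32,768 lanes, the largest lane count whose base-2 log still fits in Cranelift's 4-bit field.

    fn main() {
        const MAX_SIMD_LANES: u64 = 1 << 0xF; // same value as the rustc_abi constant above
        assert_eq!(MAX_SIMD_LANES, 32_768);
        // log2(32768) = 15, the largest value a 4-bit integer can hold.
        assert_eq!(MAX_SIMD_LANES.ilog2(), 15);
        assert!(MAX_SIMD_LANES.ilog2() <= 0b1111);
    }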

‎compiler/rustc_middle/src/ty/layout.rs

Lines changed: 13 additions & 34 deletions
@@ -1,20 +1,17 @@
-use std::num::NonZero;
 use std::ops::Bound;
 use std::{cmp, fmt};
 
 use rustc_abi::{
-    AddressSpace, Align, BackendRepr, ExternAbi, FieldIdx, FieldsShape, HasDataLayout, LayoutData,
-    PointeeInfo, PointerKind, Primitive, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout,
+    AddressSpace, Align, ExternAbi, FieldIdx, FieldsShape, HasDataLayout, LayoutData, PointeeInfo,
+    PointerKind, Primitive, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout,
     TyAbiInterface, VariantIdx, Variants,
 };
 use rustc_error_messages::DiagMessage;
 use rustc_errors::{
     Diag, DiagArgValue, DiagCtxtHandle, Diagnostic, EmissionGuarantee, IntoDiagArg, Level,
 };
-use rustc_hashes::Hash64;
 use rustc_hir::LangItem;
 use rustc_hir::def_id::DefId;
-use rustc_index::IndexVec;
 use rustc_macros::{HashStable, TyDecodable, TyEncodable, extension};
 use rustc_session::config::OptLevel;
 use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span, Symbol, sym};
@@ -185,12 +182,7 @@ pub const WIDE_PTR_ADDR: usize = 0;
 /// - For a slice, this is the length.
 pub const WIDE_PTR_EXTRA: usize = 1;
 
-/// The maximum supported number of lanes in a SIMD vector.
-///
-/// This value is selected based on backend support:
-/// * LLVM does not appear to have a vector width limit.
-/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
-pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
+pub const MAX_SIMD_LANES: u64 = rustc_abi::MAX_SIMD_LANES;
 
 /// Used in `check_validity_requirement` to indicate the kind of initialization
 /// that is checked to be valid
@@ -762,11 +754,9 @@
         variant_index: VariantIdx,
     ) -> TyAndLayout<'tcx> {
         let layout = match this.variants {
-            Variants::Single { index }
-                // If all variants but one are uninhabited, the variant layout is the enum layout.
-                if index == variant_index =>
-            {
-                this.layout
+            // If all variants but one are uninhabited, the variant layout is the enum layout.
+            Variants::Single { index } if index == variant_index => {
+                return this;
             }
 
             Variants::Single { .. } | Variants::Empty => {
@@ -783,29 +773,18 @@
                 }
 
                 let fields = match this.ty.kind() {
-                    ty::Adt(def, _) if def.variants().is_empty() =>
-                        bug!("for_variant called on zero-variant enum {}", this.ty),
+                    ty::Adt(def, _) if def.variants().is_empty() => {
+                        bug!("for_variant called on zero-variant enum {}", this.ty)
+                    }
                     ty::Adt(def, _) => def.variant(variant_index).fields.len(),
                     _ => bug!("`ty_and_layout_for_variant` on unexpected type {}", this.ty),
                 };
-                tcx.mk_layout(LayoutData {
-                    variants: Variants::Single { index: variant_index },
-                    fields: match NonZero::new(fields) {
-                        Some(fields) => FieldsShape::Union(fields),
-                        None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() },
-                    },
-                    backend_repr: BackendRepr::Memory { sized: true },
-                    largest_niche: None,
-                    uninhabited: true,
-                    align: tcx.data_layout.i8_align,
-                    size: Size::ZERO,
-                    max_repr_align: None,
-                    unadjusted_abi_align: tcx.data_layout.i8_align.abi,
-                    randomization_seed: Hash64::ZERO,
-                })
+                tcx.mk_layout(LayoutData::uninhabited_variant(cx, variant_index, fields))
             }
 
-            Variants::Multiple { ref variants, .. } => cx.tcx().mk_layout(variants[variant_index].clone()),
+            Variants::Multiple { ref variants, .. } => {
+                cx.tcx().mk_layout(variants[variant_index].clone())
+            }
         };
 
         assert_eq!(*layout.variants(), Variants::Single { index: variant_index });

‎compiler/rustc_ty_utils/src/layout.rs

Lines changed: 96 additions & 491 deletions
Large diffs are not rendered by default.

‎src/tools/rust-analyzer/crates/hir-ty/src/layout.rs

Lines changed: 26 additions & 109 deletions
@@ -6,24 +6,22 @@ use base_db::ra_salsa::Cycle;
 use chalk_ir::{AdtId, FloatTy, IntTy, TyKind, UintTy};
 use hir_def::{
     layout::{
-        BackendRepr, FieldsShape, Float, Integer, LayoutCalculator, LayoutCalculatorError,
-        LayoutData, Primitive, ReprOptions, Scalar, Size, StructKind, TargetDataLayout,
+        Float, Integer, LayoutCalculator, LayoutCalculatorError,
+        LayoutData, Primitive, ReprOptions, Scalar, StructKind, TargetDataLayout,
         WrappingRange,
     },
     LocalFieldId, StructId,
 };
 use la_arena::{Idx, RawIdx};
 use rustc_abi::AddressSpace;
-use rustc_hashes::Hash64;
-use rustc_index::{IndexSlice, IndexVec};
+use rustc_index::IndexVec;
 
 use triomphe::Arc;
 
 use crate::{
     consteval::try_const_usize,
     db::{HirDatabase, InternedClosure},
     infer::normalize,
-    layout::adt::struct_variant_idx,
     utils::ClosureSubst,
     Interner, ProjectionTy, Substitution, TraitEnvironment, Ty,
 };
@@ -125,10 +123,10 @@ impl<'a> LayoutCx<'a> {
     }
 }
 
-// FIXME: move this to the `rustc_abi`.
 fn layout_of_simd_ty(
     db: &dyn HirDatabase,
     id: StructId,
+    repr_packed: bool,
     subst: &Substitution,
     env: Arc<TraitEnvironment>,
     dl: &TargetDataLayout,
@@ -149,33 +147,10 @@ fn layout_of_simd_ty(
     };
 
     let e_len = try_const_usize(db, &e_len).ok_or(LayoutError::HasErrorConst)? as u64;
-
-    // Compute the ABI of the element type:
     let e_ly = db.layout_of_ty(e_ty, env)?;
-    let BackendRepr::Scalar(e_abi) = e_ly.backend_repr else {
-        return Err(LayoutError::Unknown);
-    };
 
-    // Compute the size and alignment of the vector:
-    let size = e_ly
-        .size
-        .checked_mul(e_len, dl)
-        .ok_or(LayoutError::BadCalc(LayoutCalculatorError::SizeOverflow))?;
-    let align = dl.llvmlike_vector_align(size);
-    let size = size.align_to(align.abi);
-
-    Ok(Arc::new(Layout {
-        variants: Variants::Single { index: struct_variant_idx() },
-        fields: FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() },
-        backend_repr: BackendRepr::SimdVector { element: e_abi, count: e_len },
-        largest_niche: e_ly.largest_niche,
-        uninhabited: false,
-        size,
-        align,
-        max_repr_align: None,
-        unadjusted_abi_align: align.abi,
-        randomization_seed: Hash64::ZERO,
-    }))
+    let cx = LayoutCx::new(dl);
+    Ok(Arc::new(cx.calc.simd_type(e_ly, e_len, repr_packed)?))
 }
 
 pub fn layout_of_ty_query(
@@ -190,13 +165,14 @@ pub fn layout_of_ty_query(
     let dl = &*target;
     let cx = LayoutCx::new(dl);
     let ty = normalize(db, trait_env.clone(), ty);
-    let result = match ty.kind(Interner) {
+    let kind = ty.kind(Interner);
+    let result = match kind {
         TyKind::Adt(AdtId(def), subst) => {
             if let hir_def::AdtId::StructId(s) = def {
                 let data = db.struct_data(*s);
                 let repr = data.repr.unwrap_or_default();
                 if repr.simd() {
-                    return layout_of_simd_ty(db, *s, subst, trait_env, &target);
+                    return layout_of_simd_ty(db, *s, repr.packed(), subst, trait_env, &target);
                 }
             };
             return db.layout_of_adt(*def, subst.clone(), trait_env);
@@ -216,7 +192,7 @@ pub fn layout_of_ty_query(
                     valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                 },
             ),
-            chalk_ir::Scalar::Int(i) => scalar(
+            chalk_ir::Scalar::Int(i) => Layout::scalar(dl, scalar_unit(
                 dl,
                 Primitive::Int(
                     match i {
@@ -229,8 +205,8 @@ pub fn layout_of_ty_query(
                     },
                     true,
                 ),
-            ),
-            chalk_ir::Scalar::Uint(i) => scalar(
+            )),
+            chalk_ir::Scalar::Uint(i) => Layout::scalar(dl, scalar_unit(
                 dl,
                 Primitive::Int(
                     match i {
@@ -243,16 +219,16 @@ pub fn layout_of_ty_query(
                     },
                     false,
                 ),
-            ),
-            chalk_ir::Scalar::Float(f) => scalar(
+            )),
+            chalk_ir::Scalar::Float(f) => Layout::scalar(dl, scalar_unit(
                 dl,
                 Primitive::Float(match f {
                     FloatTy::F16 => Float::F16,
                     FloatTy::F32 => Float::F32,
                     FloatTy::F64 => Float::F64,
                     FloatTy::F128 => Float::F128,
                 }),
-            ),
+            )),
         },
         TyKind::Tuple(len, tys) => {
             let kind = if *len == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
@@ -268,56 +244,16 @@ pub fn layout_of_ty_query(
         TyKind::Array(element, count) => {
             let count = try_const_usize(db, count).ok_or(LayoutError::HasErrorConst)? as u64;
             let element = db.layout_of_ty(element.clone(), trait_env)?;
-            let size = element
-                .size
-                .checked_mul(count, dl)
-                .ok_or(LayoutError::BadCalc(LayoutCalculatorError::SizeOverflow))?;
-
-            let backend_repr = BackendRepr::Memory { sized: true };
-
-            let largest_niche = if count != 0 { element.largest_niche } else { None };
-            let uninhabited = if count != 0 { element.uninhabited } else { false };
-
-            Layout {
-                variants: Variants::Single { index: struct_variant_idx() },
-                fields: FieldsShape::Array { stride: element.size, count },
-                backend_repr,
-                largest_niche,
-                uninhabited,
-                align: element.align,
-                size,
-                max_repr_align: None,
-                unadjusted_abi_align: element.align.abi,
-                randomization_seed: Hash64::ZERO,
-            }
+            cx.calc.array_like::<_, _, ()>(&element, Some(count))?
         }
         TyKind::Slice(element) => {
             let element = db.layout_of_ty(element.clone(), trait_env)?;
-            Layout {
-                variants: Variants::Single { index: struct_variant_idx() },
-                fields: FieldsShape::Array { stride: element.size, count: 0 },
-                backend_repr: BackendRepr::Memory { sized: false },
-                largest_niche: None,
-                uninhabited: false,
-                align: element.align,
-                size: Size::ZERO,
-                max_repr_align: None,
-                unadjusted_abi_align: element.align.abi,
-                randomization_seed: Hash64::ZERO,
-            }
+            cx.calc.array_like::<_, _, ()>(&element, None)?
+        }
+        TyKind::Str => {
+            let element = scalar_unit(dl, Primitive::Int(Integer::I8, false));
+            cx.calc.array_like::<_, _, ()>(&Layout::scalar(dl, element), None)?
         }
-        TyKind::Str => Layout {
-            variants: Variants::Single { index: struct_variant_idx() },
-            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
-            backend_repr: BackendRepr::Memory { sized: false },
-            largest_niche: None,
-            uninhabited: false,
-            align: dl.i8_align,
-            size: Size::ZERO,
-            max_repr_align: None,
-            unadjusted_abi_align: dl.i8_align.abi,
-            randomization_seed: Hash64::ZERO,
-        },
         // Potentially-wide pointers.
         TyKind::Ref(_, _, pointee) | TyKind::Raw(_, pointee) => {
             let mut data_ptr = scalar_unit(dl, Primitive::Pointer(AddressSpace::DATA));
@@ -355,17 +291,12 @@ pub fn layout_of_ty_query(
             };
 
             // Effectively a (ptr, meta) tuple.
-            cx.calc.scalar_pair(data_ptr, metadata)
+            LayoutData::scalar_pair(dl, data_ptr, metadata)
         }
-        TyKind::FnDef(_, _) => layout_of_unit(&cx)?,
-        TyKind::Never => cx.calc.layout_of_never_type(),
-        TyKind::Dyn(_) | TyKind::Foreign(_) => {
-            let mut unit = layout_of_unit(&cx)?;
-            match &mut unit.backend_repr {
-                BackendRepr::Memory { sized } => *sized = false,
-                _ => return Err(LayoutError::Unknown),
-            }
-            unit
+        TyKind::Never => LayoutData::never_type(dl),
+        TyKind::FnDef(..) | TyKind::Dyn(_) | TyKind::Foreign(_) => {
+            let sized = matches!(kind, TyKind::FnDef(..));
+            LayoutData::unit(dl, sized)
         }
         TyKind::Function(_) => {
            let mut ptr = scalar_unit(dl, Primitive::Pointer(dl.instruction_address_space));
@@ -434,16 +365,6 @@ pub fn layout_of_ty_recover(
     Err(LayoutError::RecursiveTypeWithoutIndirection)
 }
 
-fn layout_of_unit(cx: &LayoutCx<'_>) -> Result<Layout, LayoutError> {
-    cx.calc
-        .univariant::<RustcFieldIdx, RustcEnumVariantIdx, &&Layout>(
-            IndexSlice::empty(),
-            &ReprOptions::default(),
-            StructKind::AlwaysSized,
-        )
-        .map_err(Into::into)
-}
-
 fn struct_tail_erasing_lifetimes(db: &dyn HirDatabase, pointee: Ty) -> Ty {
     match pointee.kind(Interner) {
         TyKind::Adt(AdtId(hir_def::AdtId::StructId(i)), subst) => {
@@ -474,9 +395,5 @@ fn scalar_unit(dl: &TargetDataLayout, value: Primitive) -> Scalar {
     Scalar::Initialized { value, valid_range: WrappingRange::full(value.size(dl)) }
 }
 
-fn scalar(dl: &TargetDataLayout, value: Primitive) -> Layout {
-    Layout::scalar(dl, scalar_unit(dl, value))
-}
-
 #[cfg(test)]
 mod tests;
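
Seen together with the `array_like` body added earlier in this commit, the `count_if_sized` argument is what distinguishes these rust-analyzer callers; a condensed, hedged restatement of the mapping (not a literal quote of either side):

    // Behaviour of `count_if_sized` in `LayoutCalculator::array_like`, paraphrased:
    //   Some(n) => FieldsShape::Array { stride: element.size, count: n },
    //              BackendRepr::Memory { sized: true },  size = n * element.size (checked)
    //   None    => FieldsShape::Array { stride: element.size, count: 0 },
    //              BackendRepr::Memory { sized: false }, size = 0, no niche
    let array = cx.calc.array_like::<_, _, ()>(&element, Some(count))?; // [T; N]
    let slice = cx.calc.array_like::<_, _, ()>(&element, None)?;        // [T], str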

‎src/tools/rust-analyzer/crates/hir-ty/src/layout/adt.rs

Lines changed: 1 addition & 5 deletions
@@ -16,16 +16,12 @@ use triomphe::Arc;
 use crate::{
     db::HirDatabase,
     lang_items::is_unsafe_cell,
-    layout::{field_ty, Layout, LayoutError, RustcEnumVariantIdx},
+    layout::{field_ty, Layout, LayoutError},
     Substitution, TraitEnvironment,
 };
 
 use super::LayoutCx;
 
-pub(crate) fn struct_variant_idx() -> RustcEnumVariantIdx {
-    RustcEnumVariantIdx(0)
-}
-
 pub fn layout_of_adt_query(
     db: &dyn HirDatabase,
     def: AdtId,

‎src/tools/rust-analyzer/crates/hir-ty/src/lib.rs

Lines changed: 0 additions & 3 deletions
@@ -12,9 +12,6 @@ extern crate ra_ap_rustc_index as rustc_index;
 #[cfg(feature = "in-rust-tree")]
 extern crate rustc_abi;
 
-#[cfg(feature = "in-rust-tree")]
-extern crate rustc_hashes;
-
 #[cfg(not(feature = "in-rust-tree"))]
 extern crate ra_ap_rustc_abi as rustc_abi;
 

‎tests/ui/async-await/in-trait/indirect-recursion-issue-112047.stderr

Lines changed: 3 additions & 0 deletions
@@ -3,6 +3,9 @@ error[E0733]: recursion in an async fn requires boxing
    |
 LL |     async fn second(self) {
    |     ^^^^^^^^^^^^^^^^^^^^^
+LL |
+LL |         self.first().await.second().await;
+   |         --------------------------------- recursive call here
    |
    = note: a recursive `async fn` call must introduce indirection such as `Box::pin` to avoid an infinitely sized future
 

‎tests/ui/layout/post-mono-layout-cycle-2.rs

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-//@ build-fail
+//@ check-fail
 //@ edition: 2021
 
 use std::future::Future;

‎tests/ui/layout/post-mono-layout-cycle-2.stderr

Lines changed: 0 additions & 6 deletions
@@ -12,12 +12,6 @@ LL | Blah::iter(self, iterator).await
    |
    = note: a recursive `async fn` call must introduce indirection such as `Box::pin` to avoid an infinitely sized future
 
-note: the above error was encountered while instantiating `fn Wrap::<()>::ice`
-  --> $DIR/post-mono-layout-cycle-2.rs:54:9
-   |
-LL |         t.ice();
-   |         ^^^^^^^
-
 error: aborting due to 1 previous error
 
 For more information about this error, try `rustc --explain E0733`.
