Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 875f416

Browse files
committed Mar 20, 2025
Auto merge of #138759 - scottmcm:operand-builder, r=<try>
Allow `enum` and `union` literals to also create SSA values Today, `Some(x)` always goes through an `alloca`, even in trivial cases where the niching means the constructor doesn't even change the value. For example, <https://rust.godbolt.org/z/6KG6PqoYz> ```rust pub fn demo(r: &i32) -> Option<&i32> { Some(r) } ``` currently emits the IR ```llvm define align 4 ptr `@demo(ptr` align 4 %r) unnamed_addr { start: %_0 = alloca [8 x i8], align 8 store ptr %r, ptr %_0, align 8 %0 = load ptr, ptr %_0, align 8 ret ptr %0 } ``` but with this PR it becomes just ```llvm define align 4 ptr `@demo(ptr` align 4 %r) unnamed_addr { start: ret ptr %r } ``` (Of course the optimizer can clean that up, but it'd be nice if it didn't have to -- especially in debug where it doesn't run. This is like #123886, but that only handled non-simd `struct`s -- this PR generalizes it to all non-simd ADTs.) There's two commits you can review independently: 1. The first is simplifying how the aggregate handling works. Past-me wrote something overly complicated, needing arrayvecs and zipping, depending on a careful iteration order of the fields, and fragile enough that even for just structs it needed extra double-checks to make sure it even made the right variant. It's replaced with something far more direct that works just like `extract_field`: use the offset to put it in exactly the correct immediate in the `OperandValue`. This doesn't support anything new, just refactors -- including moving some things off `FunctionCx` that had no reason to be there. (I have no idea why my past self put them there.) 2. The second extends that work to support more ADTs. That means handing variants other than `FIRST_VARIANT`, handling the active field for unions, refactoring the discriminant code so the Place and Operand parts can share the calculation, etc.
2 parents d8e44b7 + aa8ceb8 commit 875f416

File tree

11 files changed

+541
-239
lines changed

11 files changed

+541
-239
lines changed
 

‎Cargo.lock

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3404,11 +3404,9 @@ name = "rustc_codegen_ssa"
34043404
version = "0.0.0"
34053405
dependencies = [
34063406
"ar_archive_writer",
3407-
"arrayvec",
34083407
"bitflags",
34093408
"bstr",
34103409
"cc",
3411-
"either",
34123410
"itertools",
34133411
"libc",
34143412
"object 0.36.7",

‎compiler/rustc_codegen_ssa/Cargo.toml

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,11 @@ edition = "2024"
66
[dependencies]
77
# tidy-alphabetical-start
88
ar_archive_writer = "0.4.2"
9-
arrayvec = { version = "0.7", default-features = false }
109
bitflags = "2.4.1"
1110
bstr = "1.11.3"
1211
# Pinned so `cargo update` bumps don't cause breakage. Please also update the
1312
# `cc` in `rustc_llvm` if you update the `cc` here.
1413
cc = "=1.2.16"
15-
either = "1.5.0"
1614
itertools = "0.12"
1715
pathdiff = "0.2.0"
1816
regex = "1.4"

‎compiler/rustc_codegen_ssa/src/mir/operand.rs

Lines changed: 211 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,15 @@
11
use std::fmt;
22

3-
use arrayvec::ArrayVec;
4-
use either::Either;
53
use rustc_abi as abi;
6-
use rustc_abi::{Align, BackendRepr, FIRST_VARIANT, Primitive, Size, TagEncoding, Variants};
4+
use rustc_abi::{
5+
Align, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, TagEncoding, VariantIdx, Variants,
6+
};
77
use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
88
use rustc_middle::mir::{self, ConstValue};
99
use rustc_middle::ty::Ty;
1010
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
1111
use rustc_middle::{bug, span_bug};
12+
use rustc_session::config::OptLevel;
1213
use tracing::{debug, instrument};
1314

1415
use super::place::{PlaceRef, PlaceValue};
@@ -62,31 +63,6 @@ pub enum OperandValue<V> {
6263
}
6364

6465
impl<V: CodegenObject> OperandValue<V> {
65-
/// If this is ZeroSized/Immediate/Pair, return an array of the 0/1/2 values.
66-
/// If this is Ref, return the place.
67-
#[inline]
68-
pub(crate) fn immediates_or_place(self) -> Either<ArrayVec<V, 2>, PlaceValue<V>> {
69-
match self {
70-
OperandValue::ZeroSized => Either::Left(ArrayVec::new()),
71-
OperandValue::Immediate(a) => Either::Left(ArrayVec::from_iter([a])),
72-
OperandValue::Pair(a, b) => Either::Left([a, b].into()),
73-
OperandValue::Ref(p) => Either::Right(p),
74-
}
75-
}
76-
77-
/// Given an array of 0/1/2 immediate values, return ZeroSized/Immediate/Pair.
78-
#[inline]
79-
pub(crate) fn from_immediates(immediates: ArrayVec<V, 2>) -> Self {
80-
let mut it = immediates.into_iter();
81-
let Some(a) = it.next() else {
82-
return OperandValue::ZeroSized;
83-
};
84-
let Some(b) = it.next() else {
85-
return OperandValue::Immediate(a);
86-
};
87-
OperandValue::Pair(a, b)
88-
}
89-
9066
/// Treat this value as a pointer and return the data pointer and
9167
/// optional metadata as backend values.
9268
///
@@ -559,6 +535,123 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
559535
}
560536
}
561537
}
538+
539+
pub(crate) fn builder(layout: TyAndLayout<'tcx>) -> OperandRef<'tcx, Result<V, abi::Scalar>> {
540+
let val = match layout.backend_repr {
541+
BackendRepr::Memory { .. } if layout.is_zst() => OperandValue::ZeroSized,
542+
BackendRepr::Scalar(s) => OperandValue::Immediate(Err(s)),
543+
BackendRepr::ScalarPair(a, b) => OperandValue::Pair(Err(a), Err(b)),
544+
_ => bug!("Cannot use type in operand builder: {layout:?}"),
545+
};
546+
OperandRef { val, layout }
547+
}
548+
549+
pub(crate) fn supports_builder(layout: TyAndLayout<'tcx>) -> bool {
550+
match layout.backend_repr {
551+
BackendRepr::Memory { .. } if layout.is_zst() => true,
552+
BackendRepr::Scalar(_) | BackendRepr::ScalarPair(_, _) => true,
553+
_ => false,
554+
}
555+
}
556+
}
557+
558+
impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Result<V, abi::Scalar>> {
559+
pub(crate) fn insert_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
560+
&mut self,
561+
bx: &mut Bx,
562+
v: VariantIdx,
563+
f: FieldIdx,
564+
operand: OperandRef<'tcx, V>,
565+
) {
566+
let (expect_zst, is_zero_offset) = if let abi::FieldsShape::Primitive = self.layout.fields {
567+
// Don't ask for field layout for primitives, because that will panic.
568+
if !self.layout.uninhabited {
569+
// Real primitives only have one variant, but weird types like
570+
// `Result<!, !>` turn out to also be "Primitive", and dead code
571+
// like `Err(never)` needs to not ICE.
572+
assert_eq!(v, FIRST_VARIANT);
573+
}
574+
let first_field = f == FieldIdx::ZERO;
575+
(self.layout.is_zst() || !first_field, first_field)
576+
} else {
577+
let variant_layout = self.layout.for_variant(bx.cx(), v);
578+
let field_layout = variant_layout.field(bx.cx(), f.as_usize());
579+
let field_offset = variant_layout.fields.offset(f.as_usize());
580+
(field_layout.is_zst(), field_offset == Size::ZERO)
581+
};
582+
583+
let mut update = |tgt: &mut Result<V, abi::Scalar>, src, from_scalar| {
584+
let from_bty = bx.cx().type_from_scalar(from_scalar);
585+
let to_scalar = tgt.unwrap_err();
586+
let to_bty = bx.cx().type_from_scalar(to_scalar);
587+
let imm = transmute_immediate(bx, src, from_scalar, from_bty, to_scalar, to_bty);
588+
*tgt = Ok(imm);
589+
};
590+
591+
match (operand.val, operand.layout.backend_repr) {
592+
(OperandValue::ZeroSized, _) if expect_zst => {}
593+
(OperandValue::Immediate(v), BackendRepr::Scalar(from_scalar)) => match &mut self.val {
594+
OperandValue::Immediate(val @ Err(_)) if is_zero_offset => {
595+
update(val, v, from_scalar);
596+
}
597+
OperandValue::Pair(fst @ Err(_), _) if is_zero_offset => {
598+
update(fst, v, from_scalar);
599+
}
600+
OperandValue::Pair(_, snd @ Err(_)) if !is_zero_offset => {
601+
update(snd, v, from_scalar);
602+
}
603+
_ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
604+
},
605+
(OperandValue::Pair(a, b), BackendRepr::ScalarPair(from_sa, from_sb)) => {
606+
match &mut self.val {
607+
OperandValue::Pair(fst @ Err(_), snd @ Err(_)) => {
608+
update(fst, a, from_sa);
609+
update(snd, b, from_sb);
610+
}
611+
_ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
612+
}
613+
}
614+
_ => bug!("Unsupported operand {operand:?} inserting into {v:?}.{f:?} of {self:?}"),
615+
}
616+
}
617+
618+
pub(super) fn insert_imm(&mut self, f: FieldIdx, imm: V) {
619+
let field_offset = self.layout.fields.offset(f.as_usize());
620+
let is_zero_offset = field_offset == Size::ZERO;
621+
match &mut self.val {
622+
OperandValue::Immediate(val @ Err(_)) if is_zero_offset => {
623+
*val = Ok(imm);
624+
}
625+
OperandValue::Pair(fst @ Err(_), _) if is_zero_offset => {
626+
*fst = Ok(imm);
627+
}
628+
OperandValue::Pair(_, snd @ Err(_)) if !is_zero_offset => {
629+
*snd = Ok(imm);
630+
}
631+
_ => bug!("Tried to insert {imm:?} into field {f:?} of {self:?}"),
632+
}
633+
}
634+
635+
pub fn finalize(&self, cx: &impl CodegenMethods<'tcx, Value = V>) -> OperandRef<'tcx, V> {
636+
let OperandRef { val, layout } = *self;
637+
638+
let unwrap = |r: Result<V, abi::Scalar>| match r {
639+
Ok(v) => v,
640+
Err(s) if s.is_uninit_valid() => {
641+
let bty = cx.type_from_scalar(s);
642+
cx.const_undef(bty)
643+
}
644+
Err(_) => bug!("OperandRef::finalize called while fields are missing {self:?}"),
645+
};
646+
647+
let val = match val {
648+
OperandValue::ZeroSized => OperandValue::ZeroSized,
649+
OperandValue::Immediate(v) => OperandValue::Immediate(unwrap(v)),
650+
OperandValue::Pair(a, b) => OperandValue::Pair(unwrap(a), unwrap(b)),
651+
OperandValue::Ref(_) => bug!(),
652+
};
653+
OperandRef { val, layout }
654+
}
562655
}
563656

564657
impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
@@ -808,3 +901,93 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
808901
}
809902
}
810903
}
904+
905+
/// Transmutes one of the immediates from an [`OperandValue::Immediate`]
906+
/// or an [`OperandValue::Pair`] to an immediate of the target type.
907+
///
908+
/// `to_backend_ty` must be the *non*-immediate backend type (so it will be
909+
/// `i8`, not `i1`, for `bool`-like types.)
910+
pub(super) fn transmute_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
911+
bx: &mut Bx,
912+
mut imm: Bx::Value,
913+
from_scalar: abi::Scalar,
914+
from_backend_ty: Bx::Type,
915+
to_scalar: abi::Scalar,
916+
to_backend_ty: Bx::Type,
917+
) -> Bx::Value {
918+
assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
919+
920+
// While optimizations will remove no-op transmutes, they might still be
921+
// there in debug or things that aren't no-op in MIR because they change
922+
// the Rust type but not the underlying layout/niche.
923+
if from_scalar == to_scalar && from_backend_ty == to_backend_ty {
924+
return imm;
925+
}
926+
927+
use abi::Primitive::*;
928+
imm = bx.from_immediate(imm);
929+
930+
// If we have a scalar, we must already know its range. Either
931+
//
932+
// 1) It's a parameter with `range` parameter metadata,
933+
// 2) It's something we `load`ed with `!range` metadata, or
934+
// 3) After a transmute we `assume`d the range (see below).
935+
//
936+
// That said, last time we tried removing this, it didn't actually help
937+
// the rustc-perf results, so might as well keep doing it
938+
// <https://github.com/rust-lang/rust/pull/135610#issuecomment-2599275182>
939+
assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
940+
941+
imm = match (from_scalar.primitive(), to_scalar.primitive()) {
942+
(Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
943+
(Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
944+
(Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm),
945+
(Pointer(..), Int(..)) => {
946+
// FIXME: this exposes the provenance, which shouldn't be necessary.
947+
bx.ptrtoint(imm, to_backend_ty)
948+
}
949+
(Float(_), Pointer(..)) => {
950+
let int_imm = bx.bitcast(imm, bx.cx().type_isize());
951+
bx.ptradd(bx.const_null(bx.type_ptr()), int_imm)
952+
}
953+
(Pointer(..), Float(_)) => {
954+
// FIXME: this exposes the provenance, which shouldn't be necessary.
955+
let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
956+
bx.bitcast(int_imm, to_backend_ty)
957+
}
958+
};
959+
960+
// This `assume` remains important for cases like (a conceptual)
961+
// transmute::<u32, NonZeroU32>(x) == 0
962+
// since it's never passed to something with parameter metadata (especially
963+
// after MIR inlining) so the only way to tell the backend about the
964+
// constraint that the `transmute` introduced is to `assume` it.
965+
assume_scalar_range(bx, imm, to_scalar, to_backend_ty);
966+
967+
imm = bx.to_immediate_scalar(imm, to_scalar);
968+
imm
969+
}
970+
971+
pub(super) fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
972+
bx: &mut Bx,
973+
imm: Bx::Value,
974+
scalar: abi::Scalar,
975+
backend_ty: Bx::Type,
976+
) {
977+
if matches!(bx.cx().sess().opts.optimize, OptLevel::No) || scalar.is_always_valid(bx.cx()) {
978+
return;
979+
}
980+
981+
match scalar.primitive() {
982+
abi::Primitive::Int(..) => {
983+
let range = scalar.valid_range(bx.cx());
984+
bx.assume_integer_range(imm, backend_ty, range);
985+
}
986+
abi::Primitive::Pointer(abi::AddressSpace::DATA)
987+
if !scalar.valid_range(bx.cx()).contains(0) =>
988+
{
989+
bx.assume_nonnull(imm);
990+
}
991+
abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
992+
}
993+
}

‎compiler/rustc_codegen_ssa/src/mir/place.rs

Lines changed: 82 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
1-
use rustc_abi::{Align, BackendRepr, FieldsShape, Size, TagEncoding, VariantIdx, Variants};
1+
use rustc_abi::{
2+
Align, BackendRepr, FieldIdx, FieldsShape, Size, TagEncoding, VariantIdx, Variants,
3+
};
24
use rustc_middle::mir::PlaceTy;
35
use rustc_middle::mir::interpret::Scalar;
46
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
@@ -239,53 +241,17 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
239241
bx: &mut Bx,
240242
variant_index: VariantIdx,
241243
) {
242-
if self.layout.for_variant(bx.cx(), variant_index).is_uninhabited() {
243-
// We play it safe by using a well-defined `abort`, but we could go for immediate UB
244-
// if that turns out to be helpful.
245-
bx.abort();
246-
return;
247-
}
248-
match self.layout.variants {
249-
Variants::Empty => unreachable!("we already handled uninhabited types"),
250-
Variants::Single { index } => assert_eq!(index, variant_index),
251-
252-
Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
253-
let ptr = self.project_field(bx, tag_field);
254-
let to =
255-
self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
256-
bx.store_to_place(
257-
bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
258-
ptr.val,
259-
);
244+
match codegen_tagged_field_value(bx.cx(), variant_index, self.layout) {
245+
Err(UninhabitedVariantError) => {
246+
// We play it safe by using a well-defined `abort`, but we could go for immediate UB
247+
// if that turns out to be helpful.
248+
bx.abort();
260249
}
261-
Variants::Multiple {
262-
tag_encoding:
263-
TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
264-
tag_field,
265-
..
266-
} => {
267-
if variant_index != untagged_variant {
268-
let niche = self.project_field(bx, tag_field);
269-
let niche_llty = bx.cx().immediate_backend_type(niche.layout);
270-
let BackendRepr::Scalar(scalar) = niche.layout.backend_repr else {
271-
bug!("expected a scalar placeref for the niche");
272-
};
273-
// We are supposed to compute `niche_value.wrapping_add(niche_start)` wrapping
274-
// around the `niche`'s type.
275-
// The easiest way to do that is to do wrapping arithmetic on `u128` and then
276-
// masking off any extra bits that occur because we did the arithmetic with too many bits.
277-
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
278-
let niche_value = (niche_value as u128).wrapping_add(niche_start);
279-
let niche_value = niche_value & niche.layout.size.unsigned_int_max();
280-
281-
let niche_llval = bx.cx().scalar_to_backend(
282-
Scalar::from_uint(niche_value, niche.layout.size),
283-
scalar,
284-
niche_llty,
285-
);
286-
OperandValue::Immediate(niche_llval).store(bx, niche);
287-
}
250+
Ok(Some((tag_field, imm))) => {
251+
let tag_place = self.project_field(bx, tag_field.as_usize());
252+
OperandValue::Immediate(imm).store(bx, tag_place);
288253
}
254+
Ok(None) => {}
289255
}
290256
}
291257

@@ -471,3 +437,73 @@ fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
471437
let offset = bx.and(neg_value, align_minus_1);
472438
bx.add(value, offset)
473439
}
440+
441+
/// Calculates the value that needs to be stored to mark the discriminant.
442+
///
443+
/// This might be `None` for a `struct` or a niched variant (like `Some(&3)`).
444+
///
445+
/// If it's `Some`, it returns the value to store and the field in which to
446+
/// store it. Note that this value is *not* the same as the discriminant, in
447+
/// general, as it might be a niche value or have a different size.
448+
///
449+
/// It might also be an `Err` because the variant is uninhabited.
450+
pub(super) fn codegen_tagged_field_value<'tcx, V>(
451+
cx: &impl CodegenMethods<'tcx, Value = V>,
452+
variant_index: VariantIdx,
453+
layout: TyAndLayout<'tcx>,
454+
) -> Result<Option<(FieldIdx, V)>, UninhabitedVariantError> {
455+
// By checking uninhabited-ness first we don't need to worry about types
456+
// like `(u32, !)` which are single-variant but weird.
457+
if layout.for_variant(cx, variant_index).is_uninhabited() {
458+
return Err(UninhabitedVariantError);
459+
}
460+
461+
Ok(match layout.variants {
462+
Variants::Empty => unreachable!("we already handled uninhabited types"),
463+
Variants::Single { index } => {
464+
assert_eq!(index, variant_index);
465+
None
466+
}
467+
468+
Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
469+
let discr = layout.ty.discriminant_for_variant(cx.tcx(), variant_index);
470+
let to = discr.unwrap().val;
471+
let tag_layout = layout.field(cx, tag_field);
472+
let tag_llty = cx.immediate_backend_type(tag_layout);
473+
let imm = cx.const_uint_big(tag_llty, to);
474+
Some((FieldIdx::from_usize(tag_field), imm))
475+
}
476+
Variants::Multiple {
477+
tag_encoding: TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
478+
tag_field,
479+
..
480+
} => {
481+
if variant_index != untagged_variant {
482+
let niche_layout = layout.field(cx, tag_field);
483+
let niche_llty = cx.immediate_backend_type(niche_layout);
484+
let BackendRepr::Scalar(scalar) = niche_layout.backend_repr else {
485+
bug!("expected a scalar placeref for the niche");
486+
};
487+
// We are supposed to compute `niche_value.wrapping_add(niche_start)` wrapping
488+
// around the `niche`'s type.
489+
// The easiest way to do that is to do wrapping arithmetic on `u128` and then
490+
// masking off any extra bits that occur because we did the arithmetic with too many bits.
491+
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
492+
let niche_value = (niche_value as u128).wrapping_add(niche_start);
493+
let niche_value = niche_value & niche_layout.size.unsigned_int_max();
494+
495+
let niche_llval = cx.scalar_to_backend(
496+
Scalar::from_uint(niche_value, niche_layout.size),
497+
scalar,
498+
niche_llty,
499+
);
500+
Some((FieldIdx::from_usize(tag_field), niche_llval))
501+
} else {
502+
None
503+
}
504+
}
505+
})
506+
}
507+
508+
#[derive(Debug)]
509+
pub(super) struct UninhabitedVariantError;

‎compiler/rustc_codegen_ssa/src/mir/rvalue.rs

Lines changed: 41 additions & 152 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
use std::assert_matches::assert_matches;
22

3-
use arrayvec::ArrayVec;
4-
use rustc_abi::{self as abi, FIRST_VARIANT, FieldIdx};
3+
use rustc_abi::{self as abi, FIRST_VARIANT};
54
use rustc_middle::ty::adjustment::PointerCoercion;
65
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
76
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
@@ -10,8 +9,8 @@ use rustc_session::config::OptLevel;
109
use rustc_span::{DUMMY_SP, Span};
1110
use tracing::{debug, instrument};
1211

13-
use super::operand::{OperandRef, OperandValue};
14-
use super::place::PlaceRef;
12+
use super::operand::{OperandRef, OperandValue, assume_scalar_range, transmute_immediate};
13+
use super::place::{PlaceRef, codegen_tagged_field_value};
1514
use super::{FunctionCx, LocalRef};
1615
use crate::common::IntPredicate;
1716
use crate::traits::*;
@@ -261,7 +260,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
261260
{
262261
let from_backend_ty = bx.backend_type(operand.layout);
263262
let to_backend_ty = bx.backend_type(cast);
264-
Some(OperandValue::Immediate(self.transmute_immediate(
263+
Some(OperandValue::Immediate(transmute_immediate(
265264
bx,
266265
imm,
267266
from_scalar,
@@ -286,8 +285,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
286285
let out_a_ibty = bx.scalar_pair_element_backend_type(cast, 0, false);
287286
let out_b_ibty = bx.scalar_pair_element_backend_type(cast, 1, false);
288287
Some(OperandValue::Pair(
289-
self.transmute_immediate(bx, imm_a, in_a, in_a_ibty, out_a, out_a_ibty),
290-
self.transmute_immediate(bx, imm_b, in_b, in_b_ibty, out_b, out_b_ibty),
288+
transmute_immediate(bx, imm_a, in_a, in_a_ibty, out_a, out_a_ibty),
289+
transmute_immediate(bx, imm_b, in_b, in_b_ibty, out_b, out_b_ibty),
291290
))
292291
} else {
293292
None
@@ -301,7 +300,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
301300
///
302301
/// Returns `None` if the cast is not possible.
303302
fn cast_immediate(
304-
&self,
305303
bx: &mut Bx,
306304
mut imm: Bx::Value,
307305
from_scalar: abi::Scalar,
@@ -315,7 +313,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
315313
// valid ranges. For example, `char`s are passed as just `i32`, with no
316314
// way for LLVM to know that they're 0x10FFFF at most. Thus we assume
317315
// the range of the input value too, not just the output range.
318-
self.assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
316+
assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
319317

320318
imm = match (from_scalar.primitive(), to_scalar.primitive()) {
321319
(Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
@@ -348,98 +346,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
348346
Some(imm)
349347
}
350348

351-
/// Transmutes one of the immediates from an [`OperandValue::Immediate`]
352-
/// or an [`OperandValue::Pair`] to an immediate of the target type.
353-
///
354-
/// `to_backend_ty` must be the *non*-immediate backend type (so it will be
355-
/// `i8`, not `i1`, for `bool`-like types.)
356-
fn transmute_immediate(
357-
&self,
358-
bx: &mut Bx,
359-
mut imm: Bx::Value,
360-
from_scalar: abi::Scalar,
361-
from_backend_ty: Bx::Type,
362-
to_scalar: abi::Scalar,
363-
to_backend_ty: Bx::Type,
364-
) -> Bx::Value {
365-
assert_eq!(from_scalar.size(self.cx), to_scalar.size(self.cx));
366-
367-
// While optimizations will remove no-op transmutes, they might still be
368-
// there in debug or things that aren't no-op in MIR because they change
369-
// the Rust type but not the underlying layout/niche.
370-
if from_scalar == to_scalar && from_backend_ty == to_backend_ty {
371-
return imm;
372-
}
373-
374-
use abi::Primitive::*;
375-
imm = bx.from_immediate(imm);
376-
377-
// If we have a scalar, we must already know its range. Either
378-
//
379-
// 1) It's a parameter with `range` parameter metadata,
380-
// 2) It's something we `load`ed with `!range` metadata, or
381-
// 3) After a transmute we `assume`d the range (see below).
382-
//
383-
// That said, last time we tried removing this, it didn't actually help
384-
// the rustc-perf results, so might as well keep doing it
385-
// <https://github.com/rust-lang/rust/pull/135610#issuecomment-2599275182>
386-
self.assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
387-
388-
imm = match (from_scalar.primitive(), to_scalar.primitive()) {
389-
(Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
390-
(Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
391-
(Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm),
392-
(Pointer(..), Int(..)) => {
393-
// FIXME: this exposes the provenance, which shouldn't be necessary.
394-
bx.ptrtoint(imm, to_backend_ty)
395-
}
396-
(Float(_), Pointer(..)) => {
397-
let int_imm = bx.bitcast(imm, bx.cx().type_isize());
398-
bx.ptradd(bx.const_null(bx.type_ptr()), int_imm)
399-
}
400-
(Pointer(..), Float(_)) => {
401-
// FIXME: this exposes the provenance, which shouldn't be necessary.
402-
let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
403-
bx.bitcast(int_imm, to_backend_ty)
404-
}
405-
};
406-
407-
// This `assume` remains important for cases like (a conceptual)
408-
// transmute::<u32, NonZeroU32>(x) == 0
409-
// since it's never passed to something with parameter metadata (especially
410-
// after MIR inlining) so the only way to tell the backend about the
411-
// constraint that the `transmute` introduced is to `assume` it.
412-
self.assume_scalar_range(bx, imm, to_scalar, to_backend_ty);
413-
414-
imm = bx.to_immediate_scalar(imm, to_scalar);
415-
imm
416-
}
417-
418-
fn assume_scalar_range(
419-
&self,
420-
bx: &mut Bx,
421-
imm: Bx::Value,
422-
scalar: abi::Scalar,
423-
backend_ty: Bx::Type,
424-
) {
425-
if matches!(self.cx.sess().opts.optimize, OptLevel::No) || scalar.is_always_valid(self.cx) {
426-
return;
427-
}
428-
429-
match scalar.primitive() {
430-
abi::Primitive::Int(..) => {
431-
let range = scalar.valid_range(self.cx);
432-
bx.assume_integer_range(imm, backend_ty, range);
433-
}
434-
abi::Primitive::Pointer(abi::AddressSpace::DATA)
435-
if !scalar.valid_range(self.cx).contains(0) =>
436-
{
437-
bx.assume_nonnull(imm);
438-
}
439-
abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
440-
}
441-
}
442-
443349
pub(crate) fn codegen_rvalue_unsized(
444350
&mut self,
445351
bx: &mut Bx,
@@ -578,7 +484,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
578484
bug!("Found {cast_kind:?} for operand {cast:?}");
579485
};
580486

581-
self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
487+
Self::cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
582488
.map(OperandValue::Immediate)
583489
.unwrap_or_else(|| {
584490
bug!("Unsupported cast of {operand:?} to {cast:?}");
@@ -776,45 +682,42 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
776682
}
777683
mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
778684
mir::Rvalue::Repeat(..) => bug!("{rvalue:?} in codegen_rvalue_operand"),
779-
mir::Rvalue::Aggregate(_, ref fields) => {
685+
mir::Rvalue::Aggregate(ref kind, ref fields) => {
686+
let (variant_index, active_field_index) = match **kind {
687+
mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
688+
(variant_index, active_field_index)
689+
}
690+
_ => (FIRST_VARIANT, None),
691+
};
692+
780693
let ty = rvalue.ty(self.mir, self.cx.tcx());
781694
let ty = self.monomorphize(ty);
782695
let layout = self.cx.layout_of(ty);
783696

784-
// `rvalue_creates_operand` has arranged that we only get here if
785-
// we can build the aggregate immediate from the field immediates.
786-
let mut inputs = ArrayVec::<Bx::Value, 2>::new();
787-
let mut input_scalars = ArrayVec::<abi::Scalar, 2>::new();
788-
for field_idx in layout.fields.index_by_increasing_offset() {
789-
let field_idx = FieldIdx::from_usize(field_idx);
790-
let op = self.codegen_operand(bx, &fields[field_idx]);
791-
let values = op.val.immediates_or_place().left_or_else(|p| {
792-
bug!("Field {field_idx:?} is {p:?} making {layout:?}");
793-
});
794-
let scalars = self.value_kind(op.layout).scalars().unwrap();
795-
assert_eq!(values.len(), scalars.len());
796-
inputs.extend(values);
797-
input_scalars.extend(scalars);
697+
let mut builder = OperandRef::builder(layout);
698+
for (field_idx, field) in fields.iter_enumerated() {
699+
let op = self.codegen_operand(bx, field);
700+
let fi = active_field_index.unwrap_or(field_idx);
701+
builder.insert_field(bx, variant_index, fi, op);
798702
}
799703

800-
let output_scalars = self.value_kind(layout).scalars().unwrap();
801-
itertools::izip!(&mut inputs, input_scalars, output_scalars).for_each(
802-
|(v, in_s, out_s)| {
803-
if in_s != out_s {
804-
// We have to be really careful about bool here, because
805-
// `(bool,)` stays i1 but `Cell<bool>` becomes i8.
806-
*v = bx.from_immediate(*v);
807-
*v = bx.to_immediate_scalar(*v, out_s);
704+
let tag_result = codegen_tagged_field_value(self.cx, variant_index, layout);
705+
match tag_result {
706+
Err(super::place::UninhabitedVariantError) => {
707+
// Like codegen_set_discr we use a sound abort, but could
708+
// potentially `unreachable` or just return the poison for
709+
// more optimizability, if that turns out to be helpful.
710+
bx.abort();
711+
let val = OperandValue::poison(bx, layout);
712+
OperandRef { val, layout }
713+
}
714+
Ok(maybe_tag_value) => {
715+
if let Some((tag_field, tag_imm)) = maybe_tag_value {
716+
builder.insert_imm(tag_field, tag_imm);
808717
}
809-
},
810-
);
811-
812-
let val = OperandValue::from_immediates(inputs);
813-
assert!(
814-
val.is_expected_variant_for_type(self.cx, layout),
815-
"Made wrong variant {val:?} for type {layout:?}",
816-
);
817-
OperandRef { val, layout }
718+
builder.finalize(bx.cx())
719+
}
720+
}
818721
}
819722
mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
820723
let operand = self.codegen_operand(bx, operand);
@@ -1145,19 +1048,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
11451048
mir::AggregateKind::RawPtr(..) => true,
11461049
mir::AggregateKind::Array(..) => false,
11471050
mir::AggregateKind::Tuple => true,
1148-
mir::AggregateKind::Adt(def_id, ..) => {
1149-
let adt_def = self.cx.tcx().adt_def(def_id);
1150-
adt_def.is_struct() && !adt_def.repr().simd()
1151-
}
1051+
mir::AggregateKind::Adt(..) => true,
11521052
mir::AggregateKind::Closure(..) => true,
11531053
// FIXME: Can we do this for simple coroutines too?
11541054
mir::AggregateKind::Coroutine(..) | mir::AggregateKind::CoroutineClosure(..) => false,
11551055
};
11561056
allowed_kind && {
1157-
let ty = rvalue.ty(self.mir, self.cx.tcx());
1158-
let ty = self.monomorphize(ty);
1057+
let ty = rvalue.ty(self.mir, self.cx.tcx());
1058+
let ty = self.monomorphize(ty);
11591059
let layout = self.cx.spanned_layout_of(ty, span);
1160-
!self.cx.is_backend_ref(layout)
1060+
OperandRef::<Bx::Value>::supports_builder(layout)
11611061
}
11621062
}
11631063
}
@@ -1200,14 +1100,3 @@ enum OperandValueKind {
12001100
Pair(abi::Scalar, abi::Scalar),
12011101
ZeroSized,
12021102
}
1203-
1204-
impl OperandValueKind {
1205-
fn scalars(self) -> Option<ArrayVec<abi::Scalar, 2>> {
1206-
Some(match self {
1207-
OperandValueKind::ZeroSized => ArrayVec::new(),
1208-
OperandValueKind::Immediate(a) => ArrayVec::from_iter([a]),
1209-
OperandValueKind::Pair(a, b) => [a, b].into(),
1210-
OperandValueKind::Ref => return None,
1211-
})
1212-
}
1213-
}

‎compiler/rustc_codegen_ssa/src/traits/type_.rs

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
use rustc_abi::{AddressSpace, Float, Integer, Reg};
1+
use rustc_abi::{AddressSpace, Float, Integer, Primitive, Reg, Scalar};
22
use rustc_middle::bug;
33
use rustc_middle::ty::Ty;
44
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, TyAndLayout};
@@ -73,6 +73,24 @@ pub trait DerivedTypeCodegenMethods<'tcx>:
7373
}
7474
}
7575

76+
fn type_from_primitive(&self, p: Primitive) -> Self::Type {
77+
use Primitive::*;
78+
match p {
79+
Int(i, _) => self.type_from_integer(i),
80+
Float(f) => self.type_from_float(f),
81+
Pointer(address_space) => self.type_ptr_ext(address_space),
82+
}
83+
}
84+
85+
fn type_from_scalar(&self, s: Scalar) -> Self::Type {
86+
// `MaybeUninit` being `repr(transparent)` somewhat implies that the type
87+
// of a scalar has to be the type of its primitive (which is true in LLVM,
88+
// where noundef is a parameter attribute or metadata) but if we ever get
89+
// a backend where that's no longer true, every use of this will need to
90+
// be carefully scrutinized and re-evaluated.
91+
self.type_from_primitive(s.primitive())
92+
}
93+
7694
fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
7795
ty.needs_drop(self.tcx(), self.typing_env())
7896
}

‎tests/codegen/align-struct.rs

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,9 +13,11 @@ pub struct Nested64 {
1313
d: i8,
1414
}
1515

16+
// This has the extra field in B to ensure it's not ScalarPair,
17+
// and thus that the test actually emits it via memory, not `insertvalue`.
1618
pub enum Enum4 {
1719
A(i32),
18-
B(i32),
20+
B(i32, i32),
1921
}
2022

2123
pub enum Enum64 {
@@ -52,7 +54,7 @@ pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
5254
// CHECK-LABEL: @enum4
5355
#[no_mangle]
5456
pub fn enum4(a: i32) -> Enum4 {
55-
// CHECK: %e4 = alloca [8 x i8], align 4
57+
// CHECK: %e4 = alloca [12 x i8], align 4
5658
let e4 = Enum4::A(a);
5759
e4
5860
}

‎tests/codegen/common_prim_int_ptr.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,9 @@
1111
#[no_mangle]
1212
pub fn insert_int(x: usize) -> Result<usize, Box<()>> {
1313
// CHECK: start:
14-
// CHECK-NEXT: inttoptr i{{[0-9]+}} %x to ptr
15-
// CHECK-NEXT: insertvalue
16-
// CHECK-NEXT: ret { i{{[0-9]+}}, ptr }
14+
// CHECK-NEXT: %[[WO_PROV:.+]] = getelementptr i8, ptr null, [[USIZE:i[0-9]+]] %x
15+
// CHECK-NEXT: %[[R:.+]] = insertvalue { [[USIZE]], ptr } { [[USIZE]] 0, ptr poison }, ptr %[[WO_PROV]], 1
16+
// CHECK-NEXT: ret { [[USIZE]], ptr } %[[R]]
1717
Ok(x)
1818
}
1919

‎tests/codegen/enum/enum-aggregate.rs

Lines changed: 94 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
1+
//@ compile-flags: -Copt-level=0 -Cno-prepopulate-passes
2+
//@ min-llvm-version: 19
3+
//@ only-64bit
4+
5+
#![crate_type = "lib"]
6+
7+
use std::cmp::Ordering;
8+
use std::num::NonZero;
9+
use std::ptr::NonNull;
10+
11+
#[no_mangle]
12+
fn make_some_bool(x: bool) -> Option<bool> {
13+
// CHECK-LABEL: i8 @make_some_bool(i1 zeroext %x)
14+
// CHECK-NEXT: start:
15+
// CHECK-NEXT: %[[WIDER:.+]] = zext i1 %x to i8
16+
// CHECK-NEXT: ret i8 %[[WIDER]]
17+
Some(x)
18+
}
19+
20+
#[no_mangle]
21+
fn make_none_bool() -> Option<bool> {
22+
// CHECK-LABEL: i8 @make_none_bool()
23+
// CHECK-NEXT: start:
24+
// CHECK-NEXT: ret i8 2
25+
None
26+
}
27+
28+
#[no_mangle]
29+
fn make_some_ordering(x: Ordering) -> Option<Ordering> {
30+
// CHECK-LABEL: i8 @make_some_ordering(i8 %x)
31+
// CHECK-NEXT: start:
32+
// CHECK-NEXT: ret i8 %x
33+
Some(x)
34+
}
35+
36+
#[no_mangle]
37+
fn make_some_u16(x: u16) -> Option<u16> {
38+
// CHECK-LABEL: { i16, i16 } @make_some_u16(i16 %x)
39+
// CHECK-NEXT: start:
40+
// CHECK-NEXT: %0 = insertvalue { i16, i16 } { i16 1, i16 poison }, i16 %x, 1
41+
// CHECK-NEXT: ret { i16, i16 } %0
42+
Some(x)
43+
}
44+
45+
#[no_mangle]
46+
fn make_none_u16() -> Option<u16> {
47+
// CHECK-LABEL: { i16, i16 } @make_none_u16()
48+
// CHECK-NEXT: start:
49+
// CHECK-NEXT: ret { i16, i16 } { i16 0, i16 undef }
50+
None
51+
}
52+
53+
#[no_mangle]
54+
fn make_some_nzu32(x: NonZero<u32>) -> Option<NonZero<u32>> {
55+
// CHECK-LABEL: i32 @make_some_nzu32(i32 %x)
56+
// CHECK-NEXT: start:
57+
// CHECK-NEXT: ret i32 %x
58+
Some(x)
59+
}
60+
61+
#[no_mangle]
62+
fn make_ok_ptr(x: NonNull<u16>) -> Result<NonNull<u16>, usize> {
63+
// CHECK-LABEL: { i64, ptr } @make_ok_ptr(ptr %x)
64+
// CHECK-NEXT: start:
65+
// CHECK-NEXT: %0 = insertvalue { i64, ptr } { i64 0, ptr poison }, ptr %x, 1
66+
// CHECK-NEXT: ret { i64, ptr } %0
67+
Ok(x)
68+
}
69+
70+
#[no_mangle]
71+
fn make_ok_int(x: usize) -> Result<usize, NonNull<u16>> {
72+
// CHECK-LABEL: { i64, ptr } @make_ok_int(i64 %x)
73+
// CHECK-NEXT: start:
74+
// CHECK-NEXT: %[[NOPROV:.+]] = getelementptr i8, ptr null, i64 %x
75+
// CHECK-NEXT: %[[R:.+]] = insertvalue { i64, ptr } { i64 0, ptr poison }, ptr %[[NOPROV]], 1
76+
// CHECK-NEXT: ret { i64, ptr } %[[R]]
77+
Ok(x)
78+
}
79+
80+
#[no_mangle]
81+
fn make_some_ref(x: &u16) -> Option<&u16> {
82+
// CHECK-LABEL: ptr @make_some_ref(ptr align 2 %x)
83+
// CHECK-NEXT: start:
84+
// CHECK-NEXT: ret ptr %x
85+
Some(x)
86+
}
87+
88+
#[no_mangle]
89+
fn make_none_ref<'a>() -> Option<&'a u16> {
90+
// CHECK-LABEL: ptr @make_none_ref()
91+
// CHECK-NEXT: start:
92+
// CHECK-NEXT: ret ptr null
93+
None
94+
}

‎tests/codegen/set-discriminant-invalid.rs

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,10 +16,9 @@ impl IntoError<Error> for Api {
1616
type Source = ApiError;
1717
// CHECK-LABEL: @into_error
1818
// CHECK: llvm.trap()
19-
// Also check the next two instructions to make sure we do not match against `trap`
19+
// Also check the next instruction to make sure we do not match against `trap`
2020
// elsewhere in the code.
21-
// CHECK-NEXT: load
22-
// CHECK-NEXT: ret
21+
// CHECK-NEXT: ret i8 poison
2322
#[no_mangle]
2423
fn into_error(self, error: Self::Source) -> Error {
2524
Error::Api { source: error }

‎tests/codegen/union-aggregate.rs

Lines changed: 85 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,85 @@
1+
//@ compile-flags: -Copt-level=0 -Cno-prepopulate-passes
2+
//@ min-llvm-version: 19
3+
//@ only-64bit
4+
5+
#![crate_type = "lib"]
6+
#![feature(transparent_unions)]
7+
8+
#[repr(transparent)]
9+
union MU<T: Copy> {
10+
uninit: (),
11+
value: T,
12+
}
13+
14+
use std::cmp::Ordering;
15+
use std::num::NonZero;
16+
use std::ptr::NonNull;
17+
18+
#[no_mangle]
19+
fn make_mu_bool(x: bool) -> MU<bool> {
20+
// CHECK-LABEL: i8 @make_mu_bool(i1 zeroext %x)
21+
// CHECK-NEXT: start:
22+
// CHECK-NEXT: %[[WIDER:.+]] = zext i1 %x to i8
23+
// CHECK-NEXT: ret i8 %[[WIDER]]
24+
MU { value: x }
25+
}
26+
27+
#[no_mangle]
28+
fn make_mu_bool_uninit() -> MU<bool> {
29+
// CHECK-LABEL: i8 @make_mu_bool_uninit()
30+
// CHECK-NEXT: start:
31+
// CHECK-NEXT: ret i8 undef
32+
MU { uninit: () }
33+
}
34+
35+
#[no_mangle]
36+
fn make_mu_ref(x: &u16) -> MU<&u16> {
37+
// CHECK-LABEL: ptr @make_mu_ref(ptr align 2 %x)
38+
// CHECK-NEXT: start:
39+
// CHECK-NEXT: ret ptr %x
40+
MU { value: x }
41+
}
42+
43+
#[no_mangle]
44+
fn make_mu_ref_uninit<'a>() -> MU<&'a u16> {
45+
// CHECK-LABEL: ptr @make_mu_ref_uninit()
46+
// CHECK-NEXT: start:
47+
// CHECK-NEXT: ret ptr undef
48+
MU { uninit: () }
49+
}
50+
51+
#[no_mangle]
52+
fn make_mu_str(x: &str) -> MU<&str> {
53+
// CHECK-LABEL: { ptr, i64 } @make_mu_str(ptr align 1 %x.0, i64 %x.1)
54+
// CHECK-NEXT: start:
55+
// CHECK-NEXT: %0 = insertvalue { ptr, i64 } poison, ptr %x.0, 0
56+
// CHECK-NEXT: %1 = insertvalue { ptr, i64 } %0, i64 %x.1, 1
57+
// CHECK-NEXT: ret { ptr, i64 } %1
58+
MU { value: x }
59+
}
60+
61+
#[no_mangle]
62+
fn make_mu_str_uninit<'a>() -> MU<&'a str> {
63+
// CHECK-LABEL: { ptr, i64 } @make_mu_str_uninit()
64+
// CHECK-NEXT: start:
65+
// CHECK-NEXT: ret { ptr, i64 } undef
66+
MU { uninit: () }
67+
}
68+
69+
#[no_mangle]
70+
fn make_mu_pair(x: (u8, u32)) -> MU<(u8, u32)> {
71+
// CHECK-LABEL: { i8, i32 } @make_mu_pair(i8 %x.0, i32 %x.1)
72+
// CHECK-NEXT: start:
73+
// CHECK-NEXT: %0 = insertvalue { i8, i32 } poison, i8 %x.0, 0
74+
// CHECK-NEXT: %1 = insertvalue { i8, i32 } %0, i32 %x.1, 1
75+
// CHECK-NEXT: ret { i8, i32 } %1
76+
MU { value: x }
77+
}
78+
79+
#[no_mangle]
80+
fn make_mu_pair_uninit() -> MU<(u8, u32)> {
81+
// CHECK-LABEL: { i8, i32 } @make_mu_pair_uninit()
82+
// CHECK-NEXT: start:
83+
// CHECK-NEXT: ret { i8, i32 } undef
84+
MU { uninit: () }
85+
}

0 commit comments

Comments
 (0)
Please sign in to comment.