Let rvalue_creates_operand return true for *all* Rvalue::Aggregates
Inspired by <https://github.com/rust-lang/rust/pull/138759#discussion_r2156375342>, where I noticed that we were nearly at this point, plus the comments I was writing in 143410 that reminded me a type-dependent `true` is fine. This PR splits the `OperandRef::builder` logic out into a separate type, with the updates needed to handle SIMD as well. In doing so, it makes the existing `Aggregate` path in `codegen_rvalue_operand` capable of handling SIMD values just fine. As a result, we no longer need to do layout calculations for aggregate result types when running the analysis that determines which things can be SSA in codegen.
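In sketch form, the builder introduced below starts every scalar slot out as the `abi::Scalar` it expects and flips each slot to a concrete backend value as fields are inserted; `build` then checks that everything required was filled in (or, for uninit-valid SIMD elements, falls back to `undef`). A rough standalone analogue of that shape, with made-up names rather than the actual rustc types or signatures:

```rust
// Standalone sketch only: `Slot` stands in for `Either<V, abi::Scalar>` and
// `PairBuilder` for `OperandRefBuilder`; names and shapes are illustrative.
#[derive(Debug, Clone, Copy)]
enum Slot<V> {
    Expected,  // like `Either::Right(scalar)`: we know what should go here
    Filled(V), // like `Either::Left(value)`: the backend value has been set
}

#[derive(Debug)]
struct PairBuilder<V> {
    first: Slot<V>,
    second: Slot<V>,
}

impl<V: Copy + std::fmt::Debug> PairBuilder<V> {
    fn new() -> Self {
        PairBuilder { first: Slot::Expected, second: Slot::Expected }
    }

    // Fill one field; in the PR this is `insert_field`, keyed by variant and
    // field index and converting the incoming scalar with `transmute_scalar`.
    fn insert_field(&mut self, index: usize, value: V) {
        let slot = if index == 0 { &mut self.first } else { &mut self.second };
        *slot = Slot::Filled(value);
    }

    // Convert back to the finished pair; the real `build` ICEs (or uses
    // `undef` for uninit-valid SIMD elements) when a required slot is missing.
    fn build(&self) -> (V, V) {
        match (self.first, self.second) {
            (Slot::Filled(a), Slot::Filled(b)) => (a, b),
            _ => panic!("build called while fields are missing: {self:?}"),
        }
    }
}

fn main() {
    let mut b = PairBuilder::new();
    b.insert_field(0, 1_u32);
    b.insert_field(1, 2_u32);
    assert_eq!(b.build(), (1, 2));
}
```

The actual aggregate path drives the real type the same way: `OperandRefBuilder::new(layout)`, one `insert_field` call per field operand, then `build` to get the finished `OperandRef`.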
@@ -171,8 +171,7 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'b, 'tcx>> Visitor<'tcx> for LocalAnalyzer
         if let Some(local) = place.as_local() {
             self.define(local, DefLocation::Assignment(location));
             if self.locals[local] != LocalKind::Memory {
-                let decl_span = self.fx.mir.local_decls[local].source_info.span;
-                if !self.fx.rvalue_creates_operand(rvalue, decl_span) {
+                if !self.fx.rvalue_creates_operand(rvalue) {
                     self.locals[local] = LocalKind::Memory;
                 }
             }
@@ -565,118 +565,167 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
         }
     }
-
-    /// Creates an incomplete operand containing the [`abi::Scalar`]s expected based
-    /// on the `layout` passed. This is for use with [`OperandRef::insert_field`]
-    /// later to set the necessary immediate(s), one-by-one converting all the `Right` to `Left`.
-    ///
-    /// Returns `None` for `layout`s which cannot be built this way.
-    pub(crate) fn builder(
-        layout: TyAndLayout<'tcx>,
-    ) -> Option<OperandRef<'tcx, Either<V, abi::Scalar>>> {
-        // Uninhabited types are weird, because for example `Result<!, !>`
-        // shows up as `FieldsShape::Primitive` and we need to be able to write
-        // a field into `(u32, !)`. We'll do that in an `alloca` instead.
-        if layout.uninhabited {
-            return None;
-        }
-
-        let val = match layout.backend_repr {
-            BackendRepr::Memory { .. } if layout.is_zst() => OperandValue::ZeroSized,
-            BackendRepr::Scalar(s) => OperandValue::Immediate(Either::Right(s)),
-            BackendRepr::ScalarPair(a, b) => OperandValue::Pair(Either::Right(a), Either::Right(b)),
-            BackendRepr::Memory { .. } | BackendRepr::SimdVector { .. } => return None,
-        };
-        Some(OperandRef { val, layout })
-    }
 }
 
-impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Either<V, abi::Scalar>> {
-    pub(crate) fn insert_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+/// Each of these variants starts out as `Either::Right` when it's uninitialized,
+/// then setting the field changes that to `Either::Left` with the backend value.
+#[derive(Debug, Copy, Clone)]
+enum OperandValueBuilder<V> {
+    ZeroSized,
+    Immediate(Either<V, abi::Scalar>),
+    Pair(Either<V, abi::Scalar>, Either<V, abi::Scalar>),
+    /// `repr(simd)` types need special handling because they each have a non-empty
+    /// array field (which uses [`OperandValue::Ref`]) despite the SIMD type itself
+    /// using [`OperandValue::Immediate`] which for any other kind of type would
+    /// mean that its one non-ZST field would also be [`OperandValue::Immediate`].
+    Vector(Either<V, ()>),
+}
+
+/// Allows building up an `OperandRef` by setting fields one at a time.
+#[derive(Debug, Copy, Clone)]
+pub(super) struct OperandRefBuilder<'tcx, V> {
+    val: OperandValueBuilder<V>,
+    layout: TyAndLayout<'tcx>,
+}
+
+impl<'a, 'tcx, V: CodegenObject> OperandRefBuilder<'tcx, V> {
+    /// Creates an uninitialized builder for an instance of the `layout`.
+    ///
+    /// ICEs for [`BackendRepr::Memory`] types (other than ZSTs), which should
+    /// be built up inside a [`PlaceRef`] instead as they need an allocated place
+    /// into which to write the values of the fields.
+    pub(super) fn new(layout: TyAndLayout<'tcx>) -> Self {
+        let val = match layout.backend_repr {
+            BackendRepr::Memory { .. } if layout.is_zst() => OperandValueBuilder::ZeroSized,
+            BackendRepr::Scalar(s) => OperandValueBuilder::Immediate(Either::Right(s)),
+            BackendRepr::ScalarPair(a, b) => {
+                OperandValueBuilder::Pair(Either::Right(a), Either::Right(b))
+            }
+            BackendRepr::SimdVector { .. } => OperandValueBuilder::Vector(Either::Right(())),
+            BackendRepr::Memory { .. } => {
+                bug!("Cannot use non-ZST Memory-ABI type in operand builder: {layout:?}");
+            }
+        };
+        OperandRefBuilder { val, layout }
+    }
+
+    pub(super) fn insert_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         &mut self,
         bx: &mut Bx,
-        v: VariantIdx,
-        f: FieldIdx,
-        operand: OperandRef<'tcx, V>,
+        variant: VariantIdx,
+        field: FieldIdx,
+        field_operand: OperandRef<'tcx, V>,
     ) {
-        let (expect_zst, is_zero_offset) = if let abi::FieldsShape::Primitive = self.layout.fields {
+        if let OperandValue::ZeroSized = field_operand.val {
+            // A ZST never adds any state, so just ignore it.
+            // This special-casing is worth it because of things like
+            // `Result<!, !>` where `Ok(never)` is legal to write,
+            // but the type shows as FieldShape::Primitive so we can't
+            // actually look at the layout for the field being set.
+            return;
+        }
+
+        let is_zero_offset = if let abi::FieldsShape::Primitive = self.layout.fields {
             // The other branch looking at field layouts ICEs for primitives,
             // so we need to handle them separately.
-            // Multiple fields is possible for cases such as aggregating
-            // a thin pointer, where the second field is the unit.
+            // Because we handled ZSTs above (like the metadata in a thin pointer),
+            // the only possibility is that we're setting the one-and-only field.
             assert!(!self.layout.is_zst());
-            assert_eq!(v, FIRST_VARIANT);
-            let first_field = f == FieldIdx::ZERO;
-            (!first_field, first_field)
+            assert_eq!(variant, FIRST_VARIANT);
+            assert_eq!(field, FieldIdx::ZERO);
+            true
         } else {
-            let variant_layout = self.layout.for_variant(bx.cx(), v);
-            let field_layout = variant_layout.field(bx.cx(), f.as_usize());
-            let field_offset = variant_layout.fields.offset(f.as_usize());
-            (field_layout.is_zst(), field_offset == Size::ZERO)
+            let variant_layout = self.layout.for_variant(bx.cx(), variant);
+            let field_offset = variant_layout.fields.offset(field.as_usize());
+            field_offset == Size::ZERO
         };
 
         let mut update = |tgt: &mut Either<V, abi::Scalar>, src, from_scalar| {
             let to_scalar = tgt.unwrap_right();
+            // We transmute here (rather than just `from_immediate`) because in
+            // `Result<usize, *const ()>` the field of the `Ok` is an integer,
+            // but the corresponding scalar in the enum is a pointer.
            let imm = transmute_scalar(bx, src, from_scalar, to_scalar);
            *tgt = Either::Left(imm);
        };
 
-        match (operand.val, operand.layout.backend_repr) {
-            (OperandValue::ZeroSized, _) if expect_zst => {}
+        match (field_operand.val, field_operand.layout.backend_repr) {
+            (OperandValue::ZeroSized, _) => unreachable!("Handled above"),
             (OperandValue::Immediate(v), BackendRepr::Scalar(from_scalar)) => match &mut self.val {
-                OperandValue::Immediate(val @ Either::Right(_)) if is_zero_offset => {
+                OperandValueBuilder::Immediate(val @ Either::Right(_)) if is_zero_offset => {
                     update(val, v, from_scalar);
                 }
-                OperandValue::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
+                OperandValueBuilder::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
                     update(fst, v, from_scalar);
                 }
-                OperandValue::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
+                OperandValueBuilder::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
                     update(snd, v, from_scalar);
                 }
-                _ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
+                _ => {
+                    bug!("Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}")
+                }
+            },
+            (OperandValue::Immediate(v), BackendRepr::SimdVector { .. }) => match &mut self.val {
+                OperandValueBuilder::Vector(val @ Either::Right(())) if is_zero_offset => {
+                    *val = Either::Left(v);
+                }
+                _ => {
+                    bug!("Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}")
+                }
             },
             (OperandValue::Pair(a, b), BackendRepr::ScalarPair(from_sa, from_sb)) => {
                 match &mut self.val {
-                    OperandValue::Pair(fst @ Either::Right(_), snd @ Either::Right(_)) => {
+                    OperandValueBuilder::Pair(fst @ Either::Right(_), snd @ Either::Right(_)) => {
                         update(fst, a, from_sa);
                         update(snd, b, from_sb);
                     }
-                    _ => bug!("Tried to insert {operand:?} into {v:?}.{f:?} of {self:?}"),
+                    _ => bug!(
+                        "Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}"
+                    ),
                 }
             }
-            _ => bug!("Unsupported operand {operand:?} inserting into {v:?}.{f:?} of {self:?}"),
+            (OperandValue::Ref(place), BackendRepr::Memory { .. }) => match &mut self.val {
+                OperandValueBuilder::Vector(val @ Either::Right(())) => {
+                    let ibty = bx.cx().immediate_backend_type(self.layout);
+                    let simd = bx.load_from_place(ibty, place);
+                    *val = Either::Left(simd);
+                }
+                _ => {
+                    bug!("Tried to insert {field_operand:?} into {variant:?}.{field:?} of {self:?}")
+                }
+            },
+            _ => bug!("Operand cannot be used with `insert_field`: {field_operand:?}"),
         }
     }
 
     /// Insert the immediate value `imm` for field `f` in the *type itself*,
     /// rather than into one of the variants.
     ///
-    /// Most things want [`OperandRef::insert_field`] instead, but this one is
+    /// Most things want [`Self::insert_field`] instead, but this one is
     /// necessary for writing things like enum tags that aren't in any variant.
     pub(super) fn insert_imm(&mut self, f: FieldIdx, imm: V) {
         let field_offset = self.layout.fields.offset(f.as_usize());
         let is_zero_offset = field_offset == Size::ZERO;
         match &mut self.val {
-            OperandValue::Immediate(val @ Either::Right(_)) if is_zero_offset => {
+            OperandValueBuilder::Immediate(val @ Either::Right(_)) if is_zero_offset => {
                 *val = Either::Left(imm);
             }
-            OperandValue::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
+            OperandValueBuilder::Pair(fst @ Either::Right(_), _) if is_zero_offset => {
                 *fst = Either::Left(imm);
             }
-            OperandValue::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
+            OperandValueBuilder::Pair(_, snd @ Either::Right(_)) if !is_zero_offset => {
                 *snd = Either::Left(imm);
             }
             _ => bug!("Tried to insert {imm:?} into field {f:?} of {self:?}"),
         }
     }
 
-    /// After having set all necessary fields, this converts the
-    /// `OperandValue<Either<V, _>>` (as obtained from [`OperandRef::builder`])
-    /// to the normal `OperandValue<V>`.
+    /// After having set all necessary fields, this converts the builder back
+    /// to the normal `OperandRef`.
     ///
     /// ICEs if any required fields were not set.
-    pub fn build(&self, cx: &impl CodegenMethods<'tcx, Value = V>) -> OperandRef<'tcx, V> {
-        let OperandRef { val, layout } = *self;
+    pub(super) fn build(&self, cx: &impl CodegenMethods<'tcx, Value = V>) -> OperandRef<'tcx, V> {
+        let OperandRefBuilder { val, layout } = *self;
 
         // For something like `Option::<u32>::None`, it's expected that the
         // payload scalar will not actually have been set, so this converts
@@ -692,10 +741,22 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Either<V, abi::Scalar>> {
         };
 
         let val = match val {
-            OperandValue::ZeroSized => OperandValue::ZeroSized,
-            OperandValue::Immediate(v) => OperandValue::Immediate(unwrap(v)),
-            OperandValue::Pair(a, b) => OperandValue::Pair(unwrap(a), unwrap(b)),
-            OperandValue::Ref(_) => bug!(),
+            OperandValueBuilder::ZeroSized => OperandValue::ZeroSized,
+            OperandValueBuilder::Immediate(v) => OperandValue::Immediate(unwrap(v)),
+            OperandValueBuilder::Pair(a, b) => OperandValue::Pair(unwrap(a), unwrap(b)),
+            OperandValueBuilder::Vector(v) => match v {
+                Either::Left(v) => OperandValue::Immediate(v),
+                Either::Right(())
+                    if let BackendRepr::SimdVector { element, .. } = layout.backend_repr
+                        && element.is_uninit_valid() =>
+                {
+                    let bty = cx.immediate_backend_type(layout);
+                    OperandValue::Immediate(cx.const_undef(bty))
+                }
+                Either::Right(()) => {
+                    bug!("OperandRef::build called while fields are missing {self:?}")
+                }
+            },
         };
         OperandRef { val, layout }
     }
@@ -4,10 +4,9 @@ use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
 use rustc_session::config::OptLevel;
-use rustc_span::{DUMMY_SP, Span};
 use tracing::{debug, instrument};
 
-use super::operand::{OperandRef, OperandValue};
+use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
 use super::place::{PlaceRef, codegen_tag_value};
 use super::{FunctionCx, LocalRef};
 use crate::common::{IntPredicate, TypeKind};
@@ -181,7 +180,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
 
             _ => {
-                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
+                assert!(self.rvalue_creates_operand(rvalue));
                 let temp = self.codegen_rvalue_operand(bx, rvalue);
                 temp.val.store(bx, dest);
             }
@@ -354,10 +353,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         bx: &mut Bx,
         rvalue: &mir::Rvalue<'tcx>,
     ) -> OperandRef<'tcx, Bx::Value> {
-        assert!(
-            self.rvalue_creates_operand(rvalue, DUMMY_SP),
-            "cannot codegen {rvalue:?} to operand",
-        );
+        assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {rvalue:?} to operand",);
 
         match *rvalue {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
@@ -668,9 +664,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                 // `rvalue_creates_operand` has arranged that we only get here if
                 // we can build the aggregate immediate from the field immediates.
-                let Some(mut builder) = OperandRef::builder(layout) else {
-                    bug!("Cannot use type in operand builder: {layout:?}")
-                };
+                let mut builder = OperandRefBuilder::new(layout);
                 for (field_idx, field) in fields.iter_enumerated() {
                     let op = self.codegen_operand(bx, field);
                     let fi = active_field_index.unwrap_or(field_idx);
@@ -980,7 +974,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     /// will not actually take the operand path because the result type is such
     /// that it always gets an `alloca`, but where it's not worth re-checking the
     /// layout in this code when the right thing will happen anyway.
-    pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
+    pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
         match *rvalue {
             mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
                 let operand_ty = operand.ty(self.mir, self.cx.tcx());
@@ -1025,17 +1019,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::Rvalue::NullaryOp(..) |
             mir::Rvalue::ThreadLocalRef(_) |
             mir::Rvalue::Use(..) |
+            mir::Rvalue::Aggregate(..) | // (*)
             mir::Rvalue::WrapUnsafeBinder(..) => // (*)
                 true,
             // Arrays are always aggregates, so it's not worth checking anything here.
             // (If it's really `[(); N]` or `[T; 0]` and we use the place path, fine.)
             mir::Rvalue::Repeat(..) => false,
-            mir::Rvalue::Aggregate(..) => {
-                let ty = rvalue.ty(self.mir, self.cx.tcx());
-                let ty = self.monomorphize(ty);
-                let layout = self.cx.spanned_layout_of(ty, span);
-                OperandRef::<Bx::Value>::builder(layout).is_some()
-            }
         }
 
         // (*) this is only true if the type is suitable
@@ -112,17 +112,14 @@ fn make_uninhabited_err_indirectly(n: Never) -> Result<u32, Never> {
 
 #[no_mangle]
 fn make_fully_uninhabited_result(v: u32, n: Never) -> Result<(u32, Never), (Never, u32)> {
-    // We don't try to do this in SSA form since the whole type is uninhabited.
+    // Actually reaching this would be UB, so we don't actually build a result.
 
     // CHECK-LABEL: { i32, i32 } @make_fully_uninhabited_result(i32 %v)
-    // CHECK: %[[ALLOC_V:.+]] = alloca [4 x i8]
-    // CHECK: %[[RET:.+]] = alloca [8 x i8]
-    // CHECK: store i32 %v, ptr %[[ALLOC_V]]
-    // CHECK: %[[TEMP_V:.+]] = load i32, ptr %[[ALLOC_V]]
-    // CHECK: %[[INNER:.+]] = getelementptr inbounds i8, ptr %[[RET]]
-    // CHECK: store i32 %[[TEMP_V]], ptr %[[INNER]]
-    // CHECK: call void @llvm.trap()
-    // CHECK: unreachable
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: call void @llvm.trap()
+    // CHECK-NEXT: call void @llvm.trap()
+    // CHECK-NEXT: call void @llvm.trap()
+    // CHECK-NEXT: unreachable
     Ok((v, n))
 }
 
tests/codegen/simd/aggregate-simd.rs (new file, 106 lines)
@@ -0,0 +1,106 @@
+//@ compile-flags: -C opt-level=3 -C no-prepopulate-passes
+//@ only-64bit
+
+#![feature(core_intrinsics, repr_simd)]
+#![no_std]
+#![crate_type = "lib"]
+
+use core::intrinsics::simd::{simd_add, simd_extract};
+
+#[repr(simd)]
+#[derive(Clone, Copy)]
+pub struct Simd<T, const N: usize>([T; N]);
+
+#[repr(simd, packed)]
+#[derive(Clone, Copy)]
+pub struct PackedSimd<T, const N: usize>([T; N]);
+
+#[repr(transparent)]
+pub struct Transparent<T>(T);
+
+// These tests don't actually care about the add/extract, but it ensures the
+// aggregated temporaries are only used in potentially-SSA ways.
+
+#[no_mangle]
+pub fn simd_aggregate_pot(x: [u32; 4], y: [u32; 4]) -> u32 {
+    // CHECK-LABEL: simd_aggregate_pot
+    // CHECK: %a = load <4 x i32>, ptr %x, align 4
+    // CHECK: %b = load <4 x i32>, ptr %y, align 4
+    // CHECK: add <4 x i32> %a, %b
+
+    unsafe {
+        let a = Simd(x);
+        let b = Simd(y);
+        let c = simd_add(a, b);
+        simd_extract(c, 1)
+    }
+}
+
+#[no_mangle]
+pub fn simd_aggregate_npot(x: [u32; 7], y: [u32; 7]) -> u32 {
+    // CHECK-LABEL: simd_aggregate_npot
+    // CHECK: %a = load <7 x i32>, ptr %x, align 4
+    // CHECK: %b = load <7 x i32>, ptr %y, align 4
+    // CHECK: add <7 x i32> %a, %b
+
+    unsafe {
+        let a = Simd(x);
+        let b = Simd(y);
+        let c = simd_add(a, b);
+        simd_extract(c, 1)
+    }
+}
+
+#[no_mangle]
+pub fn packed_simd_aggregate_pot(x: [u32; 4], y: [u32; 4]) -> u32 {
+    // CHECK-LABEL: packed_simd_aggregate_pot
+    // CHECK: %a = load <4 x i32>, ptr %x, align 4
+    // CHECK: %b = load <4 x i32>, ptr %y, align 4
+    // CHECK: add <4 x i32> %a, %b
+
+    unsafe {
+        let a = PackedSimd(x);
+        let b = PackedSimd(y);
+        let c = simd_add(a, b);
+        simd_extract(c, 1)
+    }
+}
+
+#[no_mangle]
+pub fn packed_simd_aggregate_npot(x: [u32; 7], y: [u32; 7]) -> u32 {
+    // CHECK-LABEL: packed_simd_aggregate_npot
+    // CHECK: %b = alloca [28 x i8], align 4
+    // CHECK: %a = alloca [28 x i8], align 4
+    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %a, ptr align 4 %x, i64 28, i1 false)
+    // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %b, ptr align 4 %y, i64 28, i1 false)
+    // CHECK: %[[TEMPA:.+]] = load <7 x i32>, ptr %a, align 4
+    // CHECK: %[[TEMPB:.+]] = load <7 x i32>, ptr %b, align 4
+    // CHECK: add <7 x i32> %[[TEMPA]], %[[TEMPB]]
+
+    unsafe {
+        let a = PackedSimd(x);
+        let b = PackedSimd(y);
+        let c = simd_add(a, b);
+        simd_extract(c, 1)
+    }
+}
+
+#[no_mangle]
+pub fn transparent_simd_aggregate(x: [u32; 4]) -> u32 {
+    // The transparent wrapper can just use the same SSA value as its field.
+    // No extra processing or spilling needed.
+
+    // CHECK-LABEL: transparent_simd_aggregate
+    // CHECK-NOT: alloca
+    // CHECK: %[[RET:.+]] = alloca [4 x i8]
+    // CHECK-NOT: alloca
+    // CHECK: %a = load <4 x i32>, ptr %x, align 4
+    // CHECK: %[[TEMP:.+]] = extractelement <4 x i32> %a, i32 1
+    // CHECK: store i32 %[[TEMP]], ptr %[[RET]]
+
+    unsafe {
+        let a = Simd(x);
+        let b = Transparent(a);
+        simd_extract(b.0, 1)
+    }
+}
@@ -4,6 +4,7 @@
 
 #![crate_type = "lib"]
 #![feature(transparent_unions)]
+#![feature(repr_simd)]
 
 #[repr(transparent)]
 union MU<T: Copy> {
@@ -83,3 +84,25 @@ fn make_mu_pair_uninit() -> MU<(u8, u32)> {
     // CHECK-NEXT: ret { i8, i32 } undef
     MU { uninit: () }
 }
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+struct I32X32([i32; 32]);
+
+#[no_mangle]
+fn make_mu_simd(x: I32X32) -> MU<I32X32> {
+    // CHECK-LABEL: void @make_mu_simd(ptr{{.+}}%_0, ptr{{.+}}%x)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: %[[TEMP:.+]] = load <32 x i32>, ptr %x,
+    // CHECK-NEXT: store <32 x i32> %[[TEMP]], ptr %_0,
+    // CHECK-NEXT: ret void
+    MU { value: x }
+}
+
+#[no_mangle]
+fn make_mu_simd_uninit() -> MU<I32X32> {
+    // CHECK-LABEL: void @make_mu_simd_uninit(ptr{{.+}}%_0)
+    // CHECK-NEXT: start:
+    // CHECK-NEXT: ret void
+    MU { uninit: () }
+}