Block SIMD in transmute_immediate; delete OperandValueKind
See conversation in <https://rust-lang.zulipchat.com/#narrow/channel/131828-t-compiler/topic/Is.20transmuting.20a.20.60T.60.20to.20.60Tx1.60.20.28one-element.20SIMD.20vector.29.20UB.3F/near/526262799>.
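The linked thread asks whether transmuting a scalar `T` to a one-element SIMD vector `Tx1` is UB. With this commit, codegen stops treating SIMD vectors as transmutable immediates and routes such transmutes through memory instead, as the hunks and test updates below show. A rough sketch of the affected pattern, modeled on the `S([i64; 1])` codegen test further down (illustration only; it needs a nightly toolchain since `repr(simd)` is unstable):

#![feature(repr_simd)]

// A one-element SIMD vector, as in the codegen test below.
#[repr(simd)]
pub struct S(pub [i64; 1]);

// After this commit these lower through a stack slot (store + load)
// rather than a direct `bitcast` between `i64` and `<1 x i64>`.
pub fn scalar_to_single_element_simd(b: i64) -> S {
    unsafe { std::mem::transmute(b) }
}

pub fn single_element_simd_to_scalar(b: S) -> i64 {
    unsafe { std::mem::transmute(b) }
}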
@@ -346,6 +346,13 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         let val = if field.is_zst() {
             OperandValue::ZeroSized
+        } else if let BackendRepr::SimdVector { .. } = self.layout.backend_repr {
+            // codegen_transmute_operand doesn't support SIMD, but since the previous
+            // check handled ZSTs, the only possible field access into something SIMD
+            // is to the `non_1zst_field` that's the same SIMD. (Other things, even
+            // just padding, would change the wrapper's representation type.)
+            assert_eq!(field.size, self.layout.size);
+            self.val
         } else if field.size == self.layout.size {
             assert_eq!(offset.bytes(), 0);
             fx.codegen_transmute_operand(bx, *self, field).unwrap_or_else(|| {
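The new branch above covers field extraction out of a layout whose backend representation is a SIMD vector: once ZSTs are excluded, the only field that can be projected out of such a layout is the one that is itself the same SIMD vector, so the operand value is reused as-is (hence the size `assert_eq!`). A hedged illustration of that shape of code (the wrapper type is made up for this example, and `portable_simd` is nightly-only):

#![feature(portable_simd)]
use std::simd::f32x4;

// A newtype whose layout is exactly the SIMD vector it wraps; projecting
// out the field is the `field.size == self.layout.size` case above.
pub struct Wrapper(pub f32x4);

pub fn unwrap(w: Wrapper) -> f32x4 {
    w.0
}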
@@ -606,10 +613,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Result<V, abi::Scalar>> {
         };
 
         let mut update = |tgt: &mut Result<V, abi::Scalar>, src, from_scalar| {
-            let from_bty = bx.cx().type_from_scalar(from_scalar);
             let to_scalar = tgt.unwrap_err();
-            let to_bty = bx.cx().type_from_scalar(to_scalar);
-            let imm = transmute_immediate(bx, src, from_scalar, from_bty, to_scalar, to_bty);
+            let imm = transmute_immediate(bx, src, from_scalar, to_scalar);
             *tgt = Ok(imm);
         };
 
@@ -1,10 +1,8 @@
-use std::assert_matches::assert_matches;
-
 use rustc_abi::{self as abi, FIRST_VARIANT};
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
-use rustc_middle::{bug, mir, span_bug};
+use rustc_middle::{bug, mir};
 use rustc_session::config::OptLevel;
 use rustc_span::{DUMMY_SP, Span};
 use tracing::{debug, instrument};
@@ -12,7 +10,7 @@ use tracing::{debug, instrument};
 use super::operand::{OperandRef, OperandValue};
 use super::place::PlaceRef;
 use super::{FunctionCx, LocalRef};
-use crate::common::IntPredicate;
+use crate::common::{IntPredicate, TypeKind};
 use crate::traits::*;
 use crate::{MemFlags, base};
 
@@ -200,31 +198,25 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         assert!(src.layout.is_sized());
         assert!(dst.layout.is_sized());
 
-        if let Some(val) = self.codegen_transmute_operand(bx, src, dst.layout) {
-            val.store(bx, dst);
-            return;
-        }
-
-        match src.val {
-            OperandValue::Ref(..) | OperandValue::ZeroSized => {
-                span_bug!(
-                    self.mir.span,
-                    "Operand path should have handled transmute \
-                    from {src:?} to place {dst:?}"
-                );
-            }
-            OperandValue::Immediate(..) | OperandValue::Pair(..) => {
-                // When we have immediate(s), the alignment of the source is irrelevant,
-                // so we can store them using the destination's alignment.
-                src.val.store(bx, dst.val.with_type(src.layout));
-            }
+        if src.layout.size == dst.layout.size {
+            // Since in this path we have a place anyway, we can store or copy to it,
+            // making sure we use the destination place's alignment even if the
+            // source would normally have a higher one.
+            src.val.store(bx, dst.val.with_type(src.layout));
+        } else if src.layout.is_uninhabited() {
+            bx.unreachable()
+        } else {
+            // Since this is known statically and the input could have existed
+            // without already having hit UB, might as well trap for it, even
+            // though it's UB so we *could* also unreachable it.
+            bx.abort();
         }
     }
 
     /// Attempts to transmute an `OperandValue` to another `OperandValue`.
     ///
     /// Returns `None` for cases that can't work in that framework, such as for
-    /// `Immediate`->`Ref` that needs an `alloc` to get the location.
+    /// `Immediate`->`Ref` that needs an `alloca` to get the location.
     pub(crate) fn codegen_transmute_operand(
         &mut self,
         bx: &mut Bx,
@@ -247,69 +239,34 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             return Some(OperandValue::poison(bx, cast));
         }
 
-        let operand_kind = self.value_kind(operand.layout);
-        let cast_kind = self.value_kind(cast);
-
-        match operand.val {
-            OperandValue::Ref(source_place_val) => {
+        Some(match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
+            _ if cast.is_zst() => OperandValue::ZeroSized,
+            (OperandValue::ZeroSized, _, _) => bug!(),
+            (
+                OperandValue::Ref(source_place_val),
+                abi::BackendRepr::Memory { .. },
+                abi::BackendRepr::Scalar(_) | abi::BackendRepr::ScalarPair(_, _),
+            ) => {
                 assert_eq!(source_place_val.llextra, None);
-                assert_matches!(operand_kind, OperandValueKind::Ref);
                 // The existing alignment is part of `source_place_val`,
                 // so that alignment will be used, not `cast`'s.
-                Some(bx.load_operand(source_place_val.with_type(cast)).val)
-            }
-            OperandValue::ZeroSized => {
-                let OperandValueKind::ZeroSized = operand_kind else {
-                    bug!("Found {operand_kind:?} for operand {operand:?}");
-                };
-                if let OperandValueKind::ZeroSized = cast_kind {
-                    Some(OperandValue::ZeroSized)
-                } else {
-                    None
-                }
-            }
-            OperandValue::Immediate(imm) => {
-                let OperandValueKind::Immediate(from_scalar) = operand_kind else {
-                    bug!("Found {operand_kind:?} for operand {operand:?}");
-                };
-                if let OperandValueKind::Immediate(to_scalar) = cast_kind
-                    && from_scalar.size(self.cx) == to_scalar.size(self.cx)
-                {
-                    let from_backend_ty = bx.backend_type(operand.layout);
-                    let to_backend_ty = bx.backend_type(cast);
-                    Some(OperandValue::Immediate(transmute_immediate(
-                        bx,
-                        imm,
-                        from_scalar,
-                        from_backend_ty,
-                        to_scalar,
-                        to_backend_ty,
-                    )))
-                } else {
-                    None
-                }
-            }
-            OperandValue::Pair(imm_a, imm_b) => {
-                let OperandValueKind::Pair(in_a, in_b) = operand_kind else {
-                    bug!("Found {operand_kind:?} for operand {operand:?}");
-                };
-                if let OperandValueKind::Pair(out_a, out_b) = cast_kind
-                    && in_a.size(self.cx) == out_a.size(self.cx)
-                    && in_b.size(self.cx) == out_b.size(self.cx)
-                {
-                    let in_a_ibty = bx.scalar_pair_element_backend_type(operand.layout, 0, false);
-                    let in_b_ibty = bx.scalar_pair_element_backend_type(operand.layout, 1, false);
-                    let out_a_ibty = bx.scalar_pair_element_backend_type(cast, 0, false);
-                    let out_b_ibty = bx.scalar_pair_element_backend_type(cast, 1, false);
-                    Some(OperandValue::Pair(
-                        transmute_immediate(bx, imm_a, in_a, in_a_ibty, out_a, out_a_ibty),
-                        transmute_immediate(bx, imm_b, in_b, in_b_ibty, out_b, out_b_ibty),
-                    ))
-                } else {
-                    None
-                }
-            }
-        }
+                bx.load_operand(source_place_val.with_type(cast)).val
+            }
+            (
+                OperandValue::Immediate(imm),
+                abi::BackendRepr::Scalar(from_scalar),
+                abi::BackendRepr::Scalar(to_scalar),
+            ) => OperandValue::Immediate(transmute_immediate(bx, imm, from_scalar, to_scalar)),
+            (
+                OperandValue::Pair(imm_a, imm_b),
+                abi::BackendRepr::ScalarPair(in_a, in_b),
+                abi::BackendRepr::ScalarPair(out_a, out_b),
+            ) => OperandValue::Pair(
+                transmute_immediate(bx, imm_a, in_a, out_a),
+                transmute_immediate(bx, imm_b, in_b, out_b),
+            ),
+            _ => return None,
+        })
     }
 
     /// Cast one of the immediates from an [`OperandValue::Immediate`]
@@ -479,9 +436,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             // path as the other integer-to-X casts.
             | mir::CastKind::PointerWithExposedProvenance => {
                 let imm = operand.immediate();
-                let operand_kind = self.value_kind(operand.layout);
-                let OperandValueKind::Immediate(from_scalar) = operand_kind else {
-                    bug!("Found {operand_kind:?} for operand {operand:?}");
+                let abi::BackendRepr::Scalar(from_scalar) = operand.layout.backend_repr else {
+                    bug!("Found non-scalar for operand {operand:?}");
                 };
                 let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);
 
@@ -491,9 +447,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                     return OperandRef { val, layout: cast };
                 }
-                let cast_kind = self.value_kind(cast);
-                let OperandValueKind::Immediate(to_scalar) = cast_kind else {
-                    bug!("Found {cast_kind:?} for operand {cast:?}");
+                let abi::BackendRepr::Scalar(to_scalar) = cast.layout.backend_repr else {
+                    bug!("Found non-scalar for cast {cast:?}");
                 };
 
                 self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
@@ -993,31 +948,29 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let operand_ty = operand.ty(self.mir, self.cx.tcx());
                 let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
                 let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
-
-                match (self.value_kind(operand_layout), self.value_kind(cast_layout)) {
-                    // Can always load from a pointer as needed
-                    (OperandValueKind::Ref, _) => true,
-
-                    // ZST-to-ZST is the easiest thing ever
-                    (OperandValueKind::ZeroSized, OperandValueKind::ZeroSized) => true,
-
-                    // But if only one of them is a ZST the sizes can't match
-                    (OperandValueKind::ZeroSized, _) | (_, OperandValueKind::ZeroSized) => false,
-
-                    // Need to generate an `alloc` to get a pointer from an immediate
-                    (OperandValueKind::Immediate(..) | OperandValueKind::Pair(..), OperandValueKind::Ref) => false,
+                match (operand_layout.backend_repr, cast_layout.backend_repr) {
+                    // If the input is in a place we can load immediates from there.
+                    (abi::BackendRepr::Memory { .. }, abi::BackendRepr::Scalar(_) | abi::BackendRepr::ScalarPair(_, _)) => true,
 
                     // When we have scalar immediates, we can only convert things
                     // where the sizes match, to avoid endianness questions.
-                    (OperandValueKind::Immediate(a), OperandValueKind::Immediate(b)) =>
+                    (abi::BackendRepr::Scalar(a), abi::BackendRepr::Scalar(b)) =>
                         a.size(self.cx) == b.size(self.cx),
-                    (OperandValueKind::Pair(a0, a1), OperandValueKind::Pair(b0, b1)) =>
+                    (abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) =>
                         a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),
 
-                    // Send mixings between scalars and pairs through the memory route
-                    // FIXME: Maybe this could use insertvalue/extractvalue instead?
-                    (OperandValueKind::Immediate(..), OperandValueKind::Pair(..)) |
-                    (OperandValueKind::Pair(..), OperandValueKind::Immediate(..)) => false,
+                    // SIMD vectors don't work like normal immediates,
+                    // so always send them through memory.
+                    (abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
+
+                    // When the output will be in memory anyway, just use its place
+                    // (instead of the operand path) unless it's the trivial ZST case.
+                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
+
+                    // Mixing Scalars and ScalarPairs can get quite complicated when
+                    // padding and undef get involved, so leave that to the memory path.
+                    (abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _)) |
+                    (abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,
                 }
             }
             mir::Rvalue::Ref(..) |
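The rewritten match above decides, from the pair of backend representations, whether a transmute can stay on the operand path at all; SIMD vectors on either side now always take the memory route. A standalone sketch of the same decision table, with a stand-in enum instead of rustc's `BackendRepr` (the names and byte sizes here are purely illustrative, not compiler APIs):

// Stand-in for rustc_abi::BackendRepr, with scalar sizes in bytes.
#[derive(Clone, Copy)]
enum Repr {
    Scalar(u64),
    ScalarPair(u64, u64),
    SimdVector,
    Memory,
}

fn transmute_stays_on_operand_path(from: Repr, to: Repr, to_is_zst: bool) -> bool {
    match (from, to) {
        // A source already in memory can be re-loaded at the cast type.
        (Repr::Memory, Repr::Scalar(_) | Repr::ScalarPair(..)) => true,
        // Scalar immediates only convert when the sizes match.
        (Repr::Scalar(a), Repr::Scalar(b)) => a == b,
        (Repr::ScalarPair(a0, a1), Repr::ScalarPair(b0, b1)) => a0 == b0 && a1 == b1,
        // SIMD vectors always go through memory now.
        (Repr::SimdVector, _) | (_, Repr::SimdVector) => false,
        // Output already in memory: use its place, except for the trivial ZST case.
        (_, Repr::Memory) => to_is_zst,
        // Scalar <-> ScalarPair mixes also go through memory.
        _ => false,
    }
}

fn main() {
    // e.g. an i64 -> one-element i64 vector transmute is no longer an operand-path case,
    assert!(!transmute_stays_on_operand_path(Repr::Scalar(8), Repr::SimdVector, false));
    // while a same-size scalar-to-scalar transmute still is,
    assert!(transmute_stays_on_operand_path(Repr::Scalar(4), Repr::Scalar(4), false));
    // and loading immediates out of a memory-repr source is always fine.
    assert!(transmute_stays_on_operand_path(Repr::Memory, Repr::ScalarPair(8, 8), false));
}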
@@ -1062,41 +1015,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         // (*) this is only true if the type is suitable
     }
 
-    /// Gets which variant of [`OperandValue`] is expected for a particular type.
-    fn value_kind(&self, layout: TyAndLayout<'tcx>) -> OperandValueKind {
-        if layout.is_zst() {
-            OperandValueKind::ZeroSized
-        } else if self.cx.is_backend_immediate(layout) {
-            assert!(!self.cx.is_backend_scalar_pair(layout));
-            OperandValueKind::Immediate(match layout.backend_repr {
-                abi::BackendRepr::Scalar(s) => s,
-                abi::BackendRepr::SimdVector { element, .. } => element,
-                x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
-            })
-        } else if self.cx.is_backend_scalar_pair(layout) {
-            let abi::BackendRepr::ScalarPair(s1, s2) = layout.backend_repr else {
-                span_bug!(
-                    self.mir.span,
-                    "Couldn't translate {:?} as backend scalar pair",
-                    layout.backend_repr,
-                );
-            };
-            OperandValueKind::Pair(s1, s2)
-        } else {
-            OperandValueKind::Ref
-        }
-    }
 }
 
-/// The variants of this match [`OperandValue`], giving details about the
-/// backend values that will be held in that other type.
-#[derive(Debug, Copy, Clone)]
-enum OperandValueKind {
-    Ref,
-    Immediate(abi::Scalar),
-    Pair(abi::Scalar, abi::Scalar),
-    ZeroSized,
-}
-
 /// Transmutes one of the immediates from an [`OperandValue::Immediate`]
@@ -1108,22 +1026,30 @@ pub(super) fn transmute_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
     mut imm: Bx::Value,
     from_scalar: abi::Scalar,
-    from_backend_ty: Bx::Type,
     to_scalar: abi::Scalar,
-    to_backend_ty: Bx::Type,
 ) -> Bx::Value {
     assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
+    let imm_ty = bx.cx().val_ty(imm);
+    assert_ne!(
+        bx.cx().type_kind(imm_ty),
+        TypeKind::Vector,
+        "Vector type {imm_ty:?} not allowed in transmute_immediate {from_scalar:?} -> {to_scalar:?}"
+    );
 
     // While optimizations will remove no-op transmutes, they might still be
     // there in debug or things that aren't no-op in MIR because they change
     // the Rust type but not the underlying layout/niche.
-    if from_scalar == to_scalar && from_backend_ty == to_backend_ty {
+    if from_scalar == to_scalar {
         return imm;
     }
 
     use abi::Primitive::*;
     imm = bx.from_immediate(imm);
 
+    let from_backend_ty = bx.cx().type_from_scalar(from_scalar);
+    debug_assert_eq!(bx.cx().val_ty(imm), from_backend_ty);
+    let to_backend_ty = bx.cx().type_from_scalar(to_scalar);
+
     // If we have a scalar, we must already know its range. Either
     //
     // 1) It's a parameter with `range` parameter metadata,
@@ -1154,6 +1080,8 @@ pub(super) fn transmute_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         }
     };
 
+    debug_assert_eq!(bx.cx().val_ty(imm), to_backend_ty);
+
     // This `assume` remains important for cases like (a conceptual)
     // transmute::<u32, NonZeroU32>(x) == 0
     // since it's never passed to something with parameter metadata (especially
@@ -9,17 +9,20 @@ use std::mem::transmute;
 // CHECK-LABEL: @check_sse_pair_to_avx(
 #[no_mangle]
 pub unsafe fn check_sse_pair_to_avx(x: (__m128i, __m128i)) -> __m256i {
+    // CHECK: start:
     // CHECK-NOT: alloca
-    // CHECK: %0 = load <4 x i64>, ptr %x, align 16
-    // CHECK: store <4 x i64> %0, ptr %_0, align 32
+    // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 32 %_0, ptr align 16 %x, i64 32, i1 false)
+    // CHECK-NEXT: ret void
     transmute(x)
 }
 
 // CHECK-LABEL: @check_sse_pair_from_avx(
 #[no_mangle]
 pub unsafe fn check_sse_pair_from_avx(x: __m256i) -> (__m128i, __m128i) {
+    // CHECK: start:
     // CHECK-NOT: alloca
-    // CHECK: %0 = load <4 x i64>, ptr %x, align 32
-    // CHECK: store <4 x i64> %0, ptr %_0, align 16
+    // CHECK-NEXT: %[[TEMP:.+]] = load <4 x i64>, ptr %x, align 32
+    // CHECK-NEXT: store <4 x i64> %[[TEMP]], ptr %_0, align 16
+    // CHECK-NEXT: ret void
     transmute(x)
 }
@@ -40,8 +40,7 @@ pub fn build_array_s(x: [f32; 4]) -> S<4> {
 // CHECK-LABEL: @build_array_transmute_s
 #[no_mangle]
 pub fn build_array_transmute_s(x: [f32; 4]) -> S<4> {
-    // CHECK: %[[VAL:.+]] = load <4 x float>, ptr %x, align [[ARRAY_ALIGN]]
-    // CHECK: store <4 x float> %[[VAL:.+]], ptr %_0, align [[VECTOR_ALIGN]]
+    // CHECK: call void @llvm.memcpy.{{.+}}({{.*}} align [[VECTOR_ALIGN]] {{.*}} align [[ARRAY_ALIGN]] {{.*}}, [[USIZE]] 16, i1 false)
     unsafe { std::mem::transmute(x) }
 }
 
@@ -55,7 +54,6 @@ pub fn build_array_t(x: [f32; 4]) -> T {
 // CHECK-LABEL: @build_array_transmute_t
 #[no_mangle]
 pub fn build_array_transmute_t(x: [f32; 4]) -> T {
-    // CHECK: %[[VAL:.+]] = load <4 x float>, ptr %x, align [[ARRAY_ALIGN]]
-    // CHECK: store <4 x float> %[[VAL:.+]], ptr %_0, align [[VECTOR_ALIGN]]
+    // CHECK: call void @llvm.memcpy.{{.+}}({{.*}} align [[VECTOR_ALIGN]] {{.*}} align [[ARRAY_ALIGN]] {{.*}}, [[USIZE]] 16, i1 false)
     unsafe { std::mem::transmute(x) }
 }
@@ -111,8 +111,11 @@ pub fn fake_bool_unsigned_to_bool(b: FakeBoolUnsigned) -> bool {
 struct S([i64; 1]);
 
 // CHECK-LABEL: define{{.*}}i64 @single_element_simd_to_scalar(<1 x i64> %b)
-// CHECK: bitcast <1 x i64> %b to i64
-// CHECK: ret i64
+// CHECK-NEXT: start:
+// CHECK-NEXT: %[[RET:.+]] = alloca [8 x i8]
+// CHECK-NEXT: store <1 x i64> %b, ptr %[[RET]]
+// CHECK-NEXT: %[[TEMP:.+]] = load i64, ptr %[[RET]]
+// CHECK-NEXT: ret i64 %[[TEMP]]
 #[no_mangle]
 #[cfg_attr(target_family = "wasm", target_feature(enable = "simd128"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "neon"))]
@@ -124,8 +127,11 @@ pub extern "C" fn single_element_simd_to_scalar(b: S) -> i64 {
 }
 
 // CHECK-LABEL: define{{.*}}<1 x i64> @scalar_to_single_element_simd(i64 %b)
-// CHECK: bitcast i64 %b to <1 x i64>
-// CHECK: ret <1 x i64>
+// CHECK-NEXT: start:
+// CHECK-NEXT: %[[RET:.+]] = alloca [8 x i8]
+// CHECK-NEXT: store i64 %b, ptr %[[RET]]
+// CHECK-NEXT: %[[TEMP:.+]] = load <1 x i64>, ptr %[[RET]]
+// CHECK-NEXT: ret <1 x i64> %[[TEMP]]
 #[no_mangle]
 #[cfg_attr(target_family = "wasm", target_feature(enable = "simd128"))]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "neon"))]
@@ -41,9 +41,6 @@ pub fn vec_iterator_cast_primitive(vec: Vec<i8>) -> Vec<u8> {
     // CHECK: call{{.+}}void @llvm.assume(i1 %{{.+}})
     // CHECK-NOT: loop
     // CHECK-NOT: call
-    // CHECK: call{{.+}}void @llvm.assume(i1 %{{.+}})
-    // CHECK-NOT: loop
-    // CHECK-NOT: call
     vec.into_iter().map(|e| e as u8).collect()
 }
 
@@ -55,9 +52,6 @@ pub fn vec_iterator_cast_wrapper(vec: Vec<u8>) -> Vec<Wrapper<u8>> {
     // CHECK: call{{.+}}void @llvm.assume(i1 %{{.+}})
     // CHECK-NOT: loop
    // CHECK-NOT: call
-    // CHECK: call{{.+}}void @llvm.assume(i1 %{{.+}})
-    // CHECK-NOT: loop
-    // CHECK-NOT: call
     vec.into_iter().map(|e| Wrapper(e)).collect()
 }
 
@@ -86,9 +80,6 @@ pub fn vec_iterator_cast_unwrap(vec: Vec<Wrapper<u8>>) -> Vec<u8> {
     // CHECK: call{{.+}}void @llvm.assume(i1 %{{.+}})
     // CHECK-NOT: loop
     // CHECK-NOT: call
-    // CHECK: call{{.+}}void @llvm.assume(i1 %{{.+}})
-    // CHECK-NOT: loop
-    // CHECK-NOT: call
     vec.into_iter().map(|e| e.0).collect()
 }
 
@@ -100,9 +91,6 @@ pub fn vec_iterator_cast_aggregate(vec: Vec<[u64; 4]>) -> Vec<Foo> {
     // CHECK: call{{.+}}void @llvm.assume(i1 %{{.+}})
     // CHECK-NOT: loop
     // CHECK-NOT: call
-    // CHECK: call{{.+}}void @llvm.assume(i1 %{{.+}})
-    // CHECK-NOT: loop
-    // CHECK-NOT: call
     vec.into_iter().map(|e| unsafe { std::mem::transmute(e) }).collect()
 }
 
@@ -114,9 +102,6 @@ pub fn vec_iterator_cast_deaggregate_tra(vec: Vec<Bar>) -> Vec<[u64; 4]> {
     // CHECK: call{{.+}}void @llvm.assume(i1 %{{.+}})
     // CHECK-NOT: loop
     // CHECK-NOT: call
-    // CHECK: call{{.+}}void @llvm.assume(i1 %{{.+}})
-    // CHECK-NOT: loop
-    // CHECK-NOT: call
 
     // Safety: For the purpose of this test we assume that Bar layout matches [u64; 4].
     // This currently is not guaranteed for repr(Rust) types, but it happens to work here and
@@ -133,9 +118,6 @@ pub fn vec_iterator_cast_deaggregate_fold(vec: Vec<Baz>) -> Vec<[u64; 4]> {
     // CHECK: call{{.+}}void @llvm.assume(i1 %{{.+}})
     // CHECK-NOT: loop
     // CHECK-NOT: call
-    // CHECK: call{{.+}}void @llvm.assume(i1 %{{.+}})
-    // CHECK-NOT: loop
-    // CHECK-NOT: call
 
     // Safety: For the purpose of this test we assume that Bar layout matches [u64; 4].
     // This currently is not guaranteed for repr(Rust) types, but it happens to work here and
@@ -156,12 +138,7 @@ pub fn vec_iterator_cast_unwrap_drop(vec: Vec<Wrapper<String>>) -> Vec<String> {
     // CHECK-NOT: call
     // CHECK-NOT: %{{.*}} = mul
     // CHECK-NOT: %{{.*}} = udiv
-    // CHECK: call
-    // CHECK-SAME: void @llvm.assume(i1 %{{.+}})
-    // CHECK-NOT: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
-    // CHECK-NOT: call
-    // CHECK-NOT: %{{.*}} = mul
-    // CHECK-NOT: %{{.*}} = udiv
+    // CHECK: ret void
 
     vec.into_iter().map(|Wrapper(e)| e).collect()
 }
@@ -178,12 +155,6 @@ pub fn vec_iterator_cast_wrap_drop(vec: Vec<String>) -> Vec<Wrapper<String>> {
     // CHECK-NOT: call
     // CHECK-NOT: %{{.*}} = mul
     // CHECK-NOT: %{{.*}} = udiv
-    // CHECK: call
-    // CHECK-SAME: void @llvm.assume(i1 %{{.+}})
-    // CHECK-NOT: br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
-    // CHECK-NOT: call
-    // CHECK-NOT: %{{.*}} = mul
-    // CHECK-NOT: %{{.*}} = udiv
     // CHECK: ret void
 
     vec.into_iter().map(Wrapper).collect()