Address PR feedback
```diff
@@ -355,12 +355,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             self.val
         } else if field.size == self.layout.size {
             assert_eq!(offset.bytes(), 0);
-            fx.codegen_transmute_operand(bx, *self, field).unwrap_or_else(|| {
-                bug!(
-                    "Expected `codegen_transmute_operand` to handle equal-size \
-                     field {i:?} projection from {self:?} to {field:?}"
-                )
-            })
+            fx.codegen_transmute_operand(bx, *self, field)
         } else {
             let (in_scalar, imm) = match (self.val, self.layout.backend_repr) {
                 // Extract a scalar component from a pair.
```
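For intuition about that first hunk: the equal-size field case is the newtype-style layout where a single field occupies the whole struct, so projecting the field out is, at the ABI level, a same-size transmute. The `unwrap_or_else(|| bug!(...))` scaffolding can go away because the rest of this commit makes `codegen_transmute_operand` infallible. A minimal sketch of the situation in surface Rust (my example, not part of the commit):

```rust
use std::mem::{size_of, transmute};

// A newtype whose one field occupies the wrapper's entire layout.
// Extracting `.0` from it is the `field.size == self.layout.size`
// case above: an equal-size transmute of the whole value.
#[repr(transparent)]
struct Wrapped(u64);

fn main() {
    assert_eq!(size_of::<Wrapped>(), size_of::<u64>());

    // Ordinary field projection...
    let a = Wrapped(0xDEAD_BEEF).0;
    // ...yields the same value as transmuting the whole struct.
    let b: u64 = unsafe { transmute(Wrapped(0xDEAD_BEEF)) };
    assert_eq!(a, b);
}
```

The bulk of the change is in the `FunctionCx` transmute lowering: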
```diff
@@ -188,6 +188,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }
     }
 
+    /// Transmutes the `src` value to the destination type by writing it to `dst`.
+    ///
+    /// See also [`Self::codegen_transmute_operand`] for cases that can be done
+    /// without needing a pre-allocated place for the destination.
     fn codegen_transmute(
         &mut self,
         bx: &mut Bx,
@@ -198,31 +202,36 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         assert!(src.layout.is_sized());
         assert!(dst.layout.is_sized());
 
-        if src.layout.size == dst.layout.size {
+        if src.layout.size != dst.layout.size
+            || src.layout.is_uninhabited()
+            || dst.layout.is_uninhabited()
+        {
+            // These cases are all UB to actually hit, so don't emit code for them.
+            // (The size mismatches are reachable via `transmute_unchecked`.)
+            // We can't use unreachable because that's a terminator, and we
+            // need something that can be in the middle of a basic block.
+            bx.assume(bx.cx().const_bool(false))
+        } else {
             // Since in this path we have a place anyway, we can store or copy to it,
             // making sure we use the destination place's alignment even if the
             // source would normally have a higher one.
             src.val.store(bx, dst.val.with_type(src.layout));
-        } else if src.layout.is_uninhabited() {
-            bx.unreachable()
-        } else {
-            // Since this is known statically and the input could have existed
-            // without already having hit UB, might as well trap for it, even
-            // though it's UB so we *could* also unreachable it.
-            bx.abort();
         }
     }
 
-    /// Attempts to transmute an `OperandValue` to another `OperandValue`.
+    /// Transmutes an `OperandValue` to another `OperandValue`.
     ///
-    /// Returns `None` for cases that can't work in that framework, such as for
-    /// `Immediate`->`Ref` that needs an `alloca` to get the location.
+    /// This is supported only for cases where [`Self::rvalue_creates_operand`]
+    /// returns `true`, and will ICE otherwise. (In particular, anything that
+    /// would need to `alloca` in order to return a `PlaceValue` will ICE,
+    /// expecting those to go via [`Self::codegen_transmute`] instead where
+    /// the destination place is already allocated.)
     pub(crate) fn codegen_transmute_operand(
         &mut self,
         bx: &mut Bx,
         operand: OperandRef<'tcx, Bx::Value>,
         cast: TyAndLayout<'tcx>,
-    ) -> Option<OperandValue<Bx::Value>> {
+    ) -> OperandValue<Bx::Value> {
         // Check for transmutes that are always UB.
         if operand.layout.size != cast.size
             || operand.layout.is_uninhabited()
```
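The rewritten early-out collects every statically-always-UB transmute (size mismatch, uninhabited source, uninhabited destination) into one branch and emits `assume(false)` for it. Unlike `unreachable`, an assumption is not a block terminator, so it can sit in the middle of a basic block while still telling LLVM the path is impossible; previously these cases were split between `bx.unreachable()` and `bx.abort()`. Size mismatches are reachable only through the unstable `transmute_unchecked` intrinsic, but the uninhabited case can be sketched in stable Rust (my example, not from the commit):

```rust
use std::convert::Infallible;
use std::mem::transmute;

// `(u32, Infallible)` has the same size as `u32` (uninhabited types are
// zero-sized), but it is uninhabited: no value of it can ever exist.
// This function type-checks, yet its body can never be reached, which is
// why the backend is free to lower the transmute to `assume(false)`
// instead of emitting real instructions.
fn never_runs(pair: (u32, Infallible)) -> u32 {
    unsafe { transmute(pair) }
}

fn main() {
    // The function exists, but no caller can ever construct its argument.
    let _f: fn((u32, Infallible)) -> u32 = never_runs;
}
```

The rest of `codegen_transmute_operand` changes to match the now-infallible contract: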
```diff
@@ -236,17 +245,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
             // Because this transmute is UB, return something easy to generate,
             // since it's fine that later uses of the value are probably UB.
-            return Some(OperandValue::poison(bx, cast));
+            return OperandValue::poison(bx, cast);
         }
 
-        Some(match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
+        match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
             _ if cast.is_zst() => OperandValue::ZeroSized,
-            (OperandValue::ZeroSized, _, _) => bug!(),
-            (
-                OperandValue::Ref(source_place_val),
-                abi::BackendRepr::Memory { .. },
-                abi::BackendRepr::Scalar(_) | abi::BackendRepr::ScalarPair(_, _),
-            ) => {
+            (_, _, abi::BackendRepr::Memory { .. }) => {
+                bug!("Cannot `codegen_transmute_operand` to non-ZST memory-ABI output {cast:?}");
+            }
+            (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                 assert_eq!(source_place_val.llextra, None);
                 // The existing alignment is part of `source_place_val`,
                 // so that alignment will be used, not `cast`'s.
@@ -265,8 +272,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     transmute_scalar(bx, imm_a, in_a, out_a),
                     transmute_scalar(bx, imm_b, in_b, out_b),
                 ),
-            _ => return None,
-        })
+            _ => bug!("Cannot `codegen_transmute_operand` {operand:?} to {cast:?}"),
+        }
     }
 
     /// Cast one of the immediates from an [`OperandValue::Immediate`]
```
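The arms that survive on the operand path are scalar-to-scalar and pair-to-pair conversions through `transmute_scalar`. In surface Rust those correspond to transmutes like this sketch (my example; which path a given transmute takes is an internal detail):

```rust
use std::mem::transmute;

fn main() {
    // Scalar -> Scalar: a same-size bit reinterpretation that can stay an
    // SSA immediate, with no stack slot involved.
    let bits: u32 = unsafe { transmute(1.0f32) };
    assert_eq!(bits, 1.0f32.to_bits());

    // ScalarPair -> ScalarPair: `&str` and `&[u8]` are both (pointer, length)
    // pairs, so each component is converted independently, mirroring the
    // two `transmute_scalar` calls in the hunk above.
    let bytes: &[u8] = unsafe { transmute("hi") };
    assert_eq!(bytes, b"hi");
}
```

With the `Option` gone, the call site in rvalue codegen simplifies, and `rvalue_creates_operand` picks up documentation plus reordered `backend_repr` checks: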
```diff
@@ -458,9 +465,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 })
             }
             mir::CastKind::Transmute => {
-                self.codegen_transmute_operand(bx, operand, cast).unwrap_or_else(|| {
-                    bug!("Unsupported transmute-as-operand of {operand:?} to {cast:?}");
-                })
+                self.codegen_transmute_operand(bx, operand, cast)
             }
         };
         OperandRef { val, layout: cast }
@@ -942,6 +947,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         OperandValue::Pair(val, of)
     }
 
+    /// Returns `true` if the `rvalue` can be computed into an [`OperandRef`],
+    /// rather than needing a full `PlaceRef` for the assignment destination.
+    ///
+    /// This is used by the [`super::analyze`] code to decide which MIR locals
+    /// can stay as SSA values (as opposed to generating `alloca` slots for them).
+    /// As such, some paths here return `true` even where the specific rvalue
+    /// will not actually take the operand path because the result type is such
+    /// that it always gets an `alloca`, but where it's not worth re-checking the
+    /// layout in this code when the right thing will happen anyway.
     pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
         match *rvalue {
             mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
@@ -949,8 +963,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
                 let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
                 match (operand_layout.backend_repr, cast_layout.backend_repr) {
-                    // If the input is in a place we can load immediates from there.
-                    (abi::BackendRepr::Memory { .. }, abi::BackendRepr::Scalar(_) | abi::BackendRepr::ScalarPair(_, _)) => true,
+                    // When the output will be in memory anyway, just use its place
+                    // (instead of the operand path) unless it's the trivial ZST case.
+                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
+
+                    // Otherwise (for a non-memory output) if the input is memory
+                    // then we can just read the value from the place.
+                    (abi::BackendRepr::Memory { .. }, _) => true,
 
                     // When we have scalar immediates, we can only convert things
                     // where the sizes match, to avoid endianness questions.
@@ -959,18 +978,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     (abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) =>
                         a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),
 
-                    // SIMD vectors don't work like normal immediates,
-                    // so always send them through memory.
-                    (abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
-
-                    // When the output will be in memory anyway, just use its place
-                    // (instead of the operand path) unless it's the trivial ZST case.
-                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
-
                     // Mixing Scalars and ScalarPairs can get quite complicated when
                     // padding and undef get involved, so leave that to the memory path.
                     (abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _)) |
                     (abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,
+
+                    // SIMD vectors aren't worth the trouble of dealing with complex
+                    // cases like from vectors of f32 to vectors of pointers or
+                    // from fat pointers to vectors of u16. (See #143194 #110021 ...)
+                    (abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
                 }
             }
             mir::Rvalue::Ref(..) |
```
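To make the match arms concrete: integers, floats, and thin pointers are `BackendRepr::Scalar`; fat pointers are `ScalarPair`; arrays and most aggregates are `Memory`. A small illustration of which path a transmute would take (my example; either path produces the same result, the distinction only affects whether an `alloca` is generated):

```rust
use std::mem::transmute;

fn main() {
    let x: u64 = 0x0001_0002_0003_0004;

    // u64 -> [u16; 4]: arrays have `BackendRepr::Memory`, so this hits the
    // `(_, Memory) => cast_layout.is_zst()` arm and goes through a place
    // rather than the operand path.
    let parts: [u16; 4] = unsafe { transmute(x) };

    // [u16; 4] -> u64: the *input* is memory and the output is a scalar,
    // the `(Memory, _) => true` arm: the value is simply loaded from the
    // place, so the destination local can stay as an SSA value.
    let back: u64 = unsafe { transmute(parts) };
    assert_eq!(back, x);
}
```

Finally, the codegen test for `transmute_unchecked` is updated to expect the new `llvm.assume(i1 false)` lowering in place of `llvm.trap`: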
```diff
@@ -29,28 +29,28 @@ pub struct Aggregate8(u8);
 // CHECK-LABEL: @check_bigger_size(
 #[no_mangle]
 pub unsafe fn check_bigger_size(x: u16) -> u32 {
-    // CHECK: call void @llvm.trap
+    // CHECK: call void @llvm.assume(i1 false)
     transmute_unchecked(x)
 }
 
 // CHECK-LABEL: @check_smaller_size(
 #[no_mangle]
 pub unsafe fn check_smaller_size(x: u32) -> u16 {
-    // CHECK: call void @llvm.trap
+    // CHECK: call void @llvm.assume(i1 false)
     transmute_unchecked(x)
 }
 
 // CHECK-LABEL: @check_smaller_array(
 #[no_mangle]
 pub unsafe fn check_smaller_array(x: [u32; 7]) -> [u32; 3] {
-    // CHECK: call void @llvm.trap
+    // CHECK: call void @llvm.assume(i1 false)
     transmute_unchecked(x)
 }
 
 // CHECK-LABEL: @check_bigger_array(
 #[no_mangle]
 pub unsafe fn check_bigger_array(x: [u32; 3]) -> [u32; 7] {
-    // CHECK: call void @llvm.trap
+    // CHECK: call void @llvm.assume(i1 false)
     transmute_unchecked(x)
 }
 
@@ -73,9 +73,9 @@ pub unsafe fn check_to_empty_array(x: [u32; 5]) -> [u32; 0] {
 #[no_mangle]
 #[custom_mir(dialect = "runtime", phase = "optimized")]
 pub unsafe fn check_from_empty_array(x: [u32; 0]) -> [u32; 5] {
-    // CHECK-NOT: trap
-    // CHECK: call void @llvm.trap
-    // CHECK-NOT: trap
+    // CHECK-NOT: call
+    // CHECK: call void @llvm.assume(i1 false)
+    // CHECK-NOT: call
     mir! {
         {
             RET = CastTransmute(x);
```
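A note on why these tests need the `transmute_unchecked` helper (and, for the empty-array cases, custom MIR) at all: stable `mem::transmute` rejects size mismatches at compile time with E0512, so the `assume(i1 false)` path cannot be reached through it. A quick sketch (mine, not from the commit):

```rust
use std::mem::transmute;

fn main() {
    // Equal sizes: accepted, and checked statically.
    let ok: u32 = unsafe { transmute(1.0f32) };
    assert_eq!(ok, 0x3F80_0000);

    // Different sizes: stable `transmute` refuses to compile this, so the
    // size-mismatch lowering is exercised only via the unstable
    // `transmute_unchecked` intrinsic or hand-written MIR.
    // let bad: u32 = unsafe { transmute(0u16) }; // error[E0512]
}
```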