Merge commit 'd3a2366ee877075c59b38bd8ced55f224fc7ef51' into sync_cg_clif-2022-07-26
@@ -13,15 +13,11 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
     ret: CPlace<'tcx>,
     target: Option<BasicBlock>,
 ) {
-    intrinsic_match! {
-        fx, intrinsic, args,
-        _ => {
-            fx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
-            crate::trap::trap_unimplemented(fx, intrinsic);
-        };
-
+    match intrinsic {
         // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
-        "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd", (c a) {
+        "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd" => {
+            intrinsic_args!(fx, args => (a); intrinsic);
+
             let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
             let lane_ty = fx.clif_type(lane_ty).unwrap();
             assert!(lane_count <= 32);
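Two things change shape in this first hunk: the `intrinsic_match!` invocation becomes a plain `match intrinsic`, and the catch-all `_ =>` arm moves from the head of the macro input to the tail of the `match`, where a real `match` needs it so the specific arms stay reachable. The `intrinsic_args!` macro used by the new arms is not shown in this diff; judging by the open-coded pattern the `cmp.ps`/`cmp.pd` arm spells out below, a one-argument use presumably expands to something like:

```rust
// Hypothetical sketch of `intrinsic_args!(fx, args => (a); intrinsic)`,
// modeled on the open-coded arms later in this diff; not the real macro.
let a = match args {
    [a] => codegen_operand(fx, a),
    _ => bug!("wrong number of args for intrinsic {intrinsic}"),
};
```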
@@ -29,7 +25,8 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
             let mut res = fx.bcx.ins().iconst(types::I32, 0);
 
             for lane in (0..lane_count).rev() {
-                let a_lane = a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
+                let a_lane =
+                    a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
 
                 // cast float to int
                 let a_lane = match lane_ty {
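The loop this hunk touches assembles the movemask result: bit `i` of the returned `i32` is the sign bit of lane `i`, which is why lanes are visited in reverse and shifted in one at a time. A scalar model of that semantics, assuming 32-bit lanes (illustrative, not cg_clif code):

```rust
// Scalar model of a movemask over 32-bit lanes: bit i of the result
// is the sign bit of lane i; high lanes are folded in first.
fn movemask(lanes: &[u32]) -> i32 {
    assert!(lanes.len() <= 32);
    let mut res: i32 = 0;
    for &lane in lanes.iter().rev() {
        res = (res << 1) | (lane >> 31) as i32;
    }
    res
}
```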
@@ -49,26 +46,29 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
 
             let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
             ret.write_cvalue(fx, res);
-        };
-        "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd", (c x, c y, o kind) {
-            let kind = crate::constant::mir_operand_get_const_val(fx, kind).expect("llvm.x86.sse2.cmp.* kind not const");
-            let flt_cc = match kind.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind)) {
+        }
+        "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
+            let (x, y, kind) = match args {
+                [x, y, kind] => (x, y, kind),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let x = codegen_operand(fx, x);
+            let y = codegen_operand(fx, y);
+            let kind = crate::constant::mir_operand_get_const_val(fx, kind)
+                .expect("llvm.x86.sse2.cmp.* kind not const");
+
+            let flt_cc = match kind
+                .try_to_bits(Size::from_bytes(1))
+                .unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
+            {
                 0 => FloatCC::Equal,
                 1 => FloatCC::LessThan,
                 2 => FloatCC::LessThanOrEqual,
-                7 => {
-                    unimplemented!("Compares corresponding elements in `a` and `b` to see if neither is `NaN`.");
-                }
-                3 => {
-                    unimplemented!("Compares corresponding elements in `a` and `b` to see if either is `NaN`.");
-                }
+                7 => FloatCC::Ordered,
+                3 => FloatCC::Unordered,
                 4 => FloatCC::NotEqual,
-                5 => {
-                    unimplemented!("not less than");
-                }
-                6 => {
-                    unimplemented!("not less than or equal");
-                }
+                5 => FloatCC::UnorderedOrGreaterThanOrEqual,
+                6 => FloatCC::UnorderedOrGreaterThan,
                 kind => unreachable!("kind {:?}", kind),
             };
 
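The `kind` byte follows the SSE `_CMP_*` encoding: 0 = EQ, 1 = LT, 2 = LE, 3 = UNORD, 4 = NEQ, 5 = NLT, 6 = NLE, 7 = ORD. The old macro body left 3, 5, 6 and 7 unimplemented; the new arms map them directly onto Cranelift's `FloatCC`. The negated predicates 5 and 6 are true whenever the comparison is unordered (either input is NaN), hence `UnorderedOrGreaterThanOrEqual`/`UnorderedOrGreaterThan` rather than the plain ordered variants. Each lane's boolean is then widened by `bool_to_zero_or_max_uint` into the all-zeros/all-ones lane mask x86 expects. An illustrative check of the NaN behaviour against the corresponding std intrinsic:

```rust
use std::arch::x86_64::*;

// NLT (kind 5) with a NaN operand: "not less than" holds for unordered
// comparisons, so every lane of the mask comes back set.
unsafe fn cmpnlt_nan_sets_all_lanes() -> bool {
    let nan = _mm_set1_ps(f32::NAN);
    let zero = _mm_setzero_ps();
    _mm_movemask_ps(_mm_cmpnlt_ps(nan, zero)) == 0b1111
}
```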
@@ -79,50 +79,67 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
                 };
                 bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
             });
-        };
-        "llvm.x86.sse2.psrli.d", (c a, o imm8) {
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
-                match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
-                    imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
-                    _ => fx.bcx.ins().iconst(types::I32, 0),
-                }
-            });
-        };
+        }
+        "llvm.x86.sse2.psrli.d" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.sse2.psrli.d imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
+            });
+        }
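The `imm8 < 32` guard is there because the two instruction sets disagree about out-of-range shift counts: x86's `psrld` flushes every lane to zero once the count reaches the lane width, while Cranelift's `ushr_imm` masks the count modulo the type width (the behaviour of Rust's `wrapping_shr`). A scalar model of the intended per-lane behaviour (illustrative, not cg_clif code):

```rust
// x86 psrli semantics for one 32-bit lane: a count of 32 or more
// zeroes the lane instead of wrapping the shift amount.
fn psrli_lane(lane: u32, imm8: u64) -> u32 {
    if imm8 < 32 { lane >> imm8 as u32 } else { 0 }
}
```

The `pslli.d` arm below is the mirror image using `ishl_imm`; note its expect message still says `psrli.d`, a copy-paste leftover carried over verbatim from the old macro body.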
-        "llvm.x86.sse2.pslli.d", (c a, o imm8) {
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
-                match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
-                    imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
-                    _ => fx.bcx.ins().iconst(types::I32, 0),
-                }
-            });
-        };
+        "llvm.x86.sse2.pslli.d" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.sse2.psrli.d imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
+            });
+        }
-        "llvm.x86.sse2.storeu.dq", (v mem_addr, c a) {
+        "llvm.x86.sse2.storeu.dq" => {
+            intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
+            let mem_addr = mem_addr.load_scalar(fx);
+
             // FIXME correctly handle the unalignment
             let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
             dest.write_cvalue(fx, a);
-        };
+        }
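`storeu` intrinsics explicitly permit unaligned destinations; that is what the FIXME is about. `CPlace::for_ptr` presumably tags the store with the vector layout's ABI alignment (16 bytes here), so the code as written promises more alignment than `_mm_storeu_si128` guarantees. A scalar model of the required semantics (illustrative, not cg_clif code):

```rust
// What llvm.x86.sse2.storeu.dq promises: a 16-byte store that must
// tolerate any address alignment.
unsafe fn storeu_dq(mem_addr: *mut u8, a: [u8; 16]) {
    core::ptr::write_unaligned(mem_addr.cast::<[u8; 16]>(), a);
}
```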
-        "llvm.x86.addcarry.64", (v c_in, c a, c b) {
-            llvm_add_sub(
-                fx,
-                BinOp::Add,
-                ret,
-                c_in,
-                a,
-                b
-            );
-        };
-        "llvm.x86.subborrow.64", (v b_in, c a, c b) {
-            llvm_add_sub(
-                fx,
-                BinOp::Sub,
-                ret,
-                b_in,
-                a,
-                b
-            );
-        };
+        "llvm.x86.addcarry.64" => {
+            intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
+            let c_in = c_in.load_scalar(fx);
+
+            llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
+        }
+        "llvm.x86.subborrow.64" => {
+            intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
+            let b_in = b_in.load_scalar(fx);
+
+            llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
+        }
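Both arms funnel into the shared `llvm_add_sub` helper; the reformatted calls are now single lines. The underlying semantics of `llvm.x86.addcarry.64`/`llvm.x86.subborrow.64` are those of `_addcarry_u64`/`_subborrow_u64`: take a carry/borrow flag in, perform the wide add or subtract, and hand the flag out next to the 64-bit result. A scalar model (illustrative, not the helper itself):

```rust
// llvm.x86.addcarry.64: (carry_out, sum) where sum = a + b + c_in.
fn addcarry_u64(c_in: u8, a: u64, b: u64) -> (u8, u64) {
    let (s1, o1) = a.overflowing_add(b);
    let (s2, o2) = s1.overflowing_add(c_in as u64);
    ((o1 | o2) as u8, s2)
}

// llvm.x86.subborrow.64: (borrow_out, diff) where diff = a - b - b_in.
fn subborrow_u64(b_in: u8, a: u64, b: u64) -> (u8, u64) {
    let (d1, o1) = a.overflowing_sub(b);
    let (d2, o2) = d1.overflowing_sub(b_in as u64);
    ((o1 | o2) as u8, d2)
}
```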
-    }
+        _ => {
+            fx.tcx
+                .sess
+                .warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
+            crate::trap::trap_unimplemented(fx, intrinsic);
+        }
+    }
 
     let dest = target.expect("all llvm intrinsics used by stdlib should return");
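`target` is `None` only for calls that diverge; every LLVM intrinsic reaching this point returns normally, so the function unwraps it and can branch on. The lines after this hunk are not shown, but the usual cg_clif epilogue for a non-diverging call would be along these lines (an assumption, not part of the diff):

```rust
// Assumed continuation (not shown in the hunk): resolve the MIR target
// block to its CLIF block and jump to it.
let ret_block = fx.get_block(dest);
fx.bcx.ins().jump(ret_block, &[]);
```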