Merge commit 'db1a31c243a649e1fe20f5466ba181da5be35c14' into subtree-update_cg_gcc_2025-04-18

Guillaume Gomez
2025-04-18 21:19:50 +02:00
52 changed files with 959 additions and 1241 deletions

View File

@@ -9,7 +9,8 @@ use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::LayoutOf;
#[cfg(feature = "master")]
use rustc_session::config;
use rustc_target::callconv::{ArgAttributes, CastTarget, FnAbi, PassMode};
#[cfg(feature = "master")]
use rustc_target::callconv::{ArgAttributes, CastTarget, Conv, FnAbi, PassMode};
use crate::builder::Builder;
use crate::context::CodegenCx;
@@ -105,6 +106,8 @@ pub trait FnAbiGccExt<'gcc, 'tcx> {
// TODO(antoyo): return a function pointer type instead?
fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> FnAbiGcc<'gcc>;
fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
#[cfg(feature = "master")]
fn gcc_cconv(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Option<FnAttribute<'gcc>>;
}
impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
@@ -227,4 +230,47 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
);
pointer_type
}
#[cfg(feature = "master")]
fn gcc_cconv(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Option<FnAttribute<'gcc>> {
conv_to_fn_attribute(self.conv, &cx.tcx.sess.target.arch)
}
}
#[cfg(feature = "master")]
pub fn conv_to_fn_attribute<'gcc>(conv: Conv, arch: &str) -> Option<FnAttribute<'gcc>> {
// TODO: handle the calling conventions returning None.
let attribute = match conv {
Conv::C
| Conv::Rust
| Conv::CCmseNonSecureCall
| Conv::CCmseNonSecureEntry
| Conv::RiscvInterrupt { .. } => return None,
Conv::Cold => return None,
Conv::PreserveMost => return None,
Conv::PreserveAll => return None,
Conv::GpuKernel => {
// TODO(antoyo): remove clippy allow attribute when this is implemented.
#[allow(clippy::if_same_then_else)]
if arch == "amdgpu" {
return None;
} else if arch == "nvptx64" {
return None;
} else {
panic!("Architecture {} does not support GpuKernel calling convention", arch);
}
}
Conv::AvrInterrupt => return None,
Conv::AvrNonBlockingInterrupt => return None,
Conv::ArmAapcs => return None,
Conv::Msp430Intr => return None,
Conv::X86Fastcall => return None,
Conv::X86Intr => return None,
Conv::X86Stdcall => return None,
Conv::X86ThisCall => return None,
Conv::X86VectorCall => return None,
Conv::X86_64SysV => FnAttribute::SysvAbi,
Conv::X86_64Win64 => FnAttribute::MsAbi,
};
Some(attribute)
}
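For illustration, a minimal sketch of how the returned attribute is meant to be consumed, mirroring the declare_raw_fn change later in this diff; the symbol name is hypothetical and this is a fragment, not a standalone program:

// Hypothetical call site: translate the ABI's calling convention into a GCC
// function attribute and attach it to the declared function.
let callconv = conv_to_fn_attribute(fn_abi.conv, &cx.tcx.sess.target.arch);
let func = cx.context.new_function(
    None,
    cx.linkage.get(),
    return_type,
    &params,
    "example_symbol", // hypothetical mangled name
    /* variadic */ false,
);
if let Some(attribute) = callconv {
    // e.g. Conv::X86_64Win64 maps to FnAttribute::MsAbi,
    // Conv::X86_64SysV to FnAttribute::SysvAbi.
    func.add_attribute(attribute);
}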

View File

@@ -36,7 +36,8 @@ use crate::type_of::LayoutGccExt;
//
// 3. Clobbers. GCC has a separate list of clobbers, and clobbers don't have indexes.
// By contrast, Rust expresses clobbers through "out" operands that aren't tied to
// a variable (`_`), and such "clobbers" do have index.
// a variable (`_`), and such "clobbers" do have index. Input operands cannot also
// be clobbered.
//
// 4. Furthermore, GCC Extended Asm does not support explicit register constraints
// (like `out("eax")`) directly, offering so-called "local register variables"
@@ -161,6 +162,16 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
// Also, we don't emit any asm operands immediately; we save them to
// one of the buffers to be emitted later.
let mut input_registers = vec![];
for op in rust_operands {
if let InlineAsmOperandRef::In { reg, .. } = *op {
if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
input_registers.push(reg_name);
}
}
}
// 1. Normal variables (and saving operands to buffers).
for (rust_idx, op) in rust_operands.iter().enumerate() {
match *op {
@@ -183,25 +194,39 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
continue;
}
(Register(reg_name), None) => {
// `clobber_abi` can add lots of clobbers that are not supported by the target,
// such as AVX-512 registers, so we just ignore unsupported registers
let is_target_supported =
reg.reg_class().supported_types(asm_arch, true).iter().any(
|&(_, feature)| {
if let Some(feature) = feature {
self.tcx
.asm_target_features(instance.def_id())
.contains(&feature)
} else {
true // Register class is unconditionally supported
}
},
);
if input_registers.contains(&reg_name) {
// The `clobber_abi` operand is converted into a series of
// `lateout("reg") _` operands. Of course, a user could also
// explicitly define such an output operand.
//
// GCC does not allow input registers to be clobbered, so if this out register
// is also used as an in register, do not add it to the clobbers list.
// It will be treated as a lateout register with `out_place: None`.
if !late {
bug!("input registers can only be used as lateout registers");
}
("r", dummy_output_type(self.cx, reg.reg_class()))
} else {
// `clobber_abi` can add lots of clobbers that are not supported by the target,
// such as AVX-512 registers, so we just ignore unsupported registers
let is_target_supported =
reg.reg_class().supported_types(asm_arch, true).iter().any(
|&(_, feature)| {
if let Some(feature) = feature {
self.tcx
.asm_target_features(instance.def_id())
.contains(&feature)
} else {
true // Register class is unconditionally supported
}
},
);
if is_target_supported && !clobbers.contains(&reg_name) {
clobbers.push(reg_name);
if is_target_supported && !clobbers.contains(&reg_name) {
clobbers.push(reg_name);
}
continue;
}
continue;
}
};
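For context, a hedged example of the kind of source code that reaches this branch (the architecture and register choice are illustrative): clobber_abi("C") expands into lateout("reg") _ operands for the ABI's caller-saved registers, and when one of those registers is also an explicit input, the backend must not list it as a GCC clobber.

use std::arch::asm;

// x86_64 only: rax is caller-saved under the C ABI, so clobber_abi("C")
// produces a `lateout("rax") _` operand for it. Since rax is also an
// explicit input here, the backend keeps it out of GCC's clobber list and
// treats it as a lateout register with no output place instead.
#[cfg(target_arch = "x86_64")]
fn example(value: u64) {
    unsafe {
        asm!(
            "nop",
            in("rax") value,
            clobber_abi("C"),
        );
    }
}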
@@ -230,13 +255,10 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
}
InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
let constraint =
if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
constraint
} else {
// left for the next pass
continue;
};
let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) else {
// left for the next pass
continue;
};
// Rustc frontend guarantees that input and output types are "compatible",
// so we can just use input var's type for the output variable.
@@ -589,114 +611,127 @@ fn estimate_template_length(
}
/// Converts a register class to a GCC constraint code.
fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
let constraint = match reg {
// For vector registers LLVM wants the register name to match the type size.
fn reg_to_gcc(reg_or_reg_class: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
match reg_or_reg_class {
InlineAsmRegOrRegClass::Reg(reg) => {
match reg {
InlineAsmReg::X86(_) => {
// TODO(antoyo): add support for vector register.
//
// // For explicit registers, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
return ConstraintOrRegister::Register(match reg.name() {
// Some register names do not map 1-1 from Rust to GCC
"st(0)" => "st",
ConstraintOrRegister::Register(explicit_reg_to_gcc(reg))
}
InlineAsmRegOrRegClass::RegClass(reg_class) => {
ConstraintOrRegister::Constraint(reg_class_to_gcc(reg_class))
}
}
}
name => name,
});
fn explicit_reg_to_gcc(reg: InlineAsmReg) -> &'static str {
// For explicit registers, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
match reg {
InlineAsmReg::X86(reg) => {
// TODO(antoyo): add support for vector register.
match reg.reg_class() {
X86InlineAsmRegClass::reg_byte => {
// GCC does not support the `b` suffix, so we just strip it
// see https://github.com/rust-lang/rustc_codegen_gcc/issues/485
reg.name().trim_end_matches('b')
}
_ => match reg.name() {
// Some register names do not map 1-1 from Rust to GCC
"st(0)" => "st",
_ => unimplemented!(),
name => name,
},
}
}
// They can be retrieved from https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
InlineAsmRegOrRegClass::RegClass(reg) => match reg {
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "t",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::preg) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "d", // more specific than "r"
InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
// https://github.com/gcc-mirror/gcc/blob/master/gcc/config/nvptx/nvptx.md -> look for
// "define_constraint".
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::vreg) => "v",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
| InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "Yk",
InlineAsmRegClass::X86(
X86InlineAsmRegClass::kreg0
| X86InlineAsmRegClass::x87_reg
| X86InlineAsmRegClass::mmx_reg
| X86InlineAsmRegClass::tmm_reg,
) => unreachable!("clobber-only"),
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("GCC backend does not support SPIR-V")
}
InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg_addr) => "a",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::vreg) => "v",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::areg) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"),
InlineAsmRegClass::Err => unreachable!(),
},
};
_ => unimplemented!(),
}
}
ConstraintOrRegister::Constraint(constraint)
/// The constraint codes can be retrieved from https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
fn reg_class_to_gcc(reg_class: InlineAsmRegClass) -> &'static str {
match reg_class {
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "t",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::preg) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "d", // more specific than "r"
InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
// https://github.com/gcc-mirror/gcc/blob/master/gcc/config/nvptx/nvptx.md -> look for
// "define_constraint".
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::vreg) => "v",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
| InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "Yk",
InlineAsmRegClass::X86(
X86InlineAsmRegClass::kreg0
| X86InlineAsmRegClass::x87_reg
| X86InlineAsmRegClass::mmx_reg
| X86InlineAsmRegClass::tmm_reg,
) => unreachable!("clobber-only"),
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("GCC backend does not support SPIR-V")
}
InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg_addr) => "a",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::vreg) => "v",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::areg) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"),
InlineAsmRegClass::Err => unreachable!(),
}
}
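As a rough illustration of what these constraint letters correspond to (the asm template is illustrative): a generic `reg` operand on x86_64 is X86InlineAsmRegClass::reg, which this function maps to the GCC "r" constraint, conceptually producing extended-asm operands like "=r"(out) and "r"(in).

use std::arch::asm;

// x86_64 only: both operands use the generic `reg` class, which
// reg_class_to_gcc turns into the "r" constraint on the GCC side.
#[cfg(target_arch = "x86_64")]
fn copy_via_asm(x: u64) -> u64 {
    let out: u64;
    unsafe {
        asm!("mov {0}, {1}", out(reg) out, in(reg) x);
    }
    out
}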
/// Type to use for outputs that are discarded. It doesn't really matter what

View File

@@ -368,16 +368,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
let previous_arg_count = args.len();
let orig_args = args;
let args = {
let function_address_names = self.function_address_names.borrow();
let original_function_name = function_address_names.get(&func_ptr);
func_ptr = llvm::adjust_function(self.context, &func_name, func_ptr, args);
llvm::adjust_intrinsic_arguments(
self,
gcc_func,
args.into(),
&func_name,
original_function_name,
)
llvm::adjust_intrinsic_arguments(self, gcc_func, args.into(), &func_name)
};
let args_adjusted = args.len() != previous_arg_count;
let args = self.check_ptr_call("call", func_ptr, &args);
@@ -1271,7 +1263,50 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
}
fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
self.context.new_comparison(self.location, op.to_gcc_comparison(), lhs, rhs)
// LLVM has a concept of "unordered compares", where e.g. ULT returns true if either the two
// arguments are unordered (i.e. either is NaN), or the lhs is less than the rhs. GCC does
// not natively have this concept, so in some cases we must manually handle NaNs.
let must_handle_nan = match op {
RealPredicate::RealPredicateFalse => unreachable!(),
RealPredicate::RealOEQ => false,
RealPredicate::RealOGT => false,
RealPredicate::RealOGE => false,
RealPredicate::RealOLT => false,
RealPredicate::RealOLE => false,
RealPredicate::RealONE => false,
RealPredicate::RealORD => unreachable!(),
RealPredicate::RealUNO => unreachable!(),
RealPredicate::RealUEQ => false,
RealPredicate::RealUGT => true,
RealPredicate::RealUGE => true,
RealPredicate::RealULT => true,
RealPredicate::RealULE => true,
RealPredicate::RealUNE => false,
RealPredicate::RealPredicateTrue => unreachable!(),
};
let cmp = self.context.new_comparison(self.location, op.to_gcc_comparison(), lhs, rhs);
if must_handle_nan {
let is_nan = self.context.new_binary_op(
self.location,
BinaryOp::LogicalOr,
self.cx.bool_type,
// compare a value to itself to check whether it is NaN
self.context.new_comparison(self.location, ComparisonOp::NotEquals, lhs, lhs),
self.context.new_comparison(self.location, ComparisonOp::NotEquals, rhs, rhs),
);
self.context.new_binary_op(
self.location,
BinaryOp::LogicalOr,
self.cx.bool_type,
is_nan,
cmp,
)
} else {
cmp
}
}
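A standalone sketch of the predicate semantics implemented above, in plain Rust rather than libgccjit calls: an unordered compare such as RealULT is true when either operand is NaN or when the ordered compare holds, which is exactly the is_nan(lhs) || is_nan(rhs) || cmp expression being built.

// Reference semantics for the "unordered less than" predicate (RealULT):
// true if either operand is NaN, or if lhs < rhs.
fn ult(lhs: f64, rhs: f64) -> bool {
    // `x != x` is the classic NaN check used in the generated comparison.
    (lhs != lhs) || (rhs != rhs) || (lhs < rhs)
}

fn main() {
    assert!(ult(1.0, 2.0));          // ordered: 1.0 < 2.0
    assert!(!ult(2.0, 1.0));         // ordered: not less than
    assert!(ult(f64::NAN, 1.0));     // unordered: a NaN operand makes it true
    assert!(ult(1.0, f64::NAN));
}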
/* Miscellaneous instructions */

View File

@@ -23,6 +23,8 @@ use rustc_target::spec::{
HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, Target, TlsModel, WasmCAbi, X86Abi,
};
#[cfg(feature = "master")]
use crate::abi::conv_to_fn_attribute;
use crate::callee::get_fn;
use crate::common::SignType;
@@ -213,33 +215,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let bool_type = context.new_type::<bool>();
let mut functions = FxHashMap::default();
let builtins = [
"__builtin_unreachable",
"abort",
"__builtin_expect", /*"__builtin_expect_with_probability",*/
"__builtin_constant_p",
"__builtin_add_overflow",
"__builtin_mul_overflow",
"__builtin_saddll_overflow",
/*"__builtin_sadd_overflow",*/
"__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
"__builtin_ssubll_overflow",
/*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow",
"__builtin_uaddll_overflow",
"__builtin_uadd_overflow",
"__builtin_umulll_overflow",
"__builtin_umul_overflow",
"__builtin_usubll_overflow",
"__builtin_usub_overflow",
"__builtin_powif",
"__builtin_powi",
"fabsf",
"fabs",
"copysignf",
"copysign",
"nearbyintf",
"nearbyint",
];
let builtins = ["abort"];
for builtin in builtins.iter() {
functions.insert(builtin.to_string(), context.get_builtin_function(builtin));
@@ -509,7 +485,11 @@ impl<'gcc, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
let entry_name = self.sess().target.entry_name.as_ref();
if !self.functions.borrow().contains_key(entry_name) {
Some(self.declare_entry_fn(entry_name, fn_type, ()))
#[cfg(feature = "master")]
let conv = conv_to_fn_attribute(self.sess().target.entry_abi, &self.sess().target.arch);
#[cfg(not(feature = "master"))]
let conv = None;
Some(self.declare_entry_fn(entry_name, fn_type, conv))
} else {
// If the symbol already exists, it is an error: for example, the user wrote
// #[no_mangle] extern "C" fn main(..) {..}
@@ -605,7 +585,10 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
let mut name = String::with_capacity(prefix.len() + 6);
name.push_str(prefix);
name.push('.');
name.push_str(&(idx as u64).to_base(ALPHANUMERIC_ONLY));
// Offset the index by the base so that at least two characters are always
// generated. This avoids cases where the suffix is interpreted as a size
// by the assembler (for m68k: .b, .w, .l).
name.push_str(&(idx as u64 + ALPHANUMERIC_ONLY as u64).to_base(ALPHANUMERIC_ONLY));
name
}
}
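A standalone sketch of why the offset helps (this is not the rustc_data_structures base-n encoder; the digit alphabet is an assumption): without the offset, a small index can encode to a single character such as b, w, or l, so a symbol like `sym.b` looks like a size-suffixed mnemonic to the m68k assembler.

// Assumed digit alphabet: 0-9, then a-z, then A-Z (62 symbols).
const DIGITS: &[u8] = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";

fn to_base62(mut n: u64) -> String {
    let mut out = Vec::new();
    loop {
        out.push(DIGITS[(n % 62) as usize]);
        n /= 62;
        if n == 0 { break; }
    }
    out.reverse();
    String::from_utf8(out).unwrap()
}

fn main() {
    // Without the offset, index 11 yields the one-character suffix "b".
    // Offsetting by the base (62) guarantees at least two characters.
    assert_eq!(to_base62(11), "b");
    assert_eq!(to_base62(11 + 62), "1b");
}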

View File

@@ -58,7 +58,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
variadic: bool,
) -> Function<'gcc> {
self.linkage.set(FunctionType::Extern);
declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic)
declare_raw_fn(self, name, None, return_type, params, variadic)
}
pub fn declare_global(
@@ -92,7 +92,8 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
&self,
name: &str,
_fn_type: Type<'gcc>,
callconv: (), /*llvm::CCallConv*/
#[cfg(feature = "master")] callconv: Option<FnAttribute<'gcc>>,
#[cfg(not(feature = "master"))] callconv: Option<()>,
) -> RValue<'gcc> {
// TODO(antoyo): use the fn_type parameter.
let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
@@ -123,14 +124,11 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
#[cfg(feature = "master")]
fn_attributes,
} = fn_abi.gcc_type(self);
let func = declare_raw_fn(
self,
name,
(), /*fn_abi.llvm_cconv()*/
return_type,
&arguments_type,
is_c_variadic,
);
#[cfg(feature = "master")]
let conv = fn_abi.gcc_cconv(self);
#[cfg(not(feature = "master"))]
let conv = None;
let func = declare_raw_fn(self, name, conv, return_type, &arguments_type, is_c_variadic);
self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices);
#[cfg(feature = "master")]
for fn_attr in fn_attributes {
@@ -162,7 +160,8 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
fn declare_raw_fn<'gcc>(
cx: &CodegenCx<'gcc, '_>,
name: &str,
_callconv: (), /*llvm::CallConv*/
#[cfg(feature = "master")] callconv: Option<FnAttribute<'gcc>>,
#[cfg(not(feature = "master"))] _callconv: Option<()>,
return_type: Type<'gcc>,
param_types: &[Type<'gcc>],
variadic: bool,
@@ -192,6 +191,10 @@ fn declare_raw_fn<'gcc>(
let name = &mangle_name(name);
let func =
cx.context.new_function(None, cx.linkage.get(), return_type, &params, name, variadic);
#[cfg(feature = "master")]
if let Some(attribute) = callconv {
func.add_attribute(attribute);
}
cx.functions.borrow_mut().insert(name.to_string(), func);
#[cfg(feature = "master")]

View File

@@ -194,6 +194,7 @@ pub fn to_gcc_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]>
fn arch_to_gcc(name: &str) -> &str {
match name {
"M68000" => "68000",
"M68020" => "68020",
_ => name,
}

View File

@@ -432,8 +432,17 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
);
self.context.new_call(self.location, func, &[lhs, rhs, overflow_addr])
};
// NOTE: we must assign the result of the operation to a variable at this point to make
// sure it will be evaluated by libgccjit now.
// Otherwise, it would only be evaluated when the rvalue for the call is used somewhere
// else, and overflow_value would not be initialized at the correct point in the program.
let result = self.current_func().new_local(self.location, res_type, "result");
self.block.add_assignment(self.location, result, call);
(result, self.context.new_cast(self.location, overflow_value, self.bool_type).to_rvalue())
(
result.to_rvalue(),
self.context.new_cast(self.location, overflow_value, self.bool_type).to_rvalue(),
)
}
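For orientation, the kind of source-level operation that ends up in this 128-bit checked-arithmetic path when compiling with the GCC backend (ordinary Rust, shown only to illustrate what the (result, overflow_value) pair feeds):

fn main() {
    // Overflow-checked 128-bit addition: the backend lowers this to an
    // overflow check along the lines of the code above, forcing the result
    // into a local before the overflow flag is read.
    let (value, overflowed) = u128::MAX.overflowing_add(1);
    assert_eq!(value, 0);
    assert!(overflowed);
}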
pub fn gcc_icmp(
@@ -865,6 +874,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let value_type = value.get_type();
if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type)
{
// TODO: use self.location.
self.context.new_cast(None, value, dest_typ)
} else if self.is_native_int_type_or_bool(dest_typ) {
self.context.new_cast(None, self.low(value), dest_typ)
@@ -905,6 +915,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let name_suffix = match self.type_kind(dest_typ) {
TypeKind::Float => "tisf",
TypeKind::Double => "tidf",
TypeKind::FP128 => "tixf",
kind => panic!("cannot cast a non-native integer to type {:?}", kind),
};
let sign = if signed { "" } else { "un" };

View File

@@ -1,11 +1,90 @@
use std::borrow::Cow;
use gccjit::{CType, Context, Function, FunctionPtrType, RValue, ToRValue, UnaryOp};
use gccjit::{CType, Context, Field, Function, FunctionPtrType, RValue, ToRValue, Type};
use rustc_codegen_ssa::traits::BuilderMethods;
use crate::builder::Builder;
use crate::context::CodegenCx;
fn encode_key_128_type<'a, 'gcc, 'tcx>(
builder: &Builder<'a, 'gcc, 'tcx>,
) -> (Type<'gcc>, Field<'gcc>, Field<'gcc>) {
let m128i = builder.context.new_vector_type(builder.i64_type, 2);
let field1 = builder.context.new_field(None, builder.u32_type, "field1");
let field2 = builder.context.new_field(None, m128i, "field2");
let field3 = builder.context.new_field(None, m128i, "field3");
let field4 = builder.context.new_field(None, m128i, "field4");
let field5 = builder.context.new_field(None, m128i, "field5");
let field6 = builder.context.new_field(None, m128i, "field6");
let field7 = builder.context.new_field(None, m128i, "field7");
let encode_type = builder.context.new_struct_type(
None,
"EncodeKey128Output",
&[field1, field2, field3, field4, field5, field6, field7],
);
#[cfg(feature = "master")]
encode_type.as_type().set_packed();
(encode_type.as_type(), field1, field2)
}
fn encode_key_256_type<'a, 'gcc, 'tcx>(
builder: &Builder<'a, 'gcc, 'tcx>,
) -> (Type<'gcc>, Field<'gcc>, Field<'gcc>) {
let m128i = builder.context.new_vector_type(builder.i64_type, 2);
let field1 = builder.context.new_field(None, builder.u32_type, "field1");
let field2 = builder.context.new_field(None, m128i, "field2");
let field3 = builder.context.new_field(None, m128i, "field3");
let field4 = builder.context.new_field(None, m128i, "field4");
let field5 = builder.context.new_field(None, m128i, "field5");
let field6 = builder.context.new_field(None, m128i, "field6");
let field7 = builder.context.new_field(None, m128i, "field7");
let field8 = builder.context.new_field(None, m128i, "field8");
let encode_type = builder.context.new_struct_type(
None,
"EncodeKey256Output",
&[field1, field2, field3, field4, field5, field6, field7, field8],
);
#[cfg(feature = "master")]
encode_type.as_type().set_packed();
(encode_type.as_type(), field1, field2)
}
fn aes_output_type<'a, 'gcc, 'tcx>(
builder: &Builder<'a, 'gcc, 'tcx>,
) -> (Type<'gcc>, Field<'gcc>, Field<'gcc>) {
let m128i = builder.context.new_vector_type(builder.i64_type, 2);
let field1 = builder.context.new_field(None, builder.u8_type, "field1");
let field2 = builder.context.new_field(None, m128i, "field2");
let aes_output_type = builder.context.new_struct_type(None, "AesOutput", &[field1, field2]);
let typ = aes_output_type.as_type();
#[cfg(feature = "master")]
typ.set_packed();
(typ, field1, field2)
}
fn wide_aes_output_type<'a, 'gcc, 'tcx>(
builder: &Builder<'a, 'gcc, 'tcx>,
) -> (Type<'gcc>, Field<'gcc>, Field<'gcc>) {
let m128i = builder.context.new_vector_type(builder.i64_type, 2);
let field1 = builder.context.new_field(None, builder.u8_type, "field1");
let field2 = builder.context.new_field(None, m128i, "field2");
let field3 = builder.context.new_field(None, m128i, "field3");
let field4 = builder.context.new_field(None, m128i, "field4");
let field5 = builder.context.new_field(None, m128i, "field5");
let field6 = builder.context.new_field(None, m128i, "field6");
let field7 = builder.context.new_field(None, m128i, "field7");
let field8 = builder.context.new_field(None, m128i, "field8");
let field9 = builder.context.new_field(None, m128i, "field9");
let aes_output_type = builder.context.new_struct_type(
None,
"WideAesOutput",
&[field1, field2, field3, field4, field5, field6, field7, field8, field9],
);
#[cfg(feature = "master")]
aes_output_type.as_type().set_packed();
(aes_output_type.as_type(), field1, field2)
}
#[cfg_attr(not(feature = "master"), allow(unused_variables))]
pub fn adjust_function<'gcc>(
context: &'gcc Context<'gcc>,
@@ -43,7 +122,6 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
gcc_func: FunctionPtrType<'gcc>,
mut args: Cow<'b, [RValue<'gcc>]>,
func_name: &str,
original_function_name: Option<&String>,
) -> Cow<'b, [RValue<'gcc>]> {
// TODO: this might not be a good way to work around the missing tile builtins.
if func_name == "__builtin_trap" {
@@ -504,6 +582,72 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
let arg4 = builder.context.new_rvalue_from_int(arg4_type, -1);
args = vec![a, b, c, arg4, new_args[3]].into();
}
"__builtin_ia32_encodekey128_u32" => {
let mut new_args = args.to_vec();
let m128i = builder.context.new_vector_type(builder.i64_type, 2);
let array_type = builder.context.new_array_type(None, m128i, 6);
let result = builder.current_func().new_local(None, array_type, "result");
new_args.push(result.get_address(None));
args = new_args.into();
}
"__builtin_ia32_encodekey256_u32" => {
let mut new_args = args.to_vec();
let m128i = builder.context.new_vector_type(builder.i64_type, 2);
let array_type = builder.context.new_array_type(None, m128i, 7);
let result = builder.current_func().new_local(None, array_type, "result");
new_args.push(result.get_address(None));
args = new_args.into();
}
"__builtin_ia32_aesenc128kl_u8"
| "__builtin_ia32_aesdec128kl_u8"
| "__builtin_ia32_aesenc256kl_u8"
| "__builtin_ia32_aesdec256kl_u8" => {
let mut new_args = vec![];
let m128i = builder.context.new_vector_type(builder.i64_type, 2);
let result = builder.current_func().new_local(None, m128i, "result");
new_args.push(result.get_address(None));
new_args.extend(args.to_vec());
args = new_args.into();
}
"__builtin_ia32_aesencwide128kl_u8"
| "__builtin_ia32_aesdecwide128kl_u8"
| "__builtin_ia32_aesencwide256kl_u8"
| "__builtin_ia32_aesdecwide256kl_u8" => {
let mut new_args = vec![];
let mut old_args = args.to_vec();
let handle = old_args.swap_remove(0); // Called __P in GCC.
let first_value = old_args.swap_remove(0);
let element_type = first_value.get_type();
let array_type = builder.context.new_array_type(None, element_type, 8);
let result = builder.current_func().new_local(None, array_type, "result");
new_args.push(result.get_address(None));
let array = builder.current_func().new_local(None, array_type, "array");
let input = builder.context.new_array_constructor(
None,
array_type,
&[
first_value,
old_args.swap_remove(0),
old_args.swap_remove(0),
old_args.swap_remove(0),
old_args.swap_remove(0),
old_args.swap_remove(0),
old_args.swap_remove(0),
old_args.swap_remove(0),
],
);
builder.llbb().add_assignment(None, array, input);
let input_ptr = array.get_address(None);
let arg2_type = gcc_func.get_param_type(1);
let input_ptr = builder.context.new_cast(None, input_ptr, arg2_type);
new_args.push(input_ptr);
new_args.push(handle);
args = new_args.into();
}
_ => (),
}
} else {
@@ -541,33 +685,6 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 2]);
args = vec![a, b, c, new_args[3]].into();
}
"__builtin_ia32_vfmaddsubpd256"
| "__builtin_ia32_vfmaddsubps"
| "__builtin_ia32_vfmaddsubps256"
| "__builtin_ia32_vfmaddsubpd" => {
if let Some(original_function_name) = original_function_name {
match &**original_function_name {
"llvm.x86.fma.vfmsubadd.pd.256"
| "llvm.x86.fma.vfmsubadd.ps"
| "llvm.x86.fma.vfmsubadd.ps.256"
| "llvm.x86.fma.vfmsubadd.pd" => {
// NOTE: since both llvm.x86.fma.vfmsubadd.ps and llvm.x86.fma.vfmaddsub.ps maps to
// __builtin_ia32_vfmaddsubps, only add minus if this comes from a
// subadd LLVM intrinsic, e.g. _mm256_fmsubadd_pd.
let mut new_args = args.to_vec();
let arg3 = &mut new_args[2];
*arg3 = builder.context.new_unary_op(
None,
UnaryOp::Minus,
arg3.get_type(),
*arg3,
);
args = new_args.into();
}
_ => (),
}
}
}
"__builtin_ia32_ldmxcsr" => {
// The builtin __builtin_ia32_ldmxcsr takes an integer value while llvm.x86.sse.ldmxcsr takes a pointer,
// so dereference the pointer.
@@ -728,6 +845,96 @@ pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(
let f16_type = builder.context.new_c_type(CType::Float16);
return_value = builder.context.new_cast(None, return_value, f16_type);
}
"__builtin_ia32_encodekey128_u32" => {
// The builtin __builtin_ia32_encodekey128_u32 writes the result in its pointer argument while
// llvm.x86.encodekey128 returns a value.
// We added a result pointer argument and now need to assign its value to the return_value expected by
// the LLVM intrinsic.
let (encode_type, field1, field2) = encode_key_128_type(builder);
let result = builder.current_func().new_local(None, encode_type, "result");
let field1 = result.access_field(None, field1);
builder.llbb().add_assignment(None, field1, return_value);
let field2 = result.access_field(None, field2);
let field2_type = field2.to_rvalue().get_type();
let array_type = builder.context.new_array_type(None, field2_type, 6);
let ptr = builder.context.new_cast(None, args[2], array_type.make_pointer());
let field2_ptr =
builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer());
builder.llbb().add_assignment(
None,
field2_ptr.dereference(None),
ptr.dereference(None),
);
return_value = result.to_rvalue();
}
"__builtin_ia32_encodekey256_u32" => {
// The builtin __builtin_ia32_encodekey256_u32 writes the result in its pointer argument while
// llvm.x86.encodekey256 returns a value.
// We added a result pointer argument and now need to assign its value to the return_value expected by
// the LLVM intrinsic.
let (encode_type, field1, field2) = encode_key_256_type(builder);
let result = builder.current_func().new_local(None, encode_type, "result");
let field1 = result.access_field(None, field1);
builder.llbb().add_assignment(None, field1, return_value);
let field2 = result.access_field(None, field2);
let field2_type = field2.to_rvalue().get_type();
let array_type = builder.context.new_array_type(None, field2_type, 7);
let ptr = builder.context.new_cast(None, args[3], array_type.make_pointer());
let field2_ptr =
builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer());
builder.llbb().add_assignment(
None,
field2_ptr.dereference(None),
ptr.dereference(None),
);
return_value = result.to_rvalue();
}
"__builtin_ia32_aesdec128kl_u8"
| "__builtin_ia32_aesenc128kl_u8"
| "__builtin_ia32_aesdec256kl_u8"
| "__builtin_ia32_aesenc256kl_u8" => {
// The builtin for aesdec/aesenc writes the result in its pointer argument while
// llvm.x86.aesdec128kl returns a value.
// We added a result pointer argument and now need to assign its value to the return_value expected by
// the LLVM intrinsic.
let (aes_output_type, field1, field2) = aes_output_type(builder);
let result = builder.current_func().new_local(None, aes_output_type, "result");
let field1 = result.access_field(None, field1);
builder.llbb().add_assignment(None, field1, return_value);
let field2 = result.access_field(None, field2);
let ptr = builder.context.new_cast(
None,
args[0],
field2.to_rvalue().get_type().make_pointer(),
);
builder.llbb().add_assignment(None, field2, ptr.dereference(None));
return_value = result.to_rvalue();
}
"__builtin_ia32_aesencwide128kl_u8"
| "__builtin_ia32_aesdecwide128kl_u8"
| "__builtin_ia32_aesencwide256kl_u8"
| "__builtin_ia32_aesdecwide256kl_u8" => {
// The builtin for aesdecwide/aesencwide writes the result in its pointer argument while
// llvm.x86.aesencwide128kl returns a value.
// We added a result pointer argument and now need to assign its value to the return_value expected by
// the LLVM intrinsic.
let (aes_output_type, field1, field2) = wide_aes_output_type(builder);
let result = builder.current_func().new_local(None, aes_output_type, "result");
let field1 = result.access_field(None, field1);
builder.llbb().add_assignment(None, field1, return_value);
let field2 = result.access_field(None, field2);
let field2_type = field2.to_rvalue().get_type();
let array_type = builder.context.new_array_type(None, field2_type, 8);
let ptr = builder.context.new_cast(None, args[0], array_type.make_pointer());
let field2_ptr =
builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer());
builder.llbb().add_assignment(
None,
field2_ptr.dereference(None),
ptr.dereference(None),
);
return_value = result.to_rvalue();
}
_ => (),
}
@@ -915,16 +1122,6 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
"llvm.ctlz.v4i64" => "__builtin_ia32_vplzcntq_256_mask",
"llvm.ctlz.v2i64" => "__builtin_ia32_vplzcntq_128_mask",
"llvm.ctpop.v32i16" => "__builtin_ia32_vpopcountw_v32hi",
"llvm.x86.fma.vfmsub.sd" => "__builtin_ia32_vfmsubsd3",
"llvm.x86.fma.vfmsub.ss" => "__builtin_ia32_vfmsubss3",
"llvm.x86.fma.vfmsubadd.pd" => "__builtin_ia32_vfmaddsubpd",
"llvm.x86.fma.vfmsubadd.pd.256" => "__builtin_ia32_vfmaddsubpd256",
"llvm.x86.fma.vfmsubadd.ps" => "__builtin_ia32_vfmaddsubps",
"llvm.x86.fma.vfmsubadd.ps.256" => "__builtin_ia32_vfmaddsubps256",
"llvm.x86.fma.vfnmadd.sd" => "__builtin_ia32_vfnmaddsd3",
"llvm.x86.fma.vfnmadd.ss" => "__builtin_ia32_vfnmaddss3",
"llvm.x86.fma.vfnmsub.sd" => "__builtin_ia32_vfnmsubsd3",
"llvm.x86.fma.vfnmsub.ss" => "__builtin_ia32_vfnmsubss3",
"llvm.x86.avx512.conflict.d.512" => "__builtin_ia32_vpconflictsi_512_mask",
"llvm.x86.avx512.conflict.d.256" => "__builtin_ia32_vpconflictsi_256_mask",
"llvm.x86.avx512.conflict.d.128" => "__builtin_ia32_vpconflictsi_128_mask",
@@ -1002,8 +1199,6 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
"llvm.fshr.v32i16" => "__builtin_ia32_vpshrdv_v32hi",
"llvm.fshr.v16i16" => "__builtin_ia32_vpshrdv_v16hi",
"llvm.fshr.v8i16" => "__builtin_ia32_vpshrdv_v8hi",
"llvm.x86.fma.vfmadd.sd" => "__builtin_ia32_vfmaddsd3",
"llvm.x86.fma.vfmadd.ss" => "__builtin_ia32_vfmaddss3",
"llvm.x86.rdrand.64" => "__builtin_ia32_rdrand64_step",
// The above doc points to unknown builtins for the following, so override them:
@@ -1324,6 +1519,16 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
"llvm.x86.avx512fp16.mask.vfmadd.cph.256" => "__builtin_ia32_vfmaddcph256_mask3",
"llvm.x86.avx512fp16.mask.vfcmadd.cph.128" => "__builtin_ia32_vfcmaddcph128_mask3",
"llvm.x86.avx512fp16.mask.vfmadd.cph.128" => "__builtin_ia32_vfmaddcph128_mask3",
"llvm.x86.encodekey128" => "__builtin_ia32_encodekey128_u32",
"llvm.x86.encodekey256" => "__builtin_ia32_encodekey256_u32",
"llvm.x86.aesenc128kl" => "__builtin_ia32_aesenc128kl_u8",
"llvm.x86.aesdec128kl" => "__builtin_ia32_aesdec128kl_u8",
"llvm.x86.aesenc256kl" => "__builtin_ia32_aesenc256kl_u8",
"llvm.x86.aesdec256kl" => "__builtin_ia32_aesdec256kl_u8",
"llvm.x86.aesencwide128kl" => "__builtin_ia32_aesencwide128kl_u8",
"llvm.x86.aesdecwide128kl" => "__builtin_ia32_aesdecwide128kl_u8",
"llvm.x86.aesencwide256kl" => "__builtin_ia32_aesencwide256kl_u8",
"llvm.x86.aesdecwide256kl" => "__builtin_ia32_aesdecwide256kl_u8",
// TODO: support the tile builtins:
"llvm.x86.ldtilecfg" => "__builtin_trap",

View File

@@ -78,6 +78,7 @@ fn get_simple_intrinsic<'gcc, 'tcx>(
sym::maxnumf64 => "fmax",
sym::copysignf32 => "copysignf",
sym::copysignf64 => "copysign",
sym::copysignf128 => "copysignl",
sym::floorf32 => "floorf",
sym::floorf64 => "floor",
sym::ceilf32 => "ceilf",

View File

@@ -399,7 +399,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
}
#[cfg(feature = "master")]
if name == sym::simd_insert {
if name == sym::simd_insert || name == sym::simd_insert_dyn {
require!(
in_elem == arg_tys[2],
InvalidMonomorphization::InsertedType {
@@ -410,6 +410,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
out_ty: arg_tys[2]
}
);
// TODO(antoyo): For simd_insert, check if the index is a constant of the correct size.
let vector = args[0].immediate();
let index = args[1].immediate();
let value = args[2].immediate();
@@ -422,13 +424,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
}
#[cfg(feature = "master")]
if name == sym::simd_extract {
if name == sym::simd_extract || name == sym::simd_extract_dyn {
require!(
ret_ty == in_elem,
InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
// TODO(antoyo): For simd_extract, check if the index is a constant of the correct size.
let vector = args[0].immediate();
return Ok(bx.context.new_vector_access(None, vector, args[1].immediate()).to_rvalue());
let index = args[1].immediate();
return Ok(bx.context.new_vector_access(None, vector, index).to_rvalue());
}
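A hedged sketch of what the `_dyn` variants enable at the source level, assuming the unstable core::intrinsics::simd API (the exact paths and the u32 index type are assumptions based on the existing simd_insert/simd_extract intrinsics):

#![feature(core_intrinsics, repr_simd)]
use std::intrinsics::simd::{simd_extract_dyn, simd_insert_dyn};

#[repr(simd)]
#[derive(Copy, Clone)]
struct I32x4([i32; 4]);

fn main() {
    let v = I32x4([10, 20, 30, 40]);
    // Runtime index: allowed for the *_dyn intrinsics, whereas
    // simd_insert/simd_extract expect a constant index.
    let idx: u32 = std::hint::black_box(2);
    unsafe {
        let elem: i32 = simd_extract_dyn(v, idx);
        assert_eq!(elem, 30);
        let v2 = simd_insert_dyn(v, idx, 99);
        assert_eq!(simd_extract_dyn::<_, i32>(v2, idx), 99);
    }
}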
if name == sym::simd_select {

View File

@@ -188,10 +188,10 @@ impl CodegenBackend for GccCodegenBackend {
crate::DEFAULT_LOCALE_RESOURCE
}
fn init(&self, sess: &Session) {
fn init(&self, _sess: &Session) {
#[cfg(feature = "master")]
{
let target_cpu = target_cpu(sess);
let target_cpu = target_cpu(_sess);
// Get the second TargetInfo with the correct CPU features by setting the arch.
let context = Context::default();