Merge commit '39683d8eb7a32a74bea96ecbf1e87675d3338506' into sync_cg_gcc-2022-03-26
@@ -1,5 +1,6 @@
-use gccjit::{ToRValue, Type};
+use gccjit::{ToLValue, ToRValue, Type};
 use rustc_codegen_ssa::traits::{AbiBuilderMethods, BaseTypeMethods};
+use rustc_data_structures::stable_set::FxHashSet;
 use rustc_middle::bug;
 use rustc_middle::ty::Ty;
 use rustc_target::abi::call::{CastTarget, FnAbi, PassMode, Reg, RegKind};
@@ -15,9 +16,21 @@ impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn get_param(&mut self, index: usize) -> Self::Value {
-        self.cx.current_func.borrow().expect("current func")
-            .get_param(index as i32)
-            .to_rvalue()
+        let func = self.current_func();
+        let param = func.get_param(index as i32);
+        let on_stack =
+            if let Some(on_stack_param_indices) = self.on_stack_function_params.borrow().get(&func) {
+                on_stack_param_indices.contains(&index)
+            }
+            else {
+                false
+            };
+        if on_stack {
+            param.to_lvalue().get_address(None)
+        }
+        else {
+            param.to_rvalue()
+        }
     }
 }

@@ -87,12 +100,13 @@ impl GccType for Reg {

 pub trait FnAbiGccExt<'gcc, 'tcx> {
     // TODO(antoyo): return a function pointer type instead?
-    fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool);
+    fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>);
     fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
 }

 impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
-    fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool) {
+    fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>) {
+        let mut on_stack_param_indices = FxHashSet::default();
         let args_capacity: usize = self.args.iter().map(|arg|
             if arg.pad.is_some() {
                 1
@@ -144,17 +158,22 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     unimplemented!();
                 }
                 PassMode::Cast(cast) => cast.gcc_type(cx),
-                PassMode::Indirect { extra_attrs: None, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
+                PassMode::Indirect { extra_attrs: None, on_stack: true, .. } => {
+                    on_stack_param_indices.insert(argument_tys.len());
+                    arg.memory_ty(cx)
+                },
+                PassMode::Indirect { extra_attrs: None, on_stack: false, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
             };
             argument_tys.push(arg_ty);
         }

-        (return_ty, argument_tys, self.c_variadic)
+        (return_ty, argument_tys, self.c_variadic, on_stack_param_indices)
     }

     fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
-        let (return_type, params, variadic) = self.gcc_type(cx);
+        let (return_type, params, variadic, on_stack_param_indices) = self.gcc_type(cx);
         let pointer_type = cx.context.new_function_pointer_type(None, return_type, &params, variadic);
+        cx.on_stack_params.borrow_mut().insert(pointer_type.dyncast_function_ptr_type().expect("function ptr type"), on_stack_param_indices);
         pointer_type
     }
 }

@@ -45,7 +45,7 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
         if env::var("CG_GCCJIT_DUMP_MODULE_NAMES").as_deref() == Ok("1") {
             println!("Module {}", module.name);
         }
-        if env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name) {
+        if env::var("CG_GCCJIT_DUMP_ALL_MODULES").as_deref() == Ok("1") || env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name) {
             println!("Dumping reproducer {}", module.name);
             let _ = fs::create_dir("/tmp/reproducers");
             // FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
@@ -54,6 +54,11 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
             context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
             println!("Dumped reproducer {}", module.name);
         }
+        if env::var("CG_GCCJIT_DUMP_TO_FILE").as_deref() == Ok("1") {
+            let _ = fs::create_dir("/tmp/gccjit_dumps");
+            let path = &format!("/tmp/gccjit_dumps/{}.c", module.name);
+            context.dump_to_file(path, true);
+        }
         context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
     }

@@ -52,7 +52,7 @@ pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
     }
 }

-pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<GccContext>, u64) {
+pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol, supports_128bit_integers: bool) -> (ModuleCodegen<GccContext>, u64) {
     let prof_timer = tcx.prof.generic_activity("codegen_module");
     let start_time = Instant::now();

@@ -60,7 +60,7 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul
     let (module, _) = tcx.dep_graph.with_task(
         dep_node,
         tcx,
-        cgu_name,
+        (cgu_name, supports_128bit_integers),
         module_codegen,
         Some(dep_graph::hash_result),
     );
@@ -71,7 +71,7 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul
     // the time we needed for codegenning it.
     let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;

-    fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<GccContext> {
+    fn module_codegen(tcx: TyCtxt<'_>, (cgu_name, supports_128bit_integers): (Symbol, bool)) -> ModuleCodegen<GccContext> {
         let cgu = tcx.codegen_unit(cgu_name);
         // Instantiate monomorphizations without filling out definitions yet...
         //let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
@@ -85,6 +85,12 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul
         context.add_command_line_option("-fno-semantic-interposition");
         // NOTE: Rust relies on LLVM not doing TBAA (https://github.com/rust-lang/unsafe-code-guidelines/issues/292).
         context.add_command_line_option("-fno-strict-aliasing");
+
+        if tcx.sess.opts.debugging_opts.function_sections.unwrap_or(tcx.sess.target.function_sections) {
+            context.add_command_line_option("-ffunction-sections");
+            context.add_command_line_option("-fdata-sections");
+        }
+
         if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") {
             context.set_dump_code_on_compile(true);
         }
@@ -99,8 +105,11 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul
             context.set_keep_intermediates(true);
         }

+        // TODO(bjorn3): Remove once unwinding is properly implemented
+        context.set_allow_unreachable_blocks(true);
+
         {
-            let cx = CodegenCx::new(&context, cgu, tcx);
+            let cx = CodegenCx::new(&context, cgu, tcx, supports_128bit_integers);

             let mono_items = cgu.items_in_deterministic_order(tcx);
             for &(mono_item, (linkage, visibility)) in &mono_items {

@@ -30,6 +30,7 @@ use rustc_codegen_ssa::traits::{
     OverflowOp,
     StaticBuilderMethods,
 };
+use rustc_data_structures::stable_set::FxHashSet;
 use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
 use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
 use rustc_span::Span;
@@ -80,21 +81,21 @@ impl EnumClone for AtomicOrdering {

 pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
     pub cx: &'a CodegenCx<'gcc, 'tcx>,
-    pub block: Option<Block<'gcc>>,
+    pub block: Block<'gcc>,
     stack_var_count: Cell<usize>,
 }

 impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
-    fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>) -> Self {
+    fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
         Builder {
             cx,
-            block: None,
+            block,
             stack_var_count: Cell::new(0),
         }
     }

     fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
-        let size = self.cx.int_width(src.get_type()) / 8;
+        let size = src.get_type().get_size();

         let func = self.current_func();

@@ -114,10 +115,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let after_block = func.new_block("after_while");
         self.llbb().end_with_jump(None, while_block);

-        // NOTE: since jumps were added and compare_exchange doesn't expect this, the current blocks in the
+        // NOTE: since jumps were added and compare_exchange doesn't expect this, the current block in the
         // state need to be updated.
-        self.block = Some(while_block);
-        *self.cx.current_block.borrow_mut() = Some(while_block);
+        self.switch_to_block(while_block);

         let comparison_operator =
             match operation {
@@ -132,17 +132,16 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {

         while_block.end_with_conditional(None, cond, while_block, after_block);

-        // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
+        // NOTE: since jumps were added in a place rustc does not expect, the current block in the
         // state need to be updated.
-        self.block = Some(after_block);
-        *self.cx.current_block.borrow_mut() = Some(after_block);
+        self.switch_to_block(after_block);

         return_value.to_rvalue()
     }

     fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
-        let size = self.cx.int_width(src.get_type());
-        let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
+        let size = src.get_type().get_size();
+        let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size));
         let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
         let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
         let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
@@ -209,6 +208,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             param_types.push(param);
         }

+        let mut on_stack_param_indices = FxHashSet::default();
+        if let Some(indices) = self.on_stack_params.borrow().get(&gcc_func) {
+            on_stack_param_indices = indices.clone();
+        }
+
         if all_args_match {
             return Cow::Borrowed(args);
         }
@@ -217,10 +221,15 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             .into_iter()
             .zip(args.iter())
             .enumerate()
-            .map(|(_i, (expected_ty, &actual_val))| {
+            .map(|(index, (expected_ty, &actual_val))| {
                 let actual_ty = actual_val.get_type();
                 if expected_ty != actual_ty {
-                    self.bitcast(actual_val, expected_ty)
+                    if on_stack_param_indices.contains(&index) {
+                        actual_val.dereference(None).to_rvalue()
+                    }
+                    else {
+                        self.bitcast(actual_val, expected_ty)
+                    }
                 }
                 else {
                     actual_val
@@ -245,7 +254,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     }

     pub fn current_func(&self) -> Function<'gcc> {
-        self.block.expect("block").get_function()
+        self.block.get_function()
     }

     fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
@@ -256,17 +265,16 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         // gccjit requires to use the result of functions, even when it's not used.
         // That's why we assign the result to a local or call add_eval().
         let return_type = func.get_return_type();
-        let current_block = self.current_block.borrow().expect("block");
         let void_type = self.context.new_type::<()>();
-        let current_func = current_block.get_function();
+        let current_func = self.block.get_function();
         if return_type != void_type {
             unsafe { RETURN_VALUE_COUNT += 1 };
             let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
-            current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+            self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
             result.to_rvalue()
         }
         else {
-            current_block.add_eval(None, self.cx.context.new_call(None, func, &args));
+            self.block.add_eval(None, self.cx.context.new_call(None, func, &args));
             // Return dummy value when not having return value.
             self.context.new_rvalue_from_long(self.isize_type, 0)
         }
@@ -279,9 +287,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         // That's why we assign the result to a local or call add_eval().
         let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
         let mut return_type = gcc_func.get_return_type();
-        let current_block = self.current_block.borrow().expect("block");
         let void_type = self.context.new_type::<()>();
-        let current_func = current_block.get_function();
+        let current_func = self.block.get_function();

         // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
         if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" {
@@ -290,35 +297,34 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {

         if return_type != void_type {
             unsafe { RETURN_VALUE_COUNT += 1 };
-            let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
-            current_block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+            let result = current_func.new_local(None, return_type, &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
+            self.block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
             result.to_rvalue()
         }
         else {
             if gcc_func.get_param_count() == 0 {
                 // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
-                current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
+                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
             }
             else {
-                current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
             }
             // Return dummy value when not having return value.
             let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
-            current_block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
+            self.block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
             result.to_rvalue()
         }
     }

-    pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+    pub fn overflow_call(&self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
         // gccjit requires to use the result of functions, even when it's not used.
         // That's why we assign the result to a local.
         let return_type = self.context.new_type::<bool>();
-        let current_block = self.current_block.borrow().expect("block");
-        let current_func = current_block.get_function();
+        let current_func = self.block.get_function();
         // TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
         unsafe { RETURN_VALUE_COUNT += 1 };
-        let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
-        current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+        let result = current_func.new_local(None, return_type, &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
+        self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
         result.to_rvalue()
     }
 }
@@ -384,14 +390,11 @@ impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {

 impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
-        let mut bx = Builder::with_cx(cx);
-        *cx.current_block.borrow_mut() = Some(block);
-        bx.block = Some(block);
-        bx
+        Builder::with_cx(cx, block)
     }

     fn llbb(&self) -> Block<'gcc> {
-        self.block.expect("block")
+        self.block
     }

     fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
@@ -405,8 +408,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn switch_to_block(&mut self, block: Self::BasicBlock) {
-        *self.cx.current_block.borrow_mut() = Some(block);
-        self.block = Some(block);
+        self.block = block;
     }

     fn ret_void(&mut self) {
@@ -441,50 +443,42 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             let on_val = self.const_uint_big(typ, on_val);
             gcc_cases.push(self.context.new_case(on_val, on_val, dest));
         }
-        self.block.expect("block").end_with_switch(None, value, default_block, &gcc_cases);
+        self.block.end_with_switch(None, value, default_block, &gcc_cases);
     }

-    fn invoke(&mut self, _typ: Type<'gcc>, _func: RValue<'gcc>, _args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
-        let condition = self.context.new_rvalue_from_int(self.bool_type, 0);
+    fn invoke(&mut self, typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+        // TODO(bjorn3): Properly implement unwinding.
+        let call_site = self.call(typ, func, args, None);
+        let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
         self.llbb().end_with_conditional(None, condition, then, catch);
-        self.context.new_rvalue_from_int(self.int_type, 0)
-
-        // TODO(antoyo)
+        call_site
     }

     fn unreachable(&mut self) {
         let func = self.context.get_builtin_function("__builtin_unreachable");
-        let block = self.block.expect("block");
-        block.add_eval(None, self.context.new_call(None, func, &[]));
-        let return_type = block.get_function().get_return_type();
+        self.block.add_eval(None, self.context.new_call(None, func, &[]));
+        let return_type = self.block.get_function().get_return_type();
         let void_type = self.context.new_type::<()>();
         if return_type == void_type {
-            block.end_with_void_return(None)
+            self.block.end_with_void_return(None)
         }
         else {
             let return_value = self.current_func()
                 .new_local(None, return_type, "unreachableReturn");
-            block.end_with_return(None, return_value)
+            self.block.end_with_return(None, return_value)
         }
     }

-    fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
-        // FIXME(antoyo): this should not be required.
-        if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) {
-            b = self.context.new_cast(None, b, a.get_type());
-        }
-        a + b
+    fn add(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_add(a, b)
     }

     fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
         a + b
     }

-    fn sub(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
-        if a.get_type() != b.get_type() {
-            b = self.context.new_cast(None, b, a.get_type());
-        }
-        a - b
+    fn sub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_sub(a, b)
     }

     fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -492,7 +486,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        a * b
+        self.gcc_mul(a, b)
     }

     fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -500,8 +494,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        // TODO(antoyo): convert the arguments to unsigned?
-        a / b
+        self.gcc_udiv(a, b)
     }

     fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -511,8 +504,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        // TODO(antoyo): convert the arguments to signed?
-        a / b
+        self.gcc_sdiv(a, b)
     }

     fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -529,11 +521,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        a % b
+        self.gcc_urem(a, b)
     }

     fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        a % b
+        self.gcc_srem(a, b)
     }

     fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -549,81 +541,33 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
-        let a_type = a.get_type();
-        let b_type = b.get_type();
-        if a_type.is_unsigned(self) && b_type.is_signed(self) {
-            let a = self.context.new_cast(None, a, b_type);
-            let result = a << b;
-            self.context.new_cast(None, result, a_type)
-        }
-        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
-            let b = self.context.new_cast(None, b, a_type);
-            a << b
-        }
-        else {
-            a << b
-        }
+        self.gcc_shl(a, b)
     }

     fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
-        // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
-        let a_type = a.get_type();
-        let b_type = b.get_type();
-        if a_type.is_unsigned(self) && b_type.is_signed(self) {
-            let a = self.context.new_cast(None, a, b_type);
-            let result = a >> b;
-            self.context.new_cast(None, result, a_type)
-        }
-        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
-            let b = self.context.new_cast(None, b, a_type);
-            a >> b
-        }
-        else {
-            a >> b
-        }
+        self.gcc_lshr(a, b)
     }

     fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
         // TODO(antoyo): check whether behavior is an arithmetic shift for >> .
-        // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
-        let a_type = a.get_type();
-        let b_type = b.get_type();
-        if a_type.is_unsigned(self) && b_type.is_signed(self) {
-            let a = self.context.new_cast(None, a, b_type);
-            let result = a >> b;
-            self.context.new_cast(None, result, a_type)
-        }
-        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
-            let b = self.context.new_cast(None, b, a_type);
-            a >> b
-        }
-        else {
-            a >> b
-        }
+        // It seems to be if the value is signed.
+        self.gcc_lshr(a, b)
     }

-    fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
-        if a.get_type() != b.get_type() {
-            b = self.context.new_cast(None, b, a.get_type());
-        }
-        a & b
+    fn and(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_and(a, b)
     }

-    fn or(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
-        if a.get_type() != b.get_type() {
-            b = self.context.new_cast(None, b, a.get_type());
-        }
-        a | b
+    fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        self.cx.gcc_or(a, b)
     }

     fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        a ^ b
+        self.gcc_xor(a, b)
     }

     fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
-        self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
+        self.gcc_neg(a)
     }

     fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
@@ -631,14 +575,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
-        let operation =
-            if a.get_type().is_bool() {
-                UnaryOp::LogicalNegate
-            }
-            else {
-                UnaryOp::BitwiseNegate
-            };
-        self.cx.context.new_unary_op(None, operation, a.get_type(), a)
+        self.gcc_not(a)
     }

     fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -646,7 +583,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        a + b
+        self.gcc_add(a, b)
     }

     fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -655,7 +592,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {

     fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
         // TODO(antoyo): should generate poison value?
-        a - b
+        self.gcc_sub(a, b)
     }

     fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -687,76 +624,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
-        use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
-
-        let new_kind =
-            match typ.kind() {
-                Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
-                Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
-                t @ (Uint(_) | Int(_)) => t.clone(),
-                _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
-            };
-
-        // TODO(antoyo): remove duplication with intrinsic?
-        let name =
-            match oop {
-                OverflowOp::Add =>
-                    match new_kind {
-                        Int(I8) => "__builtin_add_overflow",
-                        Int(I16) => "__builtin_add_overflow",
-                        Int(I32) => "__builtin_sadd_overflow",
-                        Int(I64) => "__builtin_saddll_overflow",
-                        Int(I128) => "__builtin_add_overflow",
-
-                        Uint(U8) => "__builtin_add_overflow",
-                        Uint(U16) => "__builtin_add_overflow",
-                        Uint(U32) => "__builtin_uadd_overflow",
-                        Uint(U64) => "__builtin_uaddll_overflow",
-                        Uint(U128) => "__builtin_add_overflow",
-
-                        _ => unreachable!(),
-                    },
-                OverflowOp::Sub =>
-                    match new_kind {
-                        Int(I8) => "__builtin_sub_overflow",
-                        Int(I16) => "__builtin_sub_overflow",
-                        Int(I32) => "__builtin_ssub_overflow",
-                        Int(I64) => "__builtin_ssubll_overflow",
-                        Int(I128) => "__builtin_sub_overflow",
-
-                        Uint(U8) => "__builtin_sub_overflow",
-                        Uint(U16) => "__builtin_sub_overflow",
-                        Uint(U32) => "__builtin_usub_overflow",
-                        Uint(U64) => "__builtin_usubll_overflow",
-                        Uint(U128) => "__builtin_sub_overflow",
-
-                        _ => unreachable!(),
-                    },
-                OverflowOp::Mul =>
-                    match new_kind {
-                        Int(I8) => "__builtin_mul_overflow",
-                        Int(I16) => "__builtin_mul_overflow",
-                        Int(I32) => "__builtin_smul_overflow",
-                        Int(I64) => "__builtin_smulll_overflow",
-                        Int(I128) => "__builtin_mul_overflow",
-
-                        Uint(U8) => "__builtin_mul_overflow",
-                        Uint(U16) => "__builtin_mul_overflow",
-                        Uint(U32) => "__builtin_umul_overflow",
-                        Uint(U64) => "__builtin_umulll_overflow",
-                        Uint(U128) => "__builtin_mul_overflow",
-
-                        _ => unreachable!(),
-                    },
-            };
-
-        let intrinsic = self.context.get_builtin_function(&name);
-        let res = self.current_func()
-            // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
-            .new_local(None, rhs.get_type(), "binopResult")
-            .get_address(None);
-        let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
-        (res.dereference(None).to_rvalue(), overflow)
+        self.gcc_checked_binop(oop, typ, lhs, rhs)
     }

     fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
@@ -1006,7 +874,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     /* Casts */
     fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
         // TODO(antoyo): check that it indeed truncate the value.
-        self.context.new_cast(None, value, dest_ty)
+        self.gcc_int_cast(value, dest_ty)
     }

     fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
@@ -1019,19 +887,19 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.context.new_cast(None, value, dest_ty)
+        self.gcc_float_to_uint_cast(value, dest_ty)
     }

     fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.context.new_cast(None, value, dest_ty)
+        self.gcc_float_to_int_cast(value, dest_ty)
     }

     fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.context.new_cast(None, value, dest_ty)
+        self.gcc_uint_to_float_cast(value, dest_ty)
     }

     fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.context.new_cast(None, value, dest_ty)
+        self.gcc_int_to_float_cast(value, dest_ty)
     }

     fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
@@ -1044,11 +912,13 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.cx.ptrtoint(self.block.expect("block"), value, dest_ty)
+        let usize_value = self.cx.const_bitcast(value, self.cx.type_isize());
+        self.intcast(usize_value, dest_ty, false)
     }

     fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.cx.inttoptr(self.block.expect("block"), value, dest_ty)
+        let usize_value = self.intcast(value, self.cx.type_isize(), false);
+        self.cx.const_bitcast(usize_value, dest_ty)
     }

     fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
@@ -1057,7 +927,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {

     fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
         // NOTE: is_signed is for value, not dest_typ.
-        self.cx.context.new_cast(None, value, dest_typ)
+        self.gcc_int_cast(value, dest_typ)
     }

     fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
@@ -1078,21 +948,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     /* Comparisons */
-    fn icmp(&mut self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
-        let left_type = lhs.get_type();
-        let right_type = rhs.get_type();
-        if left_type != right_type {
-            // NOTE: because libgccjit cannot compare function pointers.
-            if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() {
-                lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
-                rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
-            }
-            // NOTE: hack because we try to cast a vector type to the same vector type.
-            else if format!("{:?}", left_type) != format!("{:?}", right_type) {
-                rhs = self.context.new_cast(None, rhs, left_type);
-            }
-        }
-        self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+    fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        self.gcc_icmp(op, lhs, rhs)
     }

     fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
@@ -1100,22 +957,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     /* Miscellaneous instructions */
-    fn memcpy(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
-        if flags.contains(MemFlags::NONTEMPORAL) {
-            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
-            let val = self.load(src.get_type(), src, src_align);
-            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
-            self.store_with_flags(val, ptr, dst_align, flags);
-            return;
-        }
+    fn memcpy(&mut self, dst: RValue<'gcc>, _dst_align: Align, src: RValue<'gcc>, _src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
         let size = self.intcast(size, self.type_size_t(), false);
         let _is_volatile = flags.contains(MemFlags::VOLATILE);
         let dst = self.pointercast(dst, self.type_i8p());
         let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
         let memcpy = self.context.get_builtin_function("memcpy");
-        let block = self.block.expect("block");
         // TODO(antoyo): handle aligns and is_volatile.
-        block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
+        self.block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
     }

     fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
@@ -1132,20 +982,18 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let src = self.pointercast(src, self.type_ptr_to(self.type_void()));

         let memmove = self.context.get_builtin_function("memmove");
-        let block = self.block.expect("block");
         // TODO(antoyo): handle is_volatile.
-        block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
+        self.block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
     }

     fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
         let _is_volatile = flags.contains(MemFlags::VOLATILE);
         let ptr = self.pointercast(ptr, self.type_i8p());
         let memset = self.context.get_builtin_function("memset");
-        let block = self.block.expect("block");
         // TODO(antoyo): handle align and is_volatile.
         let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
         let size = self.intcast(size, self.type_size_t(), false);
-        block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
+        self.block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
     }

     fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
@@ -1159,16 +1007,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         then_block.add_assignment(None, variable, then_val);
         then_block.end_with_jump(None, after_block);

-        if then_val.get_type() != else_val.get_type() {
+        if !then_val.get_type().is_compatible_with(else_val.get_type()) {
             else_val = self.context.new_cast(None, else_val, then_val.get_type());
         }
         else_block.add_assignment(None, variable, else_val);
         else_block.end_with_jump(None, after_block);

-        // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
+        // NOTE: since jumps were added in a place rustc does not expect, the current block in the
         // state need to be updated.
-        self.block = Some(after_block);
-        *self.cx.current_block.borrow_mut() = Some(after_block);
+        self.switch_to_block(after_block);

         variable.to_rvalue()
     }
@@ -1264,7 +1111,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn cleanup_landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>) -> RValue<'gcc> {
-        let field1 = self.context.new_field(None, self.u8_type, "landing_pad_field_1");
+        let field1 = self.context.new_field(None, self.u8_type.make_pointer(), "landing_pad_field_1");
         let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_1");
         let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]);
         self.current_func().new_local(None, struct_type.as_type(), "landing_pad")
@@ -1275,7 +1122,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn resume(&mut self, _exn: RValue<'gcc>) {
-        unimplemented!();
+        // TODO(bjorn3): Properly implement unwinding.
+        self.unreachable();
     }

     fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
@@ -1322,7 +1170,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
-        let size = self.cx.int_width(src.get_type()) / 8;
+        let size = src.get_type().get_size();
         let name =
             match op {
                 AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
@@ -1396,7 +1244,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             // Fix the code in codegen_ssa::base::from_immediate.
             return value;
         }
-        self.context.new_cast(None, value, dest_typ)
+        self.gcc_int_cast(value, dest_typ)
     }

     fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
@@ -1404,7 +1252,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
-        unimplemented!();
+        // FIMXE(bjorn3): implement
     }

     fn set_span(&mut self, _span: Span) {}
@@ -1470,7 +1318,7 @@ impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
     }
 }

-trait ToGccComp {
+pub trait ToGccComp {
     fn to_gcc_comparison(&self) -> ComparisonOp;
 }

@@ -1,7 +1,5 @@
-use std::convert::TryFrom;
-
 use gccjit::LValue;
-use gccjit::{Block, CType, RValue, Type, ToRValue};
+use gccjit::{RValue, Type, ToRValue};
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::{
     BaseTypeMethods,
@@ -35,27 +33,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         global
         // TODO(antoyo): set linkage.
     }
-
-    pub fn inttoptr(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        let func = block.get_function();
-        let local = func.new_local(None, value.get_type(), "intLocal");
-        block.add_assignment(None, local, value);
-        let value_address = local.get_address(None);
-
-        let ptr = self.context.new_cast(None, value_address, dest_ty.make_pointer());
-        ptr.dereference(None).to_rvalue()
-    }
-
-    pub fn ptrtoint(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        // TODO(antoyo): when libgccjit allow casting from pointer to int, remove this.
-        let func = block.get_function();
-        let local = func.new_local(None, value.get_type(), "ptrLocal");
-        block.add_assignment(None, local, value);
-        let ptr_address = local.get_address(None);
-
-        let ptr = self.context.new_cast(None, ptr_address, dest_ty.make_pointer());
-        ptr.dereference(None).to_rvalue()
-    }
 }

 pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> {
@@ -99,29 +76,15 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     }

     fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
-        self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
+        self.gcc_int(typ, int)
     }

     fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
-        self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
+        self.gcc_uint(typ, int)
     }

     fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
-        if num >> 64 != 0 {
-            // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
-            let low = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
-            let high = self.context.new_rvalue_from_long(typ, (num >> 64) as u64 as i64);
-
-            let sixty_four = self.context.new_rvalue_from_long(typ, 64);
-            (high << sixty_four) | self.context.new_cast(None, low, typ)
-        }
-        else if typ.is_i128(self) {
-            let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
-            self.context.new_cast(None, num, typ)
-        }
-        else {
-            self.context.new_rvalue_from_long(typ, num as u64 as i64)
-        }
+        self.gcc_uint_big(typ, num)
     }

     fn const_bool(&self, val: bool) -> RValue<'gcc> {
@@ -210,11 +173,8 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
                 }

                 let value = self.const_uint_big(self.type_ix(bitsize), data);
-                if layout.value == Pointer {
-                    self.inttoptr(self.current_block.borrow().expect("block"), value, ty)
-                } else {
-                    self.const_bitcast(value, ty)
-                }
+                // TODO(bjorn3): assert size is correct
+                self.const_bitcast(value, ty)
             }
             Scalar::Ptr(ptr, _size) => {
                 let (alloc_id, offset) = ptr.into_parts();
@@ -418,11 +378,11 @@ impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> {
     }

     fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
-        self.unqualified() == cx.context.new_c_type(CType::Int128t)
+        self.unqualified() == cx.i128_type.unqualified()
     }

     fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
-        self.unqualified() == cx.context.new_c_type(CType::UInt128t)
+        self.unqualified() == cx.u128_type.unqualified()
     }

     fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {

@@ -1,4 +1,4 @@
-use gccjit::{LValue, RValue, ToRValue, Type};
+use gccjit::{GlobalKind, LValue, RValue, ToRValue, Type};
 use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods};
 use rustc_hir as hir;
 use rustc_hir::Node;
@@ -35,7 +35,12 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
         // following:
         for (value, variable) in &*self.const_globals.borrow() {
             if format!("{:?}", value) == format!("{:?}", cv) {
-                // TODO(antoyo): upgrade alignment.
+                if let Some(global_variable) = self.global_lvalues.borrow().get(variable) {
+                    let alignment = align.bits() as i32;
+                    if alignment > global_variable.get_alignment() {
+                        global_variable.set_alignment(alignment);
+                    }
+                }
                 return *variable;
             }
         }
@@ -165,11 +170,9 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             match kind {
                 Some(kind) if !self.tcx.sess.fewer_names() => {
                     let name = self.generate_local_symbol_name(kind);
-                    // TODO(antoyo): check if it's okay that TLS is off here.
-                    // TODO(antoyo): check if it's okay that link_section is None here.
+                    // TODO(antoyo): check if it's okay that no link_section is set.
                     // TODO(antoyo): set alignment here as well.
-                    let global = self.define_global(&name[..], self.val_ty(cv), false, None);
-                    // TODO(antoyo): set linkage.
+                    let global = self.declare_private_global(&name[..], self.val_ty(cv));
                     global
                 }
                 _ => {
@@ -178,11 +181,11 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
                     global
                 },
             };
         // FIXME(antoyo): I think the name coming from generate_local_symbol_name() above cannot be used
         // globally.
         global.global_set_initializer_rvalue(cv);
         // TODO(antoyo): set unnamed address.
-        global.get_address(None)
+        let rvalue = global.get_address(None);
+        self.global_lvalues.borrow_mut().insert(rvalue, global);
+        rvalue
     }

     pub fn get_static(&self, def_id: DefId) -> LValue<'gcc> {
@@ -218,7 +221,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         }

         let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
-        let global = self.declare_global(&sym, llty, is_tls, fn_attrs.link_section);
+        let global = self.declare_global(
+            &sym,
+            llty,
+            GlobalKind::Exported,
+            is_tls,
+            fn_attrs.link_section,
+        );

         if !self.tcx.is_reachable_non_generic(def_id) {
             // TODO(antoyo): set visibility.
@@ -390,6 +399,6 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg
         // don't do this then linker errors can be generated where the linker
         // complains that one object files has a thread local version of the
        // symbol and another one doesn't.
-        cx.declare_global(&sym, llty, is_tls, attrs.link_section)
+        cx.declare_global(&sym, llty, GlobalKind::Imported, is_tls, attrs.link_section)
     }
 }

@@ -1,6 +1,6 @@
 use std::cell::{Cell, RefCell};

-use gccjit::{Block, CType, Context, Function, FunctionType, LValue, RValue, Struct, Type};
+use gccjit::{Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, RValue, Struct, Type};
 use rustc_codegen_ssa::base::wants_msvc_seh;
 use rustc_codegen_ssa::traits::{
     BackendTypes,
@@ -18,7 +18,6 @@ use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDat
 use rustc_target::spec::{HasTargetSpec, Target, TlsModel};

 use crate::callee::get_fn;
-use crate::declare::mangle_name;

 #[derive(Clone)]
 pub struct FuncSig<'gcc> {
@@ -31,8 +30,7 @@ pub struct CodegenCx<'gcc, 'tcx> {
     pub codegen_unit: &'tcx CodegenUnit<'tcx>,
     pub context: &'gcc Context<'gcc>,

-    // TODO(antoyo): First set it to a dummy block to avoid using Option?
-    pub current_block: RefCell<Option<Block<'gcc>>>,
+    // TODO(bjorn3): Can this field be removed?
     pub current_func: RefCell<Option<Function<'gcc>>>,
     pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,

@@ -62,6 +60,8 @@ pub struct CodegenCx<'gcc, 'tcx> {
     pub ulonglong_type: Type<'gcc>,
     pub sizet_type: Type<'gcc>,

+    pub supports_128bit_integers: bool,
+
     pub float_type: Type<'gcc>,
     pub double_type: Type<'gcc>,

@@ -81,9 +81,19 @@ pub struct CodegenCx<'gcc, 'tcx> {
     /// Cache generated vtables
     pub vtables: RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>>,

+    // TODO(antoyo): improve the SSA API to not require those.
+    // Mapping from function pointer type to indexes of on stack parameters.
+    pub on_stack_params: RefCell<FxHashMap<FunctionPtrType<'gcc>, FxHashSet<usize>>>,
+    // Mapping from function to indexes of on stack parameters.
+    pub on_stack_function_params: RefCell<FxHashMap<Function<'gcc>, FxHashSet<usize>>>,
+
     /// Cache of emitted const globals (value -> global)
     pub const_globals: RefCell<FxHashMap<RValue<'gcc>, RValue<'gcc>>>,

+    /// Map from the address of a global variable (rvalue) to the global variable itself (lvalue).
+    /// TODO(antoyo): remove when the rustc API is fixed.
+    pub global_lvalues: RefCell<FxHashMap<RValue<'gcc>, LValue<'gcc>>>,
+
     /// Cache of constant strings,
     pub const_str_cache: RefCell<FxHashMap<Symbol, LValue<'gcc>>>,

@@ -92,7 +102,6 @@ pub struct CodegenCx<'gcc, 'tcx> {

     /// A counter that is used for generating local symbol names
     local_gen_sym_counter: Cell<usize>,
-    pub global_gen_sym_counter: Cell<usize>,

     eh_personality: Cell<Option<RValue<'gcc>>>,

@@ -107,22 +116,29 @@ pub struct CodegenCx<'gcc, 'tcx> {
 }

 impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
-    pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
+    pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>, supports_128bit_integers: bool) -> Self {
         let check_overflow = tcx.sess.overflow_checks();
-        // TODO(antoyo): fix this mess. libgccjit seems to return random type when using new_int_type().
-        let isize_type = context.new_c_type(CType::LongLong);
-        let usize_type = context.new_c_type(CType::ULongLong);
-        let bool_type = context.new_type::<bool>();
-        let i8_type = context.new_type::<i8>();
-        let i16_type = context.new_type::<i16>();
-        let i32_type = context.new_type::<i32>();
-        let i64_type = context.new_c_type(CType::LongLong);
-        let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
-        let u8_type = context.new_type::<u8>();
-        let u16_type = context.new_type::<u16>();
-        let u32_type = context.new_type::<u32>();
-        let u64_type = context.new_c_type(CType::ULongLong);
-        let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?

+        let i8_type = context.new_c_type(CType::Int8t);
+        let i16_type = context.new_c_type(CType::Int16t);
+        let i32_type = context.new_c_type(CType::Int32t);
+        let i64_type = context.new_c_type(CType::Int64t);
+        let u8_type = context.new_c_type(CType::UInt8t);
+        let u16_type = context.new_c_type(CType::UInt16t);
+        let u32_type = context.new_c_type(CType::UInt32t);
+        let u64_type = context.new_c_type(CType::UInt64t);
+
+        let (i128_type, u128_type) =
+            if supports_128bit_integers {
+                let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?;
+                let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?;
+                (i128_type, u128_type)
+            }
+            else {
+                let i128_type = context.new_array_type(None, i64_type, 2);
+                let u128_type = context.new_array_type(None, u64_type, 2);
+                (i128_type, u128_type)
+            };

         let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());

@@ -136,8 +152,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         let ulonglong_type = context.new_c_type(CType::ULongLong);
         let sizet_type = context.new_c_type(CType::SizeT);

-        assert_eq!(isize_type, i64_type);
-        assert_eq!(usize_type, u64_type);
+        let isize_type = context.new_c_type(CType::LongLong);
+        let usize_type = context.new_c_type(CType::ULongLong);
+        let bool_type = context.new_type::<bool>();
+
+        // TODO(antoyo): only have those assertions on x86_64.
+        assert_eq!(isize_type.get_size(), i64_type.get_size());
+        assert_eq!(usize_type.get_size(), u64_type.get_size());

         let mut functions = FxHashMap::default();
         let builtins = [
@@ -160,7 +181,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             check_overflow,
             codegen_unit,
             context,
-            current_block: RefCell::new(None),
             current_func: RefCell::new(None),
             normal_function_addresses: Default::default(),
             functions: RefCell::new(functions),
@@ -187,14 +207,19 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             ulonglong_type,
             sizet_type,

+            supports_128bit_integers,
+
             float_type,
             double_type,

             linkage: Cell::new(FunctionType::Internal),
             instances: Default::default(),
             function_instances: Default::default(),
+            on_stack_params: Default::default(),
+            on_stack_function_params: Default::default(),
             vtables: Default::default(),
             const_globals: Default::default(),
+            global_lvalues: Default::default(),
             const_str_cache: Default::default(),
             globals: Default::default(),
             scalar_types: Default::default(),
@@ -203,7 +228,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             struct_types: Default::default(),
             types_with_fields_to_set: Default::default(),
             local_gen_sym_counter: Cell::new(0),
-            global_gen_sym_counter: Cell::new(0),
             eh_personality: Cell::new(None),
             pointee_infos: Default::default(),
             structs_as_pointer: Default::default(),
@@ -217,6 +241,41 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         function
     }

+    pub fn is_native_int_type(&self, typ: Type<'gcc>) -> bool {
+        let types = [
+            self.u8_type,
+            self.u16_type,
+            self.u32_type,
+            self.u64_type,
+            self.i8_type,
+            self.i16_type,
+            self.i32_type,
+            self.i64_type,
+        ];
+
+        for native_type in types {
+            if native_type.is_compatible_with(typ) {
+                return true;
+            }
+        }
+
+        self.supports_128bit_integers &&
+            (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
+    }
+
+    pub fn is_non_native_int_type(&self, typ: Type<'gcc>) -> bool {
+        !self.supports_128bit_integers &&
+            (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
+    }
+
+    pub fn is_native_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
+        self.is_native_int_type(typ) || typ == self.bool_type
+    }
+
+    pub fn is_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
+        self.is_native_int_type(typ) || self.is_non_native_int_type(typ) || typ == self.bool_type
+    }
+
     pub fn sess(&self) -> &Session {
         &self.tcx.sess
     }
@@ -450,11 +509,6 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
     }
 }

-pub fn unit_name<'tcx>(codegen_unit: &CodegenUnit<'tcx>) -> String {
-    let name = &codegen_unit.name().to_string();
-    mangle_name(&name.replace('-', "_"))
-}
-
 fn to_gcc_tls_mode(tls_model: TlsModel) -> gccjit::TlsModel {
     match tls_model {
         TlsModel::GeneralDynamic => gccjit::TlsModel::GlobalDynamic,

@@ -5,7 +5,7 @@ use rustc_span::Symbol;
|
||||
use rustc_target::abi::call::FnAbi;
|
||||
|
||||
use crate::abi::FnAbiGccExt;
|
||||
use crate::context::{CodegenCx, unit_name};
|
||||
use crate::context::CodegenCx;
|
||||
use crate::intrinsic::llvm;
|
||||
|
||||
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
|
||||
@@ -22,15 +22,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
|
||||
global
|
||||
}
|
||||
else {
|
||||
self.declare_global(name, ty, is_tls, link_section)
|
||||
self.declare_global(name, ty, GlobalKind::Exported, is_tls, link_section)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn declare_unnamed_global(&self, ty: Type<'gcc>) -> LValue<'gcc> {
|
||||
let index = self.global_gen_sym_counter.get();
|
||||
self.global_gen_sym_counter.set(index + 1);
|
||||
let name = format!("global_{}_{}", index, unit_name(&self.codegen_unit));
|
||||
self.context.new_global(None, GlobalKind::Exported, ty, &name)
|
||||
let name = self.generate_local_symbol_name("global");
|
||||
self.context.new_global(None, GlobalKind::Internal, ty, &name)
|
||||
}
|
||||
|
||||
pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> LValue<'gcc> {
|
||||
@@ -47,8 +45,8 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
|
||||
unsafe { std::mem::transmute(func) }
|
||||
}*/
|
||||
|
||||
pub fn declare_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
|
||||
let global = self.context.new_global(None, GlobalKind::Exported, ty, name);
|
||||
pub fn declare_global(&self, name: &str, ty: Type<'gcc>, global_kind: GlobalKind, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
|
||||
let global = self.context.new_global(None, global_kind, ty, name);
|
||||
if is_tls {
|
||||
global.set_tls_model(self.tls_model);
|
||||
}
|
||||
@@ -82,8 +80,9 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
|
||||
}
|
||||
|
||||
pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> RValue<'gcc> {
|
||||
let (return_type, params, variadic) = fn_abi.gcc_type(self);
|
||||
let (return_type, params, variadic, on_stack_param_indices) = fn_abi.gcc_type(self);
|
||||
let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, ¶ms, variadic);
|
||||
self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices);
|
||||
// FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
|
||||
unsafe { std::mem::transmute(func) }
|
||||
}
|
||||
|
||||
compiler/rustc_codegen_gcc/src/int.rs (new file, 730 lines)
@@ -0,0 +1,730 @@
//! Module to handle integer operations.
//! This module exists because some integer types are not supported on some gcc platforms, e.g.
//! 128-bit integers on 32-bit platforms, and thus need to be handled manually.

use std::convert::TryFrom;

use gccjit::{ComparisonOp, FunctionType, RValue, ToRValue, Type, UnaryOp, BinaryOp};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp};
use rustc_middle::ty::Ty;

use crate::builder::ToGccComp;
use crate::{builder::Builder, common::{SignType, TypeReflection}, context::CodegenCx};
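
// A minimal standalone model (plain Rust, not the gccjit API) of the
// representation this module uses when 128-bit integers are unsupported: a
// value is stored as a two-element array of its 64-bit halves, with the low
// half at index 0 and the high half at index 1 (the TODOs below note that
// endianness is not yet taken into account).
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct EmulatedU128Sketch {
    pub low: u64,
    pub high: u64,
}

impl EmulatedU128Sketch {
    pub fn from_u128(value: u128) -> Self {
        EmulatedU128Sketch { low: value as u64, high: (value >> 64) as u64 }
    }

    pub fn to_u128(self) -> u128 {
        ((self.high as u128) << 64) | self.low as u128
    }
}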

impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
    pub fn gcc_urem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // 128-bit unsigned %: __umodti3
        self.multiplicative_operation(BinaryOp::Modulo, "mod", false, a, b)
    }

    pub fn gcc_srem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // 128-bit signed %: __modti3
        self.multiplicative_operation(BinaryOp::Modulo, "mod", true, a, b)
    }

    pub fn gcc_not(&self, a: RValue<'gcc>) -> RValue<'gcc> {
        let typ = a.get_type();
        if self.is_native_int_type_or_bool(typ) {
            let operation =
                if typ.is_bool() {
                    UnaryOp::LogicalNegate
                }
                else {
                    UnaryOp::BitwiseNegate
                };
            self.cx.context.new_unary_op(None, operation, typ, a)
        }
        else {
            // TODO(antoyo): use __negdi2 and __negti2 instead?
            let element_type = typ.dyncast_array().expect("element type");
            let values = [
                self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.low(a)),
                self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.high(a)),
            ];
            self.cx.context.new_array_constructor(None, typ, &values)
        }
    }
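
    // A plain-Rust sketch (not part of the backend) of the identity the
    // non-native branch above relies on: bitwise negation of a double-word
    // value is just negation of each half.
    fn not_via_halves_sketch(value: u128) -> u128 {
        let (low, high) = (value as u64, (value >> 64) as u64);
        ((!high as u128) << 64) | !low as u128 // == !value for all inputs
    }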

    pub fn gcc_neg(&self, a: RValue<'gcc>) -> RValue<'gcc> {
        let a_type = a.get_type();
        if self.is_native_int_type(a_type) {
            self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
        }
        else {
            let param_a = self.context.new_parameter(None, a_type, "a");
            let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a], "__negti2", false);
            self.context.new_call(None, func, &[a])
        }
    }

    pub fn gcc_and(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b)
    }

    pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        let a_type = a.get_type();
        let b_type = b.get_type();
        let a_native = self.is_native_int_type(a_type);
        let b_native = self.is_native_int_type(b_type);
        if a_native && b_native {
            // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by a signed number.
            // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
            if a_type.is_signed(self) != b_type.is_signed(self) {
                let b = self.context.new_cast(None, b, a_type);
                a >> b
            }
            else {
                a >> b
            }
        }
        else if a_native && !b_native {
            self.gcc_lshr(a, self.gcc_int_cast(b, a_type))
        }
        else {
            // NOTE: we cannot use the lshr builtin because it's calling hi() (to get the most
            // significant half of the number) which uses lshr.

            let native_int_type = a_type.dyncast_array().expect("get element type");

            let func = self.current_func();
            let then_block = func.new_block("then");
            let else_block = func.new_block("else");
            let after_block = func.new_block("after");
            let b0_block = func.new_block("b0");
            let actual_else_block = func.new_block("actual_else");

            let result = func.new_local(None, a_type, "shiftResult");

            let sixty_four = self.gcc_int(native_int_type, 64);
            let sixty_three = self.gcc_int(native_int_type, 63);
            let zero = self.gcc_zero(native_int_type);
            let b = self.gcc_int_cast(b, native_int_type);
            let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
            self.llbb().end_with_conditional(None, condition, then_block, else_block);

            // TODO(antoyo): take endianness into account.
            let shift_value = self.gcc_sub(b, sixty_four);
            let high = self.high(a);
            let sign =
                if a_type.is_signed(self) {
                    high >> sixty_three
                }
                else {
                    zero
                };
            let values = [
                high >> shift_value,
                sign,
            ];
            let array_value = self.context.new_array_constructor(None, a_type, &values);
            then_block.add_assignment(None, result, array_value);
            then_block.end_with_jump(None, after_block);

            let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
            else_block.end_with_conditional(None, condition, b0_block, actual_else_block);

            b0_block.add_assignment(None, result, a);
            b0_block.end_with_jump(None, after_block);

            let shift_value = self.gcc_sub(sixty_four, b);
            // NOTE: cast low to its unsigned type in order to perform a logical right shift.
            let unsigned_type = native_int_type.to_unsigned(&self.cx);
            let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
            let shifted_low = casted_low >> self.context.new_cast(None, b, unsigned_type);
            let shifted_low = self.context.new_cast(None, shifted_low, native_int_type);
            let values = [
                (high << shift_value) | shifted_low,
                high >> b,
            ];
            let array_value = self.context.new_array_constructor(None, a_type, &values);
            actual_else_block.add_assignment(None, result, array_value);
            actual_else_block.end_with_jump(None, after_block);

            // NOTE: since jumps were added in a place rustc does not expect, the current block in the
            // state needs to be updated.
            self.switch_to_block(after_block);

            result.to_rvalue()
        }
    }
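
    // A plain-Rust sketch (not part of the backend) of the word-level
    // algorithm the three basic blocks above implement for an unsigned
    // 128-bit logical right shift; the signed case additionally fills the
    // high half with the sign instead of zero.
    fn lshr_via_halves_sketch(value: u128, b: u32) -> u128 {
        let (low, high) = (value as u64, (value >> 64) as u64);
        let (new_low, new_high) = if b & 64 != 0 {
            // Shifting by 64 or more: only the high half survives.
            (high >> (b - 64), 0)
        } else if b == 0 {
            (low, high)
        } else {
            // Bits shifted out of the high half flow into the low half.
            ((high << (64 - b)) | (low >> b), high >> b)
        };
        ((new_high as u128) << 64) | new_low as u128
    }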

    fn additive_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
        let a_type = a.get_type();
        let b_type = b.get_type();
        if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
            if a.get_type() != b.get_type() {
                b = self.context.new_cast(None, b, a.get_type());
            }
            self.context.new_binary_op(None, operation, a_type, a, b)
        }
        else {
            let signed = a_type.is_compatible_with(self.i128_type);
            let func_name =
                match (operation, signed) {
                    (BinaryOp::Plus, true) => "__rust_i128_add",
                    (BinaryOp::Plus, false) => "__rust_u128_add",
                    (BinaryOp::Minus, true) => "__rust_i128_sub",
                    (BinaryOp::Minus, false) => "__rust_u128_sub",
                    _ => unreachable!("unexpected additive operation {:?}", operation),
                };
            let param_a = self.context.new_parameter(None, a_type, "a");
            let param_b = self.context.new_parameter(None, b_type, "b");
            let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
            self.context.new_call(None, func, &[a, b])
        }
    }

    pub fn gcc_add(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.additive_operation(BinaryOp::Plus, a, b)
    }

    pub fn gcc_mul(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.multiplicative_operation(BinaryOp::Mult, "mul", true, a, b)
    }

    pub fn gcc_sub(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.additive_operation(BinaryOp::Minus, a, b)
    }

    fn multiplicative_operation(&self, operation: BinaryOp, operation_name: &str, signed: bool, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        let a_type = a.get_type();
        let b_type = b.get_type();
        if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
            self.context.new_binary_op(None, operation, a_type, a, b)
        }
        else {
            let sign =
                if signed {
                    ""
                }
                else {
                    "u"
                };
            let func_name = format!("__{}{}ti3", sign, operation_name);
            let param_a = self.context.new_parameter(None, a_type, "a");
            let param_b = self.context.new_parameter(None, b_type, "b");
            let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
            self.context.new_call(None, func, &[a, b])
        }
    }

    pub fn gcc_sdiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): check if the types are signed?
        // 128-bit, signed: __divti3
        // TODO(antoyo): convert the arguments to signed?
        self.multiplicative_operation(BinaryOp::Divide, "div", true, a, b)
    }

    pub fn gcc_udiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // 128-bit, unsigned: __udivti3
        self.multiplicative_operation(BinaryOp::Divide, "div", false, a, b)
    }

    pub fn gcc_checked_binop(&self, oop: OverflowOp, typ: Ty<'_>, lhs: <Self as BackendTypes>::Value, rhs: <Self as BackendTypes>::Value) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) {
        use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};

        let new_kind =
            match typ.kind() {
                Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
                Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
                t @ (Uint(_) | Int(_)) => t.clone(),
                _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
            };

        // TODO(antoyo): remove duplication with intrinsic?
        let name =
            if self.is_native_int_type(lhs.get_type()) {
                match oop {
                    OverflowOp::Add =>
                        match new_kind {
                            Int(I8) => "__builtin_add_overflow",
                            Int(I16) => "__builtin_add_overflow",
                            Int(I32) => "__builtin_sadd_overflow",
                            Int(I64) => "__builtin_saddll_overflow",
                            Int(I128) => "__builtin_add_overflow",

                            Uint(U8) => "__builtin_add_overflow",
                            Uint(U16) => "__builtin_add_overflow",
                            Uint(U32) => "__builtin_uadd_overflow",
                            Uint(U64) => "__builtin_uaddll_overflow",
                            Uint(U128) => "__builtin_add_overflow",

                            _ => unreachable!(),
                        },
                    OverflowOp::Sub =>
                        match new_kind {
                            Int(I8) => "__builtin_sub_overflow",
                            Int(I16) => "__builtin_sub_overflow",
                            Int(I32) => "__builtin_ssub_overflow",
                            Int(I64) => "__builtin_ssubll_overflow",
                            Int(I128) => "__builtin_sub_overflow",

                            Uint(U8) => "__builtin_sub_overflow",
                            Uint(U16) => "__builtin_sub_overflow",
                            Uint(U32) => "__builtin_usub_overflow",
                            Uint(U64) => "__builtin_usubll_overflow",
                            Uint(U128) => "__builtin_sub_overflow",

                            _ => unreachable!(),
                        },
                    OverflowOp::Mul =>
                        match new_kind {
                            Int(I8) => "__builtin_mul_overflow",
                            Int(I16) => "__builtin_mul_overflow",
                            Int(I32) => "__builtin_smul_overflow",
                            Int(I64) => "__builtin_smulll_overflow",
                            Int(I128) => "__builtin_mul_overflow",

                            Uint(U8) => "__builtin_mul_overflow",
                            Uint(U16) => "__builtin_mul_overflow",
                            Uint(U32) => "__builtin_umul_overflow",
                            Uint(U64) => "__builtin_umulll_overflow",
                            Uint(U128) => "__builtin_mul_overflow",

                            _ => unreachable!(),
                        },
                }
            }
            else {
                match new_kind {
                    Int(I128) | Uint(U128) => {
                        let func_name =
                            match oop {
                                OverflowOp::Add =>
                                    match new_kind {
                                        Int(I128) => "__rust_i128_addo",
                                        Uint(U128) => "__rust_u128_addo",
                                        _ => unreachable!(),
                                    },
                                OverflowOp::Sub =>
                                    match new_kind {
                                        Int(I128) => "__rust_i128_subo",
                                        Uint(U128) => "__rust_u128_subo",
                                        _ => unreachable!(),
                                    },
                                OverflowOp::Mul =>
                                    match new_kind {
                                        Int(I128) => "__rust_i128_mulo", // TODO(antoyo): use __muloti4d instead?
                                        Uint(U128) => "__rust_u128_mulo",
                                        _ => unreachable!(),
                                    },
                            };
                        let a_type = lhs.get_type();
                        let b_type = rhs.get_type();
                        let param_a = self.context.new_parameter(None, a_type, "a");
                        let param_b = self.context.new_parameter(None, b_type, "b");
                        let result_field = self.context.new_field(None, a_type, "result");
                        let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
                        let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
                        let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
                        let result = self.context.new_call(None, func, &[lhs, rhs]);
                        let overflow = result.access_field(None, overflow_field);
                        let int_result = result.access_field(None, result_field);
                        return (int_result, overflow);
                    },
                    _ => {
                        match oop {
                            OverflowOp::Mul =>
                                match new_kind {
                                    Int(I32) => "__mulosi4",
                                    Int(I64) => "__mulodi4",
                                    _ => unreachable!(),
                                },
                            _ => unimplemented!("overflow operation for {:?}", new_kind),
                        }
                    }
                }
            };

        let intrinsic = self.context.get_builtin_function(&name);
        let res = self.current_func()
            // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
            .new_local(None, rhs.get_type(), "binopResult")
            .get_address(None);
        let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
        (res.dereference(None).to_rvalue(), overflow)
    }
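
    // The pair contract implemented above, whether through a
    // __builtin_*_overflow intrinsic or a __rust_*_o helper, is the usual
    // (result, overflowed) tuple, the same shape as Rust's standard
    // overflowing_* methods. A plain-Rust check (not part of the backend):
    fn checked_binop_contract_sketch() {
        assert_eq!(i32::MAX.overflowing_add(1), (i32::MIN, true));
        assert_eq!(1u64.overflowing_mul(2), (2, false));
    }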

    pub fn gcc_icmp(&self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
        let a_type = lhs.get_type();
        let b_type = rhs.get_type();
        if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) {
            let signed = a_type.is_compatible_with(self.i128_type);
            let sign =
                if signed {
                    ""
                }
                else {
                    "u"
                };
            let func_name = format!("__{}cmpti2", sign);
            let param_a = self.context.new_parameter(None, a_type, "a");
            let param_b = self.context.new_parameter(None, b_type, "b");
            let func = self.context.new_function(None, FunctionType::Extern, self.int_type, &[param_a, param_b], func_name, false);
            let cmp = self.context.new_call(None, func, &[lhs, rhs]);
            let (op, limit) =
                match op {
                    IntPredicate::IntEQ => {
                        return self.context.new_comparison(None, ComparisonOp::Equals, cmp, self.context.new_rvalue_one(self.int_type));
                    },
                    IntPredicate::IntNE => {
                        return self.context.new_comparison(None, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type));
                    },
                    IntPredicate::IntUGT => (ComparisonOp::Equals, 2),
                    IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1),
                    IntPredicate::IntULT => (ComparisonOp::Equals, 0),
                    IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1),
                    IntPredicate::IntSGT => (ComparisonOp::Equals, 2),
                    IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1),
                    IntPredicate::IntSLT => (ComparisonOp::Equals, 0),
                    IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1),
                };
            self.context.new_comparison(None, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit))
        }
        else {
            let left_type = lhs.get_type();
            let right_type = rhs.get_type();
            if left_type != right_type {
                // NOTE: because libgccjit cannot compare function pointers.
                if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() {
                    lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
                    rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
                }
                // NOTE: hack because we try to cast a vector type to the same vector type.
                else if format!("{:?}", left_type) != format!("{:?}", right_type) {
                    rhs = self.context.new_cast(None, rhs, left_type);
                }
            }
            self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
        }
    }
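
    // libgcc's __cmpti2/__ucmpti2 return a three-way result: 0 if a < b, 1 if
    // a == b, 2 if a > b, which is why the predicates above compare the call's
    // result against the limits 0, 1 and 2. A plain-Rust model (not part of
    // the backend):
    fn cmp3_sketch(a: u128, b: u128) -> i32 {
        if a < b { 0 } else if a == b { 1 } else { 2 }
    }
    // For example, IntULT maps to (ComparisonOp::Equals, 0):
    // a < b holds exactly when cmp3_sketch(a, b) == 0.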

    pub fn gcc_xor(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        let a_type = a.get_type();
        let b_type = b.get_type();
        if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
            a ^ b
        }
        else {
            let values = [
                self.low(a) ^ self.low(b),
                self.high(a) ^ self.high(b),
            ];
            self.context.new_array_constructor(None, a_type, &values)
        }
    }

    pub fn gcc_shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        let a_type = a.get_type();
        let b_type = b.get_type();
        let a_native = self.is_native_int_type(a_type);
        let b_native = self.is_native_int_type(b_type);
        if a_native && b_native {
            // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
            if a_type.is_unsigned(self) && b_type.is_signed(self) {
                let a = self.context.new_cast(None, a, b_type);
                let result = a << b;
                self.context.new_cast(None, result, a_type)
            }
            else if a_type.is_signed(self) && b_type.is_unsigned(self) {
                let b = self.context.new_cast(None, b, a_type);
                a << b
            }
            else {
                a << b
            }
        }
        else if a_native && !b_native {
            self.gcc_shl(a, self.gcc_int_cast(b, a_type))
        }
        else {
            // NOTE: we cannot use the ashl builtin because it's calling widen_hi() which uses ashl.
            let native_int_type = a_type.dyncast_array().expect("get element type");

            let func = self.current_func();
            let then_block = func.new_block("then");
            let else_block = func.new_block("else");
            let after_block = func.new_block("after");
            let b0_block = func.new_block("b0");
            let actual_else_block = func.new_block("actual_else");

            let result = func.new_local(None, a_type, "shiftResult");

            let b = self.gcc_int_cast(b, native_int_type);
            let sixty_four = self.gcc_int(native_int_type, 64);
            let zero = self.gcc_zero(native_int_type);
            let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
            self.llbb().end_with_conditional(None, condition, then_block, else_block);

            // TODO(antoyo): take endianness into account.
            let values = [
                zero,
                self.low(a) << (b - sixty_four),
            ];
            let array_value = self.context.new_array_constructor(None, a_type, &values);
            then_block.add_assignment(None, result, array_value);
            then_block.end_with_jump(None, after_block);

            let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
            else_block.end_with_conditional(None, condition, b0_block, actual_else_block);

            b0_block.add_assignment(None, result, a);
            b0_block.end_with_jump(None, after_block);

            // NOTE: cast low to its unsigned type in order to perform a logical right shift.
            let unsigned_type = native_int_type.to_unsigned(&self.cx);
            let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
            let shift_value = self.context.new_cast(None, sixty_four - b, unsigned_type);
            let high_low = self.context.new_cast(None, casted_low >> shift_value, native_int_type);
            let values = [
                self.low(a) << b,
                (self.high(a) << b) | high_low,
            ];

            let array_value = self.context.new_array_constructor(None, a_type, &values);
            actual_else_block.add_assignment(None, result, array_value);
            actual_else_block.end_with_jump(None, after_block);

            // NOTE: since jumps were added in a place rustc does not expect, the current block in the
            // state needs to be updated.
            self.switch_to_block(after_block);

            result.to_rvalue()
        }
    }

    pub fn gcc_bswap(&mut self, mut arg: RValue<'gcc>, width: u64) -> RValue<'gcc> {
        let arg_type = arg.get_type();
        if !self.is_native_int_type(arg_type) {
            let native_int_type = arg_type.dyncast_array().expect("get element type");
            let lsb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 0)).to_rvalue();
            let swapped_lsb = self.gcc_bswap(lsb, width / 2);
            let swapped_lsb = self.context.new_cast(None, swapped_lsb, native_int_type);
            let msb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 1)).to_rvalue();
            let swapped_msb = self.gcc_bswap(msb, width / 2);
            let swapped_msb = self.context.new_cast(None, swapped_msb, native_int_type);

            // NOTE: we also need to swap the two elements here, in addition to swapping inside
            // the elements themselves as done above.
            return self.context.new_array_constructor(None, arg_type, &[swapped_msb, swapped_lsb]);
        }

        // TODO(antoyo): check if it's faster to use string literals and a
        // match instead of format!.
        let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
        // FIXME(antoyo): this cast should not be necessary. Remove
        // when having proper sized integer types.
        let param_type = bswap.get_param(0).to_rvalue().get_type();
        if param_type != arg_type {
            arg = self.bitcast(arg, param_type);
        }
        self.cx.context.new_call(None, bswap, &[arg])
    }
}
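
// A plain-Rust sketch (not part of the backend) of the recursive case above:
// byte-swapping a double-word value swaps the bytes of each 64-bit half and
// exchanges the halves.
fn bswap128_sketch(value: u128) -> u128 {
    let (low, high) = (value as u64, (value >> 64) as u64);
    ((low.swap_bytes() as u128) << 64) | high.swap_bytes() as u128
}
// bswap128_sketch(x) == x.swap_bytes() for all x.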

impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
    pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
        if self.is_native_int_type_or_bool(typ) {
            self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
        }
        else {
            // NOTE: set the sign in high.
            self.from_low_high(typ, int, -(int.is_negative() as i64))
        }
    }

    pub fn gcc_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
        if self.is_native_int_type_or_bool(typ) {
            self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
        }
        else {
            self.from_low_high(typ, int as i64, 0)
        }
    }

    pub fn gcc_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
        let low = num as u64;
        let high = (num >> 64) as u64;
        if num >> 64 != 0 {
            // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
            if self.is_native_int_type(typ) {
                let low = self.context.new_rvalue_from_long(self.u64_type, low as i64);
                let high = self.context.new_rvalue_from_long(typ, high as i64);

                let sixty_four = self.context.new_rvalue_from_long(typ, 64);
                let shift = high << sixty_four;
                shift | self.context.new_cast(None, low, typ)
            }
            else {
                self.from_low_high(typ, low as i64, high as i64)
            }
        }
        else if typ.is_i128(self) {
            let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
            self.gcc_int_cast(num, typ)
        }
        else {
            self.gcc_uint(typ, num as u64)
        }
    }
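
    // gcc_uint_big splits the 128-bit literal into its 64-bit words and
    // rebuilds the value from them. A plain-Rust check of the round trip
    // (not part of the backend):
    fn split_and_rejoin_sketch(num: u128) -> u128 {
        let low = num as u64;
        let high = (num >> 64) as u64;
        ((high as u128) << 64) | low as u128 // always equals `num`
    }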

    pub fn gcc_zero(&self, typ: Type<'gcc>) -> RValue<'gcc> {
        if self.is_native_int_type_or_bool(typ) {
            self.context.new_rvalue_zero(typ)
        }
        else {
            self.from_low_high(typ, 0, 0)
        }
    }

    pub fn gcc_int_width(&self, typ: Type<'gcc>) -> u64 {
        if self.is_native_int_type_or_bool(typ) {
            typ.get_size() as u64 * 8
        }
        else {
            // NOTE: the only unsupported types are u128 and i128.
            128
        }
    }

    fn bitwise_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
        let a_type = a.get_type();
        let b_type = b.get_type();
        let a_native = self.is_native_int_type_or_bool(a_type);
        let b_native = self.is_native_int_type_or_bool(b_type);
        if a_native && b_native {
            if a_type != b_type {
                b = self.context.new_cast(None, b, a_type);
            }
            self.context.new_binary_op(None, operation, a_type, a, b)
        }
        else {
            assert!(!a_native && !b_native, "both types should either be native or non-native for the bitwise operation");
            let native_int_type = a_type.dyncast_array().expect("get element type");
            let values = [
                self.context.new_binary_op(None, operation, native_int_type, self.low(a), self.low(b)),
                self.context.new_binary_op(None, operation, native_int_type, self.high(a), self.high(b)),
            ];
            self.context.new_array_constructor(None, a_type, &values)
        }
    }

    pub fn gcc_or(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.bitwise_operation(BinaryOp::BitwiseOr, a, b)
    }

    // TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead?
    pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
        let value_type = value.get_type();
        if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type) {
            self.context.new_cast(None, value, dest_typ)
        }
        else if self.is_native_int_type_or_bool(dest_typ) {
            self.context.new_cast(None, self.low(value), dest_typ)
        }
        else if self.is_native_int_type_or_bool(value_type) {
            let dest_element_type = dest_typ.dyncast_array().expect("get element type");

            // NOTE: set the sign of the value.
            let zero = self.context.new_rvalue_zero(value_type);
            let is_negative = self.context.new_comparison(None, ComparisonOp::LessThan, value, zero);
            let is_negative = self.gcc_int_cast(is_negative, dest_element_type);
            let values = [
                self.context.new_cast(None, value, dest_element_type),
                self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative),
            ];
            self.context.new_array_constructor(None, dest_typ, &values)
        }
        else {
            // Since u128 and i128 are the only types that can be unsupported, we know the type of
            // value and the destination type have the same size, so a bitcast is fine.
            self.context.new_bitcast(None, value, dest_typ)
        }
    }
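
    // The widening branch above sign-extends by materializing the high word
    // from the value's sign: 0 for non-negative values and all ones for
    // negative ones, computed as -(value < 0). In plain Rust (not part of
    // the backend):
    fn widen_i64_to_words_sketch(value: i64) -> (u64, u64) {
        let is_negative = (value < 0) as i64;
        (value as u64, (-is_negative) as u64) // (low, high)
    }
    // widen_i64_to_words_sketch(-1) == (u64::MAX, u64::MAX);
    // widen_i64_to_words_sketch(1) == (1, 0).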

    fn int_to_float_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
        let value_type = value.get_type();
        if self.is_native_int_type_or_bool(value_type) {
            return self.context.new_cast(None, value, dest_typ);
        }

        let name_suffix =
            match self.type_kind(dest_typ) {
                TypeKind::Float => "tisf",
                TypeKind::Double => "tidf",
                kind => panic!("cannot cast a non-native integer to type {:?}", kind),
            };
        let sign =
            if signed {
                ""
            }
            else {
                "un"
            };
        let func_name = format!("__float{}{}", sign, name_suffix);
        let param = self.context.new_parameter(None, value_type, "n");
        let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
        self.context.new_call(None, func, &[value])
    }

    pub fn gcc_int_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
        self.int_to_float_cast(true, value, dest_typ)
    }

    pub fn gcc_uint_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
        self.int_to_float_cast(false, value, dest_typ)
    }

    fn float_to_int_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
        let value_type = value.get_type();
        if self.is_native_int_type_or_bool(dest_typ) {
            return self.context.new_cast(None, value, dest_typ);
        }

        let name_suffix =
            match self.type_kind(value_type) {
                TypeKind::Float => "sfti",
                TypeKind::Double => "dfti",
                kind => panic!("cannot cast a {:?} to non-native integer", kind),
            };
        let sign =
            if signed {
                ""
            }
            else {
                "uns"
            };
        let func_name = format!("__fix{}{}", sign, name_suffix);
        let param = self.context.new_parameter(None, value_type, "n");
        let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
        self.context.new_call(None, func, &[value])
    }

    pub fn gcc_float_to_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
        self.float_to_int_cast(true, value, dest_typ)
    }

    pub fn gcc_float_to_uint_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
        self.float_to_int_cast(false, value, dest_typ)
    }
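
    // Both conversion paths above rely on libgcc's naming scheme: `ti` is the
    // 128-bit (tetra-integer) mode, `sf`/`df` are single/double floats, and
    // the unsigned variants insert `un` (int to float) or `uns` (float to
    // int). A plain-Rust sketch of the name construction, mirroring the
    // format! calls above (not part of the backend):
    fn float_helper_name_sketch(signed: bool, double: bool) -> String {
        let suffix = if double { "tidf" } else { "tisf" };
        format!("__float{}{}", if signed { "" } else { "un" }, suffix)
        // e.g. float_helper_name_sketch(false, true) == "__floatuntidf"
    }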

    fn high(&self, value: RValue<'gcc>) -> RValue<'gcc> {
        self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 1))
            .to_rvalue()
    }

    fn low(&self, value: RValue<'gcc>) -> RValue<'gcc> {
        self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 0))
            .to_rvalue()
    }

    fn from_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
        let native_int_type = typ.dyncast_array().expect("get element type");
        let values = [
            self.context.new_rvalue_from_long(native_int_type, low),
            self.context.new_rvalue_from_long(native_int_type, high),
        ];
        self.context.new_array_constructor(None, typ, &values)
    }
}
@@ -1,7 +1,7 @@
pub mod llvm;
mod simd;

use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp, FunctionType};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::base::wants_msvc_seh;
use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
@@ -175,19 +175,18 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {

                let arg = args[0].immediate();
                let result = func.new_local(None, arg.get_type(), "zeros");
                let zero = self.cx.context.new_rvalue_zero(arg.get_type());
                let cond = self.cx.context.new_comparison(None, ComparisonOp::Equals, arg, zero);
                let zero = self.cx.gcc_zero(arg.get_type());
                let cond = self.gcc_icmp(IntPredicate::IntEQ, arg, zero);
                self.llbb().end_with_conditional(None, cond, then_block, else_block);

                let zero_result = self.cx.context.new_rvalue_from_long(arg.get_type(), width as i64);
                let zero_result = self.cx.gcc_uint(arg.get_type(), width);
                then_block.add_assignment(None, result, zero_result);
                then_block.end_with_jump(None, after_block);

                // NOTE: since jumps were added in a place
                // count_leading_zeroes() does not expect, the current blocks
                // count_leading_zeroes() does not expect, the current block
                // in the state needs to be updated.
                *self.current_block.borrow_mut() = Some(else_block);
                self.block = Some(else_block);
                self.switch_to_block(else_block);

                let zeros =
                    match name {
@@ -195,13 +194,12 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                        sym::cttz => self.count_trailing_zeroes(width, arg),
                        _ => unreachable!(),
                    };
                else_block.add_assignment(None, result, zeros);
                else_block.end_with_jump(None, after_block);
                self.llbb().add_assignment(None, result, zeros);
                self.llbb().end_with_jump(None, after_block);

                // NOTE: since jumps were added in a place rustc does not
                // expect, the current blocks in the state need to be updated.
                *self.current_block.borrow_mut() = Some(after_block);
                self.block = Some(after_block);
                // expect, the current block in the state needs to be updated.
                self.switch_to_block(after_block);

                result.to_rvalue()
            }

@@ -217,17 +215,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                    args[0].immediate() // byte-swapping a u8/i8 is just a no-op
                }
                else {
                    // TODO(antoyo): check if it's faster to use string literals and a
                    // match instead of format!.
                    let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
                    let mut arg = args[0].immediate();
                    // FIXME(antoyo): this cast should not be necessary. Remove
                    // when having proper sized integer types.
                    let param_type = bswap.get_param(0).to_rvalue().get_type();
                    if param_type != arg.get_type() {
                        arg = self.bitcast(arg, param_type);
                    }
                    self.cx.context.new_call(None, bswap, &[arg])
                    self.gcc_bswap(args[0].immediate(), width)
                }
            },
            sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
@@ -476,17 +464,17 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
            val.to_rvalue()
        };
        match self.mode {
            PassMode::Ignore => {}
            PassMode::Ignore => {},
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            },
            PassMode::Indirect { extra_attrs: Some(_), .. } => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            },
            PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => {
                let next_arg = next();
                self.store(bx, next_arg.to_rvalue(), dst);
            }
                self.store(bx, next_arg, dst);
            },
        }
    }
}
@@ -526,7 +514,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {

        let value =
            if result_type.is_signed(self.cx) {
                self.context.new_cast(None, value, typ)
                self.gcc_int_cast(value, typ)
            }
            else {
                value
@@ -673,30 +661,33 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
            },
            128 => {
                // TODO(antoyo): find a more efficient implementation?
                let sixty_four = self.context.new_rvalue_from_long(typ, 64);
                let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
                let low = self.context.new_cast(None, value, self.u64_type);
                let sixty_four = self.gcc_int(typ, 64);
                let right_shift = self.gcc_lshr(value, sixty_four);
                let high = self.gcc_int_cast(right_shift, self.u64_type);
                let low = self.gcc_int_cast(value, self.u64_type);

                let reversed_high = self.bit_reverse(64, high);
                let reversed_low = self.bit_reverse(64, low);

                let new_low = self.context.new_cast(None, reversed_high, typ);
                let new_high = self.context.new_cast(None, reversed_low, typ) << sixty_four;
                let new_low = self.gcc_int_cast(reversed_high, typ);
                let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four);

                new_low | new_high
                self.gcc_or(new_low, new_high)
            },
            _ => {
                panic!("cannot bit reverse with width = {}", width);
            },
        };

        self.context.new_cast(None, result, result_type)
        self.gcc_int_cast(result, result_type)
    }
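
    // The 128-bit arm above reverses each 64-bit half and swaps the halves,
    // since reversing the whole word also reverses the order of its halves.
    // Plain-Rust equivalent (not part of the backend):
    fn bit_reverse128_sketch(value: u128) -> u128 {
        let (low, high) = (value as u64, (value >> 64) as u64);
        ((low.reverse_bits() as u128) << 64) | high.reverse_bits() as u128
    }
    // bit_reverse128_sketch(x) == x.reverse_bits() for all x.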

    fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
    fn count_leading_zeroes(&mut self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): use width?
        let arg_type = arg.get_type();
        let count_leading_zeroes =
            // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
            // instead of using is_uint().
            if arg_type.is_uint(&self.cx) {
                "__builtin_clz"
            }
@@ -712,9 +703,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
            let result = self.current_func()
                .new_local(None, array_type, "count_leading_zeroes_results");

            let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
            let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
            let low = self.context.new_cast(None, arg, self.u64_type);
            let sixty_four = self.const_uint(arg_type, 64);
            let shift = self.lshr(arg, sixty_four);
            let high = self.gcc_int_cast(shift, self.u64_type);
            let low = self.gcc_int_cast(arg, self.u64_type);

            let zero = self.context.new_rvalue_zero(self.usize_type);
            let one = self.context.new_rvalue_one(self.usize_type);
@@ -723,17 +715,18 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
            let clzll = self.context.get_builtin_function("__builtin_clzll");

            let first_elem = self.context.new_array_access(None, result, zero);
            let first_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[high]), arg_type);
            let first_value = self.gcc_int_cast(self.context.new_call(None, clzll, &[high]), arg_type);
            self.llbb()
                .add_assignment(None, first_elem, first_value);

            let second_elem = self.context.new_array_access(None, result, one);
            let second_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[low]), arg_type) + sixty_four;
            let cast = self.gcc_int_cast(self.context.new_call(None, clzll, &[low]), arg_type);
            let second_value = self.add(cast, sixty_four);
            self.llbb()
                .add_assignment(None, second_elem, second_value);

            let third_elem = self.context.new_array_access(None, result, two);
            let third_value = self.context.new_rvalue_from_long(arg_type, 128);
            let third_value = self.const_uint(arg_type, 128);
            self.llbb()
                .add_assignment(None, third_elem, third_value);

@@ -749,13 +742,13 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {

            let res = self.context.new_array_access(None, result, index);

            return self.context.new_cast(None, res, arg_type);
            return self.gcc_int_cast(res.to_rvalue(), arg_type);
        }
        else {
            let count_leading_zeroes = self.context.get_builtin_function("__builtin_clz");
            let arg = self.context.new_cast(None, arg, self.uint_type);
            let diff = self.int_width(self.uint_type) - self.int_width(arg_type);
            let diff = self.context.new_rvalue_from_long(self.int_type, diff);
            let count_leading_zeroes = self.context.get_builtin_function("__builtin_clzll");
            let arg = self.context.new_cast(None, arg, self.ulonglong_type);
            let diff = self.ulonglong_type.get_size() as i64 - arg_type.get_size() as i64;
            let diff = self.context.new_rvalue_from_long(self.int_type, diff * 8);
            let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
            return self.context.new_cast(None, res, arg_type);
        };
@@ -764,18 +757,20 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
        self.context.new_cast(None, res, arg_type)
    }
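
    // The fallback branch above computes clz for a narrow type with
    // __builtin_clzll by zero-extending to 64 bits, then subtracting the
    // extra leading zero bits, 8 * (size difference in bytes). The same trick
    // in plain Rust for a u16 (the zero case is handled by the earlier zero
    // check in the intrinsic wrapper):
    fn clz_u16_via_u64_sketch(value: u16) -> u32 {
        (value as u64).leading_zeros() - 8 * (8 - 2) // 48 extra leading bits
    }
    // clz_u16_via_u64_sketch(1) == 15 == 1u16.leading_zeros().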

    fn count_trailing_zeroes(&self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
    fn count_trailing_zeroes(&mut self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
        let result_type = arg.get_type();
        let arg =
            if result_type.is_signed(self.cx) {
                let new_type = result_type.to_unsigned(self.cx);
                self.context.new_cast(None, arg, new_type)
                self.gcc_int_cast(arg, new_type)
            }
            else {
                arg
            };
        let arg_type = arg.get_type();
        let (count_trailing_zeroes, expected_type) =
            // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
            // instead of using is_uint().
            if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
                // NOTE: we don't need to & 0xFF for uchar because the result is undefined on zero.
                ("__builtin_ctz", self.cx.uint_type)
@@ -792,9 +787,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
            let result = self.current_func()
                .new_local(None, array_type, "count_trailing_zeroes_results");

            let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
            let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
            let low = self.context.new_cast(None, arg, self.u64_type);
            let sixty_four = self.gcc_int(arg_type, 64);
            let shift = self.gcc_lshr(arg, sixty_four);
            let high = self.gcc_int_cast(shift, self.u64_type);
            let low = self.gcc_int_cast(arg, self.u64_type);

            let zero = self.context.new_rvalue_zero(self.usize_type);
            let one = self.context.new_rvalue_one(self.usize_type);
@@ -803,17 +799,17 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
            let ctzll = self.context.get_builtin_function("__builtin_ctzll");

            let first_elem = self.context.new_array_access(None, result, zero);
            let first_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[low]), arg_type);
            let first_value = self.gcc_int_cast(self.context.new_call(None, ctzll, &[low]), arg_type);
            self.llbb()
                .add_assignment(None, first_elem, first_value);

            let second_elem = self.context.new_array_access(None, result, one);
            let second_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[high]), arg_type) + sixty_four;
            let second_value = self.gcc_add(self.gcc_int_cast(self.context.new_call(None, ctzll, &[high]), arg_type), sixty_four);
            self.llbb()
                .add_assignment(None, second_elem, second_value);

            let third_elem = self.context.new_array_access(None, result, two);
            let third_value = self.context.new_rvalue_from_long(arg_type, 128);
            let third_value = self.gcc_int(arg_type, 128);
            self.llbb()
                .add_assignment(None, third_elem, third_value);

@@ -829,10 +825,20 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {

            let res = self.context.new_array_access(None, result, index);

            return self.context.new_cast(None, res, result_type);
            return self.gcc_int_cast(res.to_rvalue(), result_type);
        }
        else {
            unimplemented!("count_trailing_zeroes for {:?}", arg_type);
            let count_trailing_zeroes = self.context.get_builtin_function("__builtin_ctzll");
            let arg_size = arg_type.get_size();
            let casted_arg = self.context.new_cast(None, arg, self.ulonglong_type);
            let byte_diff = self.ulonglong_type.get_size() as i64 - arg_size as i64;
            let diff = self.context.new_rvalue_from_long(self.int_type, byte_diff * 8);
            let mask = self.context.new_rvalue_from_long(arg_type, -1); // To get the value with all bits set.
            let masked = mask & self.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, arg);
            let cond = self.context.new_comparison(None, ComparisonOp::Equals, masked, mask);
            let diff = diff * self.context.new_cast(None, cond, self.int_type);
            let res = self.context.new_call(None, count_trailing_zeroes, &[casted_arg]) - diff;
            return self.context.new_cast(None, res, result_type);
        };
        let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
        let arg =
@@ -846,18 +852,14 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
        self.context.new_cast(None, res, result_type)
    }

    fn int_width(&self, typ: Type<'gcc>) -> i64 {
        self.cx.int_width(typ) as i64
    }

    fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
    fn pop_count(&mut self, value: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): use the optimized version with fewer operations.
        let result_type = value.get_type();
        let value_type = result_type.to_unsigned(self.cx);

        let value =
            if result_type.is_signed(self.cx) {
                self.context.new_cast(None, value, value_type)
                self.gcc_int_cast(value, value_type)
            }
            else {
                value
@@ -867,13 +869,14 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
            // TODO(antoyo): implement in the normal algorithm below to have a more efficient
            // implementation (that does not require a call to __popcountdi2).
            let popcount = self.context.get_builtin_function("__builtin_popcountll");
            let sixty_four = self.context.new_rvalue_from_long(value_type, 64);
            let high = self.context.new_cast(None, value >> sixty_four, self.cx.ulonglong_type);
            let sixty_four = self.gcc_int(value_type, 64);
            let right_shift = self.gcc_lshr(value, sixty_four);
            let high = self.gcc_int_cast(right_shift, self.cx.ulonglong_type);
            let high = self.context.new_call(None, popcount, &[high]);
            let low = self.context.new_cast(None, value, self.cx.ulonglong_type);
            let low = self.gcc_int_cast(value, self.cx.ulonglong_type);
            let low = self.context.new_call(None, popcount, &[low]);
            let res = high + low;
            return self.context.new_cast(None, res, result_type);
            return self.gcc_int_cast(res, result_type);
        }

        // First step.
@@ -935,13 +938,14 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {

    // Algorithm from: https://blog.regehr.org/archives/1063
    fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
        let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
        let shift = shift % max;
        let max = self.const_uint(shift.get_type(), width);
        let shift = self.urem(shift, max);
        let lhs = self.shl(value, shift);
        let result_neg = self.neg(shift);
        let result_and =
            self.and(
                self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
                self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
                result_neg,
                self.const_uint(shift.get_type(), width - 1),
            );
        let rhs = self.lshr(value, result_and);
        self.or(lhs, rhs)
@@ -949,13 +953,14 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {

    // Algorithm from: https://blog.regehr.org/archives/1063
    fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
        let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
        let shift = shift % max;
        let max = self.const_uint(shift.get_type(), width);
        let shift = self.urem(shift, max);
        let lhs = self.lshr(value, shift);
        let result_neg = self.neg(shift);
        let result_and =
            self.and(
                self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
                self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
                result_neg,
                self.const_uint(shift.get_type(), width - 1),
            );
        let rhs = self.shl(value, result_and);
        self.or(lhs, rhs)
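
    // Both rotations follow the UB-free pattern from the linked post: reduce
    // the shift modulo the width, then pair a shift by `s` with a shift by
    // `-s & (width - 1)` so the second shift amount is always in range. In
    // plain Rust (not part of the backend):
    fn rotate_left32_sketch(value: u32, shift: u32) -> u32 {
        let shift = shift % 32;
        (value << shift) | (value >> (shift.wrapping_neg() & 31))
    }
    // rotate_left32_sketch(x, s) == x.rotate_left(s) for all x and s.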

@@ -995,9 +1000,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
        self.llbb().end_with_conditional(None, overflow, then_block, after_block);

        // NOTE: since jumps were added in a place rustc does not
        // expect, the current blocks in the state need to be updated.
        *self.current_block.borrow_mut() = Some(after_block);
        self.block = Some(after_block);
        // expect, the current block in the state needs to be updated.
        self.switch_to_block(after_block);

        res.to_rvalue()
    }
@@ -1015,39 +1019,59 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
    fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
        if signed {
            // Also based on algorithm from: https://stackoverflow.com/a/56531252/389119
            let func_name =
                match width {
                    8 => "__builtin_sub_overflow",
                    16 => "__builtin_sub_overflow",
                    32 => "__builtin_ssub_overflow",
                    64 => "__builtin_ssubll_overflow",
                    128 => "__builtin_sub_overflow",
                    _ => unreachable!(),
                };
            let overflow_func = self.context.get_builtin_function(func_name);
            let result_type = lhs.get_type();
            let func = self.current_func.borrow().expect("func");
            let res = func.new_local(None, result_type, "saturating_diff");
            let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);
            let supports_native_type = self.is_native_int_type(result_type);
            let overflow =
                if supports_native_type {
                    let func_name =
                        match width {
                            8 => "__builtin_sub_overflow",
                            16 => "__builtin_sub_overflow",
                            32 => "__builtin_ssub_overflow",
                            64 => "__builtin_ssubll_overflow",
                            128 => "__builtin_sub_overflow",
                            _ => unreachable!(),
                        };
                    let overflow_func = self.context.get_builtin_function(func_name);
                    self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
                }
                else {
                    let func_name =
                        match width {
                            128 => "__rust_i128_subo",
                            _ => unreachable!(),
                        };
                    let param_a = self.context.new_parameter(None, result_type, "a");
                    let param_b = self.context.new_parameter(None, result_type, "b");
                    let result_field = self.context.new_field(None, result_type, "result");
                    let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
                    let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
                    let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
                    let result = self.context.new_call(None, func, &[lhs, rhs]);
                    let overflow = result.access_field(None, overflow_field);
                    let int_result = result.access_field(None, result_field);
                    self.llbb().add_assignment(None, res, int_result);
                    overflow
                };

            let then_block = func.new_block("then");
            let after_block = func.new_block("after");

            let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
            let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
            let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
                self.context.new_rvalue_from_int(unsigned_type, 0)
            );
            let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
            then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
            // NOTE: convert the type to unsigned to have an unsigned shift.
            let unsigned_type = result_type.to_unsigned(&self.cx);
            let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
            let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
            let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
            then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
            then_block.end_with_jump(None, after_block);

            self.llbb().end_with_conditional(None, overflow, then_block, after_block);

            // NOTE: since jumps were added in a place rustc does not
            // expect, the current blocks in the state need to be updated.
            *self.current_block.borrow_mut() = Some(after_block);
            self.block = Some(after_block);
            // expect, the current block in the state needs to be updated.
            self.switch_to_block(after_block);

            res.to_rvalue()
        }
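
    // On overflow, the then-block above derives the saturation bound from the
    // sign of the left operand alone: lhs >> (width - 1) is 0 for non-negative
    // and 1 for negative values, and adding that to INT_MAX in unsigned
    // arithmetic wraps to INT_MIN exactly when lhs is negative. For i32, in
    // plain Rust (not part of the backend):
    fn saturation_bound_sketch(lhs: i32) -> i32 {
        let shifted = (lhs as u32) >> 31; // sign bit: 0 or 1
        let int_max = u32::MAX >> 1; // 0x7fff_ffff
        shifted.wrapping_add(int_max) as i32 // i32::MAX or i32::MIN
    }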

@@ -1062,7 +1086,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
}

fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
    if bx.sess().panic_strategy() == PanicStrategy::Abort {
    // NOTE: the `|| true` here is to use the panic=abort strategy with panic=unwind too
    if bx.sess().panic_strategy() == PanicStrategy::Abort || true {
        // TODO(bjorn3): Properly implement unwinding and remove the `|| true` once this is done.
        bx.call(bx.type_void(), try_func, &[data], None);
        // Return 0 unconditionally from the intrinsic call;
        // we can never unwind.

@@ -163,5 +163,26 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
        simd_xor: Uint, Int => xor;
    }

    macro_rules! arith_unary {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == sym::$name {
                match in_elem.kind() {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate()))
                    })*
                    _ => {},
                }
                require!(false,
                         "unsupported operation on `{}` with element `{}`",
                         in_ty,
                         in_elem)
            })*
        }
    }

    arith_unary! {
        simd_neg: Int => neg, Float => fneg;
    }

    unimplemented!("simd {}", name);
}
|
||||
|
||||
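For reference, the `arith_unary!` invocation above expands to roughly this (a sketch of the expansion; `name`, `in_elem`, `in_ty`, `bx`, `args`, and `require!` all come from the surrounding `generic_simd_intrinsic`):

if name == sym::simd_neg {
    match in_elem.kind() {
        ty::Int(_) => {
            return Ok(bx.neg(args[0].immediate()))
        }
        ty::Float(_) => {
            return Ok(bx.fneg(args[0].immediate()))
        }
        _ => {},
    }
    require!(false,
        "unsupported operation on `{}` with element `{}`",
        in_ty,
        in_elem)
}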
@@ -1,6 +1,7 @@
/*
 * TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?)
 * TODO(antoyo): support #[inline] attributes.
 * TODO(antoyo): support LTO.
 * TODO(antoyo): support LTO (gcc's equivalent to Thin LTO is enabled by -fwhopr: https://stackoverflow.com/questions/64954525/does-gcc-have-thin-lto).
 *
 * TODO(antoyo): remove the patches.
 */
@@ -21,6 +22,7 @@ extern crate rustc_middle;
extern crate rustc_session;
extern crate rustc_span;
extern crate rustc_target;
extern crate tempfile;

// This prevents duplicating functions and statics that are already part of the host rustc process.
#[allow(unused_extern_crates)]
@@ -40,15 +42,16 @@ mod context;
mod coverageinfo;
mod debuginfo;
mod declare;
mod int;
mod intrinsic;
mod mono_item;
mod type_;
mod type_of;

use std::any::Any;
use std::sync::Arc;
use std::sync::{Arc, Mutex};

use gccjit::{Context, OptimizationLevel};
use gccjit::{Context, OptimizationLevel, CType};
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
use rustc_codegen_ssa::base::codegen_crate;
@@ -61,10 +64,12 @@ use rustc_errors::{ErrorGuaranteed, Handler};
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
use rustc_middle::ty::query::Providers;
use rustc_session::config::{Lto, OptLevel, OutputFilenames};
use rustc_session::Session;
use rustc_span::Symbol;
use rustc_span::fatal_error::FatalError;
use tempfile::TempDir;

pub struct PrintOnPanic<F: Fn() -> String>(pub F);

@@ -77,13 +82,29 @@ impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
}

#[derive(Clone)]
pub struct GccCodegenBackend;
pub struct GccCodegenBackend {
    supports_128bit_integers: Arc<Mutex<bool>>,
}

impl CodegenBackend for GccCodegenBackend {
    fn init(&self, sess: &Session) {
        if sess.lto() != Lto::No {
            sess.warn("LTO is not supported. You may get a linker error.");
        }

        let temp_dir = TempDir::new().expect("cannot create temporary directory");
        let temp_file = temp_dir.into_path().join("result.asm");
        let check_context = Context::default();
        check_context.set_print_errors_to_stderr(false);
        let _int128_ty = check_context.new_c_type(CType::UInt128t);
        // NOTE: we cannot just call compile() as this would require other files than libgccjit.so.
        check_context.compile_to_file(gccjit::OutputKind::Assembler, temp_file.to_str().expect("path to str"));
        *self.supports_128bit_integers.lock().expect("lock") = check_context.get_last_error() == Ok(None);
    }

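The 128-bit probe in `init` extracts cleanly into a standalone helper; a minimal sketch (assuming only the `gccjit` crate; `supports_u128` and the `asm_path` parameter are illustrative names):

use gccjit::{CType, Context, OutputKind};

// Build a throwaway context, request a 128-bit C type, and force code
// generation; if libgccjit cannot handle the type, it records an error.
fn supports_u128(asm_path: &str) -> bool {
    let check_context = Context::default();
    check_context.set_print_errors_to_stderr(false);
    let _int128_ty = check_context.new_c_type(CType::UInt128t);
    // NOTE: compile_to_file() rather than compile(), for the reason given above.
    check_context.compile_to_file(OutputKind::Assembler, asm_path);
    check_context.get_last_error() == Ok(None)
}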
    fn provide(&self, providers: &mut Providers) {
        // FIXME(antoyo) compute list of enabled features from cli flags
        providers.global_backend_features = |_tcx, ()| vec![];
    }

    fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box<dyn Any> {
@@ -129,7 +150,7 @@ impl ExtraBackendMethods for GccCodegenBackend {
    }

    fn compile_codegen_unit<'tcx>(&self, tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<Self::Module>, u64) {
        base::compile_codegen_unit(tcx, cgu_name)
        base::compile_codegen_unit(tcx, cgu_name, *self.supports_128bit_integers.lock().expect("lock"))
    }

    fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel, _features: &[String]) -> TargetMachineFactoryFn<Self> {
@@ -237,7 +258,9 @@ impl WriteBackendMethods for GccCodegenBackend {
/// This is the entrypoint for a hot plugged rustc_codegen_gccjit
#[no_mangle]
pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
    Box::new(GccCodegenBackend)
    Box::new(GccCodegenBackend {
        supports_128bit_integers: Arc::new(Mutex::new(false)),
    })
}

fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {

@@ -7,7 +7,6 @@ use rustc_middle::bug;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_target::abi::{AddressSpace, Align, Integer, Size};

use crate::common::TypeReflection;
use crate::context::CodegenCx;
use crate::type_of::LayoutGccExt;

@@ -119,9 +118,15 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
    }

    fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
        if typ.is_integral() {
        if self.is_int_type_or_bool(typ) {
            TypeKind::Integer
        }
        else if typ.is_compatible_with(self.float_type) {
            TypeKind::Float
        }
        else if typ.is_compatible_with(self.double_type) {
            TypeKind::Double
        }
        else if typ.dyncast_vector().is_some() {
            TypeKind::Vector
        }
@@ -175,24 +180,7 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
    }

    fn int_width(&self, typ: Type<'gcc>) -> u64 {
        if typ.is_i8(self) || typ.is_u8(self) {
            8
        }
        else if typ.is_i16(self) || typ.is_u16(self) {
            16
        }
        else if typ.is_i32(self) || typ.is_u32(self) {
            32
        }
        else if typ.is_i64(self) || typ.is_u64(self) {
            64
        }
        else if typ.is_i128(self) || typ.is_u128(self) {
            128
        }
        else {
            panic!("Cannot get width of int type {:?}", typ);
        }
        self.gcc_int_width(typ)
    }

    fn val_ty(&self, value: RValue<'gcc>) -> Type<'gcc> {

@@ -251,7 +251,9 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
            ty::Ref(..) | ty::RawPtr(_) => {
                return self.field(cx, index).gcc_type(cx, true);
            }
            ty::Adt(def, _) if def.is_box() => {
            // only wide pointer boxes are handled as pointers
            // thin pointer boxes with scalar allocators are handled by the general logic below
            ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
                let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
                return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate);
            }
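The distinction behind the new `is_zst()` guard shows up in ordinary Rust layouts; a small runnable check (illustrative only, unrelated to the backend itself):

use std::mem::size_of;

fn main() {
    // A thin-pointer box (sized pointee, zero-sized allocator) is one pointer wide...
    assert_eq!(size_of::<Box<u8>>(), size_of::<*mut u8>());
    // ...while a wide-pointer box (unsized pointee) is a scalar pair of two words.
    assert_eq!(size_of::<Box<[u8]>>(), 2 * size_of::<*mut u8>());
}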