make native and crust functions conform to the x86-64 ABI

Jyun-Yan You
2012-03-20 11:44:28 -07:00
committed by Graydon Hoare
parent 716dc29ffc
commit d3712a287d


@@ -1,8 +1,11 @@
import driver::session::{session, arch_x86_64};
import syntax::codemap::span;
import libc::c_uint;
import front::attr;
import lib::llvm::{ llvm, TypeRef, ValueRef,
ModuleRef, CallConv, Attribute,
StructRetAttribute, ByValAttribute
};
import syntax::ast;
import back::link;
import common::*;
@@ -14,6 +17,381 @@ import std::map::hashmap;
export link_name, trans_native_mod, register_crust_fn, trans_crust_fn,
decl_native_fn;
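// Register classes for argument passing on x86-64, as defined by
// the System V AMD64 ABI: each eightbyte of a value is assigned a
// class that decides whether it travels in a general-purpose
// register, an SSE register, the x87 stack, or memory.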
enum x86_64_reg_class {
no_class,
integer_class,
sse_fs_class,
sse_fv_class,
sse_ds_class,
sse_dv_class,
sse_int_class,
sseup_class,
x87_class,
x87up_class,
complex_x87_class,
memory_class
}
fn is_sse(c: x86_64_reg_class) -> bool {
ret alt c {
sse_fs_class | sse_fv_class |
sse_ds_class | sse_dv_class { true }
_ { false }
};
}
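// An SSE class followed by two sseup eightbytes describes a value
// that would occupy a 256-bit AVX (YMM) register.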
fn is_ymm(cls: [x86_64_reg_class]) -> bool {
let len = vec::len(cls);
ret (len > 2u &&
is_sse(cls[0]) &&
cls[1] == sseup_class &&
cls[2] == sseup_class) ||
(len > 3u &&
is_sse(cls[1]) &&
cls[2] == sseup_class &&
cls[3] == sseup_class);
}
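// Classify a type by splitting it into eightbytes (8-byte chunks)
// and assigning each one a register class, following the SysV
// AMD64 classification algorithm. For example, {i32, i32, f64}
// classifies as [integer_class, sse_ds_class]: the two i32s share
// the first eightbyte and the double fills the second.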
fn classify_ty(ty: TypeRef) -> [x86_64_reg_class] {
fn align(off: uint, ty: TypeRef) -> uint {
let a = ty_align(ty);
ret (off + a - 1u) / a * a;
}
fn struct_tys(ty: TypeRef) -> [TypeRef] {
let n = llvm::LLVMCountStructElementTypes(ty);
let elts = vec::from_elem(n as uint, ptr::null());
vec::as_buf(elts) {|buf|
llvm::LLVMGetStructElementTypes(ty, buf);
}
ret elts;
}
fn ty_align(ty: TypeRef) -> uint {
ret alt llvm::LLVMGetTypeKind(ty) as int {
8 /* integer */ {
((llvm::LLVMGetIntTypeWidth(ty) as uint) + 7u) / 8u
}
12 /* pointer */ { 8u }
2 /* float */ { 4u }
3 /* double */ { 8u }
10 /* struct */ {
vec::foldl(0u, struct_tys(ty)) {|a, t|
uint::max(a, ty_align(t))
}
}
_ {
fail "ty_size: unhandled type"
}
};
}
fn ty_size(ty: TypeRef) -> uint {
ret alt llvm::LLVMGetTypeKind(ty) as int {
8 /* integer */ {
((llvm::LLVMGetIntTypeWidth(ty) as uint) + 7u) / 8u
}
12 /* pointer */ { 8u }
2 /* float */ { 4u }
3 /* double */ { 8u }
10 /* struct */ {
vec::foldl(0u, struct_tys(ty)) {|s, t|
s + ty_size(t)
}
}
_ {
fail "ty_size: unhandled type"
}
};
}
fn all_mem(cls: [mut x86_64_reg_class]) {
vec::iteri(cls) {|i, _c|
cls[i] = memory_class;
}
}
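// Merge a freshly computed class into cls[i] using the ABI's
// pairwise rules: memory and integer dominate, and any x87 class
// in the mix forces memory.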
fn unify(cls: [mut x86_64_reg_class], i: uint,
new: x86_64_reg_class) {
if cls[i] == new {
ret;
} else if cls[i] == no_class {
cls[i] = new;
} else if new == no_class {
ret;
} else if cls[i] == memory_class || new == memory_class {
cls[i] = memory_class;
} else if cls[i] == integer_class || new == integer_class {
cls[i] = integer_class;
} else if cls[i] == x87_class ||
cls[i] == x87up_class ||
cls[i] == complex_x87_class ||
new == x87_class ||
new == x87up_class ||
new == complex_x87_class {
cls[i] = memory_class;
} else {
cls[i] = new;
}
}
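// Classify each field at its naturally aligned offset; an empty
// struct degenerates to a single integer eightbyte.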
fn classify_struct(tys: [TypeRef],
cls: [mut x86_64_reg_class], i: uint,
off: uint) {
if vec::is_empty(tys) {
classify(T_i64(), cls, i, off);
} else {
let field_off = off;
for ty in tys {
field_off = align(field_off, ty);
classify(ty, cls, i, field_off);
field_off += ty_size(ty);
}
}
}
fn classify(ty: TypeRef,
cls: [mut x86_64_reg_class], i: uint,
off: uint) {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign != 0u {
let i = off / 8u;
let e = (off + t_size + 7u) / 8u;
while i < e {
unify(cls, i, memory_class);
i += 1u;
}
ret;
}
alt llvm::LLVMGetTypeKind(ty) as int {
8 /* integer */ |
12 /* pointer */ {
unify(cls, off / 8u, integer_class);
}
2 /* float */ {
if off % 8u == 4u {
unify(cls, off / 8u, sse_fv_class);
} else {
unify(cls, off / 8u, sse_fs_class);
}
}
3 /* double */ {
unify(cls, off / 8u, sse_ds_class);
}
10 /* struct */ {
classify_struct(struct_tys(ty), cls, i, off);
}
_ {
fail "classify: unhandled type";
}
}
}
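// Post-merger cleanup pass from the ABI: demote ill-formed SSE
// sequences (and anything touching memory or x87up) to memory,
// and turn stray sseup eightbytes into the SSE integer class.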
fn fixup(ty: TypeRef, cls: [mut x86_64_reg_class]) {
let i = 0u;
let e = vec::len(cls);
if vec::len(cls) > 2u &&
llvm::LLVMGetTypeKind(ty) as int == 10 /* struct */ {
if is_sse(cls[i]) {
i += 1u;
while i < e {
if cls[i] != sseup_class {
all_mem(cls);
ret;
}
i += 1u;
}
} else {
all_mem(cls);
ret
}
} else {
while i < e {
if cls[i] == memory_class {
all_mem(cls);
ret;
}
if cls[i] == x87up_class {
// for darwin
// cls[i] = sse_ds_class;
all_mem(cls);
ret;
}
if cls[i] == sseup_class {
cls[i] = sse_int_class;
} else if is_sse(cls[i]) {
i += 1u;
while cls[i] == sseup_class { i += 1u; }
} else if cls[i] == x87_class {
i += 1u;
while cls[i] == x87up_class { i += 1u; }
} else {
i += 1u;
}
}
}
}
let words = (ty_size(ty) + 7u) / 8u;
let cls = vec::to_mut(vec::from_elem(words, no_class));
if words > 4u {
all_mem(cls);
ret vec::from_mut(cls);
}
classify(ty, cls, 0u, 0u);
fixup(ty, cls);
ret vec::from_mut(cls);
}
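// Build an LLVM type that occupies exactly the registers implied
// by the class list, e.g. [integer_class, integer_class] becomes
// {i64, i64} and [sse_ds_class, sse_ds_class] becomes
// {double, double}.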
fn llreg_ty(cls: [x86_64_reg_class]) -> TypeRef {
fn llvec_len(cls: [x86_64_reg_class]) -> uint {
let len = 1u;
for c in cls {
if c != sseup_class {
break;
}
len += 1u;
}
ret len;
}
let tys = [];
let i = 0u;
let e = vec::len(cls);
while i < e {
alt cls[i] {
integer_class {
tys += [T_i64()];
}
sse_fv_class {
let vec_len = llvec_len(vec::tailn(cls, i + 1u)) * 2u;
let vec_ty = llvm::LLVMVectorType(T_f32(),
vec_len as c_uint);
tys += [vec_ty];
i += vec_len;
cont;
}
sse_fs_class {
tys += [T_f32()];
}
sse_ds_class {
tys += [T_f64()];
}
_ {
fail "llregtype: unhandled class";
}
}
i += 1u;
}
ret T_struct(tys);
}
type x86_64_llty = {
cast: bool,
ty: TypeRef
};
type x86_64_tys = {
arg_tys: [x86_64_llty],
ret_ty: x86_64_llty,
attrs: [option<Attribute>],
sret: bool
};
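// Compute the x86-64 lowering of a native signature: which
// arguments must be cast to register-shaped structs, which are
// passed byval in memory, and whether the return value comes back
// through an sret out-pointer.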
fn x86_64_tys(atys: [TypeRef],
rty: TypeRef,
ret_def: bool) -> x86_64_tys {
fn is_reg_ty(ty: TypeRef) -> bool {
ret alt llvm::LLVMGetTypeKind(ty) as int {
8 /* integer */ |
12 /* pointer */ |
2 /* float */ |
3 /* double */ { true }
_ { false }
};
}
fn is_pass_byval(cls: [x86_64_reg_class]) -> bool {
ret cls[0] == memory_class ||
cls[0] == x87_class ||
cls[0] == complex_x87_class;
}
fn is_ret_bysret(cls: [x86_64_reg_class]) -> bool {
ret cls[0] == memory_class;
}
fn x86_64_ty(ty: TypeRef,
is_mem_cls: fn(cls: [x86_64_reg_class]) -> bool,
attr: Attribute) -> (x86_64_llty, option<Attribute>) {
let cast = false;
let ty_attr = option::none;
let llty = ty;
if !is_reg_ty(ty) {
let cls = classify_ty(ty);
if is_mem_cls(cls) {
llty = T_ptr(ty);
ty_attr = option::some(attr);
} else {
cast = true;
llty = llreg_ty(cls);
}
}
ret ({ cast: cast, ty: llty }, ty_attr);
}
let arg_tys = [];
let attrs = [];
for t in atys {
let (ty, attr) = x86_64_ty(t, is_pass_byval, ByValAttribute);
arg_tys += [ty];
attrs += [attr];
}
let (ret_ty, ret_attr) = x86_64_ty(rty, is_ret_bysret,
StructRetAttribute);
let sret = option::is_some(ret_attr);
if sret {
arg_tys = [ret_ty] + arg_tys;
ret_ty = { cast: false,
ty: T_void()
};
attrs = [ret_attr] + attrs;
} else if !ret_def {
ret_ty = { cast: false,
ty: T_void()
};
}
ret {
arg_tys: arg_tys,
ret_ty: ret_ty,
attrs: attrs,
sret: sret
};
}
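// Declare a function with the lowered signature and attach the
// byval/sret parameter attributes recorded in tys.attrs.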
fn decl_x86_64_fn(tys: x86_64_tys,
decl: fn(fnty: TypeRef) -> ValueRef) -> ValueRef {
let atys = vec::map(tys.arg_tys) {|t| t.ty };
let rty = tys.ret_ty.ty;
let fnty = T_fn(atys, rty);
let llfn = decl(fnty);
vec::iteri(tys.attrs) {|i, a|
alt a {
option::some(attr) {
let llarg = llvm::LLVMGetParam(llfn, i as c_uint);
llvm::LLVMAddAttribute(llarg, attr as c_uint);
}
_ {}
}
}
ret llfn;
}
fn link_name(i: @ast::native_item) -> str {
alt attr::get_meta_item_value_str_by_name(i.attrs, "link_name") {
none { ret i.ident; }
@@ -26,7 +404,8 @@ type c_stack_tys = {
ret_ty: TypeRef,
ret_def: bool,
bundle_ty: TypeRef,
shim_fn_ty: TypeRef,
x86_64_tys: option<x86_64_tys>
};
fn c_arg_and_ret_lltys(ccx: @crate_ctxt,
@@ -45,12 +424,19 @@ fn c_stack_tys(ccx: @crate_ctxt,
id: ast::node_id) -> @c_stack_tys {
let (llargtys, llretty, ret_ty) = c_arg_and_ret_lltys(ccx, id);
let bundle_ty = T_struct(llargtys + [T_ptr(llretty)]);
let ret_def = !ty::type_is_bot(ret_ty) && !ty::type_is_nil(ret_ty);
let x86_64 = if ccx.sess.targ_cfg.arch == arch_x86_64 {
option::some(x86_64_tys(llargtys, llretty, ret_def))
} else {
option::none
};
ret @{
arg_tys: llargtys,
ret_ty: llretty,
ret_def: ret_def,
bundle_ty: bundle_ty,
shim_fn_ty: T_fn([T_ptr(bundle_ty)], T_void()),
x86_64_tys: x86_64
};
}
@@ -173,16 +559,75 @@ fn trans_native_mod(ccx: @crate_ctxt,
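// Unpack the argument bundle into the values handed to the native
// function: with the x86-64 lowering, cast args are loaded through
// a pointer cast, byval args are passed by address, and an sret
// return slot goes first.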
let llargvals = [];
let i = 0u;
let n = vec::len(tys.arg_tys);
alt tys.x86_64_tys {
some(x86_64) {
let atys = x86_64.arg_tys;
let attrs = x86_64.attrs;
if x86_64.sret {
let llretptr = GEPi(bcx, llargbundle, [0, n as int]);
let llretloc = Load(bcx, llretptr);
llargvals = [llretloc];
atys = vec::tail(atys);
attrs = vec::tail(attrs);
}
while i < n {
let llargval = if atys[i].cast {
let arg_ptr = GEPi(bcx, llargbundle,
[0, i as int]);
arg_ptr = BitCast(bcx, arg_ptr,
T_ptr(atys[i].ty));
Load(bcx, arg_ptr)
} else if option::is_some(attrs[i]) {
GEPi(bcx, llargbundle, [0, i as int])
} else {
load_inbounds(bcx, llargbundle, [0, i as int])
};
llargvals += [llargval];
i += 1u;
}
}
_ {
while i < n {
let llargval = load_inbounds(bcx, llargbundle,
[0, i as int]);
llargvals += [llargval];
i += 1u;
}
}
}
ret llargvals;
}
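// Attach the byval/sret attributes to the call instruction itself
// (parameter indices are 1-based), then store the returned value
// back through the bundle's return pointer.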
fn build_ret(bcx: block, tys: @c_stack_tys,
llargbundle: ValueRef, llretval: ValueRef) {
alt tys.x86_64_tys {
some(x86_64) {
vec::iteri(x86_64.attrs) {|i, a|
alt a {
some(attr) {
llvm::LLVMAddInstrAttribute(
llretval, (i + 1u) as c_uint,
attr as c_uint);
}
_ {}
}
}
if x86_64.sret || !tys.ret_def {
ret;
}
let n = vec::len(tys.arg_tys);
let llretptr = GEPi(bcx, llargbundle, [0, n as int]);
let llretloc = Load(bcx, llretptr);
if x86_64.ret_ty.cast {
let tmp_ptr = BitCast(bcx, llretloc,
T_ptr(x86_64.ret_ty.ty));
Store(bcx, llretval, tmp_ptr);
} else {
Store(bcx, llretval, llretloc);
};
}
_ {
if tys.ret_def {
let n = vec::len(tys.arg_tys);
// R** llretptr = &args->r;
@@ -193,11 +638,22 @@ fn trans_native_mod(ccx: @crate_ctxt,
Store(bcx, llretval, llretloc);
}
}
}
}
let lname = link_name(native_item);
// Declare the "prototype" for the base function F:
let llbasefn = alt tys.x86_64_tys {
some(x86_64) {
decl_x86_64_fn(x86_64) {|fnty|
decl_fn(ccx.llmod, lname, cc, fnty)
}
}
_ {
let llbasefnty = T_fn(tys.arg_tys, tys.ret_ty);
decl_fn(ccx.llmod, lname, cc, llbasefnty)
}
};
// Name the shim function
let shim_name = lname + "__c_stack_shim";
ret build_shim_fn_(ccx, shim_name, llbasefn, tys, cc,
@@ -311,24 +767,88 @@ fn trans_crust_fn(ccx: @crate_ctxt, path: ast_map::path, decl: ast::fn_decl,
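// For crust fns (Rust functions exposed under the C calling
// convention) the flow is reversed: incoming C-ABI parameters are
// stored into the Rust argument bundle, skipping a leading sret
// parameter when one is present.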
fn build_args(bcx: block, tys: @c_stack_tys,
llwrapfn: ValueRef, llargbundle: ValueRef) {
alt tys.x86_64_tys {
option::some(x86_64) {
let atys = x86_64.arg_tys;
let attrs = x86_64.attrs;
let j = 0u;
let llretptr = if x86_64.sret {
atys = vec::tail(atys);
attrs = vec::tail(attrs);
j = 1u;
llvm::LLVMGetParam(llwrapfn, 0 as c_uint)
} else if x86_64.ret_ty.cast {
let retptr = alloca(bcx, x86_64.ret_ty.ty);
BitCast(bcx, retptr, T_ptr(tys.ret_ty))
} else {
alloca(bcx, tys.ret_ty)
};
let i = 0u, n = vec::len(atys);
while i < n {
let argval = llvm::LLVMGetParam(llwrapfn,
(i + j) as c_uint);
if option::is_some(attrs[i]) {
argval = Load(bcx, argval);
store_inbounds(bcx, argval, llargbundle,
[0, i as int]);
} else if atys[i].cast {
let argptr = GEPi(bcx, llargbundle,
[0, i as int]);
argptr = BitCast(bcx, argptr, T_ptr(atys[i].ty));
Store(bcx, argval, argptr);
} else {
store_inbounds(bcx, argval, llargbundle,
[0, i as int]);
}
i += 1u;
}
store_inbounds(bcx, llretptr, llargbundle, [0, n as int]);
}
_ {
let llretptr = alloca(bcx, tys.ret_ty);
let i = 0u, n = vec::len(tys.arg_tys);
while i < n {
let llargval = llvm::LLVMGetParam(llwrapfn,
i as c_uint);
store_inbounds(bcx, llargval, llargbundle,
[0, i as int]);
i += 1u;
}
store_inbounds(bcx, llretptr, llargbundle, [0, n as int]);
}
}
}
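// Load the return value out of the bundle and return it to the C
// caller, going through a pointer cast when the x86-64 lowering
// changed the return type.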
fn build_ret(bcx: block, tys: @c_stack_tys,
llargbundle: ValueRef) {
alt tys.x86_64_tys {
option::some(x86_64) {
if x86_64.sret || !tys.ret_def {
RetVoid(bcx);
ret;
}
let n = vec::len(tys.arg_tys);
let llretval = load_inbounds(bcx, llargbundle,
[0, n as int]);
let llretval = if x86_64.ret_ty.cast {
let retptr = BitCast(bcx, llretval,
T_ptr(x86_64.ret_ty.ty));
Load(bcx, retptr)
} else {
Load(bcx, llretval)
};
Ret(bcx, llretval);
}
_ {
let n = vec::len(tys.arg_tys);
let llretval = load_inbounds(bcx, llargbundle,
[0, n as int]);
let llretval = Load(bcx, llretval);
Ret(bcx, llretval);
}
}
}
build_wrap_fn_(ccx, tys, llshimfn, llwrapfn,
ccx.upcalls.call_shim_on_rust_stack,
@@ -348,11 +868,20 @@ fn register_crust_fn(ccx: @crate_ctxt, sp: span,
path: ast_map::path, node_id: ast::node_id)
-> ValueRef {
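// On x86-64, register the crust fn with the lowered signature so
// that C callers see a conforming ABI.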
let t = ty::node_id_to_type(ccx.tcx, node_id);
let (llargtys, llretty, ret_ty) = c_arg_and_ret_lltys(ccx, node_id);
ret if ccx.sess.targ_cfg.arch == arch_x86_64 {
let ret_def = !ty::type_is_bot(ret_ty) && !ty::type_is_nil(ret_ty);
let x86_64 = x86_64_tys(llargtys, llretty, ret_def);
decl_x86_64_fn(x86_64) {|fnty|
register_fn_fuller(ccx, sp, path, "crust fn", node_id,
t, lib::llvm::CCallConv, fnty)
}
} else {
let llfty = T_fn(llargtys, llretty);
register_fn_fuller(ccx, sp, path, "crust fn", node_id,
t, lib::llvm::CCallConv, llfty)
}
}
fn abi_of_native_fn(ccx: @crate_ctxt, i: @ast::native_item)
-> ast::native_abi {