Describe lifetime of call argument temporaries passed indirectly

Tomasz Miąsko
2025-03-14 10:48:02 +01:00
parent a69bc17fb8
commit 3b7ca287a7
4 changed files with 108 additions and 42 deletions
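The calls affected are those whose arguments are passed indirectly: codegen copies the value into a temporary stack slot and passes the callee a pointer to it. A minimal illustration of such a call site, loosely modelled on the codegen test added below (the function name here is only for the example):

// A large by-value argument like this is passed indirectly: codegen copies it
// into a temporary alloca and hands the callee a pointer. With this change the
// temporary is bracketed by llvm.lifetime.start/llvm.lifetime.end, so LLVM
// knows the slot is dead once the call returns and can reuse the stack space.
pub fn call_with_big_arg(a: [u8; 1024], fun: extern "C" fn([u8; 1024])) {
    fun(a);
}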

View File

@@ -1,6 +1,6 @@
 use std::cmp;
 
-use rustc_abi::{BackendRepr, ExternAbi, HasDataLayout, Reg, WrappingRange};
+use rustc_abi::{BackendRepr, ExternAbi, HasDataLayout, Reg, Size, WrappingRange};
 use rustc_ast as ast;
 use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_data_structures::packed::Pu128;
@@ -158,7 +158,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         llargs: &[Bx::Value],
         destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
         mut unwind: mir::UnwindAction,
-        copied_constant_arguments: &[PlaceRef<'tcx, <Bx as BackendTypes>::Value>],
+        lifetime_ends_after_call: &[(Bx::Value, Size)],
         instance: Option<Instance<'tcx>>,
         mergeable_succ: bool,
     ) -> MergingSucc {
@@ -245,8 +245,8 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             if let Some((ret_dest, target)) = destination {
                 bx.switch_to_block(fx.llbb(target));
                 fx.set_debug_loc(bx, self.terminator.source_info);
-                for tmp in copied_constant_arguments {
-                    bx.lifetime_end(tmp.val.llval, tmp.layout.size);
+                for &(tmp, size) in lifetime_ends_after_call {
+                    bx.lifetime_end(tmp, size);
                 }
                 fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
             }
@@ -259,8 +259,8 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             }
             if let Some((ret_dest, target)) = destination {
-                for tmp in copied_constant_arguments {
-                    bx.lifetime_end(tmp.val.llval, tmp.layout.size);
+                for &(tmp, size) in lifetime_ends_after_call {
+                    bx.lifetime_end(tmp, size);
                 }
                 fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
                 self.funclet_br(fx, bx, target, mergeable_succ)
@@ -1048,7 +1048,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             (args, None)
         };
 
-        let mut copied_constant_arguments = vec![];
+        // When generating arguments we sometimes introduce temporary allocations with lifetime
+        // that extend for the duration of a call. Keep track of those allocations and their sizes
+        // to generate `lifetime_end` when the call returns.
+        let mut lifetime_ends_after_call: Vec<(Bx::Value, Size)> = Vec::new();
 
         'make_args: for (i, arg) in first_args.iter().enumerate() {
             let mut op = self.codegen_operand(bx, &arg.node);
@@ -1136,12 +1139,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     bx.lifetime_start(tmp.val.llval, tmp.layout.size);
                     op.val.store(bx, tmp);
                     op.val = Ref(tmp.val);
-                    copied_constant_arguments.push(tmp);
+                    lifetime_ends_after_call.push((tmp.val.llval, tmp.layout.size));
                 }
                 _ => {}
             }
 
-            self.codegen_argument(bx, op, &mut llargs, &fn_abi.args[i]);
+            self.codegen_argument(
+                bx,
+                op,
+                &mut llargs,
+                &fn_abi.args[i],
+                &mut lifetime_ends_after_call,
+            );
         }
 
         let num_untupled = untuple.map(|tup| {
             self.codegen_arguments_untupled(
@@ -1149,6 +1158,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 &tup.node,
                 &mut llargs,
                 &fn_abi.args[first_args.len()..],
+                &mut lifetime_ends_after_call,
             )
         });
@@ -1173,7 +1183,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             );
             let last_arg = fn_abi.args.last().unwrap();
-            self.codegen_argument(bx, location, &mut llargs, last_arg);
+            self.codegen_argument(
+                bx,
+                location,
+                &mut llargs,
+                last_arg,
+                &mut lifetime_ends_after_call,
+            );
         }
 
         let fn_ptr = match (instance, llfn) {
@@ -1189,7 +1205,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             &llargs,
             destination,
             unwind,
-            &copied_constant_arguments,
+            &lifetime_ends_after_call,
             instance,
             mergeable_succ,
         )
@@ -1479,6 +1495,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         op: OperandRef<'tcx, Bx::Value>,
         llargs: &mut Vec<Bx::Value>,
         arg: &ArgAbi<'tcx, Ty<'tcx>>,
+        lifetime_ends_after_call: &mut Vec<(Bx::Value, Size)>,
     ) {
         match arg.mode {
             PassMode::Ignore => return,
@@ -1517,7 +1534,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     None => arg.layout.align.abi,
                 };
                 let scratch = PlaceValue::alloca(bx, arg.layout.size, required_align);
+                bx.lifetime_start(scratch.llval, arg.layout.size);
                 op.val.store(bx, scratch.with_type(arg.layout));
+                lifetime_ends_after_call.push((scratch.llval, arg.layout.size));
                 (scratch.llval, scratch.align, true)
             }
             PassMode::Cast { .. } => {
@@ -1538,7 +1557,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     // alignment requirements may be higher than the type's alignment, so copy
                     // to a higher-aligned alloca.
                     let scratch = PlaceValue::alloca(bx, arg.layout.size, required_align);
+                    bx.lifetime_start(scratch.llval, arg.layout.size);
                     bx.typed_place_copy(scratch, op_place_val, op.layout);
+                    lifetime_ends_after_call.push((scratch.llval, arg.layout.size));
                     (scratch.llval, scratch.align, true)
                 } else {
                     (op_place_val.llval, op_place_val.align, true)
@@ -1620,6 +1641,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         operand: &mir::Operand<'tcx>,
         llargs: &mut Vec<Bx::Value>,
         args: &[ArgAbi<'tcx, Ty<'tcx>>],
+        lifetime_ends_after_call: &mut Vec<(Bx::Value, Size)>,
     ) -> usize {
         let tuple = self.codegen_operand(bx, operand);
@@ -1632,13 +1654,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             for i in 0..tuple.layout.fields.count() {
                 let field_ptr = tuple_ptr.project_field(bx, i);
                 let field = bx.load_operand(field_ptr);
-                self.codegen_argument(bx, field, llargs, &args[i]);
+                self.codegen_argument(bx, field, llargs, &args[i], lifetime_ends_after_call);
             }
         } else {
             // If the tuple is immediate, the elements are as well.
             for i in 0..tuple.layout.fields.count() {
                 let op = tuple.extract_field(self, bx, i);
-                self.codegen_argument(bx, op, llargs, &args[i]);
+                self.codegen_argument(bx, op, llargs, &args[i], lifetime_ends_after_call);
             }
         }
         tuple.layout.fields.count()
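Taken together, the change is a small bookkeeping pattern: while lowering arguments, every scratch alloca that only needs to live for the duration of the call is recorded as a (pointer, size) pair, and do_call drains that list right after the call returns, on both the plain-call and invoke paths. A self-contained sketch of that pattern, using a toy Builder in place of the backend trait (all names here are hypothetical stand-ins, not the compiler's actual API):

// Hypothetical stand-ins for the backend's value handle and size type.
#[derive(Clone, Copy)]
struct Value(u64);
#[derive(Clone, Copy)]
struct Size(u64);

// Toy builder that just logs what a real codegen backend would emit.
struct Builder {
    next_id: u64,
}

impl Builder {
    fn alloca(&mut self, size: Size) -> Value {
        self.next_id += 1;
        println!("%{} = alloca {} bytes", self.next_id, size.0);
        Value(self.next_id)
    }
    fn lifetime_start(&mut self, ptr: Value, size: Size) {
        println!("call llvm.lifetime.start(i64 {}, %{})", size.0, ptr.0);
    }
    fn lifetime_end(&mut self, ptr: Value, size: Size) {
        println!("call llvm.lifetime.end(i64 {}, %{})", size.0, ptr.0);
    }
    fn call(&mut self, args: &[Value]) {
        println!("call with {} argument(s)", args.len());
    }
}

// Lower one indirectly-passed argument: copy it into a fresh temporary whose
// lifetime starts here and is recorded so it can be ended after the call.
fn codegen_indirect_arg(
    bx: &mut Builder,
    size: Size,
    llargs: &mut Vec<Value>,
    lifetime_ends_after_call: &mut Vec<(Value, Size)>,
) {
    let scratch = bx.alloca(size);
    bx.lifetime_start(scratch, size);
    // ... the operand would be stored or memcpy'd into `scratch` here ...
    lifetime_ends_after_call.push((scratch, size));
    llargs.push(scratch);
}

fn main() {
    let mut bx = Builder { next_id: 0 };
    let mut llargs = Vec::new();
    let mut lifetime_ends_after_call = Vec::new();

    // Two indirectly-passed arguments, each getting its own temporary.
    codegen_indirect_arg(&mut bx, Size(4096), &mut llargs, &mut lifetime_ends_after_call);
    codegen_indirect_arg(&mut bx, Size(8), &mut llargs, &mut lifetime_ends_after_call);

    bx.call(&llargs);

    // Once the call has returned, every recorded temporary is dead.
    for (ptr, size) in lifetime_ends_after_call {
        bx.lifetime_end(ptr, size);
    }
}

LLVM's stack coloring relies on these markers to let temporaries from different calls share a stack slot, which is exactly what the "live ranges don't overlap" comment in the removed test (and the back-to-back calls in the new test) is checking.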

View File

@@ -2,9 +2,10 @@
 //@ add-core-stubs
 //@ revisions:i686-linux x86_64-linux
-//@[i686-linux] compile-flags: --target i686-unknown-linux-gnu -C panic=abort
+//@ compile-flags: -Cno-prepopulate-passes -Copt-level=1 -Cpanic=abort
+//@[i686-linux] compile-flags: --target i686-unknown-linux-gnu
 //@[i686-linux] needs-llvm-components: x86
-//@[x86_64-linux] compile-flags: --target x86_64-unknown-linux-gnu -C panic=abort
+//@[x86_64-linux] compile-flags: --target x86_64-unknown-linux-gnu
 //@[x86_64-linux] needs-llvm-components: x86
 
 // Tests that we correctly copy arguments into allocas when the alignment of the byval argument
@@ -54,8 +55,10 @@ extern "C" {
 pub unsafe fn rust_to_c_increases_alignment(x: Align1) {
     // i686-linux: start:
     // i686-linux-NEXT: [[ALLOCA:%[0-9a-z]+]] = alloca [48 x i8], align 4
+    // i686-linux-NEXT: call void @llvm.lifetime.start.p0(i64 48, ptr {{.*}}[[ALLOCA]])
     // i686-linux-NEXT: call void @llvm.memcpy.{{.+}}(ptr {{.*}}align 4 {{.*}}[[ALLOCA]], ptr {{.*}}align 1 {{.*}}%x
     // i686-linux-NEXT: call void @extern_c_align1({{.+}} [[ALLOCA]])
+    // i686-linux-NEXT: call void @llvm.lifetime.end.p0(i64 48, ptr {{.*}}[[ALLOCA]])
     // x86_64-linux: start:
     // x86_64-linux-NEXT: call void @extern_c_align1

View File

@@ -0,0 +1,68 @@
+// Test that temporary allocas used for call arguments have their lifetimes described by
+// intrinsics.
+//
+//@ add-core-stubs
+//@ compile-flags: -Copt-level=1 -Cno-prepopulate-passes --crate-type=lib --target i686-unknown-linux-gnu
+//@ needs-llvm-components: x86
+
+#![feature(no_core)]
+#![no_std]
+#![no_core]
+
+extern crate minicore;
+use minicore::*;
+
+// Const operand. Regression test for #98156.
+//
+// CHECK-LABEL: define void @const_indirect(
+// CHECK-NEXT: start:
+// CHECK-NEXT: [[B:%.*]] = alloca
+// CHECK-NEXT: [[A:%.*]] = alloca
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[A]])
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 4 {{.*}}, i32 4096, i1 false)
+// CHECK-NEXT: call void %h(ptr {{.*}} [[A]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[A]])
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[B]])
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[B]], ptr align 4 {{.*}}, i32 4096, i1 false)
+// CHECK-NEXT: call void %h(ptr {{.*}} [[B]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[B]])
+#[no_mangle]
+pub fn const_indirect(h: extern "C" fn([u32; 1024])) {
+    const C: [u32; 1024] = [0; 1024];
+    h(C);
+    h(C);
+}
+
+#[repr(C)]
+pub struct Str {
+    pub ptr: *const u8,
+    pub len: usize,
+}
+
+// Pair of immediates. Regression test for #132014.
+//
+// CHECK-LABEL: define void @immediate_indirect(ptr {{.*}}%s.0, i32 {{.*}}%s.1, ptr {{.*}}%g)
+// CHECK-NEXT: start:
+// CHECK-NEXT: [[A:%.*]] = alloca
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A]])
+// CHECK-NEXT: store ptr %s.0, ptr [[A]]
+// CHECK-NEXT: [[B:%.]] = getelementptr inbounds i8, ptr [[A]], i32 4
+// CHECK-NEXT: store i32 %s.1, ptr [[B]]
+// CHECK-NEXT: call void %g(ptr {{.*}} [[A]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[A]])
+#[no_mangle]
+pub fn immediate_indirect(s: Str, g: extern "C" fn(Str)) {
+    g(s);
+}
+
+// Indirect argument with a higher alignment requirement than the type's.
+//
+// CHECK-LABEL: define void @align_indirect(ptr{{.*}} align 1{{.*}} %a, ptr{{.*}} %fun)
+// CHECK-NEXT: start:
+// CHECK-NEXT: [[A:%.*]] = alloca [1024 x i8], align 4
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1024, ptr [[A]])
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[A]], ptr align 1 %a, i32 1024, i1 false)
+// CHECK-NEXT: call void %fun(ptr {{.*}} [[A]])
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1024, ptr [[A]])
+#[no_mangle]
+pub fn align_indirect(a: [u8; 1024], fun: extern "C" fn([u8; 1024])) {
+    fun(a);
+}

View File

@@ -1,27 +0,0 @@
-// This test checks that temporaries for indirectly-passed arguments get lifetime markers.
-
-//@ compile-flags: -Copt-level=3 -C no-prepopulate-passes -Zmir-opt-level=0
-
-#![crate_type = "lib"]
-
-extern "Rust" {
-    fn f(x: [u8; 1024]);
-}
-
-const A: [u8; 1024] = [0; 1024];
-
-// CHECK-LABEL: @const_arg_indirect
-#[no_mangle]
-pub unsafe fn const_arg_indirect() {
-    // Ensure that the live ranges for the two argument temporaries don't overlap.
-
-    // CHECK: call void @llvm.lifetime.start
-    // CHECK: call void @f
-    // CHECK: call void @llvm.lifetime.end
-    // CHECK: call void @llvm.lifetime.start
-    // CHECK: call void @f
-    // CHECK: call void @llvm.lifetime.end
-
-    f(A);
-    f(A);
-}