rt: Add task_local_data and related builtin calls (Closes #2680)
@@ -914,7 +914,19 @@ rust_signal_cond_lock(rust_cond_lock *lock) {
     }
 }
 
+// set/get/atexit task_local_data can run on the rust stack for speed.
+extern "C" void *
+rust_get_task_local_data(rust_task *task) {
+    return task->task_local_data;
+}
+extern "C" void
+rust_set_task_local_data(rust_task *task, void *data) {
+    task->task_local_data = data;
+}
+extern "C" void
+rust_task_local_data_atexit(rust_task *task, void (*cleanup_fn)(void *data)) {
+    task->task_local_data_cleanup = cleanup_fn;
+}
+
 //
 // Local Variables:
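For readers following the new builtins, here is a minimal sketch (not part of this commit) of the intended call pattern: the slot starts out NULL, a first use installs a pointer and registers a destructor, and later uses read the pointer back. The real caller lives in libcore/task.rs and goes through foreign-function bindings; `my_tls_box`, `free_tls` and `example_usage` below are hypothetical names used only for illustration.

    // Hypothetical illustration only; the real driver is libcore/task.rs.
    #include <cstddef>

    struct rust_task;  // opaque here; defined in rust_task.h
    extern "C" void *rust_get_task_local_data(rust_task *task);
    extern "C" void rust_set_task_local_data(rust_task *task, void *data);
    extern "C" void rust_task_local_data_atexit(rust_task *task,
                                                void (*cleanup_fn)(void *data));

    struct my_tls_box { int value; };            // hypothetical per-task payload

    static void free_tls(void *data) {           // hypothetical cleanup callback
        delete static_cast<my_tls_box *>(data);
    }

    static void example_usage(rust_task *task) {
        if (rust_get_task_local_data(task) == NULL) {
            // First use: allocate the slot and register the destructor that
            // cleanup_task() will invoke when the task dies.
            rust_set_task_local_data(task, new my_tls_box());
            rust_task_local_data_atexit(task, free_tls);
        }
        my_tls_box *box =
            static_cast<my_tls_box *>(rust_get_task_local_data(task));
        box->value += 1;                         // use the task-local state
    }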
@@ -31,6 +31,8 @@ rust_task::rust_task(rust_sched_loop *sched_loop, rust_task_state state,
     propagate_failure(true),
     cc_counter(0),
     total_stack_sz(0),
+    task_local_data(NULL),
+    task_local_data_cleanup(NULL),
     state(state),
     cond(NULL),
     cond_name("none"),
@@ -115,6 +117,16 @@ cleanup_task(cleanup_args *args) {
         }
     }
 
+    // Clean up TLS. This will only be set if TLS was used to begin with.
+    // Because this is a crust function, it must be called from the C stack.
+    if (task->task_local_data_cleanup != NULL) {
+        // This assert should hold but it's not our job to ensure it (and
+        // the condition might change). Handled in libcore/task.rs.
+        // assert(task->task_local_data != NULL);
+        task->task_local_data_cleanup(task->task_local_data);
+        task->task_local_data = NULL;
+    }
+
     // FIXME (#2676): For performance we should do the annihilator
     // instead of the cycle collector even under normal termination, but
     // since that would hide memory management errors (like not derefing
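A minimal, self-contained sketch (an assumption, not code from this commit) of the destruction contract that the block above implements: when a task is torn down, a registered hook is called with the last value stored in the slot, and the slot is then cleared. `fake_task` and `counting_cleanup` are stand-ins invented for the example.

    #include <cassert>
    #include <cstddef>

    struct fake_task {                              // stand-in for rust_task
        void *task_local_data;
        void (*task_local_data_cleanup)(void *data);
    };

    static int cleanup_calls = 0;
    static void counting_cleanup(void *data) { (void)data; ++cleanup_calls; }

    // Mirrors the TLS portion of cleanup_task() in the hunk above.
    static void run_tls_cleanup(fake_task *task) {
        if (task->task_local_data_cleanup != NULL) {
            task->task_local_data_cleanup(task->task_local_data);
            task->task_local_data = NULL;
        }
    }

    int main() {
        int payload = 42;
        fake_task task = { &payload, counting_cleanup };
        run_tls_cleanup(&task);
        assert(cleanup_calls == 1);                 // hook ran with the payload
        assert(task.task_local_data == NULL);       // slot is cleared afterwards
        return 0;
    }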
@@ -163,6 +163,10 @@ rust_task : public kernel_owned<rust_task>, rust_cond
     // The amount of stack we're using, excluding red zones
     size_t total_stack_sz;
 
+    // Used by rust task management routines in libcore/task.rs.
+    void *task_local_data;
+    void (*task_local_data_cleanup)(void *data);
+
 private:
 
     // Protects state, cond, cond_name
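The second new field is a plain C function pointer; an equivalent spelling through a typedef (illustration only, not what the commit uses) may be easier to read:

    // Illustration only; the commit keeps the inline function-pointer syntax.
    typedef void (*task_local_cleanup_fn)(void *data);

    struct tls_fields_example {          // hypothetical mirror of the two fields
        void *task_local_data;
        task_local_cleanup_fn task_local_data_cleanup;
    };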
@@ -375,6 +379,10 @@ rust_task::call_on_c_stack(void *args, void *fn_ptr) {
     // Too expensive to check
     // assert(on_rust_stack());
 
+    // The shim functions generated by rustc contain the morestack prologue, so
+    // we need to let them know they have enough stack.
+    record_sp_limit(0);
+
     uintptr_t prev_rust_sp = next_rust_sp;
     next_rust_sp = get_sp();
 
@@ -398,12 +406,19 @@ rust_task::call_on_c_stack(void *args, void *fn_ptr) {
     }
 
     next_rust_sp = prev_rust_sp;
+
+    record_stack_limit();
 }
 
 inline void
 rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
     // Too expensive to check
     // assert(!on_rust_stack());
 
+    // Because of the hack in the other function that disables the stack limit
+    // when entering the C stack, here we restore the stack limit again.
+    record_stack_limit();
+
     assert(get_sp_limit() != 0 && "Stack must be configured");
     assert(next_rust_sp);
 
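The two hunks above pair up the stack-limit bookkeeping: the limit is zeroed before running shim code that carries the morestack prologue (so the prologue's check always passes on the big C stack), and restored when execution re-enters a Rust stack. Below is a simplified, self-contained model of that discipline, assuming a single global limit slot; `sp_limit`, `model_task` and the constant are inventions for the example, and `record_sp_limit(rust_stack_limit)` stands in for the runtime's record_stack_limit().

    // Simplified model of the bookkeeping above, not actual runtime code.
    #include <cassert>
    #include <stdint.h>

    static uintptr_t sp_limit = 0;              // stands in for the per-thread limit slot

    static void record_sp_limit(uintptr_t limit) { sp_limit = limit; }

    struct model_task {
        uintptr_t rust_stack_limit;             // limit of the active Rust segment

        void call_on_c_stack() {
            record_sp_limit(0);                 // morestack checks always pass on the C stack
            // ... switch stacks and run the foreign call here ...
            record_sp_limit(rust_stack_limit);  // restore on the way back to Rust
        }

        void call_on_rust_stack() {
            record_sp_limit(rust_stack_limit);  // re-enable the limit before Rust code
            // ... switch stacks and run the Rust code here ...
            record_sp_limit(0);                 // back to "unlimited" C-stack mode
        }
    };

    int main() {
        model_task t = { 0x1000 };
        t.call_on_c_stack();
        assert(sp_limit == 0x1000);             // ends with the Rust limit restored
        t.call_on_rust_stack();
        assert(sp_limit == 0);                  // ends back in C-stack mode
        return 0;
    }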
@@ -427,6 +442,8 @@ rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
         scoped_lock with(kill_lock);
         reentered_rust_stack = had_reentered_rust_stack;
     }
+
+    record_sp_limit(0);
 }
 
 inline void
@@ -49,19 +49,12 @@ extern "C" CDECL void
 upcall_call_shim_on_c_stack(void *args, void *fn_ptr) {
     rust_task *task = rust_get_current_task();
 
-    // FIXME (#1226) - The shim functions generated by rustc contain the
-    // morestack prologue, so we need to let them know they have enough
-    // stack.
-    record_sp_limit(0);
-
     try {
         task->call_on_c_stack(args, fn_ptr);
     } catch (...) {
         // Logging here is not reliable
         assert(false && "Foreign code threw an exception");
     }
-
-    task->record_stack_limit();
 }
 
 /*
@@ -72,11 +65,6 @@ extern "C" CDECL void
 upcall_call_shim_on_rust_stack(void *args, void *fn_ptr) {
     rust_task *task = rust_get_current_task();
 
-    // FIXME (#2680): Because of the hack in the other function that disables
-    // the stack limit when entering the C stack, here we restore the stack
-    // limit again.
-    task->record_stack_limit();
-
     try {
         task->call_on_rust_stack(args, fn_ptr);
     } catch (...) {
@@ -85,9 +73,6 @@ upcall_call_shim_on_rust_stack(void *args, void *fn_ptr) {
         // Logging here is not reliable
         assert(false && "Rust task failed after reentering the Rust stack");
     }
-
-    // FIXME (#2680): As above
-    record_sp_limit(0);
 }
 
 /**********************************************************************/
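Putting the last three hunks together: after this patch the C-stack shim upcall no longer touches the stack limit itself, since rust_task::call_on_c_stack() (previous hunks) now owns that bookkeeping. Reconstructed from the context lines above, the function reduces to roughly the following; it lives in the upcall translation unit, which already includes the runtime headers, so it is not self-contained here.

    extern "C" CDECL void
    upcall_call_shim_on_c_stack(void *args, void *fn_ptr) {
        rust_task *task = rust_get_current_task();

        try {
            task->call_on_c_stack(args, fn_ptr);
        } catch (...) {
            // Logging here is not reliable
            assert(false && "Foreign code threw an exception");
        }
    }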
@@ -167,3 +167,6 @@ rust_lock_cond_lock
 rust_unlock_cond_lock
 rust_wait_cond_lock
 rust_signal_cond_lock
+rust_get_task_local_data
+rust_set_task_local_data
+rust_task_local_data_atexit