rt: Inline everything on the C-stack-switching path

This commit is contained in:
Brian Anderson
2012-02-09 22:15:15 -08:00
parent dff256cd19
commit d90a9d3da0
5 changed files with 155 additions and 148 deletions

View File

@@ -673,19 +673,6 @@ rust_task::record_stack_limit() {
record_sp(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE); record_sp(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE);
} }
extern "C" uintptr_t get_sp();
static bool
sp_in_stk_seg(uintptr_t sp, stk_seg *stk) {
// Not positive these bounds for sp are correct. I think that the first
// possible value for esp on a new stack is stk->end, which points to the
// address before the first value to be pushed onto a new stack. The last
// possible address we can push data to is stk->data. Regardless, there's
// so much slop at either end that we should never hit one of these
// boundaries.
return (uintptr_t)stk->data <= sp && sp <= stk->end;
}
/* /*
Called by landing pads during unwinding to figure out which Called by landing pads during unwinding to figure out which
stack segment we are currently running on, delete the others, stack segment we are currently running on, delete the others,
@@ -702,25 +689,6 @@ rust_task::reset_stack_limit() {
record_stack_limit(); record_stack_limit();
} }
/*
Returns true if we're currently running on the Rust stack
*/
bool
rust_task::on_rust_stack() {
uintptr_t sp = get_sp();
bool in_first_segment = sp_in_stk_seg(sp, stk);
if (in_first_segment) {
return true;
} else if (stk->next != NULL) {
// This happens only when calling the upcall to delete
// a stack segment
bool in_second_segment = sp_in_stk_seg(sp, stk->next);
return in_second_segment;
} else {
return false;
}
}
void void
rust_task::check_stack_canary() { rust_task::check_stack_canary() {
::check_stack_canary(stk); ::check_stack_canary(stk);
@@ -732,76 +700,6 @@ rust_task::config_notify(chan_handle chan) {
notify_chan = chan; notify_chan = chan;
} }
// This is the function that switches stacks by calling another function with
// a single void* argument while changing the stack pointer. It has a funny
// name because gdb doesn't normally like to backtrace through split stacks
// (thinks it indicates a bug), but has a special case to allow functions
// named __morestack to move the stack pointer around.
extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);
static uintptr_t
sanitize_next_sp(uintptr_t next_sp) {
// Since I'm not precisely sure where the next stack pointer sits in
// relation to where the context switch actually happened, nor in relation
// to the amount of stack needed for calling __morestack I've added some
// extra bytes here.
// FIXME: On the rust stack this potentially puts us quite far into the
// red zone. Might want to just allocate a new rust stack every time we
// switch back to rust.
const uintptr_t padding = 16;
return align_down(next_sp - padding);
}
void
rust_task::call_on_c_stack(void *args, void *fn_ptr) {
I(thread, on_rust_stack());
next_rust_sp = get_sp();
bool borrowed_a_c_stack = false;
uintptr_t sp;
if (c_stack == NULL) {
c_stack = thread->borrow_c_stack();
next_c_sp = align_down(c_stack->end);
sp = next_c_sp;
borrowed_a_c_stack = true;
} else {
sp = sanitize_next_sp(next_c_sp);
}
__morestack(args, fn_ptr, sp);
// Note that we may not actually get here if we threw an exception,
// in which case we will return the c stack when the exception is caught.
if (borrowed_a_c_stack) {
return_c_stack();
}
}
void
rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
I(thread, !on_rust_stack());
I(thread, next_rust_sp);
next_c_sp = get_sp();
uintptr_t sp = sanitize_next_sp(next_rust_sp);
__morestack(args, fn_ptr, sp);
}
void
rust_task::return_c_stack() {
I(thread, on_rust_stack());
I(thread, c_stack != NULL);
thread->return_c_stack(c_stack);
c_stack = NULL;
next_c_sp = 0;
}
// //
// Local Variables: // Local Variables:
// mode: C++ // mode: C++

View File

@@ -198,6 +198,109 @@ public:
void call_on_rust_stack(void *args, void *fn_ptr); void call_on_rust_stack(void *args, void *fn_ptr);
}; };
// Get a rough approximation of the current stack pointer
extern "C" uintptr_t get_sp();
// This is the function that switches stacks by calling another function with
// a single void* argument while changing the stack pointer. It has a funny
// name because gdb doesn't normally like to backtrace through split stacks
// (thinks it indicates a bug), but has a special case to allow functions
// named __morestack to move the stack pointer around.
extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);
inline static uintptr_t
sanitize_next_sp(uintptr_t next_sp) {
// Since I'm not precisely sure where the next stack pointer sits in
// relation to where the context switch actually happened, nor in relation
// to the amount of stack needed for calling __morestack I've added some
// extra bytes here.
// FIXME: On the rust stack this potentially puts us quite far into the
// red zone. Might want to just allocate a new rust stack every time we
// switch back to rust.
const uintptr_t padding = 16;
return align_down(next_sp - padding);
}
inline void
rust_task::call_on_c_stack(void *args, void *fn_ptr) {
I(thread, on_rust_stack());
next_rust_sp = get_sp();
bool borrowed_a_c_stack = false;
uintptr_t sp;
if (c_stack == NULL) {
c_stack = thread->borrow_c_stack();
next_c_sp = align_down(c_stack->end);
sp = next_c_sp;
borrowed_a_c_stack = true;
} else {
sp = sanitize_next_sp(next_c_sp);
}
__morestack(args, fn_ptr, sp);
// Note that we may not actually get here if we threw an exception,
// in which case we will return the c stack when the exception is caught.
if (borrowed_a_c_stack) {
return_c_stack();
}
}
inline void
rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
I(thread, !on_rust_stack());
I(thread, next_rust_sp);
next_c_sp = get_sp();
uintptr_t sp = sanitize_next_sp(next_rust_sp);
__morestack(args, fn_ptr, sp);
}
inline void
rust_task::return_c_stack() {
I(thread, on_rust_stack());
I(thread, c_stack != NULL);
thread->return_c_stack(c_stack);
c_stack = NULL;
next_c_sp = 0;
}
inline bool
sp_in_stk_seg(uintptr_t sp, stk_seg *stk) {
// Not positive these bounds for sp are correct. I think that the first
// possible value for esp on a new stack is stk->end, which points to the
// address before the first value to be pushed onto a new stack. The last
// possible address we can push data to is stk->data. Regardless, there's
// so much slop at either end that we should never hit one of these
// boundaries.
return (uintptr_t)stk->data <= sp && sp <= stk->end;
}
/*
Returns true if we're currently running on the Rust stack
*/
inline bool
rust_task::on_rust_stack() {
uintptr_t sp = get_sp();
bool in_first_segment = sp_in_stk_seg(sp, stk);
if (in_first_segment) {
return true;
} else if (stk->next != NULL) {
// This happens only when calling the upcall to delete
// a stack segment
bool in_second_segment = sp_in_stk_seg(sp, stk->next);
return in_second_segment;
} else {
return false;
}
}
// //
// Local Variables: // Local Variables:
// mode: C++ // mode: C++

View File

@@ -337,16 +337,6 @@ rust_task_thread::place_task_in_tls(rust_task *task) {
assert(!result && "Couldn't place the task in TLS!"); assert(!result && "Couldn't place the task in TLS!");
task->record_stack_limit(); task->record_stack_limit();
} }
rust_task *
rust_task_thread::get_task() {
if (!tls_initialized)
return NULL;
rust_task *task = reinterpret_cast<rust_task *>
(pthread_getspecific(task_key));
assert(task && "Couldn't get the task from TLS!");
return task;
}
#else #else
void void
rust_task_thread::init_tls() { rust_task_thread::init_tls() {
@@ -361,15 +351,6 @@ rust_task_thread::place_task_in_tls(rust_task *task) {
assert(result && "Couldn't place the task in TLS!"); assert(result && "Couldn't place the task in TLS!");
task->record_stack_limit(); task->record_stack_limit();
} }
rust_task *
rust_task_thread::get_task() {
if (!tls_initialized)
return NULL;
rust_task *task = reinterpret_cast<rust_task *>(TlsGetValue(task_key));
assert(task && "Couldn't get the task from TLS!");
return task;
}
#endif #endif
void void
@@ -402,32 +383,6 @@ rust_task_thread::unprepare_c_stack() {
} }
} }
// NB: Runs on the Rust stack
stk_seg *
rust_task_thread::borrow_c_stack() {
I(this, cached_c_stack);
stk_seg *your_stack;
if (extra_c_stack) {
your_stack = extra_c_stack;
extra_c_stack = NULL;
} else {
your_stack = cached_c_stack;
cached_c_stack = NULL;
}
return your_stack;
}
// NB: Runs on the Rust stack
void
rust_task_thread::return_c_stack(stk_seg *stack) {
I(this, !extra_c_stack);
if (!cached_c_stack) {
cached_c_stack = stack;
} else {
extra_c_stack = stack;
}
}
// //
// Local Variables: // Local Variables:
// mode: C++ // mode: C++

View File

@@ -150,6 +150,58 @@ rust_task_thread::get_log() {
return _log; return _log;
} }
#ifndef __WIN32__
inline rust_task *
rust_task_thread::get_task() {
if (!tls_initialized)
return NULL;
rust_task *task = reinterpret_cast<rust_task *>
(pthread_getspecific(task_key));
assert(task && "Couldn't get the task from TLS!");
return task;
}
#else
inline rust_task *
rust_task_thread::get_task() {
if (!tls_initialized)
return NULL;
rust_task *task = reinterpret_cast<rust_task *>(TlsGetValue(task_key));
assert(task && "Couldn't get the task from TLS!");
return task;
}
#endif
// NB: Runs on the Rust stack
inline stk_seg *
rust_task_thread::borrow_c_stack() {
I(this, cached_c_stack);
stk_seg *your_stack;
if (extra_c_stack) {
your_stack = extra_c_stack;
extra_c_stack = NULL;
} else {
your_stack = cached_c_stack;
cached_c_stack = NULL;
}
return your_stack;
}
// NB: Runs on the Rust stack
inline void
rust_task_thread::return_c_stack(stk_seg *stack) {
I(this, !extra_c_stack);
if (!cached_c_stack) {
cached_c_stack = stack;
} else {
extra_c_stack = stack;
}
}
// //
// Local Variables: // Local Variables:
// mode: C++ // mode: C++

View File

@@ -75,7 +75,6 @@ upcall_call_shim_on_c_stack(void *args, void *fn_ptr) {
abort(); abort();
} }
task = rust_task_thread::get_task();
task->record_stack_limit(); task->record_stack_limit();
} }