rt: Use rust_task_thread's C-stack pool for native calls

This commit is contained in:
Brian Anderson
2012-02-09 01:13:32 -08:00
parent bfb80064d2
commit 79b1563abb
6 changed files with 99 additions and 31 deletions

View File

@@ -29,8 +29,6 @@ struct registers_t {
   uint32_t eip;
 } __attribute__((aligned(16)));
 
-extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);
-
 class context {
 public:
   registers_t regs;
@@ -41,10 +39,6 @@ public:
   void swap(context &out);
 
   void call(void *f, void *arg, void *sp);
 
-  void call_and_change_stacks(void *args, void *fn_ptr) {
-    __morestack(args, fn_ptr, regs.esp);
-  }
-
 };
 
 #endif

View File

@@ -28,8 +28,6 @@ struct registers_t {
   uint64_t data[RUSTRT_MAX];
 } __attribute__((aligned(16)));
 
-extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);
-
 class context {
 public:
   registers_t regs;
@@ -40,10 +38,6 @@ public:
   void swap(context &out);
 
   void call(void *f, void *arg, void *sp);
 
-  void call_and_change_stacks(void *args, void *fn_ptr) {
-    __morestack(args, fn_ptr, regs.data[RUSTRT_RSP]);
-  }
-
 };
 
 #endif

View File

@@ -88,7 +88,9 @@ rust_task::rust_task(rust_task_thread *thread, rust_task_list *state,
     propagate_failure(true),
     dynastack(this),
     cc_counter(0),
-    total_stack_sz(0)
+    total_stack_sz(0),
+    c_stack(NULL),
+    next_c_sp(0)
 {
     LOGPTR(thread, "new task", (uintptr_t)this);
     DLOG(thread, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);
@@ -166,7 +168,6 @@ cleanup_task(cleanup_args *args) {
 }
 
 // This runs on the Rust stack
-extern "C" CDECL
 void task_start_wrapper(spawn_args *a)
 {
     rust_task *task = a->task;
@@ -180,8 +181,15 @@ void task_start_wrapper(spawn_args *a)
         A(task->thread, ex == task,
           "Expected this task to be thrown for unwinding");
         threw_exception = true;
+
+        if (task->c_stack) {
+            task->return_c_stack();
+        }
     }
 
+    // We should have returned any C stack by now
+    I(task->thread, task->c_stack == NULL);
+
     rust_opaque_box* env = a->envptr;
     if(env) {
         // free the environment (which should be a unique closure).
@@ -722,10 +730,35 @@ rust_task::config_notify(chan_handle chan) {
     notify_chan = chan;
 }
 
+extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);
+
 void
 rust_task::call_on_c_stack(void *args, void *fn_ptr) {
     I(thread, on_rust_stack());
-    thread->c_context.call_and_change_stacks(args, fn_ptr);
+
+    bool borrowed_a_c_stack = false;
+    if (c_stack == NULL) {
+        c_stack = thread->borrow_c_stack();
+        next_c_sp = align_down(c_stack->end);
+        borrowed_a_c_stack = true;
+    }
+
+    __morestack(args, fn_ptr, next_c_sp);
+
+    // Note that we may not actually get here if we threw an exception,
+    // in which case we will return the c stack when the exception is caught.
+    if (borrowed_a_c_stack) {
+        return_c_stack();
+    }
+}
+
+void
+rust_task::return_c_stack() {
+    I(thread, on_rust_stack());
+    I(thread, c_stack != NULL);
+    thread->return_c_stack(c_stack);
+    c_stack = NULL;
+    next_c_sp = 0;
 }
 
 //

View File

@@ -36,6 +36,8 @@ typedef unsigned long task_result;
 #define tr_success 0
 #define tr_failure 1
 
+struct spawn_args;
+
 // std::lib::task::task_notification
 //
 // since it's currently a unary tag, we only add the fields.
@@ -104,6 +106,11 @@ rust_task : public kernel_owned<rust_task>, rust_cond
     size_t total_stack_sz;
 
 private:
+
+    // The stack used for running C code, borrowed from the scheduler thread
+    stk_seg *c_stack;
+    uintptr_t next_c_sp;
+
     // Called when the atomic refcount reaches zero
     void delete_this();
@@ -112,6 +119,10 @@ private:
     void free_stack(stk_seg *stk);
     size_t get_next_stack_size(size_t min, size_t current, size_t requested);
 
+    void return_c_stack();
+
+    friend void task_start_wrapper(spawn_args *a);
+
 public:
 
     // Only a pointer to 'name' is kept, so it must live as long as this task.

View File

@@ -61,10 +61,6 @@ rust_task_thread::~rust_task_thread() {
 #ifndef __WIN32__
     pthread_attr_destroy(&attr);
 #endif
-
-    if (cached_c_stack) {
-        destroy_stack(kernel, cached_c_stack);
-    }
 }
 
 void
@@ -72,7 +68,9 @@ rust_task_thread::activate(rust_task *task) {
     task->ctx.next = &c_context;
     DLOG(this, task, "descheduling...");
     lock.unlock();
+    prepare_c_stack();
     task->ctx.swap(c_context);
+    unprepare_c_stack();
     lock.lock();
     DLOG(this, task, "task has returned");
 }
@@ -287,6 +285,13 @@ rust_task_thread::start_main_loop() {
     DLOG(this, dom, "finished main-loop %d", id);
     lock.unlock();
 
+    I(this, !extra_c_stack);
+    if (cached_c_stack) {
+        unconfig_valgrind_stack(cached_c_stack);
+        destroy_stack(kernel, cached_c_stack);
+        cached_c_stack = NULL;
+    }
 }
 
 rust_crate_cache *
@@ -374,24 +379,51 @@ rust_task_thread::exit() {
     lock.signal();
 }
 
+// Before activating each task, make sure we have a C stack available.
+// It needs to be allocated ahead of time (while we're on our own
+// stack), because once we're on the Rust stack we won't have enough
+// room to do the allocation
+void
+rust_task_thread::prepare_c_stack() {
+    I(this, !extra_c_stack);
+    if (!cached_c_stack) {
+        cached_c_stack = create_stack(kernel, C_STACK_SIZE);
+    }
+}
+
+void
+rust_task_thread::unprepare_c_stack() {
+    if (extra_c_stack) {
+        destroy_stack(kernel, extra_c_stack);
+        extra_c_stack = NULL;
+    }
+}
+
+// NB: Runs on the Rust stack
 stk_seg *
 rust_task_thread::borrow_c_stack() {
-    if (cached_c_stack) {
-        stk_seg *your_stack = cached_c_stack;
-        cached_c_stack = NULL;
-        return your_stack;
+    I(this, cached_c_stack);
+    stk_seg *your_stack;
+    if (extra_c_stack) {
+        your_stack = extra_c_stack;
+        extra_c_stack = NULL;
     } else {
-        return create_stack(kernel, C_STACK_SIZE);
+        your_stack = cached_c_stack;
+        cached_c_stack = NULL;
     }
+    config_valgrind_stack(your_stack);
+    return your_stack;
 }
 
+// NB: Runs on the Rust stack
 void
 rust_task_thread::return_c_stack(stk_seg *stack) {
-    if (cached_c_stack) {
-        destroy_stack(kernel, stack);
+    I(this, !extra_c_stack);
+    unconfig_valgrind_stack(stack);
+    if (!cached_c_stack) {
         cached_c_stack = stack;
     } else {
+        extra_c_stack = stack;
     }
 }

View File

@@ -95,6 +95,10 @@ struct rust_task_thread : public kernel_owned<rust_task_thread>,
 private:
 
     stk_seg *cached_c_stack;
+    stk_seg *extra_c_stack;
+
+    void prepare_c_stack();
+    void unprepare_c_stack();
 
 public: