rt: Change the scheme used for terminating the kernel

Instead of joining on the scheduler threads, keep a count of active
schedulers. When there are no more schedulers, raise a signal for the main
thread to continue.

This will be required once schedulers can be added and removed from the
running kernel.
commit f39e64d56a
parent 6eafe5d772
Author: Brian Anderson
Date:   2012-02-04 14:54:10 -08:00

5 changed files with 58 additions and 11 deletions
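
The new scheme is a counted-termination handshake: the kernel tracks how many
schedulers are alive, each scheduler decrements the count on its way out, and
the one that takes the count to zero signals the waiting main thread. Below is
a minimal standalone sketch of the same idea, using std::mutex and
std::condition_variable in place of the runtime's lock_and_signal; all names
are illustrative, not the runtime's API.

    #include <condition_variable>
    #include <mutex>
    #include <thread>
    #include <vector>

    std::mutex sched_lock;              // cf. rust_kernel::sched_lock
    std::condition_variable sched_cv;   // lock_and_signal bundles lock + signal
    unsigned live_schedulers = 0;       // cf. rust_kernel::live_schedulers

    // Each scheduler calls this as its last act (cf. release_scheduler()).
    void release_scheduler() {
        std::lock_guard<std::mutex> with(sched_lock);
        if (--live_schedulers == 0) {
            // We're all done. Tell the main thread to continue.
            sched_cv.notify_one();
        }
    }

    int main() {
        const unsigned n = 4;
        live_schedulers = n;            // set before any scheduler can exit
        std::vector<std::thread> scheds;
        for (unsigned i = 0; i < n; ++i)
            scheds.emplace_back(release_scheduler);  // trivial scheduler body

        // cf. start_schedulers(): block until the count hits zero. The
        // predicate also covers schedulers that already exited before the
        // wait began, as well as spurious wakeups.
        {
            std::unique_lock<std::mutex> with(sched_lock);
            sched_cv.wait(with, [] { return live_schedulers == 0; });
        }
        for (std::thread &t : scheds) t.join();  // std::thread must be joined
        return 0;
    }

Unlike the old join-based scheme, the main thread never touches the scheduler
threads themselves; it only sleeps on the counter, which is what makes it
possible to add and remove schedulers while the kernel runs.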

src/rt/rust_kernel.cpp

@@ -18,6 +18,11 @@ rust_kernel::rust_kernel(rust_srv *srv, size_t num_threads) :
 {
     sched = new (this, "rust_scheduler")
         rust_scheduler(this, srv, num_threads);
+    live_schedulers = 1;
 }
 
+rust_kernel::~rust_kernel() {
+    delete sched;
+}
+
 void
@@ -41,10 +46,6 @@ rust_kernel::fatal(char const *fmt, ...) {
     va_end(args);
 }
 
-rust_kernel::~rust_kernel() {
-    delete sched;
-}
-
 void *
 rust_kernel::malloc(size_t size, const char *tag) {
     return _region.malloc(size, tag);
@@ -61,8 +62,16 @@ void rust_kernel::free(void *mem) {
 int rust_kernel::start_schedulers()
 {
+    I(this, !sched_lock.lock_held_by_current_thread());
     sched->start_task_threads();
-    return rval;
+    {
+        scoped_lock with(sched_lock);
+        // Schedulers could possibly have already exited
+        if (live_schedulers != 0) {
+            sched_lock.wait();
+        }
+        return rval;
+    }
 }
rust_scheduler *
@@ -70,6 +79,17 @@ rust_kernel::get_default_scheduler() {
     return sched;
 }
 
+void
+rust_kernel::release_scheduler() {
+    I(this, !sched_lock.lock_held_by_current_thread());
+    scoped_lock with(sched_lock);
+    --live_schedulers;
+    if (live_schedulers == 0) {
+        // We're all done. Tell the main thread to continue
+        sched_lock.signal();
+    }
+}
+
 void
 rust_kernel::fail() {
     // FIXME: On windows we're getting "Application has requested the

src/rt/rust_kernel.h

@@ -35,6 +35,13 @@ private:
     lock_and_signal rval_lock;
     int rval;
 
+    // Protects live_schedulers
+    lock_and_signal sched_lock;
+    // Tracks the number of schedulers currently running.
+    // When this hits 0 we will signal the sched_lock and the
+    // kernel will terminate.
+    uintptr_t live_schedulers;
+
 public:
     struct rust_env *env;
@@ -53,6 +60,8 @@ public:
     int start_schedulers();
     rust_scheduler* get_default_scheduler();
+    // Called by a scheduler to indicate that it is terminating
+    void release_scheduler();
 
 #ifdef __WIN32__
     void win32_require(LPCTSTR fn, BOOL ok);

src/rt/rust_scheduler.cpp

@@ -7,6 +7,7 @@ rust_scheduler::rust_scheduler(rust_kernel *kernel,
     kernel(kernel),
     srv(srv),
     env(srv->env),
+    live_threads(num_threads),
     num_threads(num_threads)
 {
     isaac_init(kernel, &rctx);
@@ -59,11 +60,6 @@ rust_scheduler::start_task_threads()
         rust_task_thread *thread = threads[i];
         thread->start();
     }
-
-    for(size_t i = 0; i < num_threads; ++i) {
-        rust_task_thread *thread = threads[i];
-        thread->join();
-    }
 }
void
@@ -102,3 +98,16 @@ size_t
 rust_scheduler::number_of_threads() {
     return num_threads;
 }
+
+void
+rust_scheduler::release_task_thread() {
+    I(this, !lock.lock_held_by_current_thread());
+    uintptr_t new_live_threads;
+    {
+        scoped_lock with(lock);
+        new_live_threads = --live_threads;
+    }
+    if (new_live_threads == 0) {
+        kernel->release_scheduler();
+    }
+}
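
A detail worth noting in release_task_thread above: the count is decremented
under the scheduler's own lock, but the captured value is tested and
kernel->release_scheduler() is called only after that lock is dropped, so the
scheduler's lock and the kernel's sched_lock are never held at the same time.
A hedged sketch of the pattern, with hypothetical *_like stand-ins rather
than the runtime's classes:

    #include <cstdint>
    #include <iostream>
    #include <mutex>

    // Hypothetical stand-ins for rust_kernel and rust_scheduler; they exist
    // only to show the locking pattern.
    struct kernel_like {
        void release_scheduler() { std::cout << "scheduler released\n"; }
    };

    struct scheduler_like {
        std::mutex lock;                 // cf. the scheduler's `lock`
        std::uintptr_t live_threads = 0;
        kernel_like *kernel = nullptr;

        void release_task_thread() {
            std::uintptr_t new_live_threads;
            {
                // Decrement while holding our own lock...
                std::lock_guard<std::mutex> with(lock);
                new_live_threads = --live_threads;
            }
            // ...but call back into the kernel only after dropping it, so
            // two locks are never held at once and no lock-order cycle can
            // arise between scheduler and kernel.
            if (new_live_threads == 0)
                kernel->release_scheduler();
        }
    };

    int main() {
        kernel_like k;
        scheduler_like s;
        s.live_threads = 2;
        s.kernel = &k;
        s.release_task_thread();         // 2 -> 1: nothing happens
        s.release_task_thread();         // 1 -> 0: kernel is notified
        return 0;
    }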

src/rt/rust_scheduler.h

@@ -10,9 +10,13 @@ public:
     rust_srv *srv;
     rust_env *env;
 private:
+    // Protects the random number context and live_threads
     lock_and_signal lock;
-    array_list<rust_task_thread *> threads;
+    // When this hits zero we'll tell the kernel to release us
+    uintptr_t live_threads;
     randctx rctx;
+
+    array_list<rust_task_thread *> threads;
     const size_t num_threads;
 
     void create_task_threads();
@@ -31,8 +35,12 @@ public:
                            const char *name,
                            size_t init_stack_sz);
     rust_task_id create_task(rust_task *spawner, const char *name);
     void exit();
     size_t number_of_threads();
+
+    // Called by each thread when it terminates. When all threads
+    // terminate the scheduler does as well.
+    void release_task_thread();
 };
 
 #endif /* RUST_SCHEDULER_H */

src/rt/rust_task_thread.cpp

@@ -296,6 +296,7 @@ rust_task_thread::create_task(rust_task *spawner, const char *name,
 void rust_task_thread::run() {
     this->start_main_loop();
+    sched->release_task_thread();
 }
 
 #ifndef _WIN32