Removing proxies and message queues.

Author: Eric Holk, 2011-07-29 11:00:44 -07:00
Committed by: Graydon Hoare
Parent: bc4e9afe25
Commit: d1dbb99984
17 changed files with 64 additions and 690 deletions


@@ -1,15 +1,14 @@
#include "rust_internal.h"
#define KLOG_(...) \
#define KLOG_(...) \
KLOG(this, kern, __VA_ARGS__)
#define KLOG_ERR_(field, ...) \
#define KLOG_ERR_(field, ...) \
KLOG_LVL(this, field, log_err, __VA_ARGS__)
rust_kernel::rust_kernel(rust_srv *srv, size_t num_threads) :
_region(srv, true),
_log(srv, NULL),
srv(srv),
_interrupt_kernel_loop(FALSE),
num_threads(num_threads),
rval(0),
live_tasks(0),
@@ -22,15 +21,9 @@ rust_kernel::rust_kernel(rust_srv *srv, size_t num_threads) :
rust_scheduler *
rust_kernel::create_scheduler(int id) {
_kernel_lock.lock();
rust_message_queue *message_queue =
new (this, "rust_message_queue") rust_message_queue(srv, this);
rust_srv *srv = this->srv->clone();
rust_scheduler *sched =
new (this, "rust_scheduler")
rust_scheduler(this, message_queue, srv, id);
rust_handle<rust_scheduler> *handle = internal_get_sched_handle(sched);
message_queue->associate(handle);
message_queues.append(message_queue);
new (this, "rust_scheduler") rust_scheduler(this, srv, id);
KLOG_("created scheduler: " PTR ", id: %d, index: %d",
sched, id, sched->list_index);
_kernel_lock.signal_all();
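For orientation, this is roughly how create_scheduler reads with the queue plumbing gone, reassembled from the surviving lines of the hunk above; the function's tail lies outside the hunk, so the unlock and return lines here are an assumption rather than text from the new file.

rust_scheduler *
rust_kernel::create_scheduler(int id) {
    _kernel_lock.lock();
    rust_srv *srv = this->srv->clone();
    rust_scheduler *sched =
        new (this, "rust_scheduler") rust_scheduler(this, srv, id);
    KLOG_("created scheduler: " PTR ", id: %d, index: %d",
          sched, id, sched->list_index);
    _kernel_lock.signal_all();
    _kernel_lock.unlock();   // assumed: not shown in the hunk
    return sched;            // assumed: not shown in the hunk
}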
@@ -43,7 +36,6 @@ rust_kernel::destroy_scheduler(rust_scheduler *sched) {
_kernel_lock.lock();
KLOG_("deleting scheduler: " PTR ", name: %s, index: %d",
sched, sched->name, sched->list_index);
sched->message_queue->disassociate();
rust_srv *srv = sched->srv;
delete sched;
delete srv;
@@ -65,46 +57,6 @@ void rust_kernel::destroy_schedulers() {
}
}
rust_handle<rust_scheduler> *
rust_kernel::internal_get_sched_handle(rust_scheduler *sched) {
rust_handle<rust_scheduler> *handle = NULL;
if (_sched_handles.get(sched, &handle) == false) {
handle = new (this, "rust_handle<rust_scheduler")
rust_handle<rust_scheduler>(this, sched->message_queue, sched);
_sched_handles.put(sched, handle);
}
return handle;
}
rust_handle<rust_task> *
rust_kernel::get_task_handle(rust_task *task) {
_kernel_lock.lock();
rust_handle<rust_task> *handle = NULL;
if (_task_handles.get(task, &handle) == false) {
handle =
new (this, "rust_handle<rust_task>")
rust_handle<rust_task>(this, task->sched->message_queue, task);
_task_handles.put(task, handle);
}
_kernel_lock.unlock();
return handle;
}
rust_handle<rust_port> *
rust_kernel::get_port_handle(rust_port *port) {
_kernel_lock.lock();
rust_handle<rust_port> *handle = NULL;
if (_port_handles.get(port, &handle) == false) {
handle = new (this, "rust_handle<rust_port>")
rust_handle<rust_port>(this,
port->task->sched->message_queue,
port);
_port_handles.put(port, handle);
}
_kernel_lock.unlock();
return handle;
}
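The three getters deleted above (scheduler, task, port) all follow the same shape: probe a hash_map cache under the kernel lock and lazily allocate a rust_handle bound to the owner's message queue on a miss. A generic sketch of that pattern, purely illustrative; the helper name and its parameters are hypothetical, not code from the tree:

template<class T> static rust_handle<T> *
get_or_make_handle(rust_kernel *kernel,
                   hash_map<T*, rust_handle<T>*> &cache,
                   rust_message_queue *queue, T *obj) {
    // Probe the cache; on a miss, allocate a handle tied to the owner's
    // message queue and remember it for next time.
    rust_handle<T> *handle = NULL;
    if (cache.get(obj, &handle) == false) {
        handle = new (kernel, "rust_handle") rust_handle<T>(kernel, queue, obj);
        cache.put(obj, handle);
    }
    return handle;
}

With the handles removed by this commit, callers deal with schedulers, tasks, and ports directly instead of going through a proxy keyed by message queue.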
void
rust_kernel::log_all_scheduler_state() {
for(size_t i = 0; i < num_threads; ++i) {
@@ -141,73 +93,8 @@ rust_kernel::fatal(char const *fmt, ...) {
va_end(args);
}
void
rust_kernel::pump_message_queues() {
for (size_t i = 0; i < message_queues.length(); i++) {
rust_message_queue *queue = message_queues[i];
if (queue->is_associated() == false) {
rust_message *message = NULL;
while (queue->dequeue(&message)) {
message->kernel_process();
delete message;
}
}
}
}
void
rust_kernel::start_kernel_loop() {
_kernel_lock.lock();
while (_interrupt_kernel_loop == false) {
_kernel_lock.wait();
pump_message_queues();
}
_kernel_lock.unlock();
}
void
rust_kernel::run() {
KLOG_("started kernel loop");
start_kernel_loop();
KLOG_("finished kernel loop");
}
void
rust_kernel::terminate_kernel_loop() {
KLOG_("terminating kernel loop");
_interrupt_kernel_loop = true;
signal_kernel_lock();
join();
}
rust_kernel::~rust_kernel() {
destroy_schedulers();
terminate_kernel_loop();
// It's possible that the message pump misses some messages because
// of races, so pump any remaining messages here. By now all domain
// threads should have been joined, so we shouldn't miss any more
// messages.
pump_message_queues();
KLOG_("freeing handles");
free_handles(_task_handles);
KLOG_("..task handles freed");
free_handles(_port_handles);
KLOG_("..port handles freed");
free_handles(_sched_handles);
KLOG_("..sched handles freed");
KLOG_("freeing queues");
rust_message_queue *queue = NULL;
while (message_queues.pop(&queue)) {
K(srv, queue->is_empty(), "Kernel message queue should be empty "
"before killing the kernel.");
delete queue;
}
}
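The comment about races above explains why the old destructor pumps the queues one final time after joining the domain threads: a message could be enqueued after the kernel loop's last pass but before the interrupt flag is observed. With the loop, handle caches, and queues all gone, the destructor most likely collapses to scheduler teardown alone; a sketch of that slimmed-down form, inferred from the lines this hunk keeps rather than copied from the new file:

rust_kernel::~rust_kernel() {
    // No kernel loop to terminate and no queues or handle caches to drain;
    // tearing down the schedulers is all that is left.
    destroy_schedulers();
}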
void *
@@ -224,26 +111,6 @@ void rust_kernel::free(void *mem) {
_region.free(mem);
}
template<class T> void
rust_kernel::free_handles(hash_map<T*, rust_handle<T>* > &map) {
T* key;
rust_handle<T> *value;
while (map.pop(&key, &value)) {
KLOG_("...freeing " PTR, value);
delete value;
}
}
void
rust_kernel::notify_message_enqueued(rust_message_queue *queue,
rust_message *message) {
// The message pump needs to handle this message if the queue is not
// associated with a domain, therefore signal the message pump.
if (queue->is_associated() == false) {
signal_kernel_lock();
}
}
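The reasoning in notify_message_enqueued is the producer side of the pump handshake: a queue with no scheduler attached has nobody to drain it, so the sender must wake the kernel loop. A hypothetical caller, assuming the queue exposes an enqueue counterpart to the dequeue used above, might look like this (illustrative only, not code from the tree):

static void
send_kernel_message(rust_kernel *kernel, rust_message_queue *queue,
                    rust_message *msg) {
    queue->enqueue(msg);                          // assumed counterpart to dequeue()
    kernel->notify_message_enqueued(queue, msg);  // wakes the pump for orphaned queues
}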
void
rust_kernel::signal_kernel_lock() {
_kernel_lock.lock();