replace assertion macros with plain asserts

Jon Morton
2012-04-01 21:14:16 -05:00
parent 9ec21933f1
commit 413994ea3e
12 changed files with 100 additions and 124 deletions
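
Most of the changes follow one mechanical pattern: a call to one of the runtime's assertion macros (I, A, K) becomes a plain assert(), with the message, where there is one, folded into the condition as condition && "message". A minimal standalone sketch of that idiom (illustrative, not part of the diff):

    #include <cassert>
    #include <cstddef>

    void check_unit_size(std::size_t unit_sz) {
        // A string literal is always non-null, so &&-ing it onto the condition
        // leaves the condition's truth value unchanged while making the message
        // show up in the diagnostic printed when the assertion fires.
        assert(unit_sz && "Unit size must be larger than zero.");
    }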

View File

@@ -12,18 +12,18 @@ circular_buffer::circular_buffer(rust_kernel *kernel, size_t unit_sz) :
_unread(0),
_buffer((uint8_t *)kernel->malloc(_buffer_sz, "circular_buffer")) {
A(kernel, unit_sz, "Unit size must be larger than zero.");
assert(unit_sz && "Unit size must be larger than zero.");
KLOG(kernel, mem, "new circular_buffer(buffer_sz=%d, unread=%d)"
"-> circular_buffer=0x%" PRIxPTR,
_buffer_sz, _unread, this);
A(kernel, _buffer, "Failed to allocate buffer.");
assert(_buffer && "Failed to allocate buffer.");
}
circular_buffer::~circular_buffer() {
KLOG(kernel, mem, "~circular_buffer 0x%" PRIxPTR, this);
I(kernel, _buffer);
assert(_buffer);
W(kernel, _unread == 0,
"freeing circular_buffer with %d unread bytes", _unread);
kernel->free(_buffer);
@@ -31,7 +31,7 @@ circular_buffer::~circular_buffer() {
size_t
circular_buffer::initial_size() {
I(kernel, unit_sz > 0);
assert(unit_sz > 0);
return INITIAL_CIRCULAR_BUFFER_SIZE_IN_UNITS * unit_sz;
}
@@ -40,8 +40,8 @@ circular_buffer::initial_size() {
*/
void
circular_buffer::transfer(void *dst) {
I(kernel, dst);
I(kernel, _unread <= _buffer_sz);
assert(dst);
assert(_unread <= _buffer_sz);
uint8_t *ptr = (uint8_t *) dst;
@@ -53,13 +53,13 @@ circular_buffer::transfer(void *dst) {
} else {
head_sz = _buffer_sz - _next;
}
I(kernel, _next + head_sz <= _buffer_sz);
assert(_next + head_sz <= _buffer_sz);
memcpy(ptr, _buffer + _next, head_sz);
// Then copy any other items from the beginning of the buffer
I(kernel, _unread >= head_sz);
assert(_unread >= head_sz);
size_t tail_sz = _unread - head_sz;
I(kernel, head_sz + tail_sz <= _buffer_sz);
assert(head_sz + tail_sz <= _buffer_sz);
memcpy(ptr + head_sz, _buffer, tail_sz);
}
@@ -69,9 +69,9 @@ circular_buffer::transfer(void *dst) {
*/
void
circular_buffer::enqueue(void *src) {
I(kernel, src);
I(kernel, _unread <= _buffer_sz);
I(kernel, _buffer);
assert(src);
assert(_unread <= _buffer_sz);
assert(_buffer);
// Grow if necessary.
if (_unread == _buffer_sz) {
@@ -82,20 +82,20 @@ circular_buffer::enqueue(void *src) {
"unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
_unread, _next, _buffer_sz, unit_sz);
I(kernel, _unread < _buffer_sz);
I(kernel, _unread + unit_sz <= _buffer_sz);
assert(_unread < _buffer_sz);
assert(_unread + unit_sz <= _buffer_sz);
// Copy data
size_t dst_idx = _next + _unread;
I(kernel, dst_idx >= _buffer_sz || dst_idx + unit_sz <= _buffer_sz);
assert(dst_idx >= _buffer_sz || dst_idx + unit_sz <= _buffer_sz);
if (dst_idx >= _buffer_sz) {
dst_idx -= _buffer_sz;
I(kernel, _next >= unit_sz);
I(kernel, dst_idx <= _next - unit_sz);
assert(_next >= unit_sz);
assert(dst_idx <= _next - unit_sz);
}
I(kernel, dst_idx + unit_sz <= _buffer_sz);
assert(dst_idx + unit_sz <= _buffer_sz);
memcpy(&_buffer[dst_idx], src, unit_sz);
_unread += unit_sz;
@@ -109,17 +109,17 @@ circular_buffer::enqueue(void *src) {
*/
void
circular_buffer::dequeue(void *dst) {
I(kernel, unit_sz > 0);
I(kernel, _unread >= unit_sz);
I(kernel, _unread <= _buffer_sz);
I(kernel, _buffer);
assert(unit_sz > 0);
assert(_unread >= unit_sz);
assert(_unread <= _buffer_sz);
assert(_buffer);
KLOG(kernel, mem,
"circular_buffer dequeue "
"unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
_unread, _next, _buffer_sz, unit_sz);
I(kernel, _next + unit_sz <= _buffer_sz);
assert(_next + unit_sz <= _buffer_sz);
if (dst != NULL) {
memcpy(dst, &_buffer[_next], unit_sz);
}
@@ -153,7 +153,7 @@ circular_buffer::grow() {
void
circular_buffer::shrink() {
size_t new_buffer_sz = _buffer_sz / 2;
I(kernel, initial_size() <= new_buffer_sz);
assert(initial_size() <= new_buffer_sz);
KLOG(kernel, mem, "circular_buffer is shrinking to %d bytes",
new_buffer_sz);
void *new_buffer = kernel->malloc(new_buffer_sz,

View File

@@ -55,7 +55,7 @@ void memory_region::free(void *mem) {
# endif
if (_live_allocations < 1) {
_srv->fatal("live_allocs < 1", __FILE__, __LINE__, "");
assert(false && "live_allocs < 1");
}
release_alloc(mem);
maybe_poison(mem);
@@ -88,7 +88,7 @@ memory_region::realloc(void *mem, size_t orig_size) {
alloc->index, _allocation_list[alloc->index], alloc);
printf("realloc: ptr 0x%" PRIxPTR " (%s) is not in allocation_list\n",
(uintptr_t) get_data(alloc), alloc->tag);
_srv->fatal("not in allocation_list", __FILE__, __LINE__, "");
assert(false && "not in allocation_list");
}
else {
_allocation_list[newMem->index] = newMem;
@@ -166,8 +166,8 @@ memory_region::~memory_region() {
# endif
if (_live_allocations > 0) {
_srv->fatal(msg, __FILE__, __LINE__,
"%d objects", _live_allocations);
fprintf(stderr, "%s\n", msg);
assert(false);
}
if (_synchronized) { _lock.unlock(); }
}
@@ -184,7 +184,7 @@ memory_region::release_alloc(void *mem) {
if (_allocation_list[alloc->index] != alloc) {
printf("free: ptr 0x%" PRIxPTR " (%s) is not in allocation_list\n",
(uintptr_t) get_data(alloc), alloc->tag);
_srv->fatal("not in allocation_list", __FILE__, __LINE__, "");
assert(false && "not in allocation_list");
}
else {
// printf("freed index %d\n", index);

View File

@@ -450,8 +450,7 @@ rust_get_sched_id() {
extern "C" CDECL rust_sched_id
rust_new_sched(uintptr_t threads) {
rust_task *task = rust_sched_loop::get_task();
A(task->sched_loop, threads > 0,
"Can't create a scheduler with no threads, silly!");
assert(threads > 0 && "Can't create a scheduler with no threads, silly!");
return task->kernel->create_scheduler(threads);
}
@@ -606,36 +605,31 @@ rust_dbg_lock_create() {
extern "C" CDECL void
rust_dbg_lock_destroy(lock_and_signal *lock) {
rust_task *task = rust_sched_loop::get_task();
I(task->sched_loop, lock);
assert(lock);
delete lock;
}
extern "C" CDECL void
rust_dbg_lock_lock(lock_and_signal *lock) {
rust_task *task = rust_sched_loop::get_task();
I(task->sched_loop, lock);
assert(lock);
lock->lock();
}
extern "C" CDECL void
rust_dbg_lock_unlock(lock_and_signal *lock) {
rust_task *task = rust_sched_loop::get_task();
I(task->sched_loop, lock);
assert(lock);
lock->unlock();
}
extern "C" CDECL void
rust_dbg_lock_wait(lock_and_signal *lock) {
rust_task *task = rust_sched_loop::get_task();
I(task->sched_loop, lock);
assert(lock);
lock->wait();
}
extern "C" CDECL void
rust_dbg_lock_signal(lock_and_signal *lock) {
rust_task *task = rust_sched_loop::get_task();
I(task->sched_loop, lock);
assert(lock);
lock->signal();
}

View File

@@ -24,18 +24,6 @@ typedef intptr_t rust_sched_id;
typedef intptr_t rust_task_id;
typedef intptr_t rust_port_id;
#define I(dom, e) ((e) ? (void)0 : \
(dom)->srv->fatal(#e, __FILE__, __LINE__, ""))
#define W(dom, e, s, ...) ((e) ? (void)0 : \
(dom)->srv->warning(#e, __FILE__, __LINE__, s, ## __VA_ARGS__))
#define A(dom, e, s, ...) ((e) ? (void)0 : \
(dom)->srv->fatal(#e, __FILE__, __LINE__, s, ## __VA_ARGS__))
#define K(srv, e, s, ...) ((e) ? (void)0 : \
srv->fatal(#e, __FILE__, __LINE__, s, ## __VA_ARGS__))
#define PTR "0x%" PRIxPTR
// This drives our preemption scheme.
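
Removing these definitions also changes failure behavior slightly: the old macros always evaluated their condition and reported failures through the runtime's srv->fatal()/warning() hooks with printf-style context, whereas plain assert() prints the failing expression, file, and line to stderr, calls abort(), and is compiled out entirely when NDEBUG is defined.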

View File

@@ -67,13 +67,13 @@ rust_kernel::create_scheduler(size_t num_threads) {
// the scheduler reaper.
bool start_reaper = sched_table.empty();
id = max_sched_id++;
K(srv, id != INTPTR_MAX, "Hit the maximum scheduler id");
assert(id != INTPTR_MAX && "Hit the maximum scheduler id");
sched = new (this, "rust_scheduler")
rust_scheduler(this, srv, num_threads, id);
bool is_new = sched_table
.insert(std::pair<rust_sched_id,
rust_scheduler*>(id, sched)).second;
A(this, is_new, "Reusing a sched id?");
assert(is_new && "Reusing a sched id?");
if (start_reaper) {
sched_reaper.start();
}
@@ -118,7 +118,7 @@ rust_kernel::wait_for_schedulers()
rust_sched_id id = join_list.back();
join_list.pop_back();
sched_map::iterator iter = sched_table.find(id);
I(this, iter != sched_table.end());
assert(iter != sched_table.end());
rust_scheduler *sched = iter->second;
sched_table.erase(iter);
sched->join_task_threads();
@@ -175,7 +175,7 @@ rust_kernel::fail() {
rust_task_id
rust_kernel::generate_task_id() {
rust_task_id id = sync::increment(max_task_id);
K(srv, id != INTPTR_MAX, "Hit the maximum task id");
assert(id != INTPTR_MAX && "Hit the maximum task id");
return id;
}
@@ -189,7 +189,7 @@ rust_kernel::register_port(rust_port *port) {
port_table.put(new_port_id, port);
new_live_ports = port_table.count();
}
K(srv, new_port_id != INTPTR_MAX, "Hit the maximum port id");
assert(new_port_id != INTPTR_MAX && "Hit the maximum port id");
KLOG_("Registered port %" PRIdPTR, new_port_id);
KLOG_("Total outstanding ports: %d", new_live_ports);
return new_port_id;
@@ -233,7 +233,7 @@ rust_kernel::win32_require(LPCTSTR fn, BOOL ok) {
(LPTSTR) &buf, 0, NULL );
KLOG_ERR_(dom, "%s failed with error %ld: %s", fn, err, buf);
LocalFree((HLOCAL)buf);
I(this, ok);
assert(ok);
}
}
#endif

View File

@@ -51,7 +51,7 @@ void rust_port::end_detach() {
// Just take the lock to make sure that the thread that signaled
// the detach_cond isn't still holding it
scoped_lock with(ref_lock);
I(task->sched_loop, ref_count == 0);
assert(ref_count == 0);
}
void rust_port::send(void *sptr) {
@@ -61,7 +61,7 @@ void rust_port::send(void *sptr) {
buffer.enqueue(sptr);
A(kernel, !buffer.is_empty(),
assert(!buffer.is_empty() &&
"rust_chan::transmit with nothing to send.");
if (task->blocked_on(this)) {

View File

@@ -10,12 +10,12 @@ rust_port_selector::select(rust_task *task, rust_port **dptr,
rust_port **ports,
size_t n_ports, uintptr_t *yield) {
I(task->sched_loop, this->ports == NULL);
I(task->sched_loop, this->n_ports == 0);
I(task->sched_loop, dptr != NULL);
I(task->sched_loop, ports != NULL);
I(task->sched_loop, n_ports != 0);
I(task->sched_loop, yield != NULL);
assert(this->ports == NULL);
assert(this->n_ports == 0);
assert(dptr != NULL);
assert(ports != NULL);
assert(n_ports != 0);
assert(yield != NULL);
*yield = false;
size_t locks_taken = 0;
@@ -31,7 +31,7 @@ rust_port_selector::select(rust_task *task, rust_port **dptr,
for (size_t i = 0; i < n_ports; i++) {
size_t k = (i + j) % n_ports;
rust_port *port = ports[k];
I(task->sched_loop, port != NULL);
assert(port != NULL);
port->lock.lock();
locks_taken++;
@@ -46,7 +46,7 @@ rust_port_selector::select(rust_task *task, rust_port **dptr,
if (!found_msg) {
this->ports = ports;
this->n_ports = n_ports;
I(task->sched_loop, task->rendezvous_ptr == NULL);
assert(task->rendezvous_ptr == NULL);
task->rendezvous_ptr = (uintptr_t*)dptr;
task->block(this, "waiting for select rendezvous");

View File

@@ -130,7 +130,7 @@ rust_sched_loop::reap_dead_tasks() {
void
rust_sched_loop::release_task(rust_task *task) {
// Nobody should have a ref to the task at this point
I(this, task->get_ref_count() == 0);
assert(task->get_ref_count() == 0);
// Now delete the task, which will require using this thread's
// memory region.
delete task;
@@ -149,9 +149,9 @@ rust_sched_loop::release_task(rust_task *task) {
rust_task *
rust_sched_loop::schedule_task() {
lock.must_have_lock();
I(this, this);
assert(this);
// FIXME: in the face of failing tasks, this is not always right.
// I(this, n_live_tasks() > 0);
// assert(n_live_tasks() > 0);
if (running_tasks.length() > 0) {
size_t k = isaac_rand(&rctx);
// Look around for a runnable task, starting at k.
@@ -190,14 +190,14 @@ rust_sched_loop::log_state() {
void
rust_sched_loop::on_pump_loop(rust_signal *signal) {
I(this, pump_signal == NULL);
I(this, signal != NULL);
assert(pump_signal == NULL);
assert(signal != NULL);
pump_signal = signal;
}
void
rust_sched_loop::pump_loop() {
I(this, pump_signal != NULL);
assert(pump_signal != NULL);
pump_signal->signal();
}
@@ -209,8 +209,7 @@ rust_sched_loop::run_single_turn() {
lock.lock();
if (!should_exit) {
A(this, dead_task == NULL,
"Tasks should only die after running");
assert(dead_task == NULL && "Tasks should only die after running");
DLOG(this, dom, "worker %d, number_of_live_tasks = %d",
id, number_of_live_tasks());
@@ -227,7 +226,7 @@ rust_sched_loop::run_single_turn() {
return sched_loop_state_block;
}
I(this, scheduled_task->running());
assert(scheduled_task->running());
DLOG(this, task,
"activating task %s 0x%" PRIxPTR
@@ -256,15 +255,15 @@ rust_sched_loop::run_single_turn() {
lock.unlock();
return sched_loop_state_keep_going;
} else {
A(this, running_tasks.is_empty(), "Should have no running tasks");
A(this, blocked_tasks.is_empty(), "Should have no blocked tasks");
A(this, dead_task == NULL, "Should have no dead tasks");
assert(running_tasks.is_empty() && "Should have no running tasks");
assert(blocked_tasks.is_empty() && "Should have no blocked tasks");
assert(dead_task == NULL && "Should have no dead tasks");
DLOG(this, dom, "finished main-loop %d", id);
lock.unlock();
I(this, !extra_c_stack);
assert(!extra_c_stack);
if (cached_c_stack) {
destroy_stack(kernel->region(), cached_c_stack);
cached_c_stack = NULL;
@@ -326,7 +325,7 @@ rust_sched_loop::transition(rust_task *task,
"task %s " PTR " state change '%s' -> '%s' while in '%s'",
name, (uintptr_t)this, state_name(src), state_name(dst),
state_name(task->get_state()));
I(this, task->get_state() == src);
assert(task->get_state() == src);
rust_task_list *src_list = state_list(src);
if (src_list) {
src_list->remove(task);
@@ -336,7 +335,7 @@ rust_sched_loop::transition(rust_task *task,
dst_list->append(task);
}
if (dst == task_state_dead) {
I(this, dead_task == NULL);
assert(dead_task == NULL);
dead_task = task;
}
task->set_state(dst, cond, cond_name);
@@ -388,7 +387,7 @@ rust_sched_loop::exit() {
// room to do the allocation
void
rust_sched_loop::prepare_c_stack(rust_task *task) {
I(this, !extra_c_stack);
assert(!extra_c_stack);
if (!cached_c_stack && !task->have_c_stack()) {
cached_c_stack = create_stack(kernel->region(), C_STACK_SIZE);
}

View File

@@ -165,7 +165,7 @@ rust_sched_loop::get_task() {
// NB: Runs on the Rust stack
inline stk_seg *
rust_sched_loop::borrow_c_stack() {
I(this, cached_c_stack);
assert(cached_c_stack);
stk_seg *your_stack;
if (extra_c_stack) {
your_stack = extra_c_stack;
@@ -180,7 +180,7 @@ rust_sched_loop::borrow_c_stack() {
// NB: Runs on the Rust stack
inline void
rust_sched_loop::return_c_stack(stk_seg *stack) {
I(this, !extra_c_stack);
assert(!extra_c_stack);
if (!cached_c_stack) {
cached_c_stack = stack;
} else {

View File

@@ -70,7 +70,7 @@ rust_task::delete_this()
/* FIXME: tighten this up, there are some more
assertions that hold at task-lifecycle events. */
I(sched_loop, ref_count == 0); // ||
assert(ref_count == 0); // ||
// (ref_count == 1 && this == sched->root_task));
sched_loop->release_task(this);
@@ -125,7 +125,7 @@ cleanup_task(cleanup_args *args) {
#ifndef __WIN32__
task->conclude_failure();
#else
A(task->sched_loop, false, "Shouldn't happen");
assert(false && "Shouldn't happen");
#endif
}
}
@@ -141,8 +141,7 @@ void task_start_wrapper(spawn_args *a)
// must have void return type, we can safely pass 0.
a->f(0, a->envptr, a->argptr);
} catch (rust_task *ex) {
A(task->sched_loop, ex == task,
"Expected this task to be thrown for unwinding");
assert(ex == task && "Expected this task to be thrown for unwinding");
threw_exception = true;
if (task->c_stack) {
@@ -155,7 +154,7 @@ void task_start_wrapper(spawn_args *a)
}
// We should have returned any C stack by now
I(task->sched_loop, task->c_stack == NULL);
assert(task->c_stack == NULL);
rust_opaque_box* env = a->envptr;
if(env) {
@@ -181,7 +180,7 @@ rust_task::start(spawn_fn spawnee_fn,
" with env 0x%" PRIxPTR " and arg 0x%" PRIxPTR,
spawnee_fn, envptr, argptr);
I(sched_loop, stk->data != NULL);
assert(stk->data != NULL);
char *sp = (char *)stk->end;
@@ -220,7 +219,7 @@ rust_task::must_fail_from_being_killed_unlocked() {
void
rust_task::yield(bool *killed) {
if (must_fail_from_being_killed()) {
I(sched_loop, !blocked());
assert(!blocked());
*killed = true;
}
@@ -389,8 +388,8 @@ rust_task::block(rust_cond *on, const char* name) {
LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
(uintptr_t) on, (uintptr_t) cond);
A(sched_loop, cond == NULL, "Cannot block an already blocked task.");
A(sched_loop, on != NULL, "Cannot block on a NULL object.");
assert(cond == NULL && "Cannot block an already blocked task.");
assert(on != NULL && "Cannot block on a NULL object.");
transition(task_state_running, task_state_blocked, on, name);
@@ -399,11 +398,10 @@ rust_task::block(rust_cond *on, const char* name) {
void
rust_task::wakeup(rust_cond *from) {
A(sched_loop, cond != NULL, "Cannot wake up unblocked task.");
assert(cond != NULL && "Cannot wake up unblocked task.");
LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
(uintptr_t) cond, (uintptr_t) from);
A(sched_loop, cond == from,
"Cannot wake up blocked task on wrong condition.");
assert(cond == from && "Cannot wake up blocked task on wrong condition.");
transition(task_state_blocked, task_state_running, NULL, "none");
}
@@ -462,7 +460,7 @@ rust_task::get_next_stack_size(size_t min, size_t current, size_t requested) {
sz = std::max(sz, next);
LOG(this, mem, "next stack size: %" PRIdPTR, sz);
I(sched_loop, requested <= sz);
assert(requested <= sz);
return sz;
}
@@ -539,7 +537,7 @@ void
rust_task::cleanup_after_turn() {
// Delete any spare stack segments that were left
// behind by calls to prev_stack
I(sched_loop, stk);
assert(stk);
while (stk->next) {
stk_seg *new_next = stk->next->next;
free_stack(stk->next);
@@ -569,8 +567,7 @@ reset_stack_limit_on_c_stack(reset_args *args) {
uintptr_t sp = args->sp;
while (!sp_in_stk_seg(sp, task->stk)) {
task->stk = task->stk->prev;
A(task->sched_loop, task->stk != NULL,
"Failed to find the current stack");
assert(task->stk != NULL && "Failed to find the current stack");
}
task->record_stack_limit();
}
@@ -598,10 +595,10 @@ rust_task::check_stack_canary() {
void
rust_task::delete_all_stacks() {
I(sched_loop, !on_rust_stack());
assert(!on_rust_stack());
// Delete all the stacks. There may be more than one if the task failed
// and no landing pads stopped to clean up.
I(sched_loop, stk->next == NULL);
assert(stk->next == NULL);
while (stk != NULL) {
stk_seg *prev = stk->prev;
free_stack(stk);

View File

@@ -304,7 +304,7 @@ sanitize_next_sp(uintptr_t next_sp) {
inline void
rust_task::call_on_c_stack(void *args, void *fn_ptr) {
// Too expensive to check
// I(thread, on_rust_stack());
// assert(on_rust_stack());
uintptr_t prev_rust_sp = next_rust_sp;
next_rust_sp = get_sp();
@@ -334,9 +334,9 @@ rust_task::call_on_c_stack(void *args, void *fn_ptr) {
inline void
rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
// Too expensive to check
// I(thread, !on_rust_stack());
A(sched_loop, get_sp_limit() != 0, "Stack must be configured");
I(sched_loop, next_rust_sp);
// assert(!on_rust_stack());
assert(get_sp_limit() != 0 && "Stack must be configured");
assert(next_rust_sp);
bool had_reentered_rust_stack = reentered_rust_stack;
reentered_rust_stack = true;
@@ -357,8 +357,8 @@ rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
inline void
rust_task::return_c_stack() {
// Too expensive to check
// I(thread, on_rust_stack());
I(sched_loop, c_stack != NULL);
// assert(on_rust_stack());
assert(c_stack != NULL);
sched_loop->return_c_stack(c_stack);
c_stack = NULL;
next_c_sp = 0;
@@ -368,8 +368,8 @@ rust_task::return_c_stack() {
inline void *
rust_task::next_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
new_stack_fast(stk_sz + args_sz);
A(sched_loop, stk->end - (uintptr_t)stk->data >= stk_sz + args_sz,
"Did not receive enough stack");
assert(stk->end - (uintptr_t)stk->data >= stk_sz + args_sz
&& "Did not receive enough stack");
uint8_t *new_sp = (uint8_t*)stk->end;
// Push the function arguments to the new stack
new_sp = align_down(new_sp - args_sz);
@@ -438,11 +438,10 @@ record_sp_limit(void *limit);
inline void
rust_task::record_stack_limit() {
I(sched_loop, stk);
A(sched_loop,
(uintptr_t)stk->end - RED_ZONE_SIZE
- (uintptr_t)stk->data >= LIMIT_OFFSET,
"Stack size must be greater than LIMIT_OFFSET");
assert(stk);
assert((uintptr_t)stk->end - RED_ZONE_SIZE
- (uintptr_t)stk->data >= LIMIT_OFFSET
&& "Stack size must be greater than LIMIT_OFFSET");
record_sp_limit(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE);
}

View File

@@ -61,11 +61,10 @@ isaac_init(rust_kernel *kernel, randctx *rctx)
CryptReleaseContext(hProv, 0));
#else
int fd = open("/dev/urandom", O_RDONLY);
I(kernel, fd > 0);
I(kernel,
read(fd, (void*) &rctx->randrsl, sizeof(rctx->randrsl))
assert(fd > 0);
assert(read(fd, (void*) &rctx->randrsl, sizeof(rctx->randrsl))
== sizeof(rctx->randrsl));
I(kernel, close(fd) == 0);
assert(close(fd) == 0);
#endif
}