Removing more unnecessary unsafe blocks throughout

This commit is contained in:
Alex Crichton
2013-04-23 19:33:33 -04:00
parent c089a17854
commit 4c08a8d6c3
8 changed files with 82 additions and 96 deletions

View File

@@ -231,66 +231,64 @@ unsafe fn walk_gc_roots(mem: Memory, sentinel: **Word, visitor: Visitor) {
// the stack. // the stack.
let mut reached_sentinel = ptr::is_null(sentinel); let mut reached_sentinel = ptr::is_null(sentinel);
for stackwalk::walk_stack |frame| { for stackwalk::walk_stack |frame| {
unsafe { let pc = last_ret;
let pc = last_ret; let Segment {segment: next_segment, boundary: boundary} =
let Segment {segment: next_segment, boundary: boundary} = find_segment_for_frame(frame.fp, segment);
find_segment_for_frame(frame.fp, segment); segment = next_segment;
segment = next_segment; // Each stack segment is bounded by a morestack frame. The
// Each stack segment is bounded by a morestack frame. The // morestack frame includes two return addresses, one for
// morestack frame includes two return addresses, one for // morestack itself, at the normal offset from the frame
// morestack itself, at the normal offset from the frame // pointer, and then a second return address for the
// pointer, and then a second return address for the // function prologue (which called morestack after
// function prologue (which called morestack after // determining that it had hit the end of the stack).
// determining that it had hit the end of the stack). // Since morestack itself takes two parameters, the offset
// Since morestack itself takes two parameters, the offset // for this second return address is 3 greater than the
// for this second return address is 3 greater than the // return address for morestack.
// return address for morestack. let ret_offset = if boundary { 4 } else { 1 };
let ret_offset = if boundary { 4 } else { 1 }; last_ret = *ptr::offset(frame.fp, ret_offset) as *Word;
last_ret = *ptr::offset(frame.fp, ret_offset) as *Word;
if ptr::is_null(pc) { if ptr::is_null(pc) {
loop; loop;
} }
let mut delay_reached_sentinel = reached_sentinel; let mut delay_reached_sentinel = reached_sentinel;
let sp = is_safe_point(pc); let sp = is_safe_point(pc);
match sp { match sp {
Some(sp_info) => { Some(sp_info) => {
for walk_safe_point(frame.fp, sp_info) |root, tydesc| { for walk_safe_point(frame.fp, sp_info) |root, tydesc| {
// Skip roots until we see the sentinel. // Skip roots until we see the sentinel.
if !reached_sentinel { if !reached_sentinel {
if root == sentinel { if root == sentinel {
delay_reached_sentinel = true; delay_reached_sentinel = true;
}
loop;
} }
loop;
}
// Skip null pointers, which can occur when a // Skip null pointers, which can occur when a
// unique pointer has already been freed. // unique pointer has already been freed.
if ptr::is_null(*root) { if ptr::is_null(*root) {
loop; loop;
}
if ptr::is_null(tydesc) {
// Root is a generic box.
let refcount = **root;
if mem | task_local_heap != 0 && refcount != -1 {
if !visitor(root, tydesc) { return; }
} else if mem | exchange_heap != 0 && refcount == -1 {
if !visitor(root, tydesc) { return; }
} }
} else {
if ptr::is_null(tydesc) { // Root is a non-immediate.
// Root is a generic box. if mem | stack != 0 {
let refcount = **root; if !visitor(root, tydesc) { return; }
if mem | task_local_heap != 0 && refcount != -1 {
if !visitor(root, tydesc) { return; }
} else if mem | exchange_heap != 0 && refcount == -1 {
if !visitor(root, tydesc) { return; }
}
} else {
// Root is a non-immediate.
if mem | stack != 0 {
if !visitor(root, tydesc) { return; }
}
} }
} }
}
None => ()
} }
reached_sentinel = delay_reached_sentinel; }
None => ()
} }
reached_sentinel = delay_reached_sentinel;
} }
} }

View File

@@ -156,9 +156,7 @@ pub impl PacketHeader {
unsafe fn unblock(&self) { unsafe fn unblock(&self) {
let old_task = swap_task(&mut self.blocked_task, ptr::null()); let old_task = swap_task(&mut self.blocked_task, ptr::null());
if !old_task.is_null() { if !old_task.is_null() {
unsafe { rustrt::rust_task_deref(old_task)
rustrt::rust_task_deref(old_task)
}
} }
match swap_state_acq(&mut self.state, Empty) { match swap_state_acq(&mut self.state, Empty) {
Empty | Blocked => (), Empty | Blocked => (),

View File

@@ -80,10 +80,8 @@ pub unsafe fn unsafe_borrow() -> &mut Scheduler {
} }
pub unsafe fn unsafe_borrow_io() -> &mut IoFactoryObject { pub unsafe fn unsafe_borrow_io() -> &mut IoFactoryObject {
unsafe { let sched = unsafe_borrow();
let sched = unsafe_borrow(); return sched.event_loop.io().unwrap();
return sched.event_loop.io().unwrap();
}
} }
fn tls_key() -> tls::Key { fn tls_key() -> tls::Key {

View File

@@ -98,7 +98,7 @@ pub enum uv_req_type {
pub unsafe fn malloc_handle(handle: uv_handle_type) -> *c_void { pub unsafe fn malloc_handle(handle: uv_handle_type) -> *c_void {
assert!(handle != UV_UNKNOWN_HANDLE && handle != UV_HANDLE_TYPE_MAX); assert!(handle != UV_UNKNOWN_HANDLE && handle != UV_HANDLE_TYPE_MAX);
let size = unsafe { rust_uv_handle_size(handle as uint) }; let size = rust_uv_handle_size(handle as uint);
let p = malloc(size); let p = malloc(size);
assert!(p.is_not_null()); assert!(p.is_not_null());
return p; return p;
@@ -110,7 +110,7 @@ pub unsafe fn free_handle(v: *c_void) {
pub unsafe fn malloc_req(req: uv_req_type) -> *c_void { pub unsafe fn malloc_req(req: uv_req_type) -> *c_void {
assert!(req != UV_UNKNOWN_REQ && req != UV_REQ_TYPE_MAX); assert!(req != UV_UNKNOWN_REQ && req != UV_REQ_TYPE_MAX);
let size = unsafe { rust_uv_req_size(req as uint) }; let size = rust_uv_req_size(req as uint);
let p = malloc(size); let p = malloc(size);
assert!(p.is_not_null()); assert!(p.is_not_null());
return p; return p;

View File

@@ -262,18 +262,16 @@ pub impl<T:Owned> Exclusive<T> {
// the exclusive. Supporting that is a work in progress. // the exclusive. Supporting that is a work in progress.
#[inline(always)] #[inline(always)]
unsafe fn with<U>(&self, f: &fn(x: &mut T) -> U) -> U { unsafe fn with<U>(&self, f: &fn(x: &mut T) -> U) -> U {
unsafe { let rec = get_shared_mutable_state(&self.x);
let rec = get_shared_mutable_state(&self.x); do (*rec).lock.lock {
do (*rec).lock.lock { if (*rec).failed {
if (*rec).failed { fail!(
fail!( ~"Poisoned exclusive - another task failed inside!");
~"Poisoned exclusive - another task failed inside!");
}
(*rec).failed = true;
let result = f(&mut (*rec).data);
(*rec).failed = false;
result
} }
(*rec).failed = true;
let result = f(&mut (*rec).data);
(*rec).failed = false;
result
} }
} }

View File

@@ -43,11 +43,11 @@ pub unsafe fn weaken_task(f: &fn(Port<ShutdownMsg>)) {
let task = get_task_id(); let task = get_task_id();
// Expect the weak task service to be alive // Expect the weak task service to be alive
assert!(service.try_send(RegisterWeakTask(task, shutdown_chan))); assert!(service.try_send(RegisterWeakTask(task, shutdown_chan)));
unsafe { rust_dec_kernel_live_count(); } rust_dec_kernel_live_count();
do (|| { do (|| {
f(shutdown_port.take()) f(shutdown_port.take())
}).finally || { }).finally || {
unsafe { rust_inc_kernel_live_count(); } rust_inc_kernel_live_count();
// Service may have already exited // Service may have already exited
service.send(UnregisterWeakTask(task)); service.send(UnregisterWeakTask(task));
} }

View File

@@ -2628,13 +2628,11 @@ pub fn get_item_val(ccx: @CrateContext, id: ast::node_id) -> ValueRef {
let class_ty = ty::lookup_item_type(tcx, parent_id).ty; let class_ty = ty::lookup_item_type(tcx, parent_id).ty;
// This code shouldn't be reached if the class is generic // This code shouldn't be reached if the class is generic
assert!(!ty::type_has_params(class_ty)); assert!(!ty::type_has_params(class_ty));
let lldty = unsafe { let lldty = T_fn(~[
T_fn(~[
T_ptr(T_i8()), T_ptr(T_i8()),
T_ptr(type_of(ccx, class_ty)) T_ptr(type_of(ccx, class_ty))
], ],
T_nil()) T_nil());
};
let s = get_dtor_symbol(ccx, /*bad*/copy *pt, dt.node.id, None); let s = get_dtor_symbol(ccx, /*bad*/copy *pt, dt.node.id, None);
/* Make the declaration for the dtor */ /* Make the declaration for the dtor */

View File

@@ -177,15 +177,13 @@ pub impl<T:Owned> MutexARC<T> {
*/ */
#[inline(always)] #[inline(always)]
unsafe fn access<U>(&self, blk: &fn(x: &mut T) -> U) -> U { unsafe fn access<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
unsafe { let state = get_shared_mutable_state(&self.x);
let state = get_shared_mutable_state(&self.x); // Borrowck would complain about this if the function were
// Borrowck would complain about this if the function were // not already unsafe. See borrow_rwlock, far below.
// not already unsafe. See borrow_rwlock, far below. do (&(*state).lock).lock {
do (&(*state).lock).lock { check_poison(true, (*state).failed);
check_poison(true, (*state).failed); let _z = PoisonOnFail(&mut (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed); blk(&mut (*state).data)
blk(&mut (*state).data)
}
} }
} }
@@ -195,16 +193,14 @@ pub impl<T:Owned> MutexARC<T> {
&self, &self,
blk: &fn(x: &'x mut T, c: &'c Condvar) -> U) -> U blk: &fn(x: &'x mut T, c: &'c Condvar) -> U) -> U
{ {
unsafe { let state = get_shared_mutable_state(&self.x);
let state = get_shared_mutable_state(&self.x); do (&(*state).lock).lock_cond |cond| {
do (&(*state).lock).lock_cond |cond| { check_poison(true, (*state).failed);
check_poison(true, (*state).failed); let _z = PoisonOnFail(&mut (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed); blk(&mut (*state).data,
blk(&mut (*state).data, &Condvar {is_mutex: true,
&Condvar {is_mutex: true, failed: &mut (*state).failed,
failed: &mut (*state).failed, cond: cond })
cond: cond })
}
} }
} }
} }