Move atomically to unstable::sync, and document what it actually does. Close #7872.

Ben Blum
2013-07-30 21:38:44 -04:00
parent 2e6dc161b6
commit bc7cee7bbf
5 changed files with 57 additions and 62 deletions

View File

@@ -84,7 +84,7 @@ pub struct Death {
     on_exit: Option<~fn(bool)>,
     // nesting level counter for task::unkillable calls (0 == killable).
     unkillable: int,
-    // nesting level counter for task::atomically calls (0 == can yield).
+    // nesting level counter for unstable::atomically calls (0 == can yield).
     wont_sleep: int,
     // A "spare" handle to the kill flag inside the kill handle. Used during
     // blocking/waking as an optimization to avoid two xadds on the refcount.

View File

@@ -655,44 +655,6 @@ pub unsafe fn rekillable<U>(f: &fn() -> U) -> U {
     }
 }

-/**
- * A stronger version of unkillable that also inhibits scheduling operations.
- * For use with exclusive Arcs, which use pthread mutexes directly.
- */
-pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
-    use rt::task::Task;
-
-    match context() {
-        OldTaskContext => {
-            let t = rt::rust_get_task();
-            do (|| {
-                rt::rust_task_inhibit_kill(t);
-                rt::rust_task_inhibit_yield(t);
-                f()
-            }).finally {
-                rt::rust_task_allow_yield(t);
-                rt::rust_task_allow_kill(t);
-            }
-        }
-        TaskContext => {
-            let t = Local::unsafe_borrow::<Task>();
-            do (|| {
-                // It's important to inhibit kill after inhibiting yield, because
-                // inhibit-kill might fail if we were already killed, and the
-                // inhibit-yield must happen to match the finally's allow-yield.
-                (*t).death.inhibit_yield();
-                (*t).death.inhibit_kill((*t).unwinder.unwinding);
-                f()
-            }).finally {
-                (*t).death.allow_kill((*t).unwinder.unwinding);
-                (*t).death.allow_yield();
-            }
-        }
-        // FIXME(#3095): As in unkillable().
-        _ => f()
-    }
-}
-
 #[test] #[should_fail] #[ignore(cfg(windows))]
 fn test_cant_dup_task_builder() {
     let mut builder = task();
@@ -1177,21 +1139,6 @@ fn test_unkillable_nested() {
     po.recv();
 }

-#[test] #[should_fail] #[ignore(cfg(windows))]
-fn test_atomically() {
-    unsafe { do atomically { yield(); } }
-}
-
-#[test]
-fn test_atomically2() {
-    unsafe { do atomically { } } yield(); // shouldn't fail
-}
-
-#[test] #[should_fail] #[ignore(cfg(windows))]
-fn test_atomically_nested() {
-    unsafe { do atomically { do atomically { } yield(); } }
-}
-
 #[test]
 fn test_child_doesnt_ref_parent() {
     // If the child refcounts the parent task, this will stack overflow when
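
For context on what the removed `atomically` guarded: `Exclusive` wraps its data behind a raw pthread mutex (the `rust_little_lock` type seen in the last file of this commit), and blocking on such a mutex parks the entire scheduler thread rather than just one task. The intended usage pattern looks roughly like the sketch below, where `lock` and `shared_count` are hypothetical stand-ins for the real `Exclusive` internals, not actual API:

    // Sketch only (2013-era Rust): the critical section must stay short,
    // CPU-local, and must never yield or otherwise enter the scheduler.
    unsafe {
        do atomically {
            lock.acquire();       // may block the OS thread, never yields
            *shared_count += 1;   // brief critical section
            lock.release();
        }
    }

The ordering comment in the removed body is also worth noting: yield is inhibited before kill because `inhibit_kill` can fail (unwind) if the task was already killed, and the `inhibit_yield` must already have happened so that the `finally` block's `allow_yield` stays balanced.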

View File

@@ -105,7 +105,7 @@ mod dl {
     use path;
     use ptr;
     use str;
-    use task;
+    use unstable::sync::atomically;
     use result::*;

     pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void {
@@ -120,7 +120,7 @@ mod dl {
     pub fn check_for_errors_in<T>(f: &fn()->T) -> Result<T, ~str> {
         unsafe {
-            do task::atomically {
+            do atomically {
                 let _old_error = dlerror();

                 let result = f();
@@ -164,7 +164,7 @@ mod dl {
     use libc;
     use path;
     use ptr;
-    use task;
+    use unstable::sync::atomically;
     use result::*;

     pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void {
@@ -181,7 +181,7 @@ mod dl {
     pub fn check_for_errors_in<T>(f: &fn()->T) -> Result<T, ~str> {
         unsafe {
-            do task::atomically {
+            do atomically {
                 SetLastError(0);

                 let result = f();
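
The `atomically` wrapper exists here because `dlerror()` (and `GetLastError()` on Windows) report error state that is global rather than per-task: a task switch between clearing the error and reading it back would let another task's loader call clobber the result. The full posix-side body of `check_for_errors_in` reads roughly as follows (a sketch of this era of the code, not part of the diff shown):

    do atomically {
        let _old_error = dlerror();   // clear any stale error state
        let result = f();             // e.g. a dlopen()/dlsym() call
        let last_error = dlerror();   // error produced by f(), if any
        if ptr::null() == last_error {
            Ok(result)
        } else {
            Err(str::raw::from_c_str(last_error))
        }
    }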

View File

@@ -85,7 +85,7 @@ fn test_run_in_bare_thread_exchange() {
 pub fn change_dir_locked(p: &Path, action: &fn()) -> bool {
     use os;
     use os::change_dir;
-    use task;
+    use unstable::sync::atomically;
     use unstable::finally::Finally;

     unsafe {
@@ -93,7 +93,7 @@ pub fn change_dir_locked(p: &Path, action: &fn()) -> bool {
         // in the `action` callback can cause deadlock. Doing it in
         // `task::atomically` to try to avoid that, but ... I don't know
         // this is all bogus.
-        return do task::atomically {
+        return do atomically {
             rust_take_change_dir_lock();

             do (||{
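
The deadlock the comment worries about is the classic take-lock-then-yield interleaving; `atomically` turns it into a loud runtime abort instead of a silent hang. A hypothetical interleaving, for illustration (task names invented):

    // task A: rust_take_change_dir_lock();   // holds the process-wide lock
    // task A: action() yields                // scheduler runs another task
    // task B: rust_take_change_dir_lock();   // blocks the whole OS thread
    // task A can never be rescheduled to release the lock: deadlock.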

View File

@@ -16,7 +16,6 @@ use ptr;
 use option::*;
 use either::{Either, Left, Right};
 use task;
-use task::atomically;
 use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,SeqCst};
 use unstable::finally::Finally;
 use ops::Drop;
@@ -271,6 +270,48 @@ impl<T> Drop for UnsafeAtomicRcBox<T>{
 /****************************************************************************/

+/**
+ * Enables a runtime assertion that no operation in the argument closure shall
+ * use scheduler operations (yield, recv, spawn, etc). This is for use with
+ * pthread mutexes, which may block the entire scheduler thread, rather than
+ * just one task, and is hence prone to deadlocks if mixed with yielding.
+ *
+ * NOTE: THIS DOES NOT PROVIDE LOCKING, or any sort of critical-section
+ * synchronization whatsoever. It only makes sense to use for CPU-local issues.
+ */
+// FIXME(#8140) should not be pub
+pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
+    use rt::task::Task;
+    use task::rt;
+    use rt::local::Local;
+    use rt::{context, OldTaskContext, TaskContext};
+
+    match context() {
+        OldTaskContext => {
+            let t = rt::rust_get_task();
+            do (|| {
+                rt::rust_task_inhibit_kill(t);
+                rt::rust_task_inhibit_yield(t);
+                f()
+            }).finally {
+                rt::rust_task_allow_yield(t);
+                rt::rust_task_allow_kill(t);
+            }
+        }
+        TaskContext => {
+            let t = Local::unsafe_borrow::<Task>();
+            do (|| {
+                (*t).death.inhibit_yield();
+                f()
+            }).finally {
+                (*t).death.allow_yield();
+            }
+        }
+        // FIXME(#3095): As in unkillable().
+        _ => f()
+    }
+}
+
 #[allow(non_camel_case_types)] // runtime type
 type rust_little_lock = *libc::c_void;
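
Note that the relocated body differs from the removed one: in the new-runtime (`TaskContext`) arm it now inhibits only yielding, not killing, which is exactly what the renamed comment on `Death::wont_sleep` in the first file documents. Because inhibit/allow adjust a nesting counter rather than a boolean, nested `atomically` calls compose correctly. A minimal model of that counter discipline (`YieldGate` and its methods are invented names for illustration, not the real `Death` API):

    struct YieldGate { wont_sleep: int }

    impl YieldGate {
        fn inhibit_yield(&mut self) { self.wont_sleep += 1; }
        fn allow_yield(&mut self) {
            assert!(self.wont_sleep > 0);
            self.wont_sleep -= 1;
        }
        // A scheduler operation would call this first; a non-zero count
        // is the "atomic-sleep" violation that aborts the runtime.
        fn check_may_sleep(&self) {
            if self.wont_sleep != 0 {
                fail!("illegal sleep point inside `atomically`");
            }
        }
    }
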
@@ -395,11 +436,18 @@ mod tests {
     use cell::Cell;
     use comm;
     use option::*;
-    use super::{Exclusive, UnsafeAtomicRcBox};
+    use super::{Exclusive, UnsafeAtomicRcBox, atomically};
     use task;
     use uint;
     use util;

+    #[test]
+    fn test_atomically() {
+        // NB. The whole runtime will abort on an 'atomic-sleep' violation,
+        // so we can't really test for the converse behaviour.
+        unsafe { do atomically { } } task::yield(); // oughtn't fail
+    }
+
     #[test]
     fn exclusive_new_arc() {
         unsafe {
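
The "converse behaviour" that the new test's comment mentions is what the `#[should_fail]` tests deleted in the second file used to exercise; under the new runtime an atomic-sleep violation aborts the whole process rather than failing a single task, so it can no longer live in the test suite. For reference, the violating form was:

    // Aborts the entire runtime under the new scheduler; shown only as
    // documentation of the checked invariant.
    unsafe { do atomically { task::yield(); } }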