Auto merge of #51529 - nodakai:improve-sys_common-mutex, r=oli-obk

libstd: add an RAII utility for sys_common::mutex::Mutex

It is certainly debatable whether we should introduce this much sophistication into the lowest layer of a system library. In particular, `Drop::drop()` cannot be `unsafe` (IIRC there was a discussion about introducing an unsafe variant of `Drop` whose entire scope would have to be inside an `unsafe` block).
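To make the shape of the change concrete, here is a minimal, self-contained sketch of the pattern (toy types only; the real code lives in `sys_common::mutex` and is shown in the diff below): `raw_lock()`/`raw_unlock()` keep the old manual protocol, while `lock()` hands back a guard whose `Drop` releases the mutex on every exit path.

```rust
// Toy stand-ins, not the actual std internals.
struct RawMutex; // platform mutex elided

impl RawMutex {
    unsafe fn raw_lock(&self) { /* platform lock elided */ }
    unsafe fn raw_unlock(&self) { /* platform unlock elided */ }

    // New shape introduced by this PR: lock() returns an RAII guard.
    unsafe fn lock(&self) -> MutexGuard {
        self.raw_lock();
        MutexGuard(self)
    }
}

#[must_use]
struct MutexGuard<'a>(&'a RawMutex);

impl<'a> Drop for MutexGuard<'a> {
    fn drop(&mut self) {
        unsafe { self.0.raw_unlock(); }
    }
}

static LOCK: RawMutex = RawMutex;

fn main() {
    unsafe {
        // Before this PR: LOCK.lock(); ...; LOCK.unlock();
        // After: bind the guard and let Drop do the unlock.
        let _guard = LOCK.lock();
        println!("critical section");
    } // `_guard` dropped here, releasing the lock
}
```

Call sites then shrink from a `lock(); ...; unlock();` pair (plus extra `unlock()` calls on early-return and panic paths) to a single `let _guard = LOCK.lock();`.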
bors committed 2018-06-17 20:11:35 +00:00
9 changed files with 73 additions and 69 deletions

File 1 of 9

@@ -20,6 +20,9 @@ pub struct Lazy<T> {
     init: fn() -> Arc<T>,
 }
 
+#[inline]
+const fn done<T>() -> *mut Arc<T> { 1_usize as *mut _ }
+
 unsafe impl<T> Sync for Lazy<T> {}
 
 impl<T: Send + Sync + 'static> Lazy<T> {
@@ -33,17 +36,15 @@ impl<T: Send + Sync + 'static> Lazy<T> {
     pub fn get(&'static self) -> Option<Arc<T>> {
         unsafe {
-            self.lock.lock();
+            let _guard = self.lock.lock();
             let ptr = self.ptr.get();
-            let ret = if ptr.is_null() {
+            if ptr.is_null() {
                 Some(self.init())
-            } else if ptr as usize == 1 {
+            } else if ptr == done() {
                 None
             } else {
                 Some((*ptr).clone())
-            };
-            self.lock.unlock();
-            return ret
+            }
         }
     }
@@ -53,10 +54,10 @@ impl<T: Send + Sync + 'static> Lazy<T> {
         // the at exit handler). Otherwise we just return the freshly allocated
         // `Arc`.
         let registered = sys_common::at_exit(move || {
-            self.lock.lock();
-            let ptr = self.ptr.get();
-            self.ptr.set(1 as *mut _);
-            self.lock.unlock();
+            let ptr = {
+                let _guard = self.lock.lock();
+                self.ptr.replace(done())
+            };
             drop(Box::from_raw(ptr))
         });
         let ret = (self.init)();

File 2 of 9

@@ -227,7 +227,7 @@ impl<T: ?Sized> Mutex<T> {
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn lock(&self) -> LockResult<MutexGuard<T>> {
         unsafe {
-            self.inner.lock();
+            self.inner.raw_lock();
             MutexGuard::new(self)
         }
     }
@@ -454,7 +454,7 @@ impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> {
     fn drop(&mut self) {
         unsafe {
             self.__lock.poison.done(&self.__poison);
-            self.__lock.inner.unlock();
+            self.__lock.inner.raw_unlock();
         }
     }
 }

File 3 of 9

@@ -73,17 +73,15 @@ mod imp {
             CStr::from_ptr(*argv.offset(i) as *const libc::c_char).to_bytes().to_vec()
         }).collect();
-        LOCK.lock();
+        let _guard = LOCK.lock();
         let ptr = get_global_ptr();
         assert!((*ptr).is_none());
         (*ptr) = Some(box args);
-        LOCK.unlock();
     }
 
     pub unsafe fn cleanup() {
-        LOCK.lock();
+        let _guard = LOCK.lock();
         *get_global_ptr() = None;
-        LOCK.unlock();
     }
 
     pub fn args() -> Args {
@@ -96,16 +94,14 @@
     fn clone() -> Option<Vec<Vec<u8>>> {
         unsafe {
-            LOCK.lock();
+            let _guard = LOCK.lock();
             let ptr = get_global_ptr();
-            let ret = (*ptr).as_ref().map(|s| (**s).clone());
-            LOCK.unlock();
-            return ret
+            (*ptr).as_ref().map(|s| (**s).clone())
         }
     }
 
-    fn get_global_ptr() -> *mut Option<Box<Vec<Vec<u8>>>> {
-        unsafe { mem::transmute(&GLOBAL_ARGS_PTR) }
+    unsafe fn get_global_ptr() -> *mut Option<Box<Vec<Vec<u8>>>> {
+        mem::transmute(&GLOBAL_ARGS_PTR)
     }
 }

File 4 of 9

@@ -82,17 +82,15 @@ mod imp {
     static LOCK: Mutex = Mutex::new();
 
     pub unsafe fn init(argc: isize, argv: *const *const u8) {
-        LOCK.lock();
+        let _guard = LOCK.lock();
         ARGC = argc;
         ARGV = argv;
-        LOCK.unlock();
     }
 
     pub unsafe fn cleanup() {
-        LOCK.lock();
+        let _guard = LOCK.lock();
        ARGC = 0;
        ARGV = ptr::null();
-        LOCK.unlock();
     }
 
     pub fn args() -> Args {
@@ -104,13 +102,11 @@ mod imp {
         fn clone() -> Vec<OsString> {
             unsafe {
-                LOCK.lock();
-                let ret = (0..ARGC).map(|i| {
+                let _guard = LOCK.lock();
+                (0..ARGC).map(|i| {
                     let cstr = CStr::from_ptr(*ARGV.offset(i) as *const libc::c_char);
                     OsStringExt::from_vec(cstr.to_bytes().to_vec())
-                }).collect();
-                LOCK.unlock();
-                return ret
+                }).collect()
             }
         }
     }

File 5 of 9

@@ -409,10 +409,9 @@ pub unsafe fn environ() -> *mut *const *const c_char {
 /// environment variables of the current process.
 pub fn env() -> Env {
     unsafe {
-        ENV_LOCK.lock();
+        let _guard = ENV_LOCK.lock();
         let mut environ = *environ();
         if environ == ptr::null() {
-            ENV_LOCK.unlock();
             panic!("os::env() failure getting env string from OS: {}",
                    io::Error::last_os_error());
         }
@@ -423,12 +422,10 @@ pub fn env() -> Env {
             }
             environ = environ.offset(1);
         }
-        let ret = Env {
+        return Env {
             iter: result.into_iter(),
             _dont_send_or_sync_me: PhantomData,
-        };
-        ENV_LOCK.unlock();
-        return ret
+        }
     }
 }
 
 fn parse(input: &[u8]) -> Option<(OsString, OsString)> {
@@ -452,15 +449,14 @@ pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> {
     // always None as well
     let k = CString::new(k.as_bytes())?;
     unsafe {
-        ENV_LOCK.lock();
+        let _guard = ENV_LOCK.lock();
         let s = libc::getenv(k.as_ptr()) as *const libc::c_char;
         let ret = if s.is_null() {
             None
         } else {
             Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec()))
         };
-        ENV_LOCK.unlock();
-        return Ok(ret)
+        Ok(ret)
     }
 }
@@ -469,10 +465,8 @@ pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
     let v = CString::new(v.as_bytes())?;
     unsafe {
-        ENV_LOCK.lock();
-        let ret = cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(|_| ());
-        ENV_LOCK.unlock();
-        return ret
+        let _guard = ENV_LOCK.lock();
+        cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(|_| ())
     }
 }
@@ -480,10 +474,8 @@ pub fn unsetenv(n: &OsStr) -> io::Result<()> {
     let nbuf = CString::new(n.as_bytes())?;
     unsafe {
-        ENV_LOCK.lock();
-        let ret = cvt(libc::unsetenv(nbuf.as_ptr())).map(|_| ());
-        ENV_LOCK.unlock();
-        return ret
+        let _guard = ENV_LOCK.lock();
+        cvt(libc::unsetenv(nbuf.as_ptr())).map(|_| ())
     }
 }
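The `env()` hunk above also deletes the explicit `ENV_LOCK.unlock()` that used to precede the `panic!`. That works because the guard's destructor still runs while the panic unwinds. A small illustrative sketch of that behaviour (toy `Guard` type, assuming the default `panic = unwind` strategy; not the actual std internals):

```rust
use std::panic;

// Toy guard showing why the manual unlock in front of the panic! is no
// longer needed: Drop runs during unwinding as well.
struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        println!("guard dropped: the lock would be released here");
    }
}

fn env_like_lookup(fail: bool) -> &'static str {
    let _guard = Guard; // stands in for `let _guard = ENV_LOCK.lock();`
    if fail {
        // no manual unlock needed: `_guard` is dropped while unwinding
        panic!("getting env string from OS failed");
    }
    "PATH=/usr/bin"
} // `_guard` is also dropped here on the normal path

fn main() {
    println!("{}", env_like_lookup(false));
    // With panic = abort no destructors run at all, so this demo relies on
    // the default unwinding behaviour.
    let result = panic::catch_unwind(|| env_like_lookup(true));
    assert!(result.is_err()); // the guard was dropped during the unwind, too
}
```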

File 6 of 9

@@ -14,6 +14,7 @@
 use boxed::FnBox;
 use ptr;
+use mem;
 use sys_common::mutex::Mutex;
 
 type Queue = Vec<Box<FnBox()>>;
@@ -25,6 +26,8 @@ type Queue = Vec<Box<FnBox()>>;
 static LOCK: Mutex = Mutex::new();
 static mut QUEUE: *mut Queue = ptr::null_mut();
 
+const DONE: *mut Queue = 1_usize as *mut _;
+
 // The maximum number of times the cleanup routines will be run. While running
 // the at_exit closures new ones may be registered, and this count is the number
 // of times the new closures will be allowed to register successfully. After
@@ -35,7 +38,7 @@ unsafe fn init() -> bool {
     if QUEUE.is_null() {
         let state: Box<Queue> = box Vec::new();
         QUEUE = Box::into_raw(state);
-    } else if QUEUE as usize == 1 {
+    } else if QUEUE == DONE {
         // can't re-init after a cleanup
         return false
     }
@@ -44,18 +47,18 @@
 }
 
 pub fn cleanup() {
-    for i in 0..ITERS {
+    for i in 1..=ITERS {
         unsafe {
-            LOCK.lock();
-            let queue = QUEUE;
-            QUEUE = if i == ITERS - 1 {1} else {0} as *mut _;
-            LOCK.unlock();
+            let queue = {
+                let _guard = LOCK.lock();
+                mem::replace(&mut QUEUE, if i == ITERS { DONE } else { ptr::null_mut() })
+            };
 
             // make sure we're not recursively cleaning up
-            assert!(queue as usize != 1);
+            assert!(queue != DONE);
 
             // If we never called init, not need to cleanup!
-            if queue as usize != 0 {
+            if !queue.is_null() {
                 let queue: Box<Queue> = Box::from_raw(queue);
                 for to_run in *queue {
                     to_run();
@@ -66,15 +69,13 @@ pub fn cleanup() {
 }
 
 pub fn push(f: Box<FnBox()>) -> bool {
-    let mut ret = true;
     unsafe {
-        LOCK.lock();
+        let _guard = LOCK.lock();
         if init() {
             (*QUEUE).push(f);
+            true
         } else {
-            ret = false;
+            false
         }
-        LOCK.unlock();
     }
-    ret
 }
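The rewritten `cleanup()` above also shows a second idiom the guard enables: swap the shared queue out inside a small block so the guard is dropped immediately, then run the closures without holding the lock (the closures may call `push()`, which takes the same lock). A rough stand-alone model of that shape, using `std::sync::Mutex` and `Vec` as stand-ins for the raw `LOCK`/`QUEUE` pair:

```rust
use std::mem;
use std::sync::Mutex;

type Queue = Vec<Box<dyn FnOnce()>>;

// Take the queued closures while the lock is held, then run them after the
// guard has been dropped, mirroring the rewritten cleanup() above.
fn drain_and_run(queue: &Mutex<Queue>) {
    let taken = {
        let mut guard = queue.lock().unwrap();
        mem::replace(&mut *guard, Vec::new())
    }; // guard dropped here, so the lock is not held while the closures run
    for job in taken {
        job();
    }
}

fn main() {
    let jobs: Queue = vec![
        Box::new(|| println!("first at_exit handler")),
        Box::new(|| println!("second at_exit handler")),
    ];
    let queue = Mutex::new(jobs);
    drain_and_run(&queue);
}
```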

File 7 of 9

@@ -37,7 +37,15 @@ impl Mutex {
     /// Behavior is undefined if the mutex has been moved between this and any
     /// previous function call.
     #[inline]
-    pub unsafe fn lock(&self) { self.0.lock() }
+    pub unsafe fn raw_lock(&self) { self.0.lock() }
+
+    /// Calls raw_lock() and then returns an RAII guard to guarantee the mutex
+    /// will be unlocked.
+    #[inline]
+    pub unsafe fn lock(&self) -> MutexGuard {
+        self.raw_lock();
+        MutexGuard(&self.0)
+    }
 
     /// Attempts to lock the mutex without blocking, returning whether it was
     /// successfully acquired or not.
@@ -51,8 +59,11 @@ impl Mutex {
     ///
     /// Behavior is undefined if the current thread does not actually hold the
     /// mutex.
+    ///
+    /// Consider switching from the pair of raw_lock() and raw_unlock() to
+    /// lock() whenever possible.
     #[inline]
-    pub unsafe fn unlock(&self) { self.0.unlock() }
+    pub unsafe fn raw_unlock(&self) { self.0.unlock() }
 
     /// Deallocates all resources associated with this mutex.
     ///
@@ -64,3 +75,14 @@ impl Mutex {
 
 // not meant to be exported to the outside world, just the containing module
 pub fn raw(mutex: &Mutex) -> &imp::Mutex { &mutex.0 }
+
+#[must_use]
+/// A simple RAII utility for the above Mutex without the poisoning semantics.
+pub struct MutexGuard<'a>(&'a imp::Mutex);
+
+impl<'a> Drop for MutexGuard<'a> {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe { self.0.unlock(); }
+    }
+}
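One detail worth noting in the hunk above: the guard is `#[must_use]`. With a guard-based API, accidentally discarding the return value releases the lock at the end of the statement instead of at the end of the scope. A toy demonstration of that footgun (not the std type):

```rust
// Toy guard mirroring the #[must_use] MutexGuard added above.
#[must_use]
struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        println!("unlocked");
    }
}

fn lock() -> Guard {
    println!("locked");
    Guard
}

fn main() {
    lock();              // warning: unused `Guard` -- dropped (unlocked) immediately
    let _ = lock();      // no warning, but still dropped at the end of the statement
    let _guard = lock(); // held until the end of main()
    println!("critical section");
} // `_guard` dropped here
```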

File 8 of 9

@@ -162,13 +162,12 @@ impl StaticKey {
         // we just simplify the whole branch.
         if imp::requires_synchronized_create() {
             static INIT_LOCK: Mutex = Mutex::new();
-            INIT_LOCK.lock();
+            let _guard = INIT_LOCK.lock();
             let mut key = self.key.load(Ordering::SeqCst);
             if key == 0 {
                 key = imp::create(self.dtor) as usize;
                 self.key.store(key, Ordering::SeqCst);
             }
-            INIT_LOCK.unlock();
             rtassert!(key != 0);
             return key
         }

File 9 of 9

@@ -935,20 +935,17 @@ impl ThreadId {
         static mut COUNTER: u64 = 0;
 
         unsafe {
-            GUARD.lock();
+            let _guard = GUARD.lock();
 
             // If we somehow use up all our bits, panic so that we're not
             // covering up subtle bugs of IDs being reused.
             if COUNTER == ::u64::MAX {
-                GUARD.unlock();
                 panic!("failed to generate unique thread ID: bitspace exhausted");
             }
 
             let id = COUNTER;
             COUNTER += 1;
-            GUARD.unlock();
 
             ThreadId(id)
         }
     }