mv std libs to library/
library/std/src/thread/local.rs (new file, 747 lines)
@@ -0,0 +1,747 @@
//! Thread local storage

#![unstable(feature = "thread_local_internals", issue = "none")]

use crate::error::Error;
use crate::fmt;

/// A thread local storage key which owns its contents.
///
/// This key uses the fastest possible implementation available to it for the
/// target platform. It is instantiated with the [`thread_local!`] macro and the
/// primary method is the [`with`] method.
///
/// The [`with`] method yields a reference to the contained value which cannot be
/// sent across threads or escape the given closure.
///
/// # Initialization and Destruction
///
/// Initialization is dynamically performed on the first call to [`with`]
/// within a thread, and values that implement [`Drop`] get destructed when a
/// thread exits. Some caveats apply, which are explained below.
///
/// A `LocalKey`'s initializer cannot recursively depend on itself, and using
/// a `LocalKey` in this way will cause the initializer to infinitely recurse
/// on the first call to `with`.
///
/// # Examples
///
/// ```
/// use std::cell::RefCell;
/// use std::thread;
///
/// thread_local!(static FOO: RefCell<u32> = RefCell::new(1));
///
/// FOO.with(|f| {
///     assert_eq!(*f.borrow(), 1);
///     *f.borrow_mut() = 2;
/// });
///
/// // each thread starts out with the initial value of 1
/// let t = thread::spawn(move|| {
///     FOO.with(|f| {
///         assert_eq!(*f.borrow(), 1);
///         *f.borrow_mut() = 3;
///     });
/// });
///
/// // wait for the thread to complete and bail out on panic
/// t.join().unwrap();
///
/// // we retain our original value of 2 despite the child thread
/// FOO.with(|f| {
///     assert_eq!(*f.borrow(), 2);
/// });
/// ```
///
/// # Platform-specific behavior
///
/// Note that a "best effort" is made to ensure that destructors for types
/// stored in thread local storage are run, but not all platforms can guarantee
/// that destructors will be run for all types in thread local storage. For
/// example, there are a number of known caveats where destructors are not run:
///
/// 1. On Unix systems when pthread-based TLS is being used, destructors will
///    not be run for TLS values on the main thread when it exits. Note that the
///    application will exit immediately after the main thread exits as well.
/// 2. On all platforms it's possible for TLS to re-initialize other TLS slots
///    during destruction. Some platforms ensure that this cannot happen
///    infinitely by preventing re-initialization of any slot that has been
///    destroyed, but not all platforms have this guard. Those platforms that do
///    not guard typically have a synthetic limit after which point no more
///    destructors are run.
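///
/// As an illustrative sketch of the normal case (added for clarity, and
/// mirroring this module's own tests): on platforms where destructors do run,
/// a value stored in TLS by a spawned thread is dropped when that thread
/// exits.
///
/// ```
/// use std::cell::RefCell;
/// use std::sync::mpsc::{channel, Sender};
/// use std::thread;
///
/// struct Notifier(Sender<()>);
///
/// impl Drop for Notifier {
///     fn drop(&mut self) {
///         // Runs as part of TLS destruction when the owning thread exits.
///         self.0.send(()).unwrap();
///     }
/// }
///
/// thread_local!(static NOTIFIER: RefCell<Option<Notifier>> = RefCell::new(None));
///
/// let (tx, rx) = channel();
/// thread::spawn(move || {
///     NOTIFIER.with(|n| *n.borrow_mut() = Some(Notifier(tx)));
/// }).join().unwrap();
///
/// // The spawned thread has exited, so its TLS destructor has sent us a message.
/// rx.recv().unwrap();
/// ```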
///
/// [`with`]: ../../std/thread/struct.LocalKey.html#method.with
/// [`thread_local!`]: ../../std/macro.thread_local.html
/// [`Drop`]: ../../std/ops/trait.Drop.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct LocalKey<T: 'static> {
    // This outer `LocalKey<T>` type is what's going to be stored in statics,
    // but actual data inside will sometimes be tagged with #[thread_local].
    // It's not valid for a true static to reference a #[thread_local] static,
    // so we get around that by exposing an accessor through a layer of function
    // indirection (this thunk).
    //
    // Note that the thunk is itself unsafe because the returned lifetime of the
    // slot where data lives, `'static`, is not actually valid. The lifetime
    // here is actually slightly shorter than the currently running thread!
    //
    // Although this is an extra layer of indirection, it should in theory be
    // trivially devirtualizable by LLVM because the value of `inner` never
    // changes and the constant should be readonly within a crate. This mainly
    // only runs into problems when TLS statics are exported across crates.
    inner: unsafe fn() -> Option<&'static T>,
}

#[stable(feature = "std_debug", since = "1.16.0")]
impl<T: 'static> fmt::Debug for LocalKey<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("LocalKey { .. }")
    }
}

/// Declare a new thread local storage key of type [`std::thread::LocalKey`].
///
/// # Syntax
///
/// The macro wraps any number of static declarations and makes them thread local.
/// Publicity and attributes for each static are allowed. Example:
///
/// ```
/// use std::cell::RefCell;
/// thread_local! {
///     pub static FOO: RefCell<u32> = RefCell::new(1);
///
///     #[allow(unused)]
///     static BAR: RefCell<f32> = RefCell::new(1.0);
/// }
/// # fn main() {}
/// ```
///
/// See [LocalKey documentation][`std::thread::LocalKey`] for more
/// information.
///
/// [`std::thread::LocalKey`]: ../std/thread/struct.LocalKey.html
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[allow_internal_unstable(thread_local_internals)]
macro_rules! thread_local {
    // empty (base case for the recursion)
    () => {};

    // process multiple declarations
    ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr; $($rest:tt)*) => (
        $crate::__thread_local_inner!($(#[$attr])* $vis $name, $t, $init);
        $crate::thread_local!($($rest)*);
    );

    // handle a single declaration
    ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr) => (
        $crate::__thread_local_inner!($(#[$attr])* $vis $name, $t, $init);
    );
}
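
// Illustrative note (not part of the macro above): a multi-declaration
// invocation such as
//
//     thread_local! {
//         static A: u32 = 1;
//         static B: u32 = 2;
//     }
//
// first matches the "multiple declarations" arm, which emits
// `__thread_local_inner!` for `A` and recurses on `static B: u32 = 2;`;
// that in turn matches the same arm with an empty tail, and the final
// recursion on nothing hits the empty base case.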

#[doc(hidden)]
#[unstable(feature = "thread_local_internals", reason = "should not be necessary", issue = "none")]
#[macro_export]
#[allow_internal_unstable(thread_local_internals, cfg_target_thread_local, thread_local)]
#[allow_internal_unsafe]
macro_rules! __thread_local_inner {
    (@key $t:ty, $init:expr) => {
        {
            #[inline]
            fn __init() -> $t { $init }

            unsafe fn __getit() -> $crate::option::Option<&'static $t> {
                #[cfg(all(target_arch = "wasm32", not(target_feature = "atomics")))]
                static __KEY: $crate::thread::__StaticLocalKeyInner<$t> =
                    $crate::thread::__StaticLocalKeyInner::new();

                #[thread_local]
                #[cfg(all(
                    target_thread_local,
                    not(all(target_arch = "wasm32", not(target_feature = "atomics"))),
                ))]
                static __KEY: $crate::thread::__FastLocalKeyInner<$t> =
                    $crate::thread::__FastLocalKeyInner::new();

                #[cfg(all(
                    not(target_thread_local),
                    not(all(target_arch = "wasm32", not(target_feature = "atomics"))),
                ))]
                static __KEY: $crate::thread::__OsLocalKeyInner<$t> =
                    $crate::thread::__OsLocalKeyInner::new();

                __KEY.get(__init)
            }

            unsafe {
                $crate::thread::LocalKey::new(__getit)
            }
        }
    };
    ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $init:expr) => {
        $(#[$attr])* $vis const $name: $crate::thread::LocalKey<$t> =
            $crate::__thread_local_inner!(@key $t, $init);
    }
}
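
// Illustrative sketch (not emitted by the compiler verbatim): ignoring
// attributes, visibility, and the wasm/OS fallback `cfg` branches above,
// `thread_local!(static FOO: u32 = 1)` expands on a `#[thread_local]`-capable
// target to roughly:
//
//     const FOO: LocalKey<u32> = {
//         fn __init() -> u32 { 1 }
//
//         unsafe fn __getit() -> Option<&'static u32> {
//             #[thread_local]
//             static __KEY: __FastLocalKeyInner<u32> = __FastLocalKeyInner::new();
//             __KEY.get(__init)
//         }
//
//         unsafe { LocalKey::new(__getit) }
//     };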

/// An error returned by [`LocalKey::try_with`](struct.LocalKey.html#method.try_with).
#[stable(feature = "thread_local_try_with", since = "1.26.0")]
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct AccessError {
    _private: (),
}

#[stable(feature = "thread_local_try_with", since = "1.26.0")]
impl fmt::Debug for AccessError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("AccessError").finish()
    }
}

#[stable(feature = "thread_local_try_with", since = "1.26.0")]
impl fmt::Display for AccessError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt("already destroyed", f)
    }
}

#[stable(feature = "thread_local_try_with", since = "1.26.0")]
impl Error for AccessError {}

impl<T: 'static> LocalKey<T> {
    #[doc(hidden)]
    #[unstable(
        feature = "thread_local_internals",
        reason = "recently added to create a key",
        issue = "none"
    )]
    pub const unsafe fn new(inner: unsafe fn() -> Option<&'static T>) -> LocalKey<T> {
        LocalKey { inner }
    }

    /// Acquires a reference to the value in this TLS key.
    ///
    /// This will lazily initialize the value if this thread has not referenced
    /// this key yet.
    ///
    /// # Panics
    ///
    /// This function will `panic!()` if the key currently has its
    /// destructor running, and it **may** panic if the destructor has
    /// previously been run for this thread.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with<F, R>(&'static self, f: F) -> R
    where
        F: FnOnce(&T) -> R,
    {
        self.try_with(f).expect(
            "cannot access a Thread Local Storage value \
             during or after destruction",
        )
    }

    /// Acquires a reference to the value in this TLS key.
    ///
    /// This will lazily initialize the value if this thread has not referenced
    /// this key yet. If the key has been destroyed (which may happen if this is called
    /// in a destructor), this function will return an [`AccessError`](struct.AccessError.html).
    ///
    /// # Panics
    ///
    /// This function will still `panic!()` if the key is uninitialized and the
    /// key's initializer panics.
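    ///
    /// # Examples
    ///
    /// An illustrative example of the success path; the error path is only
    /// taken during or after destruction of the thread's TLS values:
    ///
    /// ```
    /// use std::cell::Cell;
    ///
    /// thread_local!(static COUNT: Cell<u32> = Cell::new(0));
    ///
    /// assert!(COUNT.try_with(|c| c.set(c.get() + 1)).is_ok());
    /// assert_eq!(COUNT.with(|c| c.get()), 1);
    /// ```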
    #[stable(feature = "thread_local_try_with", since = "1.26.0")]
    #[inline]
    pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError>
    where
        F: FnOnce(&T) -> R,
    {
        unsafe {
            let thread_local = (self.inner)().ok_or(AccessError { _private: () })?;
            Ok(f(thread_local))
        }
    }
}

mod lazy {
    use crate::cell::UnsafeCell;
    use crate::hint;
    use crate::mem;

    pub struct LazyKeyInner<T> {
        inner: UnsafeCell<Option<T>>,
    }
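
    // Illustrative call sequence (a sketch, not code that appears in this
    // module; all of these methods are `unsafe` and callers must uphold the
    // usual aliasing rules):
    //
    //     let key = LazyKeyInner::<u32>::new();   // slot starts out as `None`
    //     unsafe {
    //         assert!(key.get().is_none());       // not initialized yet
    //         key.initialize(|| 42);              // stores `Some(42)`, returns a reference
    //         assert_eq!(key.get(), Some(&42));   // now initialized
    //     }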

    impl<T> LazyKeyInner<T> {
        pub const fn new() -> LazyKeyInner<T> {
            LazyKeyInner { inner: UnsafeCell::new(None) }
        }

        pub unsafe fn get(&self) -> Option<&'static T> {
            (*self.inner.get()).as_ref()
        }

        pub unsafe fn initialize<F: FnOnce() -> T>(&self, init: F) -> &'static T {
            // Execute the initialization up front, *then* move it into our slot,
            // just in case initialization fails.
            let value = init();
            let ptr = self.inner.get();

            // Note that this can in theory just be `*ptr = Some(value)`, but
            // the compiler will currently codegen that pattern with something like:
            //
            //     ptr::drop_in_place(ptr)
            //     ptr::write(ptr, Some(value))
            //
            // Due to this pattern it's possible for the destructor of the value in
            // `ptr` (e.g., if this is being recursively initialized) to re-access
            // TLS, in which case there will be a `&` and `&mut` pointer to the same
            // value (an aliasing violation). To avoid setting the "I'm running a
            // destructor" flag we just use `mem::replace` which should sequence the
            // operations a little differently and make this safe to call.
            let _ = mem::replace(&mut *ptr, Some(value));

            // After storing `Some` we want to get a reference to the contents of
            // what we just stored. While we could use `unwrap` here and it should
            // always work, it empirically doesn't seem to always get optimized away,
            // which means that using something like `try_with` can pull in
            // panicking code and cause a large size bloat.
            match *ptr {
                Some(ref x) => x,
                None => hint::unreachable_unchecked(),
            }
        }

        #[allow(unused)]
        pub unsafe fn take(&mut self) -> Option<T> {
            (*self.inner.get()).take()
        }
    }
}

/// On some platforms like wasm32 there are no threads, so there is no need to
/// generate thread locals; we can instead just use plain statics!
#[doc(hidden)]
#[cfg(all(target_arch = "wasm32", not(target_feature = "atomics")))]
pub mod statik {
    use super::lazy::LazyKeyInner;
    use crate::fmt;

    pub struct Key<T> {
        inner: LazyKeyInner<T>,
    }

    unsafe impl<T> Sync for Key<T> {}

    impl<T> fmt::Debug for Key<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.pad("Key { .. }")
        }
    }

    impl<T> Key<T> {
        pub const fn new() -> Key<T> {
            Key { inner: LazyKeyInner::new() }
        }

        pub unsafe fn get(&self, init: fn() -> T) -> Option<&'static T> {
            let value = match self.inner.get() {
                Some(ref value) => value,
                None => self.inner.initialize(init),
            };
            Some(value)
        }
    }
}

#[doc(hidden)]
#[cfg(target_thread_local)]
pub mod fast {
    use super::lazy::LazyKeyInner;
    use crate::cell::Cell;
    use crate::fmt;
    use crate::mem;
    use crate::sys::thread_local_dtor::register_dtor;

    #[derive(Copy, Clone)]
    enum DtorState {
        Unregistered,
        Registered,
        RunningOrHasRun,
    }

    // This data structure has been carefully constructed so that the fast path
    // only contains one branch on x86. That optimization is necessary to avoid
    // duplicated tls lookups on OSX.
    //
    // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
    pub struct Key<T> {
        // If `LazyKeyInner::get` returns `None`, that indicates either:
        // * The value has never been initialized
        // * The value is being recursively initialized
        // * The value has already been destroyed or is being destroyed
        // To determine which kind of `None`, check `dtor_state`.
        //
        // This is very optimizer friendly for the fast path - initialized but
        // not yet dropped.
        inner: LazyKeyInner<T>,

        // Metadata to keep track of the state of the destructor. Remember that
        // this variable is thread-local, not global.
        dtor_state: Cell<DtorState>,
    }
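
    // Illustrative state sketch for a single thread's `Key<T>` where `T` needs
    // `Drop` (derived from `try_initialize`, `try_register_dtor` and
    // `destroy_value` below):
    //
    //     (None, Unregistered)  --try_register_dtor-->  (None, Registered)
    //     (None, Registered)    --initialize-->         (Some(v), Registered)
    //     (Some(v), Registered) --destroy_value-->      (None, RunningOrHasRun)
    //
    // Once `RunningOrHasRun` is reached, `get` returns `None` for the rest of
    // the thread's lifetime.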

    impl<T> fmt::Debug for Key<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.pad("Key { .. }")
        }
    }

    impl<T> Key<T> {
        pub const fn new() -> Key<T> {
            Key { inner: LazyKeyInner::new(), dtor_state: Cell::new(DtorState::Unregistered) }
        }

        pub unsafe fn get<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
            match self.inner.get() {
                Some(val) => Some(val),
                None => self.try_initialize(init),
            }
        }

        // `try_initialize` is only called once per fast thread local variable,
        // except in corner cases where thread_local dtors reference other
        // thread_local's, or it is being recursively initialized.
        //
        // Macos: Inlining this function can cause two `tlv_get_addr` calls to
        // be performed for every call to `Key::get`. The #[cold] hint makes
        // that less likely.
        // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
        #[cold]
        unsafe fn try_initialize<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
            if !mem::needs_drop::<T>() || self.try_register_dtor() {
                Some(self.inner.initialize(init))
            } else {
                None
            }
        }

        // `try_register_dtor` is only called once per fast thread local
        // variable, except in corner cases where thread_local dtors reference
        // other thread_local's, or it is being recursively initialized.
        unsafe fn try_register_dtor(&self) -> bool {
            match self.dtor_state.get() {
                DtorState::Unregistered => {
                    // dtor registration happens before initialization.
                    register_dtor(self as *const _ as *mut u8, destroy_value::<T>);
                    self.dtor_state.set(DtorState::Registered);
                    true
                }
                DtorState::Registered => {
                    // recursively initialized
                    true
                }
                DtorState::RunningOrHasRun => false,
            }
        }
    }

    unsafe extern "C" fn destroy_value<T>(ptr: *mut u8) {
        let ptr = ptr as *mut Key<T>;

        // Right before we run the user destructor be sure to set the
        // `Option<T>` to `None`, and `dtor_state` to `RunningOrHasRun`. This
        // causes future calls to `get` to run `try_initialize` again, which
        // will now fail, and return `None`.
        let value = (*ptr).inner.take();
        (*ptr).dtor_state.set(DtorState::RunningOrHasRun);
        drop(value);
    }
}

#[doc(hidden)]
pub mod os {
    use super::lazy::LazyKeyInner;
    use crate::cell::Cell;
    use crate::fmt;
    use crate::marker;
    use crate::ptr;
    use crate::sys_common::thread_local_key::StaticKey as OsStaticKey;

    pub struct Key<T> {
        // OS-TLS key that we'll use to key off.
        os: OsStaticKey,
        marker: marker::PhantomData<Cell<T>>,
    }
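
    // Illustrative summary (derived from `get`, `try_initialize` and
    // `destroy_value` below) of the raw value stored in the OS key:
    //
    //   * null        - this thread has not yet allocated its `Value<T>`
    //   * 1           - the destructor is running or has run; `get` returns `None`
    //   * any other   - a pointer to a leaked `Box<Value<T>>` created by
    //                   `Box::into_raw` in `try_initialize`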

    impl<T> fmt::Debug for Key<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.pad("Key { .. }")
        }
    }

    unsafe impl<T> Sync for Key<T> {}

    struct Value<T: 'static> {
        inner: LazyKeyInner<T>,
        key: &'static Key<T>,
    }

    impl<T: 'static> Key<T> {
        pub const fn new() -> Key<T> {
            Key { os: OsStaticKey::new(Some(destroy_value::<T>)), marker: marker::PhantomData }
        }

        pub unsafe fn get(&'static self, init: fn() -> T) -> Option<&'static T> {
            let ptr = self.os.get() as *mut Value<T>;
            if ptr as usize > 1 {
                if let Some(ref value) = (*ptr).inner.get() {
                    return Some(value);
                }
            }
            self.try_initialize(init)
        }

        // `try_initialize` is only called once per os thread local variable,
        // except in corner cases where thread_local dtors reference other
        // thread_local's, or it is being recursively initialized.
        unsafe fn try_initialize(&'static self, init: fn() -> T) -> Option<&'static T> {
            let ptr = self.os.get() as *mut Value<T>;
            if ptr as usize == 1 {
                // destructor is running
                return None;
            }

            let ptr = if ptr.is_null() {
                // If the lookup returned null, we haven't initialized our own
                // local copy, so do that now.
                let ptr: Box<Value<T>> = box Value { inner: LazyKeyInner::new(), key: self };
                let ptr = Box::into_raw(ptr);
                self.os.set(ptr as *mut u8);
                ptr
            } else {
                // recursive initialization
                ptr
            };

            Some((*ptr).inner.initialize(init))
        }
    }

    unsafe extern "C" fn destroy_value<T: 'static>(ptr: *mut u8) {
        // The OS TLS ensures that this key contains a NULL value when this
        // destructor starts to run. We set it back to a sentinel value of 1 to
        // ensure that any future calls to `get` for this thread will return
        // `None`.
        //
        // Note that to prevent an infinite loop we reset it back to null right
        // before we return from the destructor ourselves.
        let ptr = Box::from_raw(ptr as *mut Value<T>);
        let key = ptr.key;
        key.os.set(1 as *mut u8);
        drop(ptr);
        key.os.set(ptr::null_mut());
    }
}

#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
    use crate::cell::{Cell, UnsafeCell};
    use crate::sync::mpsc::{channel, Sender};
    use crate::thread;

    struct Foo(Sender<()>);

    impl Drop for Foo {
        fn drop(&mut self) {
            let Foo(ref s) = *self;
            s.send(()).unwrap();
        }
    }

    #[test]
    fn smoke_no_dtor() {
        thread_local!(static FOO: Cell<i32> = Cell::new(1));

        FOO.with(|f| {
            assert_eq!(f.get(), 1);
            f.set(2);
        });
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            FOO.with(|f| {
                assert_eq!(f.get(), 1);
            });
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();

        FOO.with(|f| {
            assert_eq!(f.get(), 2);
        });
    }

    #[test]
    fn states() {
        struct Foo;
        impl Drop for Foo {
            fn drop(&mut self) {
                assert!(FOO.try_with(|_| ()).is_err());
            }
        }
        thread_local!(static FOO: Foo = Foo);

        thread::spawn(|| {
            assert!(FOO.try_with(|_| ()).is_ok());
        })
        .join()
        .ok()
        .expect("thread panicked");
    }

    #[test]
    fn smoke_dtor() {
        thread_local!(static FOO: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));

        let (tx, rx) = channel();
        let _t = thread::spawn(move || unsafe {
            let mut tx = Some(tx);
            FOO.with(|f| {
                *f.get() = Some(Foo(tx.take().unwrap()));
            });
        });
        rx.recv().unwrap();
    }

    #[test]
    fn circular() {
        struct S1;
        struct S2;
        thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
        thread_local!(static K2: UnsafeCell<Option<S2>> = UnsafeCell::new(None));
        static mut HITS: u32 = 0;

        impl Drop for S1 {
            fn drop(&mut self) {
                unsafe {
                    HITS += 1;
                    if K2.try_with(|_| ()).is_err() {
                        assert_eq!(HITS, 3);
                    } else {
                        if HITS == 1 {
                            K2.with(|s| *s.get() = Some(S2));
                        } else {
                            assert_eq!(HITS, 3);
                        }
                    }
                }
            }
        }
        impl Drop for S2 {
            fn drop(&mut self) {
                unsafe {
                    HITS += 1;
                    assert!(K1.try_with(|_| ()).is_ok());
                    assert_eq!(HITS, 2);
                    K1.with(|s| *s.get() = Some(S1));
                }
            }
        }

        thread::spawn(move || {
            drop(S1);
        })
        .join()
        .ok()
        .expect("thread panicked");
    }

    #[test]
    fn self_referential() {
        struct S1;
        thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));

        impl Drop for S1 {
            fn drop(&mut self) {
                assert!(K1.try_with(|_| ()).is_err());
            }
        }

        thread::spawn(move || unsafe {
            K1.with(|s| *s.get() = Some(S1));
        })
        .join()
        .ok()
        .expect("thread panicked");
    }

    // Note that this test will deadlock if TLS destructors aren't run (this
    // requires the destructor to be run to pass the test).
    #[test]
    fn dtors_in_dtors_in_dtors() {
        struct S1(Sender<()>);
        thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
        thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));

        impl Drop for S1 {
            fn drop(&mut self) {
                let S1(ref tx) = *self;
                unsafe {
                    let _ = K2.try_with(|s| *s.get() = Some(Foo(tx.clone())));
                }
            }
        }

        let (tx, rx) = channel();
        let _t = thread::spawn(move || unsafe {
            let mut tx = Some(tx);
            K1.with(|s| *s.get() = Some(S1(tx.take().unwrap())));
        });
        rx.recv().unwrap();
    }
}

#[cfg(test)]
mod dynamic_tests {
    use crate::cell::RefCell;
    use crate::collections::HashMap;

    #[test]
    fn smoke() {
        fn square(i: i32) -> i32 {
            i * i
        }
        thread_local!(static FOO: i32 = square(3));

        FOO.with(|f| {
            assert_eq!(*f, 9);
        });
    }

    #[test]
    fn hashmap() {
        fn map() -> RefCell<HashMap<i32, i32>> {
            let mut m = HashMap::new();
            m.insert(1, 2);
            RefCell::new(m)
        }
        thread_local!(static FOO: RefCell<HashMap<i32, i32>> = map());

        FOO.with(|map| {
            assert_eq!(map.borrow()[&1], 2);
        });
    }

    #[test]
    fn refcell_vec() {
        thread_local!(static FOO: RefCell<Vec<u32>> = RefCell::new(vec![1, 2, 3]));

        FOO.with(|vec| {
            assert_eq!(vec.borrow().len(), 3);
            vec.borrow_mut().push(4);
            assert_eq!(vec.borrow()[3], 4);
        });
    }
}
library/std/src/thread/mod.rs (new file, 1787 lines)
File diff suppressed because it is too large