2019-02-11 04:23:21 +09:00
|
|
|
use crate::ffi::CStr;
|
2024-03-03 00:25:51 -08:00
|
|
|
use crate::ffi::CString;
|
2019-02-11 04:23:21 +09:00
|
|
|
use crate::io;
|
2024-01-29 23:59:09 +01:00
|
|
|
use crate::num::NonZero;
|
2021-04-14 02:37:36 +02:00
|
|
|
use crate::sys::unsupported;
|
2019-02-11 04:23:21 +09:00
|
|
|
use crate::time::Duration;
|
2017-10-22 20:01:00 -07:00
|
|
|
|
2021-04-14 02:37:36 +02:00
|
|
|
// A thread handle for a target with no thread support. The inner never
// type (`!`) makes `Thread` uninhabited: no value of this type can ever
// be constructed, so methods taking `self` (e.g. `join`) are unreachable.
pub struct Thread(!);
|
2017-10-22 20:01:00 -07:00
|
|
|
|
|
|
|
|
// Default minimum stack size. Effectively unused here since `Thread::new`
// always fails, but the constant is part of the per-platform thread
// interface — presumably read by platform-independent code; TODO confirm.
pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
|
|
|
|
|
|
|
|
|
|
impl Thread {
|
2018-11-30 10:39:12 +05:30
|
|
|
// unsafe: see thread::Builder::spawn_unchecked for safety requirements
|
2019-04-10 00:46:28 +08:00
|
|
|
pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
|
2017-10-22 20:01:00 -07:00
|
|
|
unsupported()
|
|
|
|
|
}
|
|
|
|
|
|
2021-04-28 15:51:14 +02:00
|
|
|
pub fn yield_now() {}
|
2017-10-22 20:01:00 -07:00
|
|
|
|
2021-04-28 15:51:14 +02:00
|
|
|
pub fn set_name(_name: &CStr) {}
|
2024-03-03 00:25:51 -08:00
|
|
|
pub fn get_name() -> Option<CString> {
|
|
|
|
|
None
|
|
|
|
|
}
|
2017-10-22 20:01:00 -07:00
|
|
|
|
2018-08-15 10:51:24 -07:00
|
|
|
pub fn sleep(dur: Duration) {
|
2019-02-11 04:23:21 +09:00
|
|
|
use crate::arch::wasm32;
|
|
|
|
|
use crate::cmp;
|
2018-08-15 10:51:24 -07:00
|
|
|
|
|
|
|
|
// Use an atomic wait to block the current thread artificially with a
|
|
|
|
|
// timeout listed. Note that we should never be notified (return value
|
|
|
|
|
// of 0) or our comparison should never fail (return value of 1) so we
|
|
|
|
|
// should always only resume execution through a timeout (return value
|
|
|
|
|
// 2).
|
|
|
|
|
let mut nanos = dur.as_nanos();
|
|
|
|
|
while nanos > 0 {
|
2020-06-02 07:59:11 +00:00
|
|
|
let amt = cmp::min(i64::MAX as u128, nanos);
|
2018-08-15 10:51:24 -07:00
|
|
|
let mut x = 0;
|
2020-07-18 08:43:09 -07:00
|
|
|
let val = unsafe { wasm32::memory_atomic_wait32(&mut x, 0, amt as i64) };
|
2018-08-15 10:51:24 -07:00
|
|
|
debug_assert_eq!(val, 2);
|
|
|
|
|
nanos -= amt;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-04-28 15:51:14 +02:00
|
|
|
pub fn join(self) {}
|
2017-10-22 20:01:00 -07:00
|
|
|
}
|
|
|
|
|
|
2024-01-29 23:59:09 +01:00
|
|
|
// Querying hardware parallelism is not supported on this target; this
// always returns an error via `unsupported()`.
pub fn available_parallelism() -> io::Result<NonZero<usize>> {
    unsupported()
}
|
|
|
|
|
|
2017-10-22 20:01:00 -07:00
|
|
|
pub mod guard {
|
Use a range to identify SIGSEGV in stack guards
Previously, the `guard::init()` and `guard::current()` functions were
returning a `usize` address representing the top of the stack guard,
respectively for the main thread and for spawned threads. The `SIGSEGV`
handler on `unix` targets checked if a fault was within one page below
that address, if so reporting it as a stack overflow.
Now `unix` targets report a `Range<usize>` representing the guard
memory, so it can cover arbitrary guard sizes. Non-`unix` targets which
always return `None` for guards now do so with `Option<!>`, so they
don't pay any overhead.
For `linux-gnu` in particular, the previous guard upper-bound was
`stackaddr + guardsize`, as the protected memory was *inside* the stack.
This was a glibc bug, and starting from 2.27 they are moving the guard
*past* the end of the stack. However, there's no simple way for us to
know where the guard page actually lies, so now we declare it as the
whole range of `stackaddr ± guardsize`, and any fault therein will be
called a stack overflow. This fixes #47863.
2018-01-31 11:41:29 -08:00
|
|
|
pub type Guard = !;
|
|
|
|
|
pub unsafe fn current() -> Option<Guard> {
|
|
|
|
|
None
|
|
|
|
|
}
|
|
|
|
|
pub unsafe fn init() -> Option<Guard> {
|
|
|
|
|
None
|
|
|
|
|
}
|
2017-10-22 20:01:00 -07:00
|
|
|
}
|