Add more from the std
This commit is contained in:
167
crates/std/src/sync/barrier.rs
Normal file
167
crates/std/src/sync/barrier.rs
Normal file
@@ -0,0 +1,167 @@
|
||||
use crate::fmt;
|
||||
use core::panic::RefUnwindSafe;
|
||||
use crate::sync::nonpoison::{Condvar, Mutex};
|
||||
|
||||
/// A barrier enables multiple threads to synchronize the beginning
/// of some computation.
///
/// # Examples
///
/// ```
/// use std::sync::Barrier;
/// use std::thread;
///
/// let n = 10;
/// let barrier = Barrier::new(n);
/// thread::scope(|s| {
///     for _ in 0..n {
///         // The same messages will be printed together.
///         // You will NOT see any interleaving.
///         s.spawn(|| {
///             println!("before wait");
///             barrier.wait();
///             println!("after wait");
///         });
///     }
/// });
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Barrier {
    // Shared counter/generation state, guarded by the (nonpoison) mutex.
    lock: Mutex<BarrierState>,
    // Condition variable on which non-leader threads block in `wait`.
    cvar: Condvar,
    // Number of threads that must rendezvous before the barrier releases.
    num_threads: usize,
}

#[stable(feature = "unwind_safe_lock_refs", since = "1.12.0")]
impl RefUnwindSafe for Barrier {}

// The inner state of a double barrier
struct BarrierState {
    // How many threads have arrived in the current generation.
    count: usize,
    // Incremented each time the barrier releases; lets waiters distinguish
    // "my generation finished" from a spurious wakeup.
    generation_id: usize,
}
|
||||
|
||||
/// A `BarrierWaitResult` is returned by [`Barrier::wait()`] when all threads
/// in the [`Barrier`] have rendezvoused.
///
/// # Examples
///
/// ```
/// use std::sync::Barrier;
///
/// let barrier = Barrier::new(1);
/// let barrier_wait_result = barrier.wait();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
// The inner bool is `true` only for the single "leader" thread.
pub struct BarrierWaitResult(bool);
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
|
||||
impl fmt::Debug for Barrier {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("Barrier").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl Barrier {
    /// Creates a new barrier that can block a given number of threads.
    ///
    /// A barrier will block all threads which call [`wait()`] until the `n`th thread calls [`wait()`],
    /// and then wake up all threads at once.
    ///
    /// [`wait()`]: Barrier::wait
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Barrier;
    ///
    /// let barrier = Barrier::new(10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_barrier", since = "1.78.0")]
    #[must_use]
    #[inline]
    pub const fn new(n: usize) -> Barrier {
        Barrier {
            lock: Mutex::new(BarrierState { count: 0, generation_id: 0 }),
            cvar: Condvar::new(),
            num_threads: n,
        }
    }

    /// Blocks the current thread until all threads have rendezvoused here.
    ///
    /// Barriers are re-usable after all threads have rendezvoused once, and can
    /// be used continuously.
    ///
    /// A single (arbitrary) thread will receive a [`BarrierWaitResult`] that
    /// returns `true` from [`BarrierWaitResult::is_leader()`] when returning
    /// from this function, and all other threads will receive a result that
    /// will return `false` from [`BarrierWaitResult::is_leader()`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Barrier;
    /// use std::thread;
    ///
    /// let n = 10;
    /// let barrier = Barrier::new(n);
    /// thread::scope(|s| {
    ///     for _ in 0..n {
    ///         // The same messages will be printed together.
    ///         // You will NOT see any interleaving.
    ///         s.spawn(|| {
    ///             println!("before wait");
    ///             barrier.wait();
    ///             println!("after wait");
    ///         });
    ///     }
    /// });
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn wait(&self) -> BarrierWaitResult {
        let mut lock = self.lock.lock();
        // Remember the generation we arrived in so we can tell when it ends.
        let local_gen = lock.generation_id;
        lock.count += 1;
        if lock.count < self.num_threads {
            // Not the last arrival: block until the generation advances,
            // which only the `n`th thread does. Waiting on the generation
            // (rather than the count) makes the barrier safely reusable.
            self.cvar.wait_while(&mut lock, |state| local_gen == state.generation_id);
            BarrierWaitResult(false)
        } else {
            // Last arrival: reset the barrier for the next use, advance the
            // generation, wake everyone, and report this thread as leader.
            lock.count = 0;
            lock.generation_id = lock.generation_id.wrapping_add(1);
            self.cvar.notify_all();
            BarrierWaitResult(true)
        }
    }
}
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
|
||||
impl fmt::Debug for BarrierWaitResult {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("BarrierWaitResult").field("is_leader", &self.is_leader()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl BarrierWaitResult {
    /// Returns `true` if this thread is the "leader thread" for the call to
    /// [`Barrier::wait()`].
    ///
    /// Only one thread will have `true` returned from their result, all other
    /// threads will have `false` returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Barrier;
    ///
    /// let barrier = Barrier::new(1);
    /// let barrier_wait_result = barrier.wait();
    /// println!("{:?}", barrier_wait_result.is_leader());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use]
    pub fn is_leader(&self) -> bool {
        self.0
    }
}
|
||||
422
crates/std/src/sync/lazy_lock.rs
Normal file
422
crates/std/src/sync/lazy_lock.rs
Normal file
@@ -0,0 +1,422 @@
|
||||
use super::once::OnceExclusiveState;
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::mem::ManuallyDrop;
|
||||
use crate::ops::{Deref, DerefMut};
|
||||
use core::panic::{RefUnwindSafe, UnwindSafe};
|
||||
use crate::sync::Once;
|
||||
use crate::{fmt, ptr};
|
||||
|
||||
// We use the state of a Once as discriminant value. Upon creation, the state is
// "incomplete" and `f` contains the initialization closure. In the first call to
// `call_once`, `f` is taken and run. If it succeeds, `value` is set and the state
// is changed to "complete". If it panics, the Once is poisoned, so none of the
// two fields is initialized.
union Data<T, F> {
    // Live once the `Once` is in the "complete" state.
    value: ManuallyDrop<T>,
    // Live while the `Once` is "incomplete"; taken exactly once.
    f: ManuallyDrop<F>,
}
|
||||
|
||||
/// A value which is initialized on the first access.
///
/// This type is a thread-safe [`LazyCell`], and can be used in statics.
/// Since initialization may be called from multiple threads, any
/// dereferencing call will block the calling thread if another
/// initialization routine is currently running.
///
/// [`LazyCell`]: crate::cell::LazyCell
///
/// # Poisoning
///
/// If the initialization closure passed to [`LazyLock::new`] panics, the lock will be poisoned.
/// Once the lock is poisoned, any threads that attempt to access this lock (via a dereference
/// or via an explicit call to [`force()`]) will panic.
///
/// This concept is similar to that of poisoning in the [`std::sync::poison`] module. A key
/// difference, however, is that poisoning in `LazyLock` is _unrecoverable_. All future accesses of
/// the lock from other threads will panic, whereas a type in [`std::sync::poison`] like
/// [`std::sync::poison::Mutex`] allows recovery via [`PoisonError::into_inner()`].
///
/// [`force()`]: LazyLock::force
/// [`std::sync::poison`]: crate::sync::poison
/// [`std::sync::poison::Mutex`]: crate::sync::poison::Mutex
/// [`PoisonError::into_inner()`]: crate::sync::poison::PoisonError::into_inner
///
/// # Examples
///
/// Initialize static variables with `LazyLock`.
/// ```
/// use std::sync::LazyLock;
///
/// // Note: static items do not call [`Drop`] on program termination, so this won't be deallocated.
/// // this is fine, as the OS can deallocate the terminated program faster than we can free memory
/// // but tools like valgrind might report "memory leaks" as it isn't obvious this is intentional.
/// static DEEP_THOUGHT: LazyLock<String> = LazyLock::new(|| {
/// # mod another_crate {
/// #     pub fn great_question() -> String { "42".to_string() }
/// # }
///     // M3 Ultra takes about 16 million years in --release config
///     another_crate::great_question()
/// });
///
/// // The `String` is built, stored in the `LazyLock`, and returned as `&String`.
/// let _ = &*DEEP_THOUGHT;
/// ```
///
/// Initialize fields with `LazyLock`.
/// ```
/// use std::sync::LazyLock;
///
/// #[derive(Debug)]
/// struct UseCellLock {
///     number: LazyLock<u32>,
/// }
/// fn main() {
///     let lock: LazyLock<u32> = LazyLock::new(|| 0u32);
///
///     let data = UseCellLock { number: lock };
///     println!("{}", *data.number);
/// }
/// ```
#[stable(feature = "lazy_cell", since = "1.80.0")]
pub struct LazyLock<T, F = fn() -> T> {
    // FIXME(nonpoison_once): if possible, switch to nonpoison version once it is available
    // Acts as the discriminant for `data`: incomplete => `f`, complete => `value`,
    // poisoned => neither field is initialized.
    once: Once,
    data: UnsafeCell<Data<T, F>>,
}
|
||||
|
||||
impl<T, F: FnOnce() -> T> LazyLock<T, F> {
    /// Creates a new lazy value with the given initializing function.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::LazyLock;
    ///
    /// let hello = "Hello, World!".to_string();
    ///
    /// let lazy = LazyLock::new(|| hello.to_uppercase());
    ///
    /// assert_eq!(&*lazy, "HELLO, WORLD!");
    /// ```
    #[inline]
    #[stable(feature = "lazy_cell", since = "1.80.0")]
    #[rustc_const_stable(feature = "lazy_cell", since = "1.80.0")]
    pub const fn new(f: F) -> LazyLock<T, F> {
        LazyLock { once: Once::new(), data: UnsafeCell::new(Data { f: ManuallyDrop::new(f) }) }
    }

    /// Creates a new lazy value that is already initialized.
    #[inline]
    #[cfg(test)]
    pub(crate) fn preinit(value: T) -> LazyLock<T, F> {
        // Drive the `Once` to "complete" so all accessors treat the union's
        // `value` field as the live one.
        let once = Once::new();
        once.call_once(|| {});
        LazyLock { once, data: UnsafeCell::new(Data { value: ManuallyDrop::new(value) }) }
    }

    /// Consumes this `LazyLock` returning the stored value.
    ///
    /// Returns `Ok(value)` if `Lazy` is initialized and `Err(f)` otherwise.
    ///
    /// # Panics
    ///
    /// Panics if the lock is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(lazy_cell_into_inner)]
    ///
    /// use std::sync::LazyLock;
    ///
    /// let hello = "Hello, World!".to_string();
    ///
    /// let lazy = LazyLock::new(|| hello.to_uppercase());
    ///
    /// assert_eq!(&*lazy, "HELLO, WORLD!");
    /// assert_eq!(LazyLock::into_inner(lazy).ok(), Some("HELLO, WORLD!".to_string()));
    /// ```
    #[unstable(feature = "lazy_cell_into_inner", issue = "125623")]
    pub fn into_inner(mut this: Self) -> Result<T, F> {
        let state = this.once.state();
        match state {
            OnceExclusiveState::Poisoned => panic_poisoned(),
            state => {
                // Suppress `this`'s destructor: we move the union contents
                // out manually below, so `Drop` must not run on it.
                let this = ManuallyDrop::new(this);
                // Move the union out by value; `this` is never touched again,
                // so this read is not duplicated by the (suppressed) drop.
                let data = unsafe { ptr::read(&this.data) }.into_inner();
                match state {
                    OnceExclusiveState::Incomplete => {
                        // Initializer never ran: `f` is the live field.
                        Err(ManuallyDrop::into_inner(unsafe { data.f }))
                    }
                    OnceExclusiveState::Complete => {
                        // Initializer ran successfully: `value` is live.
                        Ok(ManuallyDrop::into_inner(unsafe { data.value }))
                    }
                    // Poisoned was handled by the outer match arm above.
                    OnceExclusiveState::Poisoned => unreachable!(),
                }
            }
        }
    }

    /// Forces the evaluation of this lazy value and returns a mutable reference to
    /// the result.
    ///
    /// # Panics
    ///
    /// If the initialization closure panics (the one that is passed to the [`new()`] method), the
    /// panic is propagated to the caller, and the lock becomes poisoned. This will cause all future
    /// accesses of the lock (via [`force()`] or a dereference) to panic.
    ///
    /// [`new()`]: LazyLock::new
    /// [`force()`]: LazyLock::force
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::LazyLock;
    ///
    /// let mut lazy = LazyLock::new(|| 92);
    ///
    /// let p = LazyLock::force_mut(&mut lazy);
    /// assert_eq!(*p, 92);
    /// *p = 44;
    /// assert_eq!(*lazy, 44);
    /// ```
    #[inline]
    #[stable(feature = "lazy_get", since = "1.94.0")]
    pub fn force_mut(this: &mut LazyLock<T, F>) -> &mut T {
        #[cold]
        /// # Safety
        /// May only be called when the state is `Incomplete`.
        unsafe fn really_init_mut<T, F: FnOnce() -> T>(this: &mut LazyLock<T, F>) -> &mut T {
            // Drop guard: if the initializer panics, mark the lock poisoned
            // so later accesses panic instead of reading a dead union field.
            struct PoisonOnPanic<'a, T, F>(&'a mut LazyLock<T, F>);
            impl<T, F> Drop for PoisonOnPanic<'_, T, F> {
                #[inline]
                fn drop(&mut self) {
                    self.0.once.set_state(OnceExclusiveState::Poisoned);
                }
            }

            // SAFETY: We always poison if the initializer panics (then we never check the data),
            // or set the data on success.
            let f = unsafe { ManuallyDrop::take(&mut this.data.get_mut().f) };
            // INVARIANT: Initiated from mutable reference, don't drop because we read it.
            let guard = PoisonOnPanic(this);
            let data = f();
            guard.0.data.get_mut().value = ManuallyDrop::new(data);
            guard.0.once.set_state(OnceExclusiveState::Complete);
            // Success: defuse the poison guard without running its `Drop`.
            core::mem::forget(guard);
            // SAFETY: We put the value there above.
            unsafe { &mut this.data.get_mut().value }
        }

        // `&mut self` gives exclusive access, so no synchronization is
        // needed; the state can be read and branched on directly.
        let state = this.once.state();
        match state {
            OnceExclusiveState::Poisoned => panic_poisoned(),
            // SAFETY: The `Once` states we completed the initialization.
            OnceExclusiveState::Complete => unsafe { &mut this.data.get_mut().value },
            // SAFETY: The state is `Incomplete`.
            OnceExclusiveState::Incomplete => unsafe { really_init_mut(this) },
        }
    }

    /// Forces the evaluation of this lazy value and returns a reference to
    /// result. This is equivalent to the `Deref` impl, but is explicit.
    ///
    /// This method will block the calling thread if another initialization
    /// routine is currently running.
    ///
    /// # Panics
    ///
    /// If the initialization closure panics (the one that is passed to the [`new()`] method), the
    /// panic is propagated to the caller, and the lock becomes poisoned. This will cause all future
    /// accesses of the lock (via [`force()`] or a dereference) to panic.
    ///
    /// [`new()`]: LazyLock::new
    /// [`force()`]: LazyLock::force
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::LazyLock;
    ///
    /// let lazy = LazyLock::new(|| 92);
    ///
    /// assert_eq!(LazyLock::force(&lazy), &92);
    /// assert_eq!(&*lazy, &92);
    /// ```
    #[inline]
    #[stable(feature = "lazy_cell", since = "1.80.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn force(this: &LazyLock<T, F>) -> &T {
        this.once.call_once_force(|state| {
            // A panic in a previous initialization attempt poisoned the Once;
            // surface that as an unrecoverable panic here too.
            if state.is_poisoned() {
                panic_poisoned();
            }

            // SAFETY: `call_once` only runs this closure once, ever.
            let data = unsafe { &mut *this.data.get() };
            let f = unsafe { ManuallyDrop::take(&mut data.f) };
            let value = f();
            data.value = ManuallyDrop::new(value);
        });

        // SAFETY:
        // There are four possible scenarios:
        // * the closure was called and initialized `value`.
        // * the closure was called and panicked, so this point is never reached.
        // * the closure was not called, but a previous call initialized `value`.
        // * the closure was not called because the Once is poisoned, which we handled above.
        // So `value` has definitely been initialized and will not be modified again.
        unsafe { &*(*this.data.get()).value }
    }
}
|
||||
|
||||
impl<T, F> LazyLock<T, F> {
    /// Returns a mutable reference to the value if initialized. Otherwise (if uninitialized or
    /// poisoned), returns `None`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::LazyLock;
    ///
    /// let mut lazy = LazyLock::new(|| 92);
    ///
    /// assert_eq!(LazyLock::get_mut(&mut lazy), None);
    /// let _ = LazyLock::force(&lazy);
    /// *LazyLock::get_mut(&mut lazy).unwrap() = 44;
    /// assert_eq!(*lazy, 44);
    /// ```
    #[inline]
    #[stable(feature = "lazy_get", since = "1.94.0")]
    pub fn get_mut(this: &mut LazyLock<T, F>) -> Option<&mut T> {
        // `state()` does not perform an atomic load, so prefer it over `is_complete()`.
        let state = this.once.state();
        match state {
            // SAFETY:
            // The closure has been run successfully, so `value` has been initialized.
            OnceExclusiveState::Complete => Some(unsafe { &mut this.data.get_mut().value }),
            // Incomplete or poisoned: `value` is not live.
            _ => None,
        }
    }

    /// Returns a reference to the value if initialized. Otherwise (if uninitialized or poisoned),
    /// returns `None`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::LazyLock;
    ///
    /// let lazy = LazyLock::new(|| 92);
    ///
    /// assert_eq!(LazyLock::get(&lazy), None);
    /// let _ = LazyLock::force(&lazy);
    /// assert_eq!(LazyLock::get(&lazy), Some(&92));
    /// ```
    #[inline]
    #[stable(feature = "lazy_get", since = "1.94.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn get(this: &LazyLock<T, F>) -> Option<&T> {
        if this.once.is_completed() {
            // SAFETY:
            // The closure has been run successfully, so `value` has been initialized
            // and will not be modified again.
            Some(unsafe { &(*this.data.get()).value })
        } else {
            None
        }
    }
}
|
||||
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
impl<T, F> Drop for LazyLock<T, F> {
    fn drop(&mut self) {
        // The `Once` state tells us which union field (if any) is live and
        // therefore needs dropping.
        match self.once.state() {
            // SAFETY: the initializer never ran, so `f` is the live field.
            OnceExclusiveState::Incomplete => unsafe {
                ManuallyDrop::drop(&mut self.data.get_mut().f)
            },
            // SAFETY: initialization succeeded, so `value` is the live field.
            OnceExclusiveState::Complete => unsafe {
                ManuallyDrop::drop(&mut self.data.get_mut().value)
            },
            // Poisoned: the initializer panicked, so neither field is
            // initialized and there is nothing to drop.
            OnceExclusiveState::Poisoned => {}
        }
    }
}
|
||||
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
impl<T, F: FnOnce() -> T> Deref for LazyLock<T, F> {
    type Target = T;

    /// Dereferences the value.
    ///
    /// This method will block the calling thread if another initialization
    /// routine is currently running.
    ///
    /// # Panics
    ///
    /// If the initialization closure panics (the one that is passed to the [`new()`] method), the
    /// panic is propagated to the caller, and the lock becomes poisoned. This will cause all future
    /// accesses of the lock (via [`force()`] or a dereference) to panic.
    ///
    /// [`new()`]: LazyLock::new
    /// [`force()`]: LazyLock::force
    #[inline]
    fn deref(&self) -> &T {
        // Delegates to `force`, which initializes on first access.
        LazyLock::force(self)
    }
}
|
||||
|
||||
#[stable(feature = "lazy_deref_mut", since = "1.89.0")]
impl<T, F: FnOnce() -> T> DerefMut for LazyLock<T, F> {
    /// # Panics
    ///
    /// If the initialization closure panics (the one that is passed to the [`new()`] method), the
    /// panic is propagated to the caller, and the lock becomes poisoned. This will cause all future
    /// accesses of the lock (via [`force()`] or a dereference) to panic.
    ///
    /// [`new()`]: LazyLock::new
    /// [`force()`]: LazyLock::force
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        // Delegates to `force_mut`, which initializes without locking since
        // `&mut self` proves exclusive access.
        LazyLock::force_mut(self)
    }
}
|
||||
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
|
||||
impl<T: Default> Default for LazyLock<T> {
|
||||
/// Creates a new lazy value using `Default` as the initializing function.
|
||||
#[inline]
|
||||
fn default() -> LazyLock<T> {
|
||||
LazyLock::new(T::default)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
|
||||
impl<T: fmt::Debug, F> fmt::Debug for LazyLock<T, F> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let mut d = f.debug_tuple("LazyLock");
|
||||
match LazyLock::get(self) {
|
||||
Some(v) => d.field(v),
|
||||
None => d.field(&format_args!("<uninit>")),
|
||||
};
|
||||
d.finish()
|
||||
}
|
||||
}
|
||||
|
||||
// Shared panic path for all accesses to a poisoned `LazyLock`; kept out of
// line so the happy paths stay small.
#[cold]
#[inline(never)]
fn panic_poisoned() -> ! {
    panic!("LazyLock instance has previously been poisoned")
}
|
||||
|
||||
// We never create a `&F` from a `&LazyLock<T, F>` so it is fine
// to not impl `Sync` for `F`.
#[stable(feature = "lazy_cell", since = "1.80.0")]
unsafe impl<T: Sync + Send, F: Send> Sync for LazyLock<T, F> {}
// auto-derived `Send` impl is OK.

#[stable(feature = "lazy_cell", since = "1.80.0")]
impl<T: RefUnwindSafe + UnwindSafe, F: UnwindSafe> RefUnwindSafe for LazyLock<T, F> {}
#[stable(feature = "lazy_cell", since = "1.80.0")]
impl<T: UnwindSafe, F: UnwindSafe> UnwindSafe for LazyLock<T, F> {}
|
||||
569
crates/std/src/sync/mpmc/array.rs
Normal file
569
crates/std/src/sync/mpmc/array.rs
Normal file
@@ -0,0 +1,569 @@
|
||||
//! Bounded channel based on a preallocated array.
|
||||
//!
|
||||
//! This flavor has a fixed, positive capacity.
|
||||
//!
|
||||
//! The implementation is based on Dmitry Vyukov's bounded MPMC queue.
|
||||
//!
|
||||
//! Source:
|
||||
//! - <http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue>
|
||||
//! - <https://docs.google.com/document/d/1yIAYmbvL3JxOKOjuCyon7JhW4cSv1wy5hC0ApeGMV9s/pub>
|
||||
|
||||
use super::context::Context;
|
||||
use super::error::*;
|
||||
use super::select::{Operation, Selected, Token};
|
||||
use super::utils::{Backoff, CachePadded};
|
||||
use super::waker::SyncWaker;
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::mem::MaybeUninit;
|
||||
use crate::ptr;
|
||||
use crate::sync::atomic::{self, Atomic, AtomicUsize, Ordering};
|
||||
use crate::time::Instant;
|
||||
|
||||
/// A slot in a channel.
struct Slot<T> {
    /// The current stamp.
    stamp: Atomic<usize>,

    /// The message in this slot. Either read out in `read` or dropped through
    /// `discard_all_messages`.
    msg: UnsafeCell<MaybeUninit<T>>,
}
|
||||
|
||||
/// The token type for the array flavor.
#[derive(Debug)]
pub(crate) struct ArrayToken {
    /// Slot to read from or write to.
    /// A null pointer signals a disconnected channel.
    slot: *const u8,

    /// Stamp to store into the slot after reading or writing.
    stamp: usize,
}

impl Default for ArrayToken {
    #[inline]
    fn default() -> Self {
        // Null slot / zero stamp: the "no slot reserved" token.
        ArrayToken { slot: ptr::null(), stamp: 0 }
    }
}
|
||||
|
||||
/// Bounded channel based on a preallocated array.
pub(crate) struct Channel<T> {
    /// The head of the channel.
    ///
    /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
    /// packed into a single `usize`. The lower bits represent the index, while the upper bits
    /// represent the lap. The mark bit in the head is always zero.
    ///
    /// Messages are popped from the head of the channel.
    head: CachePadded<Atomic<usize>>,

    /// The tail of the channel.
    ///
    /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
    /// packed into a single `usize`. The lower bits represent the index, while the upper bits
    /// represent the lap. The mark bit indicates that the channel is disconnected.
    ///
    /// Messages are pushed into the tail of the channel.
    tail: CachePadded<Atomic<usize>>,

    /// The buffer holding slots.
    buffer: Box<[Slot<T>]>,

    /// The channel capacity.
    cap: usize,

    /// A stamp with the value of `{ lap: 1, mark: 0, index: 0 }`.
    one_lap: usize,

    /// If this bit is set in the tail, that means the channel is disconnected.
    mark_bit: usize,

    /// Senders waiting while the channel is full.
    senders: SyncWaker,

    /// Receivers waiting while the channel is empty and not disconnected.
    receivers: SyncWaker,
}
|
||||
|
||||
impl<T> Channel<T> {
|
||||
    /// Creates a bounded channel of capacity `cap`.
    ///
    /// # Panics
    ///
    /// Panics if `cap` is zero.
    pub(crate) fn with_capacity(cap: usize) -> Self {
        assert!(cap > 0, "capacity must be positive");

        // Compute constants `mark_bit` and `one_lap`.
        // `mark_bit` is the lowest power of two strictly above the largest
        // valid index, so index / mark / lap occupy disjoint bit ranges.
        let mark_bit = (cap + 1).next_power_of_two();
        let one_lap = mark_bit * 2;

        // Head is initialized to `{ lap: 0, mark: 0, index: 0 }`.
        let head = 0;
        // Tail is initialized to `{ lap: 0, mark: 0, index: 0 }`.
        let tail = 0;

        // Allocate a buffer of `cap` slots initialized
        // with stamps.
        let buffer: Box<[Slot<T>]> = (0..cap)
            .map(|i| {
                // Set the stamp to `{ lap: 0, mark: 0, index: i }`.
                Slot { stamp: AtomicUsize::new(i), msg: UnsafeCell::new(MaybeUninit::uninit()) }
            })
            .collect();

        Channel {
            buffer,
            cap,
            one_lap,
            mark_bit,
            head: CachePadded::new(AtomicUsize::new(head)),
            tail: CachePadded::new(AtomicUsize::new(tail)),
            senders: SyncWaker::new(),
            receivers: SyncWaker::new(),
        }
    }
|
||||
|
||||
    /// Attempts to reserve a slot for sending a message.
    ///
    /// Returns `true` when a slot was reserved in `token` (or the channel is
    /// disconnected, signalled by a null `token.array.slot`), and `false`
    /// when the channel is full.
    fn start_send(&self, token: &mut Token) -> bool {
        let backoff = Backoff::new();
        let mut tail = self.tail.load(Ordering::Relaxed);

        loop {
            // Check if the channel is disconnected.
            if tail & self.mark_bit != 0 {
                token.array.slot = ptr::null();
                token.array.stamp = 0;
                return true;
            }

            // Deconstruct the tail.
            let index = tail & (self.mark_bit - 1);
            let lap = tail & !(self.one_lap - 1);

            // Inspect the corresponding slot.
            debug_assert!(index < self.buffer.len());
            // SAFETY: `index` is masked below `mark_bit`, and the debug
            // assertion above checks it is within the buffer.
            let slot = unsafe { self.buffer.get_unchecked(index) };
            let stamp = slot.stamp.load(Ordering::Acquire);

            // If the tail and the stamp match, we may attempt to push.
            if tail == stamp {
                let new_tail = if index + 1 < self.cap {
                    // Same lap, incremented index.
                    // Set to `{ lap: lap, mark: 0, index: index + 1 }`.
                    tail + 1
                } else {
                    // One lap forward, index wraps around to zero.
                    // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
                    lap.wrapping_add(self.one_lap)
                };

                // Try moving the tail.
                match self.tail.compare_exchange_weak(
                    tail,
                    new_tail,
                    Ordering::SeqCst,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => {
                        // Prepare the token for the follow-up call to `write`.
                        token.array.slot = slot as *const Slot<T> as *const u8;
                        token.array.stamp = tail + 1;
                        return true;
                    }
                    Err(_) => {
                        // Lost the race to another sender; retry shortly.
                        backoff.spin_light();
                        tail = self.tail.load(Ordering::Relaxed);
                    }
                }
            } else if stamp.wrapping_add(self.one_lap) == tail + 1 {
                atomic::fence(Ordering::SeqCst);
                let head = self.head.load(Ordering::Relaxed);

                // If the head lags one lap behind the tail as well...
                if head.wrapping_add(self.one_lap) == tail {
                    // ...then the channel is full.
                    return false;
                }

                backoff.spin_light();
                tail = self.tail.load(Ordering::Relaxed);
            } else {
                // Snooze because we need to wait for the stamp to get updated.
                backoff.spin_heavy();
                tail = self.tail.load(Ordering::Relaxed);
            }
        }
    }
|
||||
|
||||
    /// Writes a message into the channel.
    ///
    /// Returns `Err(msg)` if the channel is disconnected.
    ///
    /// # Safety
    ///
    /// `token` must have been prepared by a successful call to `start_send`
    /// on this channel (it may hold a null slot, meaning disconnected).
    pub(crate) unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
        // If there is no slot, the channel is disconnected.
        if token.array.slot.is_null() {
            return Err(msg);
        }

        // Write the message into the slot and update the stamp.
        unsafe {
            let slot: &Slot<T> = &*(token.array.slot as *const Slot<T>);
            slot.msg.get().write(MaybeUninit::new(msg));
            // Publishing the stamp (Release) makes the message visible to
            // the receiver that observes it (Acquire).
            slot.stamp.store(token.array.stamp, Ordering::Release);
        }

        // Wake a sleeping receiver.
        self.receivers.notify();
        Ok(())
    }
|
||||
|
||||
    /// Attempts to reserve a slot for receiving a message.
    ///
    /// Returns `true` when a slot was reserved in `token` (or the channel is
    /// empty and disconnected, signalled by a null `token.array.slot`), and
    /// `false` when the channel is empty but still connected.
    fn start_recv(&self, token: &mut Token) -> bool {
        let backoff = Backoff::new();
        let mut head = self.head.load(Ordering::Relaxed);

        loop {
            // Deconstruct the head.
            let index = head & (self.mark_bit - 1);
            let lap = head & !(self.one_lap - 1);

            // Inspect the corresponding slot.
            debug_assert!(index < self.buffer.len());
            // SAFETY: `index` is masked below `mark_bit`, and the debug
            // assertion above checks it is within the buffer.
            let slot = unsafe { self.buffer.get_unchecked(index) };
            let stamp = slot.stamp.load(Ordering::Acquire);

            // If the stamp is ahead of the head by 1, we may attempt to pop.
            if head + 1 == stamp {
                let new = if index + 1 < self.cap {
                    // Same lap, incremented index.
                    // Set to `{ lap: lap, mark: 0, index: index + 1 }`.
                    head + 1
                } else {
                    // One lap forward, index wraps around to zero.
                    // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
                    lap.wrapping_add(self.one_lap)
                };

                // Try moving the head.
                match self.head.compare_exchange_weak(
                    head,
                    new,
                    Ordering::SeqCst,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => {
                        // Prepare the token for the follow-up call to `read`.
                        token.array.slot = slot as *const Slot<T> as *const u8;
                        token.array.stamp = head.wrapping_add(self.one_lap);
                        return true;
                    }
                    Err(_) => {
                        // Lost the race to another receiver; retry shortly.
                        backoff.spin_light();
                        head = self.head.load(Ordering::Relaxed);
                    }
                }
            } else if stamp == head {
                atomic::fence(Ordering::SeqCst);
                let tail = self.tail.load(Ordering::Relaxed);

                // If the tail equals the head, that means the channel is empty.
                if (tail & !self.mark_bit) == head {
                    // If the channel is disconnected...
                    if tail & self.mark_bit != 0 {
                        // ...then receive an error.
                        token.array.slot = ptr::null();
                        token.array.stamp = 0;
                        return true;
                    } else {
                        // Otherwise, the receive operation is not ready.
                        return false;
                    }
                }

                backoff.spin_light();
                head = self.head.load(Ordering::Relaxed);
            } else {
                // Snooze because we need to wait for the stamp to get updated.
                backoff.spin_heavy();
                head = self.head.load(Ordering::Relaxed);
            }
        }
    }
|
||||
|
||||
    /// Reads a message from the channel.
    ///
    /// Returns `Err(())` if the channel is disconnected.
    ///
    /// # Safety
    ///
    /// `token` must have been prepared by a successful call to `start_recv`
    /// on this channel (it may hold a null slot, meaning disconnected).
    pub(crate) unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
        if token.array.slot.is_null() {
            // The channel is disconnected.
            return Err(());
        }

        // Read the message from the slot and update the stamp.
        let msg = unsafe {
            let slot: &Slot<T> = &*(token.array.slot as *const Slot<T>);

            let msg = slot.msg.get().read().assume_init();
            // Releasing the next-lap stamp hands the slot back to senders.
            slot.stamp.store(token.array.stamp, Ordering::Release);
            msg
        };

        // Wake a sleeping sender.
        self.senders.notify();
        Ok(msg)
    }
|
||||
|
||||
/// Attempts to send a message into the channel.
|
||||
pub(crate) fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
|
||||
let token = &mut Token::default();
|
||||
if self.start_send(token) {
|
||||
unsafe { self.write(token, msg).map_err(TrySendError::Disconnected) }
|
||||
} else {
|
||||
Err(TrySendError::Full(msg))
|
||||
}
|
||||
}
|
||||
|
||||
    /// Sends a message into the channel.
    ///
    /// Blocks until a slot becomes available, the channel is disconnected, or
    /// the optional `deadline` passes (in which case the message is handed
    /// back in `SendTimeoutError::Timeout`).
    pub(crate) fn send(
        &self,
        msg: T,
        deadline: Option<Instant>,
    ) -> Result<(), SendTimeoutError<T>> {
        let token = &mut Token::default();
        loop {
            // Try sending a message.
            if self.start_send(token) {
                let res = unsafe { self.write(token, msg) };
                return res.map_err(SendTimeoutError::Disconnected);
            }

            // Check the deadline before going to sleep.
            if let Some(d) = deadline {
                if Instant::now() >= d {
                    return Err(SendTimeoutError::Timeout(msg));
                }
            }

            Context::with(|cx| {
                // Prepare for blocking until a receiver wakes us up.
                let oper = Operation::hook(token);
                self.senders.register(oper, cx);

                // Has the channel become ready just now? Re-checking after
                // registering closes the race with a concurrent receiver.
                if !self.is_full() || self.is_disconnected() {
                    let _ = cx.try_select(Selected::Aborted);
                }

                // Block the current thread.
                // SAFETY: the context belongs to the current thread.
                let sel = unsafe { cx.wait_until(deadline) };

                match sel {
                    Selected::Waiting => unreachable!(),
                    Selected::Aborted | Selected::Disconnected => {
                        // We were not woken by a receiver; remove ourselves
                        // from the wait list and retry in the outer loop.
                        self.senders.unregister(oper).unwrap();
                    }
                    Selected::Operation(_) => {}
                }
            });
        }
    }
|
||||
|
||||
/// Attempts to receive a message without blocking.
|
||||
pub(crate) fn try_recv(&self) -> Result<T, TryRecvError> {
|
||||
let token = &mut Token::default();
|
||||
|
||||
if self.start_recv(token) {
|
||||
unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
|
||||
} else {
|
||||
Err(TryRecvError::Empty)
|
||||
}
|
||||
}
|
||||
|
||||
    /// Receives a message from the channel.
    ///
    /// Blocks until a message arrives, the channel is disconnected and empty,
    /// or the optional `deadline` passes.
    pub(crate) fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
        let token = &mut Token::default();
        loop {
            // Try receiving a message.
            if self.start_recv(token) {
                let res = unsafe { self.read(token) };
                return res.map_err(|_| RecvTimeoutError::Disconnected);
            }

            // Check the deadline before going to sleep.
            if let Some(d) = deadline {
                if Instant::now() >= d {
                    return Err(RecvTimeoutError::Timeout);
                }
            }

            Context::with(|cx| {
                // Prepare for blocking until a sender wakes us up.
                let oper = Operation::hook(token);
                self.receivers.register(oper, cx);

                // Has the channel become ready just now? Re-checking after
                // registering closes the race with a concurrent sender.
                if !self.is_empty() || self.is_disconnected() {
                    let _ = cx.try_select(Selected::Aborted);
                }

                // Block the current thread.
                // SAFETY: the context belongs to the current thread.
                let sel = unsafe { cx.wait_until(deadline) };

                match sel {
                    Selected::Waiting => unreachable!(),
                    Selected::Aborted | Selected::Disconnected => {
                        self.receivers.unregister(oper).unwrap();
                        // If the channel was disconnected, we still have to check for remaining
                        // messages.
                    }
                    Selected::Operation(_) => {}
                }
            });
        }
    }
|
||||
|
||||
    /// Returns the current number of messages inside the channel.
    ///
    /// Loops until it observes a consistent `(tail, head)` pair, i.e. until a
    /// re-read of `tail` confirms it did not move between the two loads.
    pub(crate) fn len(&self) -> usize {
        loop {
            // Load the tail, then load the head.
            let tail = self.tail.load(Ordering::SeqCst);
            let head = self.head.load(Ordering::SeqCst);

            // If the tail didn't change, we've got consistent values to work with.
            if self.tail.load(Ordering::SeqCst) == tail {
                // Strip the lap/mark bits, keeping only the buffer indices.
                let hix = head & (self.mark_bit - 1);
                let tix = tail & (self.mark_bit - 1);

                return if hix < tix {
                    tix - hix
                } else if hix > tix {
                    // The tail wrapped around to a new lap.
                    self.cap - hix + tix
                } else if (tail & !self.mark_bit) == head {
                    // Same index and same lap: empty.
                    0
                } else {
                    // Same index but a lap apart: full.
                    self.cap
                };
            }
        }
    }
|
||||
|
||||
    /// Returns the capacity of the channel.
    ///
    /// Always `Some` for this flavor: the array-based channel is bounded by
    /// construction. The `Option` matches the shared channel interface, where
    /// the unbounded flavor returns `None`.
    #[allow(clippy::unnecessary_wraps)] // This is intentional.
    pub(crate) fn capacity(&self) -> Option<usize> {
        Some(self.cap)
    }
|
||||
|
||||
/// Disconnects senders and wakes up all blocked receivers.
|
||||
///
|
||||
/// Returns `true` if this call disconnected the channel.
|
||||
pub(crate) fn disconnect_senders(&self) -> bool {
|
||||
let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst);
|
||||
|
||||
if tail & self.mark_bit == 0 {
|
||||
self.receivers.disconnect();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
    /// Disconnects receivers and wakes up all blocked senders.
    ///
    /// Returns `true` if this call disconnected the channel.
    ///
    /// # Safety
    /// May only be called once upon dropping the last receiver. The
    /// destruction of all other receivers must have been observed with acquire
    /// ordering or stronger.
    pub(crate) unsafe fn disconnect_receivers(&self) -> bool {
        // Raise the mark bit; the previous value tells us whether we won the
        // race against the sender side's disconnect.
        let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst);
        let disconnected = if tail & self.mark_bit == 0 {
            // First to disconnect: wake every blocked sender.
            self.senders.disconnect();
            true
        } else {
            false
        };

        // With no receivers left, buffered messages can never be read: drop
        // them now so their destructors run.
        unsafe { self.discard_all_messages(tail) };
        disconnected
    }
|
||||
|
||||
    /// Discards all messages.
    ///
    /// `tail` should be the current (and therefore last) value of `tail`.
    ///
    /// # Panicking
    /// If a destructor panics, the remaining messages are leaked, matching the
    /// behavior of the unbounded channel.
    ///
    /// # Safety
    /// This method must only be called when dropping the last receiver. The
    /// destruction of all other receivers must have been observed with acquire
    /// ordering or stronger.
    unsafe fn discard_all_messages(&self, tail: usize) {
        debug_assert!(self.is_disconnected());

        // Only receivers modify `head`, so since we are the last one,
        // this value will not change and will not be observed (since
        // no new messages can be sent after disconnection).
        let mut head = self.head.load(Ordering::Relaxed);
        // Strip the disconnect mark so `tail` is a plain position.
        let tail = tail & !self.mark_bit;

        let backoff = Backoff::new();
        loop {
            // Deconstruct the head.
            let index = head & (self.mark_bit - 1);
            let lap = head & !(self.one_lap - 1);

            // Inspect the corresponding slot.
            debug_assert!(index < self.buffer.len());
            let slot = unsafe { self.buffer.get_unchecked(index) };
            let stamp = slot.stamp.load(Ordering::Acquire);

            // If the stamp is ahead of the head by 1, we may drop the message.
            if head + 1 == stamp {
                head = if index + 1 < self.cap {
                    // Same lap, incremented index.
                    // Set to `{ lap: lap, mark: 0, index: index + 1 }`.
                    head + 1
                } else {
                    // One lap forward, index wraps around to zero.
                    // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
                    lap.wrapping_add(self.one_lap)
                };

                // SAFETY: the stamp proved the sender finished writing, and no
                // receiver can race us for this slot (we are the last one).
                unsafe {
                    (*slot.msg.get()).assume_init_drop();
                }
            // If the tail equals the head, that means the channel is empty.
            } else if tail == head {
                return;
            // Otherwise, a sender is about to write into the slot, so we need
            // to wait for it to update the stamp.
            } else {
                backoff.spin_heavy();
            }
        }
    }
|
||||
|
||||
/// Returns `true` if the channel is disconnected.
|
||||
pub(crate) fn is_disconnected(&self) -> bool {
|
||||
self.tail.load(Ordering::SeqCst) & self.mark_bit != 0
|
||||
}
|
||||
|
||||
/// Returns `true` if the channel is empty.
|
||||
pub(crate) fn is_empty(&self) -> bool {
|
||||
let head = self.head.load(Ordering::SeqCst);
|
||||
let tail = self.tail.load(Ordering::SeqCst);
|
||||
|
||||
// Is the tail equal to the head?
|
||||
//
|
||||
// Note: If the head changes just before we load the tail, that means there was a moment
|
||||
// when the channel was not empty, so it is safe to just return `false`.
|
||||
(tail & !self.mark_bit) == head
|
||||
}
|
||||
|
||||
/// Returns `true` if the channel is full.
|
||||
pub(crate) fn is_full(&self) -> bool {
|
||||
let tail = self.tail.load(Ordering::SeqCst);
|
||||
let head = self.head.load(Ordering::SeqCst);
|
||||
|
||||
// Is the head lagging one lap behind tail?
|
||||
//
|
||||
// Note: If the tail changes just before we load the head, that means there was a moment
|
||||
// when the channel was not full, so it is safe to just return `false`.
|
||||
head.wrapping_add(self.one_lap) == tail & !self.mark_bit
|
||||
}
|
||||
}
|
||||
159
crates/std/src/sync/mpmc/context.rs
Normal file
159
crates/std/src/sync/mpmc/context.rs
Normal file
@@ -0,0 +1,159 @@
|
||||
//! Thread-local channel context.
|
||||
|
||||
use super::select::Selected;
|
||||
use super::waker::current_thread_id;
|
||||
use crate::cell::Cell;
|
||||
use crate::ptr;
|
||||
use alloc_crate::sync::Arc;
|
||||
use crate::sync::atomic::{Atomic, AtomicPtr, AtomicUsize, Ordering};
|
||||
use crate::thread::{self, Thread};
|
||||
use crate::time::Instant;
|
||||
|
||||
/// Thread-local context.
///
/// A cheaply cloneable handle (an `Arc`) to the per-thread state used for
/// blocking on channel operations: the selected operation, an optional packet
/// pointer, and the thread handle used to park/unpark.
#[derive(Debug, Clone)]
pub struct Context {
    inner: Arc<Inner>,
}
|
||||
|
||||
/// Inner representation of `Context`.
#[derive(Debug)]
struct Inner {
    /// Selected operation.
    ///
    /// Holds a `Selected` encoded as `usize`; starts as `Selected::Waiting`
    /// and is claimed at most once per use via `try_select`.
    select: Atomic<usize>,

    /// A slot into which another thread may store a pointer to its `Packet`.
    packet: Atomic<*mut ()>,

    /// Thread handle.
    ///
    /// Used by `wait_until` to park and by `unpark` to wake the owner.
    thread: Thread,

    /// Thread id.
    thread_id: usize,
}
|
||||
|
||||
impl Context {
    /// Creates a new context for the duration of the closure.
    ///
    /// Reuses a cached thread-local `Context` when available; falls back to a
    /// fresh one during TLS destruction or re-entrant use.
    #[inline]
    pub fn with<F, R>(f: F) -> R
    where
        F: FnOnce(&Context) -> R,
    {
        thread_local! {
            /// Cached thread-local context.
            static CONTEXT: Cell<Option<Context>> = Cell::new(Some(Context::new()));
        }

        // Turn the `FnOnce` into an `FnMut` so it can be passed to both
        // `try_with` and the `unwrap_or_else` fallback below (only one of
        // which will actually run it).
        let mut f = Some(f);
        let mut f = |cx: &Context| -> R {
            let f = f.take().unwrap();
            f(cx)
        };

        CONTEXT
            .try_with(|cell| match cell.take() {
                // The cell is empty: the cached context is already in use
                // (re-entrancy), so run with a temporary one.
                None => f(&Context::new()),
                // Reuse the cached context, clearing stale state first, and
                // put it back when done.
                Some(cx) => {
                    cx.reset();
                    let res = f(&cx);
                    cell.set(Some(cx));
                    res
                }
            })
            // TLS is unavailable (e.g. during thread teardown): use a
            // temporary context.
            .unwrap_or_else(|_| f(&Context::new()))
    }

    /// Creates a new `Context`.
    #[cold]
    fn new() -> Context {
        Context {
            inner: Arc::new(Inner {
                select: AtomicUsize::new(Selected::Waiting.into()),
                packet: AtomicPtr::new(ptr::null_mut()),
                thread: thread::current_or_unnamed(),
                thread_id: current_thread_id(),
            }),
        }
    }

    /// Resets `select` and `packet`.
    ///
    /// Called before reusing a cached context so leftover state from a
    /// previous operation cannot leak into the next one.
    #[inline]
    fn reset(&self) {
        self.inner.select.store(Selected::Waiting.into(), Ordering::Release);
        self.inner.packet.store(ptr::null_mut(), Ordering::Release);
    }

    /// Attempts to select an operation.
    ///
    /// On failure, the previously selected operation is returned.
    #[inline]
    pub fn try_select(&self, select: Selected) -> Result<(), Selected> {
        // Only the first caller moves `select` away from `Waiting`; everyone
        // else sees the winner's value in the error.
        self.inner
            .select
            .compare_exchange(
                Selected::Waiting.into(),
                select.into(),
                Ordering::AcqRel,
                Ordering::Acquire,
            )
            .map(|_| ())
            .map_err(|e| e.into())
    }

    /// Stores a packet.
    ///
    /// This method must be called after `try_select` succeeds and there is a packet to provide.
    #[inline]
    pub fn store_packet(&self, packet: *mut ()) {
        if !packet.is_null() {
            self.inner.packet.store(packet, Ordering::Release);
        }
    }

    /// Waits until an operation is selected and returns it.
    ///
    /// If the deadline is reached, `Selected::Aborted` will be selected.
    ///
    /// # Safety
    /// This may only be called from the thread this `Context` belongs to.
    #[inline]
    pub unsafe fn wait_until(&self, deadline: Option<Instant>) -> Selected {
        // Parking may wake spuriously, so re-check the selection in a loop.
        loop {
            // Check whether an operation has been selected.
            let sel = Selected::from(self.inner.select.load(Ordering::Acquire));
            if sel != Selected::Waiting {
                return sel;
            }

            // If there's a deadline, park the current thread until the deadline is reached.
            if let Some(end) = deadline {
                let now = Instant::now();

                if now < end {
                    // SAFETY: guaranteed by caller.
                    unsafe { self.inner.thread.park_timeout(end - now) };
                } else {
                    // The deadline has been reached. Try aborting select.
                    return match self.try_select(Selected::Aborted) {
                        Ok(()) => Selected::Aborted,
                        // Someone selected concurrently; report their choice.
                        Err(s) => s,
                    };
                }
            } else {
                // SAFETY: guaranteed by caller.
                unsafe { self.inner.thread.park() };
            }
        }
    }

    /// Unparks the thread this context belongs to.
    #[inline]
    pub fn unpark(&self) {
        self.inner.thread.unpark();
    }

    /// Returns the id of the thread this context belongs to.
    #[inline]
    pub fn thread_id(&self) -> usize {
        self.inner.thread_id
    }
}
|
||||
136
crates/std/src/sync/mpmc/counter.rs
Normal file
136
crates/std/src/sync/mpmc/counter.rs
Normal file
@@ -0,0 +1,136 @@
|
||||
use crate::sync::atomic::{Atomic, AtomicBool, AtomicUsize, Ordering};
|
||||
use crate::{ops, process};
|
||||
|
||||
/// Reference counter internals.
///
/// Heap-allocated shared state behind every channel: two independent
/// reference counts (senders and receivers) plus a one-shot `destroy` flag
/// that decides which of the two last references frees the allocation.
struct Counter<C> {
    /// The number of senders associated with the channel.
    senders: Atomic<usize>,

    /// The number of receivers associated with the channel.
    receivers: Atomic<usize>,

    /// Set to `true` if the last sender or the last receiver reference deallocates the channel.
    ///
    /// The first side to finish sets it; the second side, seeing it already
    /// set, performs the deallocation (see `Sender::release`/`Receiver::release`).
    destroy: Atomic<bool>,

    /// The internal channel.
    chan: C,
}
|
||||
|
||||
/// Wraps a channel into the reference counter.
|
||||
pub(crate) fn new<C>(chan: C) -> (Sender<C>, Receiver<C>) {
|
||||
let counter = Box::into_raw(Box::new(Counter {
|
||||
senders: AtomicUsize::new(1),
|
||||
receivers: AtomicUsize::new(1),
|
||||
destroy: AtomicBool::new(false),
|
||||
chan,
|
||||
}));
|
||||
let s = Sender { counter };
|
||||
let r = Receiver { counter };
|
||||
(s, r)
|
||||
}
|
||||
|
||||
/// The sending side.
///
/// A raw pointer into the shared `Counter` allocation; lifetime is managed
/// manually via `acquire`/`release` rather than `Arc`.
pub(crate) struct Sender<C> {
    counter: *mut Counter<C>,
}
|
||||
|
||||
impl<C> Sender<C> {
    /// Returns the internal `Counter`.
    // SAFETY-relevant: callers rely on the allocation staying alive while any
    // sender or receiver reference exists (see `release`).
    fn counter(&self) -> &Counter<C> {
        unsafe { &*self.counter }
    }

    /// Acquires another sender reference.
    pub(crate) fn acquire(&self) -> Sender<C> {
        let count = self.counter().senders.fetch_add(1, Ordering::Relaxed);

        // Cloning senders and calling `mem::forget` on the clones could potentially overflow the
        // counter. It's very difficult to recover sensibly from such degenerate scenarios so we
        // just abort when the count becomes very large.
        if count > isize::MAX as usize {
            process::abort();
        }

        Sender { counter: self.counter }
    }

    /// Releases the sender reference.
    ///
    /// Function `disconnect` will be called if this is the last sender reference.
    pub(crate) unsafe fn release<F: FnOnce(&C) -> bool>(&self, disconnect: F) {
        // Drop one reference; `== 1` means we were the last sender.
        if self.counter().senders.fetch_sub(1, Ordering::AcqRel) == 1 {
            disconnect(&self.counter().chan);

            // `swap` returning `true` means the receiver side already set the
            // flag, i.e. both sides are done — we free the allocation. If it
            // returns `false`, the last receiver will free it instead.
            if self.counter().destroy.swap(true, Ordering::AcqRel) {
                drop(unsafe { Box::from_raw(self.counter) });
            }
        }
    }
}
|
||||
|
||||
impl<C> ops::Deref for Sender<C> {
|
||||
type Target = C;
|
||||
|
||||
fn deref(&self) -> &C {
|
||||
&self.counter().chan
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> PartialEq for Sender<C> {
    /// Two senders are equal exactly when they point at the same counter
    /// allocation, i.e. belong to the same channel.
    fn eq(&self, rhs: &Sender<C>) -> bool {
        self.counter == rhs.counter
    }
}
|
||||
|
||||
/// The receiving side.
///
/// A raw pointer into the shared `Counter` allocation; lifetime is managed
/// manually via `acquire`/`release` rather than `Arc`.
pub(crate) struct Receiver<C> {
    counter: *mut Counter<C>,
}
|
||||
|
||||
impl<C> Receiver<C> {
    /// Returns the internal `Counter`.
    // SAFETY-relevant: callers rely on the allocation staying alive while any
    // sender or receiver reference exists (see `release`).
    fn counter(&self) -> &Counter<C> {
        unsafe { &*self.counter }
    }

    /// Acquires another receiver reference.
    pub(crate) fn acquire(&self) -> Receiver<C> {
        let count = self.counter().receivers.fetch_add(1, Ordering::Relaxed);

        // Cloning receivers and calling `mem::forget` on the clones could potentially overflow the
        // counter. It's very difficult to recover sensibly from such degenerate scenarios so we
        // just abort when the count becomes very large.
        if count > isize::MAX as usize {
            process::abort();
        }

        Receiver { counter: self.counter }
    }

    /// Releases the receiver reference.
    ///
    /// Function `disconnect` will be called if this is the last receiver reference.
    pub(crate) unsafe fn release<F: FnOnce(&C) -> bool>(&self, disconnect: F) {
        // Drop one reference; `== 1` means we were the last receiver.
        if self.counter().receivers.fetch_sub(1, Ordering::AcqRel) == 1 {
            disconnect(&self.counter().chan);

            // `swap` returning `true` means the sender side already set the
            // flag, i.e. both sides are done — we free the allocation. If it
            // returns `false`, the last sender will free it instead.
            if self.counter().destroy.swap(true, Ordering::AcqRel) {
                drop(unsafe { Box::from_raw(self.counter) });
            }
        }
    }
}
|
||||
|
||||
impl<C> ops::Deref for Receiver<C> {
|
||||
type Target = C;
|
||||
|
||||
fn deref(&self) -> &C {
|
||||
&self.counter().chan
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> PartialEq for Receiver<C> {
    /// Two receivers are equal exactly when they point at the same counter
    /// allocation, i.e. belong to the same channel.
    fn eq(&self, rhs: &Receiver<C>) -> bool {
        self.counter == rhs.counter
    }
}
|
||||
49
crates/std/src/sync/mpmc/error.rs
Normal file
49
crates/std/src/sync/mpmc/error.rs
Normal file
@@ -0,0 +1,49 @@
|
||||
pub use crate::sync::mpsc::{RecvError, RecvTimeoutError, SendError, TryRecvError, TrySendError};
|
||||
use crate::{error, fmt};
|
||||
|
||||
/// An error returned from the [`send_timeout`] method.
///
/// The error contains the message being sent so it can be recovered.
///
/// [`send_timeout`]: super::Sender::send_timeout
#[derive(PartialEq, Eq, Clone, Copy)]
#[unstable(feature = "mpmc_channel", issue = "126840")]
pub enum SendTimeoutError<T> {
    /// The message could not be sent because the channel is full and the operation timed out.
    ///
    /// If this is a zero-capacity channel, then the error indicates that there was no receiver
    /// available to receive the message and the operation timed out.
    Timeout(T),

    /// The message could not be sent because the channel is disconnected
    /// (i.e. all receivers were dropped).
    Disconnected(T),
}
|
||||
|
||||
#[unstable(feature = "mpmc_channel", issue = "126840")]
|
||||
impl<T> fmt::Debug for SendTimeoutError<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
"SendTimeoutError(..)".fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "mpmc_channel", issue = "126840")]
|
||||
impl<T> fmt::Display for SendTimeoutError<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
SendTimeoutError::Timeout(..) => "timed out waiting on send operation".fmt(f),
|
||||
SendTimeoutError::Disconnected(..) => "sending on a disconnected channel".fmt(f),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "mpmc_channel", issue = "126840")]
// Marker impl: the `Debug` and `Display` impls above provide everything the
// `Error` trait needs; there is no source/cause to expose.
impl<T> error::Error for SendTimeoutError<T> {}
|
||||
|
||||
#[unstable(feature = "mpmc_channel", issue = "126840")]
|
||||
impl<T> From<SendError<T>> for SendTimeoutError<T> {
|
||||
fn from(err: SendError<T>) -> SendTimeoutError<T> {
|
||||
match err {
|
||||
SendError(e) => SendTimeoutError::Disconnected(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
668
crates/std/src/sync/mpmc/list.rs
Normal file
668
crates/std/src/sync/mpmc/list.rs
Normal file
@@ -0,0 +1,668 @@
|
||||
//! Unbounded channel implemented as a linked list.
|
||||
|
||||
use super::context::Context;
|
||||
use super::error::*;
|
||||
use super::select::{Operation, Selected, Token};
|
||||
use super::utils::{Backoff, CachePadded};
|
||||
use super::waker::SyncWaker;
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::marker::PhantomData;
|
||||
use crate::mem::MaybeUninit;
|
||||
use crate::ptr;
|
||||
use crate::sync::atomic::{self, Atomic, AtomicPtr, AtomicUsize, Ordering};
|
||||
use crate::time::Instant;
|
||||
|
||||
// Bits indicating the state of a slot:
// * If a message has been written into the slot, `WRITE` is set.
// * If a message has been read from the slot, `READ` is set.
// * If the block is being destroyed, `DESTROY` is set.
const WRITE: usize = 1;
const READ: usize = 2;
const DESTROY: usize = 4;

// Each block covers one "lap" of indices.
const LAP: usize = 32;
// The maximum number of messages a block can hold. One index per lap is left
// unused so that `offset == BLOCK_CAP` can signal "end of block" (see
// `start_send`/`start_recv`).
const BLOCK_CAP: usize = LAP - 1;
// How many lower bits are reserved for metadata.
const SHIFT: usize = 1;
// Has two different purposes:
// * If set in head, indicates that the block is not the last one.
// * If set in tail, indicates that the channel is disconnected.
const MARK_BIT: usize = 1;
|
||||
|
||||
/// A slot in a block.
struct Slot<T> {
    /// The message.
    msg: UnsafeCell<MaybeUninit<T>>,

    /// The state of the slot: a combination of the `WRITE`, `READ` and
    /// `DESTROY` bits defined above.
    state: Atomic<usize>,
}
|
||||
|
||||
impl<T> Slot<T> {
|
||||
/// Waits until a message is written into the slot.
|
||||
fn wait_write(&self) {
|
||||
let backoff = Backoff::new();
|
||||
while self.state.load(Ordering::Acquire) & WRITE == 0 {
|
||||
backoff.spin_heavy();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A block in a linked list.
///
/// Each block in the list can hold up to `BLOCK_CAP` messages.
struct Block<T> {
    /// The next block in the linked list.
    ///
    /// Null until the next block has been allocated and installed.
    next: Atomic<*mut Block<T>>,

    /// Slots for messages.
    slots: [Slot<T>; BLOCK_CAP],
}
|
||||
|
||||
impl<T> Block<T> {
    /// Creates an empty block.
    fn new() -> Box<Block<T>> {
        // SAFETY: This is safe because:
        //  [1] `Block::next` (Atomic<*mut _>) may be safely zero initialized.
        //  [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4].
        //  [3] `Slot::msg` (UnsafeCell) may be safely zero initialized because it
        //       holds a MaybeUninit.
        //  [4] `Slot::state` (Atomic<usize>) may be safely zero initialized.
        unsafe { Box::new_zeroed().assume_init() }
    }

    /// Waits until the next pointer is set, spinning in the meantime.
    fn wait_next(&self) -> *mut Block<T> {
        let backoff = Backoff::new();
        loop {
            let next = self.next.load(Ordering::Acquire);
            if !next.is_null() {
                return next;
            }
            backoff.spin_heavy();
        }
    }

    /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block.
    ///
    /// # Safety
    /// `this` must point to a valid heap-allocated block, and destruction must
    /// happen exactly once (either here or by the reader that observes the
    /// `DESTROY` bit).
    unsafe fn destroy(this: *mut Block<T>, start: usize) {
        // It is not necessary to set the `DESTROY` bit in the last slot because that slot has
        // begun destruction of the block.
        for i in start..BLOCK_CAP - 1 {
            let slot = unsafe { (*this).slots.get_unchecked(i) };

            // Mark the `DESTROY` bit if a thread is still using the slot.
            if slot.state.load(Ordering::Acquire) & READ == 0
                && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0
            {
                // If a thread is still using the slot, it will continue destruction of the block.
                return;
            }
        }

        // No thread is using the block, now it is safe to destroy it.
        drop(unsafe { Box::from_raw(this) });
    }
}
|
||||
|
||||
/// A position in a channel.
///
/// Pairs a global sequence index with a pointer to the block containing that
/// index. Used for both the head and the tail of the channel.
#[derive(Debug)]
struct Position<T> {
    /// The index in the channel.
    index: Atomic<usize>,

    /// The block in the linked list.
    block: Atomic<*mut Block<T>>,
}
|
||||
|
||||
/// The token type for the list flavor.
///
/// Produced by `start_send`/`start_recv` and consumed by `write`/`read`;
/// a null `block` denotes a disconnected channel.
#[derive(Debug)]
pub(crate) struct ListToken {
    /// The block of slots.
    block: *const u8,

    /// The offset into the block.
    offset: usize,
}
|
||||
|
||||
impl Default for ListToken {
|
||||
#[inline]
|
||||
fn default() -> Self {
|
||||
ListToken { block: ptr::null(), offset: 0 }
|
||||
}
|
||||
}
|
||||
|
||||
/// Unbounded channel implemented as a linked list.
///
/// Each message sent into the channel is assigned a sequence number, i.e. an index. Indices are
/// represented as numbers of type `usize` and wrap on overflow.
///
/// Consecutive messages are grouped into blocks in order to put less pressure on the allocator and
/// improve cache efficiency.
pub(crate) struct Channel<T> {
    /// The head of the channel.
    // Cache-padded to keep receiver traffic off the senders' cache line.
    head: CachePadded<Position<T>>,

    /// The tail of the channel.
    // Cache-padded to keep sender traffic off the receivers' cache line.
    tail: CachePadded<Position<T>>,

    /// Receivers waiting while the channel is empty and not disconnected.
    receivers: SyncWaker,

    /// Indicates that dropping a `Channel<T>` may drop messages of type `T`.
    _marker: PhantomData<T>,
}
|
||||
|
||||
impl<T> Channel<T> {
|
||||
/// Creates a new unbounded channel.
|
||||
pub(crate) fn new() -> Self {
|
||||
Channel {
|
||||
head: CachePadded::new(Position {
|
||||
block: AtomicPtr::new(ptr::null_mut()),
|
||||
index: AtomicUsize::new(0),
|
||||
}),
|
||||
tail: CachePadded::new(Position {
|
||||
block: AtomicPtr::new(ptr::null_mut()),
|
||||
index: AtomicUsize::new(0),
|
||||
}),
|
||||
receivers: SyncWaker::new(),
|
||||
_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
    /// Attempts to reserve a slot for sending a message.
    ///
    /// Returns `true` once a slot is reserved (or the channel is found
    /// disconnected, in which case `token.list.block` is null); an unbounded
    /// channel is never "full", so the only `false`-like outcome is retrying
    /// internally.
    fn start_send(&self, token: &mut Token) -> bool {
        let backoff = Backoff::new();
        let mut tail = self.tail.index.load(Ordering::Acquire);
        let mut block = self.tail.block.load(Ordering::Acquire);
        let mut next_block = None;

        loop {
            // Check if the channel is disconnected.
            if tail & MARK_BIT != 0 {
                token.list.block = ptr::null();
                return true;
            }

            // Calculate the offset of the index into the block.
            let offset = (tail >> SHIFT) % LAP;

            // If we reached the end of the block, wait until the next one is installed.
            if offset == BLOCK_CAP {
                backoff.spin_heavy();
                tail = self.tail.index.load(Ordering::Acquire);
                block = self.tail.block.load(Ordering::Acquire);
                continue;
            }

            // If we're going to have to install the next block, allocate it in advance in order to
            // make the wait for other threads as short as possible.
            if offset + 1 == BLOCK_CAP && next_block.is_none() {
                next_block = Some(Block::<T>::new());
            }

            // If this is the first message to be sent into the channel, we need to allocate the
            // first block and install it.
            if block.is_null() {
                let new = Box::into_raw(Block::<T>::new());

                if self
                    .tail
                    .block
                    .compare_exchange(block, new, Ordering::Release, Ordering::Relaxed)
                    .is_ok()
                {
                    // This yield point leaves the channel in a half-initialized state where the
                    // tail.block pointer is set but the head.block is not. This is used to
                    // facilitate the test in src/tools/miri/tests/pass/issues/issue-139553.rs
                    #[cfg(miri)]
                    crate::thread::yield_now();
                    self.head.block.store(new, Ordering::Release);
                    block = new;
                } else {
                    // Another sender won the race; free our allocation (kept
                    // as `next_block` for possible reuse) and retry.
                    next_block = unsafe { Some(Box::from_raw(new)) };
                    tail = self.tail.index.load(Ordering::Acquire);
                    block = self.tail.block.load(Ordering::Acquire);
                    continue;
                }
            }

            let new_tail = tail + (1 << SHIFT);

            // Try advancing the tail forward.
            match self.tail.index.compare_exchange_weak(
                tail,
                new_tail,
                Ordering::SeqCst,
                Ordering::Acquire,
            ) {
                Ok(_) => unsafe {
                    // If we've reached the end of the block, install the next one.
                    if offset + 1 == BLOCK_CAP {
                        let next_block = Box::into_raw(next_block.unwrap());
                        self.tail.block.store(next_block, Ordering::Release);
                        self.tail.index.fetch_add(1 << SHIFT, Ordering::Release);
                        (*block).next.store(next_block, Ordering::Release);
                    }

                    // Record where `write` should put the message.
                    token.list.block = block as *const u8;
                    token.list.offset = offset;
                    return true;
                },
                Err(_) => {
                    backoff.spin_light();
                    tail = self.tail.index.load(Ordering::Acquire);
                    block = self.tail.block.load(Ordering::Acquire);
                }
            }
        }
    }
|
||||
|
||||
    /// Writes a message into the channel.
    ///
    /// # Safety
    /// `token` must have been filled in by a successful `start_send`, which
    /// reserved the referenced slot exclusively for this caller.
    pub(crate) unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
        // If there is no slot, the channel is disconnected.
        if token.list.block.is_null() {
            return Err(msg);
        }

        // Write the message into the slot.
        let block = token.list.block as *mut Block<T>;
        let offset = token.list.offset;
        unsafe {
            let slot = (*block).slots.get_unchecked(offset);
            slot.msg.get().write(MaybeUninit::new(msg));
            // Setting `WRITE` publishes the message to readers spinning in
            // `Slot::wait_write`.
            slot.state.fetch_or(WRITE, Ordering::Release);
        }

        // Wake a sleeping receiver.
        self.receivers.notify();
        Ok(())
    }
|
||||
|
||||
    /// Attempts to reserve a slot for receiving a message.
    ///
    /// Returns `true` with a filled token when a message (or a disconnect,
    /// signalled via a null `token.list.block`) is ready, and `false` when the
    /// channel is currently empty.
    fn start_recv(&self, token: &mut Token) -> bool {
        let backoff = Backoff::new();
        let mut head = self.head.index.load(Ordering::Acquire);
        let mut block = self.head.block.load(Ordering::Acquire);

        loop {
            // Calculate the offset of the index into the block.
            let offset = (head >> SHIFT) % LAP;

            // If we reached the end of the block, wait until the next one is installed.
            if offset == BLOCK_CAP {
                backoff.spin_heavy();
                head = self.head.index.load(Ordering::Acquire);
                block = self.head.block.load(Ordering::Acquire);
                continue;
            }

            let mut new_head = head + (1 << SHIFT);

            if new_head & MARK_BIT == 0 {
                // Order the head load before the tail load for the emptiness
                // check below.
                atomic::fence(Ordering::SeqCst);
                let tail = self.tail.index.load(Ordering::Relaxed);

                // If the tail equals the head, that means the channel is empty.
                if head >> SHIFT == tail >> SHIFT {
                    // If the channel is disconnected...
                    if tail & MARK_BIT != 0 {
                        // ...then receive an error.
                        token.list.block = ptr::null();
                        return true;
                    } else {
                        // Otherwise, the receive operation is not ready.
                        return false;
                    }
                }

                // If head and tail are not in the same block, set `MARK_BIT` in head.
                if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP {
                    new_head |= MARK_BIT;
                }
            }

            // The block can be null here only if the first message is being sent into the channel.
            // In that case, just wait until it gets initialized.
            if block.is_null() {
                backoff.spin_heavy();
                head = self.head.index.load(Ordering::Acquire);
                block = self.head.block.load(Ordering::Acquire);
                continue;
            }

            // Try moving the head index forward.
            match self.head.index.compare_exchange_weak(
                head,
                new_head,
                Ordering::SeqCst,
                Ordering::Acquire,
            ) {
                Ok(_) => unsafe {
                    // If we've reached the end of the block, move to the next one.
                    if offset + 1 == BLOCK_CAP {
                        let next = (*block).wait_next();
                        let mut next_index = (new_head & !MARK_BIT).wrapping_add(1 << SHIFT);
                        if !(*next).next.load(Ordering::Relaxed).is_null() {
                            next_index |= MARK_BIT;
                        }

                        self.head.block.store(next, Ordering::Release);
                        self.head.index.store(next_index, Ordering::Release);
                    }

                    // Record where `read` should take the message from.
                    token.list.block = block as *const u8;
                    token.list.offset = offset;
                    return true;
                },
                Err(_) => {
                    backoff.spin_light();
                    head = self.head.index.load(Ordering::Acquire);
                    block = self.head.block.load(Ordering::Acquire);
                }
            }
        }
    }
|
||||
|
||||
/// Reads a message from the channel.
///
/// Returns `Err(())` if the token marks a disconnected channel (null block pointer).
///
/// # Safety
///
/// The token must come from a successful `start_recv` on this channel, so the slot it
/// refers to is reserved for this caller and will (eventually) contain a written message.
pub(crate) unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
    if token.list.block.is_null() {
        // The channel is disconnected.
        return Err(());
    }

    // Read the message.
    let block = token.list.block as *mut Block<T>;
    let offset = token.list.offset;
    unsafe {
        let slot = (*block).slots.get_unchecked(offset);
        // The sender may still be in the middle of writing; wait for the WRITE flag.
        slot.wait_write();
        let msg = slot.msg.get().read().assume_init();

        // Destroy the block if we've reached the end, or if another thread wanted to destroy but
        // couldn't because we were busy reading from the slot.
        if offset + 1 == BLOCK_CAP {
            Block::destroy(block, 0);
        } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 {
            Block::destroy(block, offset + 1);
        }

        Ok(msg)
    }
}
|
||||
|
||||
/// Attempts to send a message into the channel.
|
||||
pub(crate) fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
|
||||
self.send(msg, None).map_err(|err| match err {
|
||||
SendTimeoutError::Disconnected(msg) => TrySendError::Disconnected(msg),
|
||||
SendTimeoutError::Timeout(_) => unreachable!(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Sends a message into the channel.
|
||||
pub(crate) fn send(
|
||||
&self,
|
||||
msg: T,
|
||||
_deadline: Option<Instant>,
|
||||
) -> Result<(), SendTimeoutError<T>> {
|
||||
let token = &mut Token::default();
|
||||
assert!(self.start_send(token));
|
||||
unsafe { self.write(token, msg).map_err(SendTimeoutError::Disconnected) }
|
||||
}
|
||||
|
||||
/// Attempts to receive a message without blocking.
|
||||
pub(crate) fn try_recv(&self) -> Result<T, TryRecvError> {
|
||||
let token = &mut Token::default();
|
||||
|
||||
if self.start_recv(token) {
|
||||
unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
|
||||
} else {
|
||||
Err(TryRecvError::Empty)
|
||||
}
|
||||
}
|
||||
|
||||
/// Receives a message from the channel.
///
/// Blocks until a message arrives, the channel is disconnected with no messages left,
/// or the optional `deadline` passes (returning `RecvTimeoutError::Timeout`).
pub(crate) fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
    let token = &mut Token::default();
    loop {
        // Fast path: a message (or the disconnect marker) is already available.
        if self.start_recv(token) {
            unsafe {
                return self.read(token).map_err(|_| RecvTimeoutError::Disconnected);
            }
        }

        if let Some(d) = deadline {
            if Instant::now() >= d {
                return Err(RecvTimeoutError::Timeout);
            }
        }

        // Prepare for blocking until a sender wakes us up.
        Context::with(|cx| {
            let oper = Operation::hook(token);
            self.receivers.register(oper, cx);

            // Has the channel become ready just now?
            if !self.is_empty() || self.is_disconnected() {
                let _ = cx.try_select(Selected::Aborted);
            }

            // Block the current thread.
            // SAFETY: the context belongs to the current thread.
            let sel = unsafe { cx.wait_until(deadline) };

            match sel {
                Selected::Waiting => unreachable!(),
                Selected::Aborted | Selected::Disconnected => {
                    self.receivers.unregister(oper).unwrap();
                    // If the channel was disconnected, we still have to check for remaining
                    // messages.
                }
                Selected::Operation(_) => {}
            }
        });
        // Loop around and retry `start_recv` after waking up.
    }
}
|
||||
|
||||
/// Returns the current number of messages inside the channel.
///
/// Retries until it observes a consistent pair of head/tail indices, then converts the
/// raw indices (which also count per-block "end" slots) into a message count.
pub(crate) fn len(&self) -> usize {
    loop {
        // Load the tail index, then load the head index.
        let mut tail = self.tail.index.load(Ordering::SeqCst);
        let mut head = self.head.index.load(Ordering::SeqCst);

        // If the tail index didn't change, we've got consistent indices to work with.
        if self.tail.index.load(Ordering::SeqCst) == tail {
            // Erase the lower bits.
            tail &= !((1 << SHIFT) - 1);
            head &= !((1 << SHIFT) - 1);

            // Fix up indices if they fall onto block ends.
            if (tail >> SHIFT) & (LAP - 1) == LAP - 1 {
                tail = tail.wrapping_add(1 << SHIFT);
            }
            if (head >> SHIFT) & (LAP - 1) == LAP - 1 {
                head = head.wrapping_add(1 << SHIFT);
            }

            // Rotate indices so that head falls into the first block.
            let lap = (head >> SHIFT) / LAP;
            tail = tail.wrapping_sub((lap * LAP) << SHIFT);
            head = head.wrapping_sub((lap * LAP) << SHIFT);

            // Remove the lower bits.
            tail >>= SHIFT;
            head >>= SHIFT;

            // Return the difference minus the number of blocks between tail and head.
            return tail - head - tail / LAP;
        }
        // Tail moved while we were reading; retry for a consistent snapshot.
    }
}
|
||||
|
||||
/// Returns the capacity of the channel.
|
||||
pub(crate) fn capacity(&self) -> Option<usize> {
|
||||
None
|
||||
}
|
||||
|
||||
/// Disconnects senders and wakes up all blocked receivers.
|
||||
///
|
||||
/// Returns `true` if this call disconnected the channel.
|
||||
pub(crate) fn disconnect_senders(&self) -> bool {
|
||||
let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst);
|
||||
|
||||
if tail & MARK_BIT == 0 {
|
||||
self.receivers.disconnect();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Disconnects receivers.
|
||||
///
|
||||
/// Returns `true` if this call disconnected the channel.
|
||||
pub(crate) fn disconnect_receivers(&self) -> bool {
|
||||
let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst);
|
||||
|
||||
if tail & MARK_BIT == 0 {
|
||||
// If receivers are dropped first, discard all messages to free
|
||||
// memory eagerly.
|
||||
self.discard_all_messages();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Discards all messages.
///
/// This method should only be called when all receivers are dropped.
///
/// Drops every message still in the channel and frees all heap-allocated blocks,
/// while taking care not to race with senders that are still initializing or
/// writing, and not to double-free with the channel's `Drop` impl.
fn discard_all_messages(&self) {
    let backoff = Backoff::new();
    let mut tail = self.tail.index.load(Ordering::Acquire);
    loop {
        let offset = (tail >> SHIFT) % LAP;
        if offset != BLOCK_CAP {
            break;
        }

        // New updates to tail will be rejected by MARK_BIT and aborted unless it's
        // at boundary. We need to wait for the updates take affect otherwise there
        // can be memory leaks.
        backoff.spin_heavy();
        tail = self.tail.index.load(Ordering::Acquire);
    }

    let mut head = self.head.index.load(Ordering::Acquire);
    // The channel may be uninitialized, so we have to swap to avoid overwriting any sender's attempts
    // to initialize the first block before noticing that the receivers disconnected. Late allocations
    // will be deallocated by the sender in Drop.
    let mut block = self.head.block.swap(ptr::null_mut(), Ordering::AcqRel);

    // If we're going to be dropping messages we need to synchronize with initialization
    if head >> SHIFT != tail >> SHIFT {
        // The block can be null here only if a sender is in the process of initializing the
        // channel while another sender managed to send a message by inserting it into the
        // semi-initialized channel and advanced the tail.
        // In that case, just wait until it gets initialized.
        while block.is_null() {
            backoff.spin_heavy();
            block = self.head.block.swap(ptr::null_mut(), Ordering::AcqRel);
        }
    }
    // After this point `head.block` is not modified again and it will be deallocated if it's
    // non-null. The `Drop` code of the channel, which runs after this function, also attempts
    // to deallocate `head.block` if it's non-null. Therefore this function must maintain the
    // invariant that if a deallocation of head.block is attempted then it must also be set to
    // NULL. Failing to do so will lead to the Drop code attempting a double free. For this
    // reason both reads above do an atomic swap instead of a simple atomic load.

    unsafe {
        // Drop all messages between head and tail and deallocate the heap-allocated blocks.
        while head >> SHIFT != tail >> SHIFT {
            let offset = (head >> SHIFT) % LAP;

            if offset < BLOCK_CAP {
                // Drop the message in the slot.
                let slot = (*block).slots.get_unchecked(offset);
                // Wait for a sender that is mid-write before dropping the message.
                slot.wait_write();
                let p = &mut *slot.msg.get();
                p.as_mut_ptr().drop_in_place();
            } else {
                (*block).wait_next();
                // Deallocate the block and move to the next one.
                let next = (*block).next.load(Ordering::Acquire);
                drop(Box::from_raw(block));
                block = next;
            }

            head = head.wrapping_add(1 << SHIFT);
        }

        // Deallocate the last remaining block.
        if !block.is_null() {
            drop(Box::from_raw(block));
        }
    }

    // Publish the advanced head (flag bit cleared) so `Drop` sees an empty channel.
    head &= !MARK_BIT;
    self.head.index.store(head, Ordering::Release);
}
|
||||
|
||||
/// Returns `true` if the channel is disconnected.
|
||||
pub(crate) fn is_disconnected(&self) -> bool {
|
||||
self.tail.index.load(Ordering::SeqCst) & MARK_BIT != 0
|
||||
}
|
||||
|
||||
/// Returns `true` if the channel is empty.
|
||||
pub(crate) fn is_empty(&self) -> bool {
|
||||
let head = self.head.index.load(Ordering::SeqCst);
|
||||
let tail = self.tail.index.load(Ordering::SeqCst);
|
||||
head >> SHIFT == tail >> SHIFT
|
||||
}
|
||||
|
||||
/// Returns `true` if the channel is full.
|
||||
pub(crate) fn is_full(&self) -> bool {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Channel<T> {
    /// Drops all remaining messages and deallocates every block still linked
    /// from `head.block`. Runs with exclusive access (`&mut self`), so only
    /// relaxed atomics are needed.
    fn drop(&mut self) {
        let mut head = self.head.index.load(Ordering::Relaxed);
        let mut tail = self.tail.index.load(Ordering::Relaxed);
        let mut block = self.head.block.load(Ordering::Relaxed);

        // Erase the lower bits.
        head &= !((1 << SHIFT) - 1);
        tail &= !((1 << SHIFT) - 1);

        unsafe {
            // Drop all messages between head and tail and deallocate the heap-allocated blocks.
            while head != tail {
                let offset = (head >> SHIFT) % LAP;

                if offset < BLOCK_CAP {
                    // Drop the message in the slot.
                    let slot = (*block).slots.get_unchecked(offset);
                    let p = &mut *slot.msg.get();
                    p.as_mut_ptr().drop_in_place();
                } else {
                    // Deallocate the block and move to the next one.
                    let next = (*block).next.load(Ordering::Relaxed);
                    drop(Box::from_raw(block));
                    block = next;
                }

                head = head.wrapping_add(1 << SHIFT);
            }

            // Deallocate the last remaining block.
            // NOTE(review): `block` may already be null here (e.g. after
            // `discard_all_messages` swapped it out), in which case there is nothing to free.
            if !block.is_null() {
                drop(Box::from_raw(block));
            }
        }
    }
}
|
||||
1388
crates/std/src/sync/mpmc/mod.rs
Normal file
1388
crates/std/src/sync/mpmc/mod.rs
Normal file
File diff suppressed because it is too large
Load Diff
71
crates/std/src/sync/mpmc/select.rs
Normal file
71
crates/std/src/sync/mpmc/select.rs
Normal file
@@ -0,0 +1,71 @@
|
||||
/// Temporary data that gets initialized during a blocking operation, and is consumed by
/// `read` or `write`.
///
/// Each field contains data associated with a specific channel flavor.
#[derive(Debug, Default)]
pub struct Token {
    /// Token for the bounded (array-based) flavor.
    pub(crate) array: super::array::ArrayToken,
    /// Token for the unbounded (linked-list) flavor.
    pub(crate) list: super::list::ListToken,
    /// Token for the zero-capacity (rendezvous) flavor.
    #[allow(dead_code)]
    pub(crate) zero: super::zero::ZeroToken,
}
|
||||
|
||||
/// Identifier associated with an operation by a specific thread on a specific channel.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Operation(usize);

impl Operation {
    /// Creates an operation identifier from a mutable reference.
    ///
    /// This function essentially just turns the address of the reference into a number. The
    /// reference should point to a variable that is specific to the thread and the operation,
    /// and is alive for the entire duration of a blocking operation.
    #[inline]
    pub fn hook<T>(r: &mut T) -> Operation {
        let val = r as *mut T as usize;
        // Make sure that the pointer address doesn't equal the numerical representation of
        // `Selected::{Waiting, Aborted, Disconnected}`.
        assert!(val > 2);
        Operation(val)
    }
}

/// Current state of a blocking operation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Selected {
    /// Still waiting for an operation.
    Waiting,

    /// The attempt to block the current thread has been aborted.
    Aborted,

    /// An operation became ready because a channel is disconnected.
    Disconnected,

    /// An operation became ready because a message can be sent or received.
    Operation(Operation),
}

impl From<usize> for Selected {
    /// Decodes a `Selected` from its integer representation (the inverse of
    /// `usize::from`). Values 0..=2 map to the unit states; anything larger is
    /// treated as an operation address.
    #[inline]
    fn from(val: usize) -> Selected {
        match val {
            0 => Selected::Waiting,
            1 => Selected::Aborted,
            2 => Selected::Disconnected,
            oper => Selected::Operation(Operation(oper)),
        }
    }
}

// Implementing `From` (rather than a hand-written `Into`) is the idiomatic direction for
// conversions (clippy `from_over_into`): the standard blanket impl still provides
// `Selected: Into<usize>`, so existing `.into()` call sites keep working.
impl From<Selected> for usize {
    /// Encodes a `Selected` as an integer: 0/1/2 for the unit states, or the
    /// operation's address (guaranteed > 2 by `Operation::hook`).
    #[inline]
    fn from(val: Selected) -> usize {
        match val {
            Selected::Waiting => 0,
            Selected::Aborted => 1,
            Selected::Disconnected => 2,
            Selected::Operation(Operation(val)) => val,
        }
    }
}
|
||||
14
crates/std/src/sync/mpmc/tests.rs
Normal file
14
crates/std/src/sync/mpmc/tests.rs
Normal file
@@ -0,0 +1,14 @@
|
||||
// Ensure that thread_local init with `const { 0 }` still has unique address at run-time
#[test]
fn waker_current_thread_id() {
    let first = super::waker::current_thread_id();
    let t = crate::thread::spawn(move || {
        // A different thread must observe a different id...
        let second = super::waker::current_thread_id();
        assert_ne!(first, second);
        // ...and the id must be stable within that thread.
        assert_eq!(second, super::waker::current_thread_id());
    });

    // The spawning thread's id is stable across the child's lifetime.
    assert_eq!(first, super::waker::current_thread_id());
    t.join().unwrap();
    assert_eq!(first, super::waker::current_thread_id());
}
|
||||
137
crates/std/src/sync/mpmc/utils.rs
Normal file
137
crates/std/src/sync/mpmc/utils.rs
Normal file
@@ -0,0 +1,137 @@
|
||||
use crate::cell::Cell;
|
||||
use crate::ops::{Deref, DerefMut};
|
||||
|
||||
/// Pads and aligns a value to the length of a cache line.
|
||||
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
|
||||
// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache
|
||||
// lines at a time, so we have to align to 128 bytes rather than 64.
|
||||
//
|
||||
// Sources:
|
||||
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
|
||||
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
|
||||
//
|
||||
// ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size.
|
||||
//
|
||||
// Sources:
|
||||
// - https://www.mono-project.com/news/2016/09/12/arm64-icache/
|
||||
//
|
||||
// powerpc64 has 128-byte cache line size.
|
||||
//
|
||||
// Sources:
|
||||
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9
|
||||
#[cfg_attr(
|
||||
any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64",),
|
||||
repr(align(128))
|
||||
)]
|
||||
// arm, mips and mips64 have 32-byte cache line size.
|
||||
//
|
||||
// Sources:
|
||||
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7
|
||||
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7
|
||||
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7
|
||||
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9
|
||||
#[cfg_attr(
|
||||
any(
|
||||
target_arch = "arm",
|
||||
target_arch = "mips",
|
||||
target_arch = "mips32r6",
|
||||
target_arch = "mips64",
|
||||
target_arch = "mips64r6",
|
||||
),
|
||||
repr(align(32))
|
||||
)]
|
||||
// s390x has 256-byte cache line size.
|
||||
//
|
||||
// Sources:
|
||||
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7
|
||||
#[cfg_attr(target_arch = "s390x", repr(align(256)))]
|
||||
// x86, wasm and riscv have 64-byte cache line size.
|
||||
//
|
||||
// Sources:
|
||||
// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9
|
||||
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7
|
||||
// - https://github.com/golang/go/blob/5e31f78c8a4ed1b872ddc194f0cd1ae931b37d7e/src/internal/cpu/cpu_riscv64.go#L7
|
||||
//
|
||||
// All others are assumed to have 64-byte cache line size.
|
||||
#[cfg_attr(
|
||||
not(any(
|
||||
target_arch = "x86_64",
|
||||
target_arch = "aarch64",
|
||||
target_arch = "powerpc64",
|
||||
target_arch = "arm",
|
||||
target_arch = "mips",
|
||||
target_arch = "mips32r6",
|
||||
target_arch = "mips64",
|
||||
target_arch = "mips64r6",
|
||||
target_arch = "s390x",
|
||||
)),
|
||||
repr(align(64))
|
||||
)]
|
||||
pub struct CachePadded<T> {
|
||||
value: T,
|
||||
}
|
||||
|
||||
impl<T> CachePadded<T> {
|
||||
/// Pads and aligns a value to the length of a cache line.
|
||||
pub fn new(value: T) -> CachePadded<T> {
|
||||
CachePadded::<T> { value }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Deref for CachePadded<T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
&self.value
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DerefMut for CachePadded<T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
&mut self.value
|
||||
}
|
||||
}
|
||||
|
||||
const SPIN_LIMIT: u32 = 6;
|
||||
|
||||
/// Performs quadratic backoff in spin loops.
|
||||
pub struct Backoff {
|
||||
step: Cell<u32>,
|
||||
}
|
||||
|
||||
impl Backoff {
|
||||
/// Creates a new `Backoff`.
|
||||
pub fn new() -> Self {
|
||||
Backoff { step: Cell::new(0) }
|
||||
}
|
||||
|
||||
/// Backs off using lightweight spinning.
|
||||
///
|
||||
/// This method should be used for retrying an operation because another thread made
|
||||
/// progress. i.e. on CAS failure.
|
||||
#[inline]
|
||||
pub fn spin_light(&self) {
|
||||
let step = self.step.get().min(SPIN_LIMIT);
|
||||
for _ in 0..step.pow(2) {
|
||||
crate::hint::spin_loop();
|
||||
}
|
||||
|
||||
self.step.set(self.step.get() + 1);
|
||||
}
|
||||
|
||||
/// Backs off using heavyweight spinning.
|
||||
///
|
||||
/// This method should be used in blocking loops where parking the thread is not an option.
|
||||
#[inline]
|
||||
pub fn spin_heavy(&self) {
|
||||
if self.step.get() <= SPIN_LIMIT {
|
||||
for _ in 0..self.step.get().pow(2) {
|
||||
crate::hint::spin_loop()
|
||||
}
|
||||
} else {
|
||||
crate::thread::yield_now();
|
||||
}
|
||||
|
||||
self.step.set(self.step.get() + 1);
|
||||
}
|
||||
}
|
||||
209
crates/std/src/sync/mpmc/waker.rs
Normal file
209
crates/std/src/sync/mpmc/waker.rs
Normal file
@@ -0,0 +1,209 @@
|
||||
//! Waking mechanism for threads blocked on channel operations.
|
||||
|
||||
use super::context::Context;
|
||||
use super::select::{Operation, Selected};
|
||||
use crate::ptr;
|
||||
use crate::sync::Mutex;
|
||||
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
|
||||
|
||||
/// Represents a thread blocked on a specific channel operation.
pub(crate) struct Entry {
    /// The operation.
    pub(crate) oper: Operation,

    /// Optional packet.
    // Null when the operation carries no packet (see `Waker::register`).
    pub(crate) packet: *mut (),

    /// Context associated with the thread owning this operation.
    pub(crate) cx: Context,
}
|
||||
|
||||
/// A queue of threads blocked on channel operations.
///
/// This data structure is used by threads to register blocking operations and get woken up once
/// an operation becomes ready.
pub(crate) struct Waker {
    /// A list of select operations.
    // Entries here are selected one at a time by `try_select` and removed on success.
    selectors: Vec<Entry>,

    /// A list of operations waiting to be ready.
    // Entries here are all drained and woken together by `notify`.
    observers: Vec<Entry>,
}
|
||||
|
||||
impl Waker {
|
||||
/// Creates a new `Waker`.
|
||||
#[inline]
|
||||
pub(crate) fn new() -> Self {
|
||||
Waker { selectors: Vec::new(), observers: Vec::new() }
|
||||
}
|
||||
|
||||
/// Registers a select operation.
|
||||
#[inline]
|
||||
pub(crate) fn register(&mut self, oper: Operation, cx: &Context) {
|
||||
self.register_with_packet(oper, ptr::null_mut(), cx);
|
||||
}
|
||||
|
||||
/// Registers a select operation and a packet.
|
||||
#[inline]
|
||||
pub(crate) fn register_with_packet(&mut self, oper: Operation, packet: *mut (), cx: &Context) {
|
||||
self.selectors.push(Entry { oper, packet, cx: cx.clone() });
|
||||
}
|
||||
|
||||
/// Unregisters a select operation.
|
||||
#[inline]
|
||||
pub(crate) fn unregister(&mut self, oper: Operation) -> Option<Entry> {
|
||||
if let Some((i, _)) =
|
||||
self.selectors.iter().enumerate().find(|&(_, entry)| entry.oper == oper)
|
||||
{
|
||||
let entry = self.selectors.remove(i);
|
||||
Some(entry)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempts to find another thread's entry, select the operation, and wake it up.
|
||||
#[inline]
|
||||
pub(crate) fn try_select(&mut self) -> Option<Entry> {
|
||||
if self.selectors.is_empty() {
|
||||
None
|
||||
} else {
|
||||
let thread_id = current_thread_id();
|
||||
|
||||
self.selectors
|
||||
.iter()
|
||||
.position(|selector| {
|
||||
// Does the entry belong to a different thread?
|
||||
selector.cx.thread_id() != thread_id
|
||||
&& selector // Try selecting this operation.
|
||||
.cx
|
||||
.try_select(Selected::Operation(selector.oper))
|
||||
.is_ok()
|
||||
&& {
|
||||
// Provide the packet.
|
||||
selector.cx.store_packet(selector.packet);
|
||||
// Wake the thread up.
|
||||
selector.cx.unpark();
|
||||
true
|
||||
}
|
||||
})
|
||||
// Remove the entry from the queue to keep it clean and improve
|
||||
// performance.
|
||||
.map(|pos| self.selectors.remove(pos))
|
||||
}
|
||||
}
|
||||
|
||||
/// Notifies all operations waiting to be ready.
|
||||
#[inline]
|
||||
pub(crate) fn notify(&mut self) {
|
||||
for entry in self.observers.drain(..) {
|
||||
if entry.cx.try_select(Selected::Operation(entry.oper)).is_ok() {
|
||||
entry.cx.unpark();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Notifies all registered operations that the channel is disconnected.
|
||||
#[inline]
|
||||
pub(crate) fn disconnect(&mut self) {
|
||||
for entry in self.selectors.iter() {
|
||||
if entry.cx.try_select(Selected::Disconnected).is_ok() {
|
||||
// Wake the thread up.
|
||||
//
|
||||
// Here we don't remove the entry from the queue. Registered threads must
|
||||
// unregister from the waker by themselves. They might also want to recover the
|
||||
// packet value and destroy it, if necessary.
|
||||
entry.cx.unpark();
|
||||
}
|
||||
}
|
||||
|
||||
self.notify();
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Waker {
    /// Debug-asserts that every registered operation was unregistered before the
    /// waker is destroyed (leftover entries would hold dangling packet pointers).
    #[inline]
    fn drop(&mut self) {
        debug_assert_eq!(self.selectors.len(), 0);
        debug_assert_eq!(self.observers.len(), 0);
    }
}
|
||||
|
||||
/// A waker that can be shared among threads without locking.
///
/// This is a simple wrapper around `Waker` that internally uses a mutex for synchronization.
pub(crate) struct SyncWaker {
    /// The inner `Waker`.
    inner: Mutex<Waker>,

    /// `true` if the waker is empty.
    // Cached copy of the inner state, so `notify` can skip taking the lock.
    is_empty: Atomic<bool>,
}
|
||||
|
||||
impl SyncWaker {
|
||||
/// Creates a new `SyncWaker`.
|
||||
#[inline]
|
||||
pub(crate) fn new() -> Self {
|
||||
SyncWaker { inner: Mutex::new(Waker::new()), is_empty: AtomicBool::new(true) }
|
||||
}
|
||||
|
||||
/// Registers the current thread with an operation.
|
||||
#[inline]
|
||||
pub(crate) fn register(&self, oper: Operation, cx: &Context) {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
inner.register(oper, cx);
|
||||
self.is_empty
|
||||
.store(inner.selectors.is_empty() && inner.observers.is_empty(), Ordering::SeqCst);
|
||||
}
|
||||
|
||||
/// Unregisters an operation previously registered by the current thread.
|
||||
#[inline]
|
||||
pub(crate) fn unregister(&self, oper: Operation) -> Option<Entry> {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
let entry = inner.unregister(oper);
|
||||
self.is_empty
|
||||
.store(inner.selectors.is_empty() && inner.observers.is_empty(), Ordering::SeqCst);
|
||||
entry
|
||||
}
|
||||
|
||||
/// Attempts to find one thread (not the current one), select its operation, and wake it up.
|
||||
#[inline]
|
||||
pub(crate) fn notify(&self) {
|
||||
if !self.is_empty.load(Ordering::SeqCst) {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
if !self.is_empty.load(Ordering::SeqCst) {
|
||||
inner.try_select();
|
||||
inner.notify();
|
||||
self.is_empty.store(
|
||||
inner.selectors.is_empty() && inner.observers.is_empty(),
|
||||
Ordering::SeqCst,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Notifies all threads that the channel is disconnected.
|
||||
#[inline]
|
||||
pub(crate) fn disconnect(&self) {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
inner.disconnect();
|
||||
self.is_empty
|
||||
.store(inner.selectors.is_empty() && inner.observers.is_empty(), Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for SyncWaker {
    /// Debug-asserts that no operations are still registered when the waker is
    /// destroyed.
    #[inline]
    fn drop(&mut self) {
        debug_assert!(self.is_empty.load(Ordering::SeqCst));
    }
}
|
||||
|
||||
/// Returns a unique id for the current thread.
///
/// Uses the address of a thread-local variable, which is distinct per live thread.
#[inline]
pub fn current_thread_id() -> usize {
    // `u8` is not drop so this variable will be available during thread destruction,
    // whereas `thread::current()` would not be
    thread_local! { static DUMMY: u8 = const { 0 } }
    DUMMY.with(|x| (x as *const u8).addr())
}
|
||||
319
crates/std/src/sync/mpmc/zero.rs
Normal file
319
crates/std/src/sync/mpmc/zero.rs
Normal file
@@ -0,0 +1,319 @@
|
||||
//! Zero-capacity channel.
|
||||
//!
|
||||
//! This kind of channel is also known as *rendezvous* channel.
|
||||
|
||||
use super::context::Context;
|
||||
use super::error::*;
|
||||
use super::select::{Operation, Selected, Token};
|
||||
use super::utils::Backoff;
|
||||
use super::waker::Waker;
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::marker::PhantomData;
|
||||
use crate::sync::Mutex;
|
||||
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
|
||||
use crate::time::Instant;
|
||||
use crate::{fmt, ptr};
|
||||
|
||||
/// A pointer to a packet.
pub(crate) struct ZeroToken(*mut ());

impl Default for ZeroToken {
    /// The default token holds no packet (null pointer).
    fn default() -> Self {
        ZeroToken(ptr::null_mut())
    }
}

impl fmt::Debug for ZeroToken {
    /// Formats the token as the numeric address of the packet it points to.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let address = self.0 as usize;
        fmt::Debug::fmt(&address, f)
    }
}
|
||||
|
||||
/// A slot for passing one message from a sender to a receiver.
struct Packet<T> {
    /// Equals `true` if the packet is allocated on the stack.
    on_stack: bool,

    /// Equals `true` once the packet is ready for reading or writing.
    ready: Atomic<bool>,

    /// The message.
    msg: UnsafeCell<Option<T>>,
}

impl<T> Packet<T> {
    /// Creates an empty packet on the stack.
    fn empty_on_stack() -> Packet<T> {
        Packet { on_stack: true, ready: AtomicBool::new(false), msg: UnsafeCell::new(None) }
    }

    /// Creates a packet on the stack, containing a message.
    fn message_on_stack(msg: T) -> Packet<T> {
        Packet { on_stack: true, ready: AtomicBool::new(false), msg: UnsafeCell::new(Some(msg)) }
    }

    /// Waits until the packet becomes ready for reading or writing.
    // Busy-waits with exponential backoff; the `ready` flag is set by the other side.
    fn wait_ready(&self) {
        let backoff = Backoff::new();
        while !self.ready.load(Ordering::Acquire) {
            backoff.spin_heavy();
        }
    }
}
|
||||
|
||||
/// Inner representation of a zero-capacity channel.
// Protected as a whole by the `Mutex` in `Channel`.
struct Inner {
    /// Senders waiting to pair up with a receive operation.
    senders: Waker,

    /// Receivers waiting to pair up with a send operation.
    receivers: Waker,

    /// Equals `true` when the channel is disconnected.
    is_disconnected: bool,
}
|
||||
|
||||
/// Zero-capacity channel.
pub(crate) struct Channel<T> {
    /// Inner representation of the channel.
    inner: Mutex<Inner>,

    /// Indicates that dropping a `Channel<T>` may drop values of type `T`.
    _marker: PhantomData<T>,
}
|
||||
|
||||
impl<T> Channel<T> {
|
||||
/// Constructs a new zero-capacity channel.
|
||||
pub(crate) fn new() -> Self {
|
||||
Channel {
|
||||
inner: Mutex::new(Inner {
|
||||
senders: Waker::new(),
|
||||
receivers: Waker::new(),
|
||||
is_disconnected: false,
|
||||
}),
|
||||
_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Writes a message into the packet.
///
/// Returns `Err(msg)` (giving the message back) when the token holds no packet,
/// i.e. the channel is disconnected.
///
/// # Safety
///
/// A non-null `token.zero.0` must point to a live `Packet<T>` provided by a paired
/// receive operation.
pub(crate) unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
    // If there is no packet, the channel is disconnected.
    if token.zero.0.is_null() {
        return Err(msg);
    }

    unsafe {
        let packet = &*(token.zero.0 as *const Packet<T>);
        packet.msg.get().write(Some(msg));
        // Publish the message to the receiver waiting in `wait_ready`.
        packet.ready.store(true, Ordering::Release);
    }
    Ok(())
}
|
||||
|
||||
/// Reads a message from the packet.
///
/// Returns `Err(())` when the token holds no packet, i.e. the channel is disconnected.
///
/// # Safety
///
/// A non-null `token.zero.0` must point to a live `Packet<T>` provided by a paired
/// send operation (heap-allocated packets are freed here after reading).
pub(crate) unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
    // If there is no packet, the channel is disconnected.
    if token.zero.0.is_null() {
        return Err(());
    }

    let packet = unsafe { &*(token.zero.0 as *const Packet<T>) };

    if packet.on_stack {
        // The message has been in the packet from the beginning, so there is no need to wait
        // for it. However, after reading the message, we need to set `ready` to `true` in
        // order to signal that the packet can be destroyed.
        let msg = unsafe { packet.msg.get().replace(None) }.unwrap();
        packet.ready.store(true, Ordering::Release);
        Ok(msg)
    } else {
        // Wait until the message becomes available, then read it and destroy the
        // heap-allocated packet.
        packet.wait_ready();
        unsafe {
            let msg = packet.msg.get().replace(None).unwrap();
            drop(Box::from_raw(token.zero.0 as *mut Packet<T>));
            Ok(msg)
        }
    }
}
|
||||
|
||||
/// Attempts to send a message into the channel.
|
||||
pub(crate) fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
|
||||
let token = &mut Token::default();
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
// If there's a waiting receiver, pair up with it.
|
||||
if let Some(operation) = inner.receivers.try_select() {
|
||||
token.zero.0 = operation.packet;
|
||||
drop(inner);
|
||||
unsafe {
|
||||
self.write(token, msg).ok().unwrap();
|
||||
}
|
||||
Ok(())
|
||||
} else if inner.is_disconnected {
|
||||
Err(TrySendError::Disconnected(msg))
|
||||
} else {
|
||||
Err(TrySendError::Full(msg))
|
||||
}
|
||||
}
|
||||
|
||||
/// Sends a message into the channel.
|
||||
pub(crate) fn send(
|
||||
&self,
|
||||
msg: T,
|
||||
deadline: Option<Instant>,
|
||||
) -> Result<(), SendTimeoutError<T>> {
|
||||
let token = &mut Token::default();
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
// If there's a waiting receiver, pair up with it.
|
||||
if let Some(operation) = inner.receivers.try_select() {
|
||||
token.zero.0 = operation.packet;
|
||||
drop(inner);
|
||||
unsafe {
|
||||
self.write(token, msg).ok().unwrap();
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if inner.is_disconnected {
|
||||
return Err(SendTimeoutError::Disconnected(msg));
|
||||
}
|
||||
|
||||
Context::with(|cx| {
|
||||
// Prepare for blocking until a receiver wakes us up.
|
||||
let oper = Operation::hook(token);
|
||||
let mut packet = Packet::<T>::message_on_stack(msg);
|
||||
inner.senders.register_with_packet(oper, (&raw mut packet) as *mut (), cx);
|
||||
inner.receivers.notify();
|
||||
drop(inner);
|
||||
|
||||
// Block the current thread.
|
||||
// SAFETY: the context belongs to the current thread.
|
||||
let sel = unsafe { cx.wait_until(deadline) };
|
||||
|
||||
match sel {
|
||||
Selected::Waiting => unreachable!(),
|
||||
Selected::Aborted => {
|
||||
self.inner.lock().unwrap().senders.unregister(oper).unwrap();
|
||||
let msg = unsafe { packet.msg.get().replace(None).unwrap() };
|
||||
Err(SendTimeoutError::Timeout(msg))
|
||||
}
|
||||
Selected::Disconnected => {
|
||||
self.inner.lock().unwrap().senders.unregister(oper).unwrap();
|
||||
let msg = unsafe { packet.msg.get().replace(None).unwrap() };
|
||||
Err(SendTimeoutError::Disconnected(msg))
|
||||
}
|
||||
Selected::Operation(_) => {
|
||||
// Wait until the message is read, then drop the packet.
|
||||
packet.wait_ready();
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Attempts to receive a message without blocking.
|
||||
pub(crate) fn try_recv(&self) -> Result<T, TryRecvError> {
|
||||
let token = &mut Token::default();
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
// If there's a waiting sender, pair up with it.
|
||||
if let Some(operation) = inner.senders.try_select() {
|
||||
token.zero.0 = operation.packet;
|
||||
drop(inner);
|
||||
unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
|
||||
} else if inner.is_disconnected {
|
||||
Err(TryRecvError::Disconnected)
|
||||
} else {
|
||||
Err(TryRecvError::Empty)
|
||||
}
|
||||
}
|
||||
|
||||
/// Receives a message from the channel.
|
||||
pub(crate) fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
|
||||
let token = &mut Token::default();
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
// If there's a waiting sender, pair up with it.
|
||||
if let Some(operation) = inner.senders.try_select() {
|
||||
token.zero.0 = operation.packet;
|
||||
drop(inner);
|
||||
unsafe {
|
||||
return self.read(token).map_err(|_| RecvTimeoutError::Disconnected);
|
||||
}
|
||||
}
|
||||
|
||||
if inner.is_disconnected {
|
||||
return Err(RecvTimeoutError::Disconnected);
|
||||
}
|
||||
|
||||
Context::with(|cx| {
|
||||
// Prepare for blocking until a sender wakes us up.
|
||||
let oper = Operation::hook(token);
|
||||
let mut packet = Packet::<T>::empty_on_stack();
|
||||
inner.receivers.register_with_packet(oper, (&raw mut packet) as *mut (), cx);
|
||||
inner.senders.notify();
|
||||
drop(inner);
|
||||
|
||||
// Block the current thread.
|
||||
// SAFETY: the context belongs to the current thread.
|
||||
let sel = unsafe { cx.wait_until(deadline) };
|
||||
|
||||
match sel {
|
||||
Selected::Waiting => unreachable!(),
|
||||
Selected::Aborted => {
|
||||
self.inner.lock().unwrap().receivers.unregister(oper).unwrap();
|
||||
Err(RecvTimeoutError::Timeout)
|
||||
}
|
||||
Selected::Disconnected => {
|
||||
self.inner.lock().unwrap().receivers.unregister(oper).unwrap();
|
||||
Err(RecvTimeoutError::Disconnected)
|
||||
}
|
||||
Selected::Operation(_) => {
|
||||
// Wait until the message is provided, then read it.
|
||||
packet.wait_ready();
|
||||
unsafe { Ok(packet.msg.get().replace(None).unwrap()) }
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Disconnects the channel and wakes up all blocked senders and receivers.
|
||||
///
|
||||
/// Returns `true` if this call disconnected the channel.
|
||||
pub(crate) fn disconnect(&self) -> bool {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
if !inner.is_disconnected {
|
||||
inner.is_disconnected = true;
|
||||
inner.senders.disconnect();
|
||||
inner.receivers.disconnect();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the current number of messages inside the channel.
|
||||
pub(crate) fn len(&self) -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
/// Returns the capacity of the channel.
|
||||
#[allow(clippy::unnecessary_wraps)] // This is intentional.
|
||||
pub(crate) fn capacity(&self) -> Option<usize> {
|
||||
Some(0)
|
||||
}
|
||||
|
||||
/// Returns `true` if the channel is empty.
|
||||
pub(crate) fn is_empty(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
/// Returns `true` if the channel is full.
|
||||
pub(crate) fn is_full(&self) -> bool {
|
||||
true
|
||||
}
|
||||
}
|
||||
1214
crates/std/src/sync/mpsc.rs
Normal file
1214
crates/std/src/sync/mpsc.rs
Normal file
File diff suppressed because it is too large
Load Diff
45
crates/std/src/sync/nonpoison.rs
Normal file
45
crates/std/src/sync/nonpoison.rs
Normal file
@@ -0,0 +1,45 @@
|
||||
//! Non-poisoning synchronous locks.
|
||||
//!
|
||||
//! The difference from the locks in the [`poison`] module is that the locks in this module will not
|
||||
//! become poisoned when a thread panics while holding a guard.
|
||||
//!
|
||||
//! [`poison`]: super::poison
|
||||
|
||||
use crate::fmt;
|
||||
|
||||
/// A type alias for the result of a nonblocking locking method.
|
||||
#[unstable(feature = "sync_nonpoison", issue = "134645")]
|
||||
pub type TryLockResult<Guard> = Result<Guard, WouldBlock>;
|
||||
|
||||
/// A lock could not be acquired at this time because the operation would otherwise block.
|
||||
#[unstable(feature = "sync_nonpoison", issue = "134645")]
|
||||
pub struct WouldBlock;
|
||||
|
||||
#[unstable(feature = "sync_nonpoison", issue = "134645")]
|
||||
impl fmt::Debug for WouldBlock {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
"WouldBlock".fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "sync_nonpoison", issue = "134645")]
|
||||
impl fmt::Display for WouldBlock {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
"try_lock failed because the operation would block".fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub use self::condvar::Condvar;
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
pub use self::mutex::MappedMutexGuard;
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub use self::mutex::{Mutex, MutexGuard};
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
pub use self::rwlock::{MappedRwLockReadGuard, MappedRwLockWriteGuard};
|
||||
#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
|
||||
pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
|
||||
|
||||
mod condvar;
|
||||
mod mutex;
|
||||
mod rwlock;
|
||||
444
crates/std/src/sync/nonpoison/condvar.rs
Normal file
444
crates/std/src/sync/nonpoison/condvar.rs
Normal file
@@ -0,0 +1,444 @@
|
||||
use crate::fmt;
|
||||
use crate::ops::DerefMut;
|
||||
use crate::sync::WaitTimeoutResult;
|
||||
use crate::sync::nonpoison::{MutexGuard, mutex};
|
||||
use crate::sys::sync as sys;
|
||||
use crate::time::{Duration, Instant};
|
||||
|
||||
/// A Condition Variable
|
||||
///
|
||||
/// For more information about condition variables, check out the documentation for the poisoning
|
||||
/// variant of this type at [`poison::Condvar`].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// Note that this `Condvar` does **not** propagate information about threads that panic while
|
||||
/// holding a lock. If you need this functionality, see [`poison::Mutex`] and [`poison::Condvar`].
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// // Inside of our lock, spawn a new thread, and then wait for it to start.
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut started = lock.lock();
|
||||
/// *started = true;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_one();
|
||||
/// });
|
||||
///
|
||||
/// // Wait for the thread to start up.
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// let mut started = lock.lock();
|
||||
/// while !*started {
|
||||
/// cvar.wait(&mut started);
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// [`poison::Mutex`]: crate::sync::poison::Mutex
|
||||
/// [`poison::Condvar`]: crate::sync::poison::Condvar
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
pub struct Condvar {
    // The platform-specific condition-variable primitive; every method on
    // `Condvar` delegates directly to it.
    inner: sys::Condvar,
}
|
||||
|
||||
impl Condvar {
|
||||
/// Creates a new condition variable which is ready to be waited on and
|
||||
/// notified.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Condvar;
|
||||
///
|
||||
/// let condvar = Condvar::new();
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub const fn new() -> Condvar {
|
||||
Condvar { inner: sys::Condvar::new() }
|
||||
}
|
||||
|
||||
/// Blocks the current thread until this condition variable receives a
|
||||
/// notification.
|
||||
///
|
||||
/// This function will atomically unlock the mutex specified (represented by
|
||||
/// `guard`) and block the current thread. This means that any calls
|
||||
/// to [`notify_one`] or [`notify_all`] which happen logically after the
|
||||
/// mutex is unlocked are candidates to wake this thread up. When this
|
||||
/// function call returns, the lock specified will have been re-acquired.
|
||||
///
|
||||
/// Note that this function is susceptible to spurious wakeups. Condition
|
||||
/// variables normally have a boolean predicate associated with them, and
|
||||
/// the predicate must always be checked each time this function returns to
|
||||
/// protect against spurious wakeups.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function may [`panic!`] if it is used with more than one mutex
|
||||
/// over time.
|
||||
///
|
||||
/// [`notify_one`]: Self::notify_one
|
||||
/// [`notify_all`]: Self::notify_all
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut started = lock.lock();
|
||||
/// *started = true;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_one();
|
||||
/// });
|
||||
///
|
||||
/// // Wait for the thread to start up.
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// let mut started = lock.lock();
|
||||
/// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
|
||||
/// while !*started {
|
||||
/// cvar.wait(&mut started);
|
||||
/// }
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub fn wait<T>(&self, guard: &mut MutexGuard<'_, T>) {
|
||||
unsafe {
|
||||
let lock = mutex::guard_lock(guard);
|
||||
self.inner.wait(lock);
|
||||
}
|
||||
}
|
||||
|
||||
/// Blocks the current thread until the provided condition becomes false.
|
||||
///
|
||||
/// `condition` is checked immediately; if not met (returns `true`), this
|
||||
/// will [`wait`] for the next notification then check again. This repeats
|
||||
/// until `condition` returns `false`, in which case this function returns.
|
||||
///
|
||||
/// This function will atomically unlock the mutex specified (represented by
|
||||
/// `guard`) and block the current thread. This means that any calls
|
||||
/// to [`notify_one`] or [`notify_all`] which happen logically after the
|
||||
/// mutex is unlocked are candidates to wake this thread up. When this
|
||||
/// function call returns, the lock specified will have been re-acquired.
|
||||
///
|
||||
/// [`wait`]: Self::wait
|
||||
/// [`notify_one`]: Self::notify_one
|
||||
/// [`notify_all`]: Self::notify_all
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(true), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut pending = lock.lock();
|
||||
/// *pending = false;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_one();
|
||||
/// });
|
||||
///
|
||||
/// // Wait for the thread to start up.
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// // As long as the value inside the `Mutex<bool>` is `true`, we wait.
|
||||
/// let mut guard = lock.lock();
|
||||
/// cvar.wait_while(&mut guard, |pending| { *pending });
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub fn wait_while<T, F>(&self, guard: &mut MutexGuard<'_, T>, mut condition: F)
|
||||
where
|
||||
F: FnMut(&mut T) -> bool,
|
||||
{
|
||||
while condition(guard.deref_mut()) {
|
||||
self.wait(guard);
|
||||
}
|
||||
}
|
||||
|
||||
/// Waits on this condition variable for a notification, timing out after a
|
||||
/// specified duration.
|
||||
///
|
||||
/// The semantics of this function are equivalent to [`wait`] except that
|
||||
/// the thread will be blocked for roughly no longer than `dur`. This
|
||||
/// method should not be used for precise timing due to anomalies such as
|
||||
/// preemption or platform differences that might not cause the maximum
|
||||
/// amount of time waited to be precisely `dur`.
|
||||
///
|
||||
/// Note that the best effort is made to ensure that the time waited is
|
||||
/// measured with a monotonic clock, and not affected by the changes made to
|
||||
/// the system time. This function is susceptible to spurious wakeups.
|
||||
/// Condition variables normally have a boolean predicate associated with
|
||||
/// them, and the predicate must always be checked each time this function
|
||||
/// returns to protect against spurious wakeups. Furthermore, since the timeout
|
||||
/// is given relative to the moment this function is called, it needs to be adjusted
|
||||
/// when this function is called in a loop. The [`wait_timeout_while`] method
|
||||
/// lets you wait with a timeout while a predicate is true, taking care of all these concerns.
|
||||
///
|
||||
/// The returned [`WaitTimeoutResult`] value indicates if the timeout is
|
||||
/// known to have elapsed.
|
||||
///
|
||||
/// Like [`wait`], the lock specified will have been re-acquired when this function
|
||||
/// returns, regardless of whether the timeout elapsed or not.
|
||||
///
|
||||
/// [`wait`]: Self::wait
|
||||
/// [`wait_timeout_while`]: Self::wait_timeout_while
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
/// use std::time::Duration;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut started = lock.lock();
|
||||
/// *started = true;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_one();
|
||||
/// });
|
||||
///
|
||||
/// // wait for the thread to start up
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// let mut started = lock.lock();
|
||||
/// // as long as the value inside the `Mutex<bool>` is `false`, we wait
|
||||
/// loop {
|
||||
/// let result = cvar.wait_timeout(&mut started, Duration::from_millis(10));
|
||||
/// // 10 milliseconds have passed, or maybe the value changed!
|
||||
/// if *started == true {
|
||||
/// // We received the notification and the value has been updated, we can leave.
|
||||
/// break
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub fn wait_timeout<T>(
|
||||
&self,
|
||||
guard: &mut MutexGuard<'_, T>,
|
||||
dur: Duration,
|
||||
) -> WaitTimeoutResult {
|
||||
let success = unsafe {
|
||||
let lock = mutex::guard_lock(guard);
|
||||
self.inner.wait_timeout(lock, dur)
|
||||
};
|
||||
WaitTimeoutResult(!success)
|
||||
}
|
||||
|
||||
/// Waits on this condition variable for a notification, timing out after a
|
||||
/// specified duration.
|
||||
///
|
||||
/// The semantics of this function are equivalent to [`wait_while`] except
|
||||
/// that the thread will be blocked for roughly no longer than `dur`. This
|
||||
/// method should not be used for precise timing due to anomalies such as
|
||||
/// preemption or platform differences that might not cause the maximum
|
||||
/// amount of time waited to be precisely `dur`.
|
||||
///
|
||||
/// Note that the best effort is made to ensure that the time waited is
|
||||
/// measured with a monotonic clock, and not affected by the changes made to
|
||||
/// the system time.
|
||||
///
|
||||
/// The returned [`WaitTimeoutResult`] value indicates if the timeout is
|
||||
/// known to have elapsed without the condition being met.
|
||||
///
|
||||
/// Like [`wait_while`], the lock specified will have been re-acquired when this
|
||||
/// function returns, regardless of whether the timeout elapsed or not.
|
||||
///
|
||||
/// [`wait_while`]: Self::wait_while
|
||||
/// [`wait_timeout`]: Self::wait_timeout
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
/// use std::time::Duration;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(true), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut pending = lock.lock();
|
||||
/// *pending = false;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_one();
|
||||
/// });
|
||||
///
|
||||
/// // wait for the thread to start up
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// let mut guard = lock.lock();
|
||||
/// let result = cvar.wait_timeout_while(
|
||||
/// &mut guard,
|
||||
/// Duration::from_millis(100),
|
||||
/// |&mut pending| pending,
|
||||
/// );
|
||||
/// if result.timed_out() {
|
||||
/// // timed-out without the condition ever evaluating to false.
|
||||
/// }
|
||||
/// // access the locked mutex via guard
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub fn wait_timeout_while<T, F>(
|
||||
&self,
|
||||
guard: &mut MutexGuard<'_, T>,
|
||||
dur: Duration,
|
||||
mut condition: F,
|
||||
) -> WaitTimeoutResult
|
||||
where
|
||||
F: FnMut(&mut T) -> bool,
|
||||
{
|
||||
let start = Instant::now();
|
||||
|
||||
while condition(guard.deref_mut()) {
|
||||
let timeout = match dur.checked_sub(start.elapsed()) {
|
||||
Some(timeout) => timeout,
|
||||
None => return WaitTimeoutResult(true),
|
||||
};
|
||||
|
||||
self.wait_timeout(guard, timeout);
|
||||
}
|
||||
|
||||
WaitTimeoutResult(false)
|
||||
}
|
||||
|
||||
/// Wakes up one blocked thread on this condvar.
|
||||
///
|
||||
/// If there is a blocked thread on this condition variable, then it will
|
||||
/// be woken up from its call to [`wait`] or [`wait_timeout`]. Calls to
|
||||
/// `notify_one` are not buffered in any way.
|
||||
///
|
||||
/// To wake up all threads, see [`notify_all`].
|
||||
///
|
||||
/// [`wait`]: Self::wait
|
||||
/// [`wait_timeout`]: Self::wait_timeout
|
||||
/// [`notify_all`]: Self::notify_all
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut started = lock.lock();
|
||||
/// *started = true;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_one();
|
||||
/// });
|
||||
///
|
||||
/// // Wait for the thread to start up.
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// let mut started = lock.lock();
|
||||
/// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
|
||||
/// while !*started {
|
||||
/// cvar.wait(&mut started);
|
||||
/// }
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub fn notify_one(&self) {
|
||||
self.inner.notify_one()
|
||||
}
|
||||
|
||||
/// Wakes up all blocked threads on this condvar.
|
||||
///
|
||||
/// This method will ensure that any current waiters on the condition
|
||||
/// variable are awoken. Calls to `notify_all()` are not buffered in any
|
||||
/// way.
|
||||
///
|
||||
/// To wake up only one thread, see [`notify_one`].
|
||||
///
|
||||
/// [`notify_one`]: Self::notify_one
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut started = lock.lock();
|
||||
/// *started = true;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_all();
|
||||
/// });
|
||||
///
|
||||
/// // Wait for the thread to start up.
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// let mut started = lock.lock();
|
||||
/// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
|
||||
/// while !*started {
|
||||
/// cvar.wait(&mut started);
|
||||
/// }
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub fn notify_all(&self) {
|
||||
self.inner.notify_all()
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
impl fmt::Debug for Condvar {
    // The inner `sys::Condvar` carries no user-meaningful state, so the
    // output is just `Condvar { .. }`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Condvar").finish_non_exhaustive()
    }
}

#[unstable(feature = "nonpoison_condvar", issue = "134645")]
impl Default for Condvar {
    /// Creates a `Condvar` which is ready to be waited on and notified.
    fn default() -> Condvar {
        Condvar::new()
    }
}
|
||||
649
crates/std/src/sync/nonpoison/mutex.rs
Normal file
649
crates/std/src/sync/nonpoison/mutex.rs
Normal file
@@ -0,0 +1,649 @@
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::fmt;
|
||||
use crate::marker::PhantomData;
|
||||
use crate::mem::{self, ManuallyDrop};
|
||||
use crate::ops::{Deref, DerefMut};
|
||||
use crate::ptr::NonNull;
|
||||
use crate::sync::nonpoison::{TryLockResult, WouldBlock};
|
||||
use crate::sys::sync as sys;
|
||||
|
||||
/// A mutual exclusion primitive useful for protecting shared data that does not keep track of
|
||||
/// lock poisoning.
|
||||
///
|
||||
/// For more information about mutexes, check out the documentation for the poisoning variant of
|
||||
/// this lock at [`poison::Mutex`].
|
||||
///
|
||||
/// [`poison::Mutex`]: crate::sync::poison::Mutex
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// Note that this `Mutex` does **not** propagate threads that panic while holding the lock via
|
||||
/// poisoning. If you need this functionality, see [`poison::Mutex`].
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
///
|
||||
/// use std::thread;
|
||||
/// use std::sync::{Arc, nonpoison::Mutex};
|
||||
///
|
||||
/// let mutex = Arc::new(Mutex::new(0u32));
|
||||
/// let mut handles = Vec::new();
|
||||
///
|
||||
/// for n in 0..10 {
|
||||
/// let m = Arc::clone(&mutex);
|
||||
/// let handle = thread::spawn(move || {
|
||||
/// let mut guard = m.lock();
|
||||
/// *guard += 1;
|
||||
/// panic!("panic from thread {n} {guard}")
|
||||
/// });
|
||||
/// handles.push(handle);
|
||||
/// }
|
||||
///
|
||||
/// for h in handles {
|
||||
/// let _ = h.join();
|
||||
/// }
|
||||
///
|
||||
/// println!("Finished, locked {} times", mutex.lock());
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonMutex")]
pub struct Mutex<T: ?Sized> {
    // The platform lock primitive guarding `data`.
    inner: sys::Mutex,
    // The protected value; `UnsafeCell` because access is handed out through
    // guards rather than the borrow checker.
    data: UnsafeCell<T>,
}
|
||||
|
||||
/// `T` must be `Send` for a [`Mutex`] to be `Send` because it is possible to acquire
/// the owned `T` from the `Mutex` via [`into_inner`].
///
/// [`into_inner`]: Mutex::into_inner
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
// SAFETY: sending the mutex to another thread moves the `T` it owns there,
// which is sound exactly when `T: Send`.
unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}

/// `T` must be `Send` for [`Mutex`] to be `Sync`.
/// This ensures that the protected data can be accessed safely from multiple threads
/// without causing data races or other unsafe behavior.
///
/// [`Mutex<T>`] provides mutable access to `T` to one thread at a time. However, it's essential
/// for `T` to be `Send` because it's not safe for non-`Send` structures to be accessed in
/// this manner. For instance, consider [`Rc`], a non-atomic reference counted smart pointer,
/// which is not `Send`. With `Rc`, we can have multiple copies pointing to the same heap
/// allocation with a non-atomic reference count. If we were to use `Mutex<Rc<_>>`, it would
/// only protect one instance of `Rc` from shared access, leaving other copies vulnerable
/// to potential data races.
///
/// Also note that it is not necessary for `T` to be `Sync` as `&T` is only made available
/// to one thread at a time if `T` is not `Sync`.
///
/// [`Rc`]: crate::rc::Rc
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
// SAFETY: the lock serializes all access to the inner `T`, so sharing
// `&Mutex<T>` across threads only ever hands `T` to one thread at a time.
unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
|
||||
|
||||
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
|
||||
/// dropped (falls out of scope), the lock will be unlocked.
|
||||
///
|
||||
/// The data protected by the mutex can be accessed through this guard via its
|
||||
/// [`Deref`] and [`DerefMut`] implementations.
|
||||
///
|
||||
/// This structure is created by the [`lock`] and [`try_lock`] methods on
|
||||
/// [`Mutex`].
|
||||
///
|
||||
/// [`lock`]: Mutex::lock
|
||||
/// [`try_lock`]: Mutex::try_lock
|
||||
#[must_use = "if unused the Mutex will immediately unlock"]
#[must_not_suspend = "holding a MutexGuard across suspend \
                      points can cause deadlocks, delays, \
                      and cause Futures to not implement `Send`"]
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
#[clippy::has_significant_drop]
#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonMutexGuard")]
pub struct MutexGuard<'a, T: ?Sized + 'a> {
    // Borrow of the locked mutex; keeps it alive for the guard's lifetime.
    // NOTE(review): presumably released in this guard's `Drop` impl (not
    // visible in this chunk) — confirm.
    lock: &'a Mutex<T>,
}
|
||||
|
||||
/// A [`MutexGuard`] is not `Send` to maximize platform portability.
///
/// On platforms that use POSIX threads (commonly referred to as pthreads) there is a requirement to
/// release mutex locks on the same thread they were acquired.
/// For this reason, [`MutexGuard`] must not implement `Send` to prevent it being dropped from
/// another thread.
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
impl<T: ?Sized> !Send for MutexGuard<'_, T> {}

/// `T` must be `Sync` for a [`MutexGuard<T>`] to be `Sync`
/// because it is possible to get a `&T` from `&MutexGuard` (via `Deref`).
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
// SAFETY: sharing the guard only exposes `&T`, which is safe across threads
// exactly when `T: Sync`.
unsafe impl<T: ?Sized + Sync> Sync for MutexGuard<'_, T> {}
|
||||
|
||||
/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
|
||||
/// subfield of the protected data. When this structure is dropped (falls out
|
||||
/// of scope), the lock will be unlocked.
|
||||
///
|
||||
/// The main difference between `MappedMutexGuard` and [`MutexGuard`] is that the
|
||||
/// former cannot be used with [`Condvar`], since that could introduce soundness issues if the
|
||||
/// locked object is modified by another thread while the `Mutex` is unlocked.
|
||||
///
|
||||
/// The data protected by the mutex can be accessed through this guard via its
|
||||
/// [`Deref`] and [`DerefMut`] implementations.
|
||||
///
|
||||
/// This structure is created by the [`map`] and [`filter_map`] methods on
|
||||
/// [`MutexGuard`].
|
||||
///
|
||||
/// [`map`]: MutexGuard::map
|
||||
/// [`filter_map`]: MutexGuard::filter_map
|
||||
/// [`Condvar`]: crate::sync::nonpoison::Condvar
|
||||
#[must_use = "if unused the Mutex will immediately unlock"]
#[must_not_suspend = "holding a MappedMutexGuard across suspend \
                      points can cause deadlocks, delays, \
                      and cause Futures to not implement `Send`"]
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
#[clippy::has_significant_drop]
pub struct MappedMutexGuard<'a, T: ?Sized + 'a> {
    // NB: we use a pointer instead of `&'a mut T` to avoid `noalias` violations, because a
    // `MappedMutexGuard` argument doesn't hold uniqueness for its whole scope, only until it drops.
    // `NonNull` is covariant over `T`, so we add a `PhantomData<&'a mut T>` field
    // below for the correct variance over `T` (invariance).
    data: NonNull<T>,
    // The raw lock backing this guard.
    // NOTE(review): presumably unlocked in this guard's `Drop` impl (not
    // visible in this chunk) — confirm.
    inner: &'a sys::Mutex,
    // See the NB above: forces invariance over `T` without storing a `T`.
    _variance: PhantomData<&'a mut T>,
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
// Like `MutexGuard`, this guard must not implement `Send`: unlocking a mutex
// from a thread other than the one that locked it is unsound on some platforms.
impl<T: ?Sized> !Send for MappedMutexGuard<'_, T> {}
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
// SAFETY: a shared `&MappedMutexGuard` only exposes `&T` (via `Deref`), which
// is sound exactly when `T: Sync` (mirrors the `MutexGuard` impl above).
unsafe impl<T: ?Sized + Sync> Sync for MappedMutexGuard<'_, T> {}
|
||||
|
||||
impl<T> Mutex<T> {
    /// Creates a new mutex in an unlocked state ready for use.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(nonpoison_mutex)]
    ///
    /// use std::sync::nonpoison::Mutex;
    ///
    /// let mutex = Mutex::new(0);
    /// ```
    #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    #[inline]
    pub const fn new(t: T) -> Mutex<T> {
        Mutex { inner: sys::Mutex::new(), data: UnsafeCell::new(t) }
    }

    /// Returns the contained value by cloning it.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(nonpoison_mutex)]
    /// #![feature(lock_value_accessors)]
    ///
    /// use std::sync::nonpoison::Mutex;
    ///
    /// let mut mutex = Mutex::new(7);
    ///
    /// assert_eq!(mutex.get_cloned(), 7);
    /// ```
    #[unstable(feature = "lock_value_accessors", issue = "133407")]
    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    pub fn get_cloned(&self) -> T
    where
        T: Clone,
    {
        // Acquires the lock only for the duration of the clone.
        self.lock().clone()
    }

    /// Sets the contained value.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(nonpoison_mutex)]
    /// #![feature(lock_value_accessors)]
    ///
    /// use std::sync::nonpoison::Mutex;
    ///
    /// let mut mutex = Mutex::new(7);
    ///
    /// assert_eq!(mutex.get_cloned(), 7);
    /// mutex.set(11);
    /// assert_eq!(mutex.get_cloned(), 11);
    /// ```
    #[unstable(feature = "lock_value_accessors", issue = "133407")]
    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    pub fn set(&self, value: T) {
        if mem::needs_drop::<T>() {
            // If the contained value has a non-trivial destructor, we
            // call that destructor after the lock has been released.
            drop(self.replace(value))
        } else {
            // Trivially-droppable values can be overwritten in place while
            // holding the lock; no destructor runs under the lock.
            *self.lock() = value;
        }
    }

    /// Replaces the contained value with `value`, and returns the old contained value.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(nonpoison_mutex)]
    /// #![feature(lock_value_accessors)]
    ///
    /// use std::sync::nonpoison::Mutex;
    ///
    /// let mut mutex = Mutex::new(7);
    ///
    /// assert_eq!(mutex.replace(11), 7);
    /// assert_eq!(mutex.get_cloned(), 11);
    /// ```
    #[unstable(feature = "lock_value_accessors", issue = "133407")]
    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    pub fn replace(&self, value: T) -> T {
        let mut guard = self.lock();
        // The old value is moved out and returned; its destructor (if any)
        // runs at the caller, after `guard` has unlocked the mutex.
        mem::replace(&mut *guard, value)
    }
}
|
||||
|
||||
impl<T: ?Sized> Mutex<T> {
    /// Acquires a mutex, blocking the current thread until it is able to do so.
    ///
    /// This function will block the local thread until it is available to acquire
    /// the mutex. Upon returning, the thread is the only thread with the lock
    /// held. An RAII guard is returned to allow scoped unlock of the lock. When
    /// the guard goes out of scope, the mutex will be unlocked.
    ///
    /// The exact behavior on locking a mutex in the thread which already holds
    /// the lock is left unspecified. However, this function will not return on
    /// the second call (it might panic or deadlock, for example).
    ///
    /// # Panics
    ///
    /// This function might panic when called if the lock is already held by
    /// the current thread.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(nonpoison_mutex)]
    ///
    /// use std::sync::{Arc, nonpoison::Mutex};
    /// use std::thread;
    ///
    /// let mutex = Arc::new(Mutex::new(0));
    /// let c_mutex = Arc::clone(&mutex);
    ///
    /// thread::spawn(move || {
    ///     *c_mutex.lock() = 10;
    /// }).join().expect("thread::spawn failed");
    /// assert_eq!(*mutex.lock(), 10);
    /// ```
    #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    pub fn lock(&self) -> MutexGuard<'_, T> {
        // SAFETY: the raw lock is acquired before the guard is constructed,
        // so the guard's exclusive-access invariant holds.
        unsafe {
            self.inner.lock();
            MutexGuard::new(self)
        }
    }

    /// Attempts to acquire this lock.
    ///
    /// This function does not block. If the lock could not be acquired at this time, then
    /// [`WouldBlock`] is returned. Otherwise, an RAII guard is returned.
    ///
    /// The lock will be unlocked when the guard is dropped.
    ///
    /// # Errors
    ///
    /// If the mutex could not be acquired because it is already locked, then this call will return
    /// the [`WouldBlock`] error.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(nonpoison_mutex)]
    ///
    /// use std::sync::{Arc, nonpoison::Mutex};
    /// use std::thread;
    ///
    /// let mutex = Arc::new(Mutex::new(0));
    /// let c_mutex = Arc::clone(&mutex);
    ///
    /// thread::spawn(move || {
    ///     let mut lock = c_mutex.try_lock();
    ///     if let Ok(ref mut mutex) = lock {
    ///         **mutex = 10;
    ///     } else {
    ///         println!("try_lock failed");
    ///     }
    /// }).join().expect("thread::spawn failed");
    /// assert_eq!(*mutex.lock(), 10);
    /// ```
    #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    pub fn try_lock(&self) -> TryLockResult<MutexGuard<'_, T>> {
        // SAFETY: a guard is only created when the raw `try_lock` reports that
        // the lock was actually acquired.
        unsafe { if self.inner.try_lock() { Ok(MutexGuard::new(self)) } else { Err(WouldBlock) } }
    }

    /// Consumes this mutex, returning the underlying data.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(nonpoison_mutex)]
    ///
    /// use std::sync::nonpoison::Mutex;
    ///
    /// let mutex = Mutex::new(0);
    /// assert_eq!(mutex.into_inner(), 0);
    /// ```
    #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    pub fn into_inner(self) -> T
    where
        T: Sized,
    {
        // Ownership of `self` statically guarantees no guards are alive.
        self.data.into_inner()
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `Mutex` mutably, no actual locking needs to
    /// take place -- the mutable borrow statically guarantees no locks exist.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(nonpoison_mutex)]
    ///
    /// use std::sync::nonpoison::Mutex;
    ///
    /// let mut mutex = Mutex::new(0);
    /// *mutex.get_mut() = 10;
    /// assert_eq!(*mutex.lock(), 10);
    /// ```
    #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    pub fn get_mut(&mut self) -> &mut T {
        self.data.get_mut()
    }

    /// Returns a raw pointer to the underlying data.
    ///
    /// The returned pointer is always non-null and properly aligned, but it is
    /// the user's responsibility to ensure that any reads and writes through it
    /// are properly synchronized to avoid data races, and that it is not read
    /// or written through after the mutex is dropped.
    #[unstable(feature = "mutex_data_ptr", issue = "140368")]
    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    pub const fn data_ptr(&self) -> *mut T {
        self.data.get()
    }

    /// Acquires the mutex and provides mutable access to the underlying data by passing
    /// a mutable reference to the given closure.
    ///
    /// This method acquires the lock, calls the provided closure with a mutable reference
    /// to the data, and returns the result of the closure. The lock is released after
    /// the closure completes, even if it panics.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(lock_value_accessors, nonpoison_mutex)]
    ///
    /// use std::sync::nonpoison::Mutex;
    ///
    /// let mutex = Mutex::new(2);
    ///
    /// let result = mutex.with_mut(|data| {
    ///     *data += 3;
    ///
    ///     *data + 5
    /// });
    ///
    /// assert_eq!(*mutex.lock(), 5);
    /// assert_eq!(result, 10);
    /// ```
    #[unstable(feature = "lock_value_accessors", issue = "133407")]
    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    pub fn with_mut<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&mut T) -> R,
    {
        // The guard (and therefore the lock) lives for the whole closure call;
        // a panic in `f` still unlocks via the guard's `Drop`.
        f(&mut self.lock())
    }
}
|
||||
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
impl<T> From<T> for Mutex<T> {
|
||||
/// Creates a new mutex in an unlocked state ready for use.
|
||||
/// This is equivalent to [`Mutex::new`].
|
||||
fn from(t: T) -> Self {
|
||||
Mutex::new(t)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
impl<T: Default> Default for Mutex<T> {
|
||||
/// Creates a `Mutex<T>`, with the `Default` value for T.
|
||||
fn default() -> Mutex<T> {
|
||||
Mutex::new(Default::default())
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let mut d = f.debug_struct("Mutex");
|
||||
match self.try_lock() {
|
||||
Ok(guard) => {
|
||||
d.field("data", &&*guard);
|
||||
}
|
||||
Err(WouldBlock) => {
|
||||
d.field("data", &"<locked>");
|
||||
}
|
||||
}
|
||||
d.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'mutex, T: ?Sized> MutexGuard<'mutex, T> {
|
||||
unsafe fn new(lock: &'mutex Mutex<T>) -> MutexGuard<'mutex, T> {
|
||||
return MutexGuard { lock };
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
impl<T: ?Sized> Deref for MutexGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: the guard's existence proves the lock is held, so the data
        // behind the `UnsafeCell` may be borrowed for the guard's lifetime.
        unsafe { &*self.lock.data.get() }
    }
}

#[unstable(feature = "nonpoison_mutex", issue = "134645")]
impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: as in `Deref`, the held lock guarantees exclusive access;
        // `&mut self` additionally guarantees the borrow is unique.
        unsafe { &mut *self.lock.data.get() }
    }
}

#[unstable(feature = "nonpoison_mutex", issue = "134645")]
impl<T: ?Sized> Drop for MutexGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: this guard was created with the lock held and is being
        // destroyed, so unlocking here releases exactly one acquisition.
        unsafe {
            self.lock.inner.unlock();
        }
    }
}

#[unstable(feature = "nonpoison_mutex", issue = "134645")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'_, T> {
    // Delegates to the protected value's `Debug` (lock is already held).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[unstable(feature = "nonpoison_mutex", issue = "134645")]
impl<T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'_, T> {
    // Delegates to the protected value's `Display` (lock is already held).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}
|
||||
|
||||
/// For use in [`nonpoison::condvar`](super::condvar).
///
/// Exposes the raw `sys::Mutex` behind a live guard so the condvar
/// implementation can operate on it directly.
pub(super) fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
    &guard.lock.inner
}
|
||||
|
||||
impl<'a, T: ?Sized> MutexGuard<'a, T> {
    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data, e.g.
    /// an enum variant.
    ///
    /// The `Mutex` is already locked, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MutexGuard::map(...)`. A method would interfere with methods of the
    /// same name on the contents of the `MutexGuard` used through `Deref`.
    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    pub fn map<U, F>(orig: Self, f: F) -> MappedMutexGuard<'a, U>
    where
        F: FnOnce(&mut T) -> &mut U,
        U: ?Sized,
    {
        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `filter_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        let data = NonNull::from(f(unsafe { &mut *orig.lock.data.get() }));
        // `ManuallyDrop` keeps `orig`'s `Drop` from unlocking the mutex; the
        // returned mapped guard takes over responsibility for unlocking.
        let orig = ManuallyDrop::new(orig);
        MappedMutexGuard { data, inner: &orig.lock.inner, _variance: PhantomData }
    }

    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data. The
    /// original guard is returned as an `Err(...)` if the closure returns
    /// `None`.
    ///
    /// The `Mutex` is already locked, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MutexGuard::filter_map(...)`. A method would interfere with methods of the
    /// same name on the contents of the `MutexGuard` used through `Deref`.
    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedMutexGuard<'a, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
        U: ?Sized,
    {
        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `filter_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        match f(unsafe { &mut *orig.lock.data.get() }) {
            Some(data) => {
                let data = NonNull::from(data);
                // Suppress `orig`'s unlock-on-drop; the mapped guard now owns
                // the unlock responsibility.
                let orig = ManuallyDrop::new(orig);
                Ok(MappedMutexGuard { data, inner: &orig.lock.inner, _variance: PhantomData })
            }
            // Closure declined the projection: hand the untouched guard back.
            None => Err(orig),
        }
    }
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
impl<T: ?Sized> Deref for MappedMutexGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: `data` was derived from the locked data while the lock was
        // held, and the lock remains held for this guard's lifetime.
        unsafe { self.data.as_ref() }
    }
}

#[unstable(feature = "mapped_lock_guards", issue = "117108")]
impl<T: ?Sized> DerefMut for MappedMutexGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: as in `Deref`; `&mut self` makes the borrow unique.
        unsafe { self.data.as_mut() }
    }
}

#[unstable(feature = "mapped_lock_guards", issue = "117108")]
impl<T: ?Sized> Drop for MappedMutexGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: this guard took over the unlock responsibility from the
        // `MutexGuard` it was mapped from (see `MutexGuard::map`).
        unsafe {
            self.inner.unlock();
        }
    }
}

#[unstable(feature = "mapped_lock_guards", issue = "117108")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedMutexGuard<'_, T> {
    // Delegates to the pointed-to value's `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[unstable(feature = "mapped_lock_guards", issue = "117108")]
impl<T: ?Sized + fmt::Display> fmt::Display for MappedMutexGuard<'_, T> {
    // Delegates to the pointed-to value's `Display`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}
|
||||
|
||||
impl<'a, T: ?Sized> MappedMutexGuard<'a, T> {
    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data, e.g.
    /// an enum variant.
    ///
    /// The `Mutex` is already locked, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MappedMutexGuard::map(...)`. A method would interfere with methods of the
    /// same name on the contents of the `MutexGuard` used through `Deref`.
    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    pub fn map<U, F>(mut orig: Self, f: F) -> MappedMutexGuard<'a, U>
    where
        F: FnOnce(&mut T) -> &mut U,
        U: ?Sized,
    {
        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `filter_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        let data = NonNull::from(f(unsafe { orig.data.as_mut() }));
        // Suppress `orig`'s unlock-on-drop; the new mapped guard takes over.
        let orig = ManuallyDrop::new(orig);
        MappedMutexGuard { data, inner: orig.inner, _variance: PhantomData }
    }

    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data. The
    /// original guard is returned as an `Err(...)` if the closure returns
    /// `None`.
    ///
    /// The `Mutex` is already locked, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MappedMutexGuard::filter_map(...)`. A method would interfere with methods of the
    /// same name on the contents of the `MutexGuard` used through `Deref`.
    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
    // #[unstable(feature = "nonpoison_mutex", issue = "134645")]
    pub fn filter_map<U, F>(mut orig: Self, f: F) -> Result<MappedMutexGuard<'a, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
        U: ?Sized,
    {
        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `filter_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        match f(unsafe { orig.data.as_mut() }) {
            Some(data) => {
                let data = NonNull::from(data);
                // Suppress `orig`'s unlock-on-drop; the new mapped guard takes over.
                let orig = ManuallyDrop::new(orig);
                Ok(MappedMutexGuard { data, inner: orig.inner, _variance: PhantomData })
            }
            // Closure declined the projection: hand the untouched guard back.
            None => Err(orig),
        }
    }
}
|
||||
1140
crates/std/src/sync/nonpoison/rwlock.rs
Normal file
1140
crates/std/src/sync/nonpoison/rwlock.rs
Normal file
File diff suppressed because it is too large
Load Diff
395
crates/std/src/sync/once.rs
Normal file
395
crates/std/src/sync/once.rs
Normal file
@@ -0,0 +1,395 @@
|
||||
//! A "once initialization" primitive
|
||||
//!
|
||||
//! This primitive is meant to be used to run one-time initialization. An
|
||||
//! example use case would be for initializing an FFI library.
|
||||
|
||||
use crate::fmt;
|
||||
use core::panic::{RefUnwindSafe, UnwindSafe};
|
||||
use crate::sys::sync as sys;
|
||||
|
||||
/// A low-level synchronization primitive for one-time global execution.
///
/// Previously this was the only "execute once" synchronization in `std`.
/// Other libraries implemented novel synchronizing types with `Once`, like
/// [`OnceLock<T>`] or [`LazyLock<T, F>`], before those were added to `std`.
/// `OnceLock<T>` in particular supersedes `Once` in functionality and should
/// be preferred for the common case where the `Once` is associated with data.
///
/// This type can only be constructed with [`Once::new()`].
///
/// # Examples
///
/// ```
/// use std::sync::Once;
///
/// static START: Once = Once::new();
///
/// START.call_once(|| {
///     // run initialization here
/// });
/// ```
///
/// [`OnceLock<T>`]: crate::sync::OnceLock
/// [`LazyLock<T, F>`]: crate::sync::LazyLock
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Once {
    // Platform-specific once implementation (from `crate::sys::sync`).
    inner: sys::Once,
}
|
||||
|
||||
#[stable(feature = "sync_once_unwind_safe", since = "1.59.0")]
// A `Once` remains usable across unwinds: a panicking initializer poisons it
// (see `call_once`), so no broken invariant can be observed afterwards.
impl UnwindSafe for Once {}

#[stable(feature = "sync_once_unwind_safe", since = "1.59.0")]
impl RefUnwindSafe for Once {}
|
||||
|
||||
/// State yielded to [`Once::call_once_force()`]’s closure parameter. The state
/// can be used to query the poison status of the [`Once`].
#[stable(feature = "once_poison", since = "1.51.0")]
pub struct OnceState {
    // Platform-specific state handle; exposed crate-wide for `OnceLock`.
    pub(crate) inner: sys::OnceState,
}

/// Used for the internal implementation of `sys::sync::once` on different platforms and the
/// [`LazyLock`](crate::sync::LazyLock) implementation.
pub(crate) enum OnceExclusiveState {
    // No initialization has completed yet.
    Incomplete,
    // An initialization closure panicked.
    Poisoned,
    // Initialization finished successfully.
    Complete,
}
|
||||
|
||||
/// Initialization value for static [`Once`] values.
///
/// # Examples
///
/// ```
/// use std::sync::{Once, ONCE_INIT};
///
/// static START: Once = ONCE_INIT;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(
    since = "1.38.0",
    note = "the `Once::new()` function is now preferred",
    suggestion = "Once::new()"
)]
pub const ONCE_INIT: Once = Once::new();
|
||||
|
||||
impl Once {
    /// Creates a new `Once` value.
    #[inline]
    #[stable(feature = "once_new", since = "1.2.0")]
    #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")]
    #[must_use]
    pub const fn new() -> Once {
        Once { inner: sys::Once::new() }
    }

    /// Performs an initialization routine once and only once. The given closure
    /// will be executed if this is the first time `call_once` has been called,
    /// and otherwise the routine will *not* be invoked.
    ///
    /// This method will block the calling thread if another initialization
    /// routine is currently running.
    ///
    /// When this function returns, it is guaranteed that some initialization
    /// has run and completed (it might not be the closure specified). It is also
    /// guaranteed that any memory writes performed by the executed closure can
    /// be reliably observed by other threads at this point (there is a
    /// happens-before relation between the closure and code executing after the
    /// return).
    ///
    /// If the given closure recursively invokes `call_once` on the same [`Once`]
    /// instance, the exact behavior is not specified: allowed outcomes are
    /// a panic or a deadlock.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Once;
    ///
    /// static mut VAL: usize = 0;
    /// static INIT: Once = Once::new();
    ///
    /// // Accessing a `static mut` is unsafe much of the time, but if we do so
    /// // in a synchronized fashion (e.g., write once or read all) then we're
    /// // good to go!
    /// //
    /// // This function will only call `expensive_computation` once, and will
    /// // otherwise always return the value returned from the first invocation.
    /// fn get_cached_val() -> usize {
    ///     unsafe {
    ///         INIT.call_once(|| {
    ///             VAL = expensive_computation();
    ///         });
    ///         VAL
    ///     }
    /// }
    ///
    /// fn expensive_computation() -> usize {
    ///     // ...
    ///     # 2
    /// }
    /// ```
    ///
    /// # Panics
    ///
    /// The closure `f` will only be executed once even if this is called
    /// concurrently amongst many threads. If that closure panics, however, then
    /// it will *poison* this [`Once`] instance, causing all future invocations of
    /// `call_once` to also panic.
    ///
    /// This is similar to [poisoning with mutexes][poison], but this mechanism
    /// is guaranteed to never skip panics within `f`.
    ///
    /// [poison]: struct.Mutex.html#poisoning
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[track_caller]
    #[rustc_should_not_be_called_on_const_items]
    pub fn call_once<F>(&self, f: F)
    where
        F: FnOnce(),
    {
        // Fast path check
        if self.inner.is_completed() {
            return;
        }

        // The platform layer takes an `FnMut`; wrapping the `FnOnce` in an
        // `Option` lets it be taken and invoked exactly once.
        let mut f = Some(f);
        self.inner.call(false, &mut |_| f.take().unwrap()());
    }

    /// Performs the same function as [`call_once()`] except ignores poisoning.
    ///
    /// Unlike [`call_once()`], if this [`Once`] has been poisoned (i.e., a previous
    /// call to [`call_once()`] or [`call_once_force()`] caused a panic), calling
    /// [`call_once_force()`] will still invoke the closure `f` and will _not_
    /// result in an immediate panic. If `f` panics, the [`Once`] will remain
    /// in a poison state. If `f` does _not_ panic, the [`Once`] will no
    /// longer be in a poison state and all future calls to [`call_once()`] or
    /// [`call_once_force()`] will be no-ops.
    ///
    /// The closure `f` is yielded a [`OnceState`] structure which can be used
    /// to query the poison status of the [`Once`].
    ///
    /// [`call_once()`]: Once::call_once
    /// [`call_once_force()`]: Once::call_once_force
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Once;
    /// use std::thread;
    ///
    /// static INIT: Once = Once::new();
    ///
    /// // poison the once
    /// let handle = thread::spawn(|| {
    ///     INIT.call_once(|| panic!());
    /// });
    /// assert!(handle.join().is_err());
    ///
    /// // poisoning propagates
    /// let handle = thread::spawn(|| {
    ///     INIT.call_once(|| {});
    /// });
    /// assert!(handle.join().is_err());
    ///
    /// // call_once_force will still run and reset the poisoned state
    /// INIT.call_once_force(|state| {
    ///     assert!(state.is_poisoned());
    /// });
    ///
    /// // once any success happens, we stop propagating the poison
    /// INIT.call_once(|| {});
    /// ```
    #[inline]
    #[stable(feature = "once_poison", since = "1.51.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn call_once_force<F>(&self, f: F)
    where
        F: FnOnce(&OnceState),
    {
        // Fast path check
        if self.inner.is_completed() {
            return;
        }

        // Same `FnOnce`-through-`FnMut` trick as in `call_once`; `true`
        // tells the platform layer to ignore an existing poisoned state.
        let mut f = Some(f);
        self.inner.call(true, &mut |p| f.take().unwrap()(p));
    }

    /// Returns `true` if some [`call_once()`] call has completed
    /// successfully. Specifically, `is_completed` will return false in
    /// the following situations:
    /// * [`call_once()`] was not called at all,
    /// * [`call_once()`] was called, but has not yet completed,
    /// * the [`Once`] instance is poisoned
    ///
    /// This function returning `false` does not mean that [`Once`] has not been
    /// executed. For example, it may have been executed in the time between
    /// when `is_completed` starts executing and when it returns, in which case
    /// the `false` return value would be stale (but still permissible).
    ///
    /// [`call_once()`]: Once::call_once
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Once;
    ///
    /// static INIT: Once = Once::new();
    ///
    /// assert_eq!(INIT.is_completed(), false);
    /// INIT.call_once(|| {
    ///     assert_eq!(INIT.is_completed(), false);
    /// });
    /// assert_eq!(INIT.is_completed(), true);
    /// ```
    ///
    /// ```
    /// use std::sync::Once;
    /// use std::thread;
    ///
    /// static INIT: Once = Once::new();
    ///
    /// assert_eq!(INIT.is_completed(), false);
    /// let handle = thread::spawn(|| {
    ///     INIT.call_once(|| panic!());
    /// });
    /// assert!(handle.join().is_err());
    /// assert_eq!(INIT.is_completed(), false);
    /// ```
    #[stable(feature = "once_is_completed", since = "1.43.0")]
    #[inline]
    pub fn is_completed(&self) -> bool {
        self.inner.is_completed()
    }

    /// Blocks the current thread until initialization has completed.
    ///
    /// # Example
    ///
    /// ```rust
    /// use std::sync::Once;
    /// use std::thread;
    ///
    /// static READY: Once = Once::new();
    ///
    /// let thread = thread::spawn(|| {
    ///     READY.wait();
    ///     println!("everything is ready");
    /// });
    ///
    /// READY.call_once(|| println!("performing setup"));
    /// ```
    ///
    /// # Panics
    ///
    /// If this [`Once`] has been poisoned because an initialization closure has
    /// panicked, this method will also panic. Use [`wait_force`](Self::wait_force)
    /// if this behavior is not desired.
    #[stable(feature = "once_wait", since = "1.86.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn wait(&self) {
        // Fast path: already completed, nothing to wait for.
        if !self.inner.is_completed() {
            self.inner.wait(false);
        }
    }

    /// Blocks the current thread until initialization has completed, ignoring
    /// poisoning.
    ///
    /// If this [`Once`] has been poisoned, this function blocks until it
    /// becomes completed, unlike [`Once::wait()`], which panics in this case.
    #[stable(feature = "once_wait", since = "1.86.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn wait_force(&self) {
        // Fast path: already completed, nothing to wait for.
        if !self.inner.is_completed() {
            self.inner.wait(true);
        }
    }

    /// Returns the current state of the `Once` instance.
    ///
    /// Since this takes a mutable reference, no initialization can currently
    /// be running, so the state must be either "incomplete", "poisoned" or
    /// "complete".
    #[inline]
    pub(crate) fn state(&mut self) -> OnceExclusiveState {
        self.inner.state()
    }

    /// Sets current state of the `Once` instance.
    ///
    /// Since this takes a mutable reference, no initialization can currently
    /// be running, so the state must be either "incomplete", "poisoned" or
    /// "complete".
    #[inline]
    pub(crate) fn set_state(&mut self, new_state: OnceExclusiveState) {
        self.inner.set_state(new_state);
    }
}
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
|
||||
impl fmt::Debug for Once {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("Once").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl OnceState {
    /// Returns `true` if the associated [`Once`] was poisoned prior to the
    /// invocation of the closure passed to [`Once::call_once_force()`].
    ///
    /// # Examples
    ///
    /// A poisoned [`Once`]:
    ///
    /// ```
    /// use std::sync::Once;
    /// use std::thread;
    ///
    /// static INIT: Once = Once::new();
    ///
    /// // poison the once
    /// let handle = thread::spawn(|| {
    ///     INIT.call_once(|| panic!());
    /// });
    /// assert!(handle.join().is_err());
    ///
    /// INIT.call_once_force(|state| {
    ///     assert!(state.is_poisoned());
    /// });
    /// ```
    ///
    /// An unpoisoned [`Once`]:
    ///
    /// ```
    /// use std::sync::Once;
    ///
    /// static INIT: Once = Once::new();
    ///
    /// INIT.call_once_force(|state| {
    ///     assert!(!state.is_poisoned());
    /// });
    /// ```
    #[stable(feature = "once_poison", since = "1.51.0")]
    #[inline]
    pub fn is_poisoned(&self) -> bool {
        self.inner.is_poisoned()
    }

    /// Poison the associated [`Once`] without explicitly panicking.
    // NOTE: This is currently only exposed for `OnceLock`.
    #[inline]
    pub(crate) fn poison(&self) {
        self.inner.poison();
    }
}
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
|
||||
impl fmt::Debug for OnceState {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("OnceState").field("poisoned", &self.is_poisoned()).finish()
|
||||
}
|
||||
}
|
||||
709
crates/std/src/sync/once_lock.rs
Normal file
709
crates/std/src/sync/once_lock.rs
Normal file
@@ -0,0 +1,709 @@
|
||||
use super::once::OnceExclusiveState;
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::fmt;
|
||||
use crate::marker::PhantomData;
|
||||
use crate::mem::MaybeUninit;
|
||||
use core::panic::{RefUnwindSafe, UnwindSafe};
|
||||
use crate::sync::Once;
|
||||
|
||||
/// A synchronization primitive which can nominally be written to only once.
///
/// This type is a thread-safe [`OnceCell`], and can be used in statics.
/// In many simple cases, you can use [`LazyLock<T, F>`] instead to get the benefits of this type
/// with less effort: `LazyLock<T, F>` "looks like" `&T` because it initializes with `F` on deref!
/// Where `OnceLock` shines is when `LazyLock` is too simple to support a given case, as `LazyLock`
/// doesn't allow additional inputs to its function after you call [`LazyLock::new(|| ...)`].
///
/// A `OnceLock` can be thought of as a safe abstraction over uninitialized data that becomes
/// initialized once written.
///
/// Unlike [`Mutex`](crate::sync::Mutex), `OnceLock` is never poisoned on panic.
///
/// [`OnceCell`]: crate::cell::OnceCell
/// [`LazyLock<T, F>`]: crate::sync::LazyLock
/// [`LazyLock::new(|| ...)`]: crate::sync::LazyLock::new
///
/// # Examples
///
/// Writing to a `OnceLock` from a separate thread:
///
/// ```
/// use std::sync::OnceLock;
///
/// static CELL: OnceLock<usize> = OnceLock::new();
///
/// // `OnceLock` has not been written to yet.
/// assert!(CELL.get().is_none());
///
/// // Spawn a thread and write to `OnceLock`.
/// std::thread::spawn(|| {
///     let value = CELL.get_or_init(|| 12345);
///     assert_eq!(value, &12345);
/// })
/// .join()
/// .unwrap();
///
/// // `OnceLock` now contains the value.
/// assert_eq!(
///     CELL.get(),
///     Some(&12345),
/// );
/// ```
///
/// You can use `OnceLock` to implement a type that requires "append-only" logic:
///
/// ```
/// use std::sync::{OnceLock, atomic::{AtomicU32, Ordering}};
/// use std::thread;
///
/// struct OnceList<T> {
///     data: OnceLock<T>,
///     next: OnceLock<Box<OnceList<T>>>,
/// }
/// impl<T> OnceList<T> {
///     const fn new() -> OnceList<T> {
///         OnceList { data: OnceLock::new(), next: OnceLock::new() }
///     }
///     fn push(&self, value: T) {
///         // FIXME: this impl is concise, but is also slow for long lists or many threads.
///         // as an exercise, consider how you might improve on it while preserving the behavior
///         if let Err(value) = self.data.set(value) {
///             let next = self.next.get_or_init(|| Box::new(OnceList::new()));
///             next.push(value)
///         };
///     }
///     fn contains(&self, example: &T) -> bool
///     where
///         T: PartialEq,
///     {
///         self.data.get().map(|item| item == example).filter(|v| *v).unwrap_or_else(|| {
///             self.next.get().map(|next| next.contains(example)).unwrap_or(false)
///         })
///     }
/// }
///
/// // Let's exercise this new Sync append-only list by doing a little counting
/// static LIST: OnceList<u32> = OnceList::new();
/// static COUNTER: AtomicU32 = AtomicU32::new(0);
///
/// # const LEN: u32 = if cfg!(miri) { 50 } else { 1000 };
/// # /*
/// const LEN: u32 = 1000;
/// # */
/// thread::scope(|s| {
///     for _ in 0..thread::available_parallelism().unwrap().get() {
///         s.spawn(|| {
///             while let i @ 0..LEN = COUNTER.fetch_add(1, Ordering::Relaxed) {
///                 LIST.push(i);
///             }
///         });
///     }
/// });
///
/// for i in 0..LEN {
///     assert!(LIST.contains(&i));
/// }
///
/// ```
#[stable(feature = "once_cell", since = "1.70.0")]
pub struct OnceLock<T> {
    // FIXME(nonpoison_once): switch to nonpoison version once it is available
    once: Once,
    // Whether or not the value is initialized is tracked by `once.is_completed()`.
    value: UnsafeCell<MaybeUninit<T>>,
    /// `PhantomData` to make sure dropck understands we're dropping T in our Drop impl.
    ///
    /// ```compile_fail,E0597
    /// use std::sync::OnceLock;
    ///
    /// struct A<'a>(&'a str);
    ///
    /// impl<'a> Drop for A<'a> {
    ///     fn drop(&mut self) {}
    /// }
    ///
    /// let cell = OnceLock::new();
    /// {
    ///     let s = String::new();
    ///     let _ = cell.set(A(&s));
    /// }
    /// ```
    _marker: PhantomData<T>,
}
|
||||
|
||||
impl<T> OnceLock<T> {
    /// Creates a new uninitialized cell.
    #[inline]
    #[must_use]
    #[stable(feature = "once_cell", since = "1.70.0")]
    #[rustc_const_stable(feature = "once_cell", since = "1.70.0")]
    pub const fn new() -> OnceLock<T> {
        OnceLock {
            once: Once::new(),
            value: UnsafeCell::new(MaybeUninit::uninit()),
            _marker: PhantomData,
        }
    }

    /// Gets the reference to the underlying value.
    ///
    /// Returns `None` if the cell is uninitialized, or being initialized.
    /// This method never blocks.
    #[inline]
    #[stable(feature = "once_cell", since = "1.70.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn get(&self) -> Option<&T> {
        if self.initialized() {
            // Safe b/c checked initialized
            Some(unsafe { self.get_unchecked() })
        } else {
            None
        }
    }

    /// Gets the mutable reference to the underlying value.
    ///
    /// Returns `None` if the cell is uninitialized.
    ///
    /// This method never blocks. Since it borrows the `OnceLock` mutably,
    /// it is statically guaranteed that no active borrows to the `OnceLock`
    /// exist, including from other threads.
    #[inline]
    #[stable(feature = "once_cell", since = "1.70.0")]
    pub fn get_mut(&mut self) -> Option<&mut T> {
        if self.initialized_mut() {
            // Safe b/c checked initialized and we have a unique access
            Some(unsafe { self.get_unchecked_mut() })
        } else {
            None
        }
    }

    /// Blocks the current thread until the cell is initialized.
    ///
    /// # Example
    ///
    /// Waiting for a computation on another thread to finish:
    /// ```rust
    /// use std::thread;
    /// use std::sync::OnceLock;
    ///
    /// let value = OnceLock::new();
    ///
    /// thread::scope(|s| {
    ///     s.spawn(|| value.set(1 + 1));
    ///
    ///     let result = value.wait();
    ///     assert_eq!(result, &2);
    /// })
    /// ```
    #[inline]
    #[stable(feature = "once_wait", since = "1.86.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn wait(&self) -> &T {
        self.once.wait_force();

        // SAFETY: `wait_force` only returns once the `Once` has completed
        // (ignoring poisoning), i.e. once the value has been written.
        unsafe { self.get_unchecked() }
    }

    /// Initializes the contents of the cell to `value`.
    ///
    /// May block if another thread is currently attempting to initialize the cell. The cell is
    /// guaranteed to contain a value when `set` returns, though not necessarily the one provided.
    ///
    /// Returns `Ok(())` if the cell was uninitialized and
    /// `Err(value)` if the cell was already initialized.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::OnceLock;
    ///
    /// static CELL: OnceLock<i32> = OnceLock::new();
    ///
    /// fn main() {
    ///     assert!(CELL.get().is_none());
    ///
    ///     std::thread::spawn(|| {
    ///         assert_eq!(CELL.set(92), Ok(()));
    ///     }).join().unwrap();
    ///
    ///     assert_eq!(CELL.set(62), Err(62));
    ///     assert_eq!(CELL.get(), Some(&92));
    /// }
    /// ```
    #[inline]
    #[stable(feature = "once_cell", since = "1.70.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn set(&self, value: T) -> Result<(), T> {
        match self.try_insert(value) {
            Ok(_) => Ok(()),
            Err((_, value)) => Err(value),
        }
    }

    /// Initializes the contents of the cell to `value` if the cell was uninitialized,
    /// then returns a reference to it.
    ///
    /// May block if another thread is currently attempting to initialize the cell. The cell is
    /// guaranteed to contain a value when `try_insert` returns, though not necessarily the
    /// one provided.
    ///
    /// Returns `Ok(&value)` if the cell was uninitialized and
    /// `Err((&current_value, value))` if it was already initialized.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(once_cell_try_insert)]
    ///
    /// use std::sync::OnceLock;
    ///
    /// static CELL: OnceLock<i32> = OnceLock::new();
    ///
    /// fn main() {
    ///     assert!(CELL.get().is_none());
    ///
    ///     std::thread::spawn(|| {
    ///         assert_eq!(CELL.try_insert(92), Ok(&92));
    ///     }).join().unwrap();
    ///
    ///     assert_eq!(CELL.try_insert(62), Err((&92, 62)));
    ///     assert_eq!(CELL.get(), Some(&92));
    /// }
    /// ```
    #[inline]
    #[unstable(feature = "once_cell_try_insert", issue = "116693")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn try_insert(&self, value: T) -> Result<&T, (&T, T)> {
        // If `get_or_init` consumed `value`, our closure ran and we won the
        // initialization race; otherwise another thread's value is in place.
        let mut value = Some(value);
        let res = self.get_or_init(|| value.take().unwrap());
        match value {
            None => Ok(res),
            Some(value) => Err((res, value)),
        }
    }

    /// Gets the contents of the cell, initializing it to `f()` if the cell
    /// was uninitialized.
    ///
    /// Many threads may call `get_or_init` concurrently with different
    /// initializing functions, but it is guaranteed that only one function
    /// will be executed if the function doesn't panic.
    ///
    /// # Panics
    ///
    /// If `f()` panics, the panic is propagated to the caller, and the cell
    /// remains uninitialized.
    ///
    /// It is an error to reentrantly initialize the cell from `f`. The
    /// exact outcome is unspecified. Current implementation deadlocks, but
    /// this may be changed to a panic in the future.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::OnceLock;
    ///
    /// let cell = OnceLock::new();
    /// let value = cell.get_or_init(|| 92);
    /// assert_eq!(value, &92);
    /// let value = cell.get_or_init(|| unreachable!());
    /// assert_eq!(value, &92);
    /// ```
    #[inline]
    #[stable(feature = "once_cell", since = "1.70.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn get_or_init<F>(&self, f: F) -> &T
    where
        F: FnOnce() -> T,
    {
        // The error type `!` is uninhabited, so this match is exhaustive
        // with only the `Ok` arm.
        match self.get_or_try_init(|| Ok::<T, !>(f())) {
            Ok(val) => val,
        }
    }

    /// Gets the mutable reference of the contents of the cell, initializing
    /// it to `f()` if the cell was uninitialized.
    ///
    /// This method never blocks. Since it borrows the `OnceLock` mutably,
    /// it is statically guaranteed that no active borrows to the `OnceLock`
    /// exist, including from other threads.
    ///
    /// # Panics
    ///
    /// If `f()` panics, the panic is propagated to the caller, and the cell
    /// remains uninitialized.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(once_cell_get_mut)]
    ///
    /// use std::sync::OnceLock;
    ///
    /// let mut cell = OnceLock::new();
    /// let value = cell.get_mut_or_init(|| 92);
    /// assert_eq!(*value, 92);
    ///
    /// *value += 2;
    /// assert_eq!(*value, 94);
    ///
    /// let value = cell.get_mut_or_init(|| unreachable!());
    /// assert_eq!(*value, 94);
    /// ```
    #[inline]
    #[unstable(feature = "once_cell_get_mut", issue = "121641")]
    pub fn get_mut_or_init<F>(&mut self, f: F) -> &mut T
    where
        F: FnOnce() -> T,
    {
        // Same `!`-error trick as `get_or_init`: the `Ok` arm is exhaustive.
        match self.get_mut_or_try_init(|| Ok::<T, !>(f())) {
            Ok(val) => val,
        }
    }

    /// Gets the contents of the cell, initializing it to `f()` if
    /// the cell was uninitialized. If the cell was uninitialized
    /// and `f()` failed, an error is returned.
    ///
    /// # Panics
    ///
    /// If `f()` panics, the panic is propagated to the caller, and
    /// the cell remains uninitialized.
    ///
    /// It is an error to reentrantly initialize the cell from `f`.
    /// The exact outcome is unspecified. Current implementation
    /// deadlocks, but this may be changed to a panic in the future.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(once_cell_try)]
    ///
    /// use std::sync::OnceLock;
    ///
    /// let cell = OnceLock::new();
    /// assert_eq!(cell.get_or_try_init(|| Err(())), Err(()));
    /// assert!(cell.get().is_none());
    /// let value = cell.get_or_try_init(|| -> Result<i32, ()> {
    ///     Ok(92)
    /// });
    /// assert_eq!(value, Ok(&92));
    /// assert_eq!(cell.get(), Some(&92))
    /// ```
    #[inline]
    #[unstable(feature = "once_cell_try", issue = "109737")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn get_or_try_init<F, E>(&self, f: F) -> Result<&T, E>
    where
        F: FnOnce() -> Result<T, E>,
    {
        // Fast path check
        // NOTE: We need to perform an acquire on the state in this method
        // in order to correctly synchronize `LazyLock::force`. This is
        // currently done by calling `self.get()`, which in turn calls
        // `self.initialized()`, which in turn performs the acquire.
        if let Some(value) = self.get() {
            return Ok(value);
        }
        self.initialize(f)?;

        // SAFETY: The inner value has been initialized
        Ok(unsafe { self.get_unchecked() })
    }

    /// Gets the mutable reference of the contents of the cell, initializing
    /// it to `f()` if the cell was uninitialized. If the cell was uninitialized
    /// and `f()` failed, an error is returned.
    ///
    /// This method never blocks. Since it borrows the `OnceLock` mutably,
    /// it is statically guaranteed that no active borrows to the `OnceLock`
    /// exist, including from other threads.
    ///
    /// # Panics
    ///
    /// If `f()` panics, the panic is propagated to the caller, and
    /// the cell remains uninitialized.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(once_cell_get_mut)]
    ///
    /// use std::sync::OnceLock;
    ///
    /// let mut cell: OnceLock<u32> = OnceLock::new();
    ///
    /// // Failed attempts to initialize the cell do not change its contents
    /// assert!(cell.get_mut_or_try_init(|| "not a number!".parse()).is_err());
    /// assert!(cell.get().is_none());
    ///
    /// let value = cell.get_mut_or_try_init(|| "1234".parse());
    /// assert_eq!(value, Ok(&mut 1234));
    /// *value.unwrap() += 2;
    /// assert_eq!(cell.get(), Some(&1236))
    /// ```
    #[inline]
    #[unstable(feature = "once_cell_get_mut", issue = "121641")]
    pub fn get_mut_or_try_init<F, E>(&mut self, f: F) -> Result<&mut T, E>
    where
        F: FnOnce() -> Result<T, E>,
    {
        if self.get_mut().is_none() {
            self.initialize(f)?;
        }

        // SAFETY: The inner value has been initialized
        Ok(unsafe { self.get_unchecked_mut() })
    }

    /// Consumes the `OnceLock`, returning the wrapped value. Returns
    /// `None` if the cell was uninitialized.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::OnceLock;
    ///
    /// let cell: OnceLock<String> = OnceLock::new();
    /// assert_eq!(cell.into_inner(), None);
    ///
    /// let cell = OnceLock::new();
    /// cell.set("hello".to_string()).unwrap();
    /// assert_eq!(cell.into_inner(), Some("hello".to_string()));
    /// ```
    #[inline]
    #[stable(feature = "once_cell", since = "1.70.0")]
    pub fn into_inner(mut self) -> Option<T> {
        self.take()
    }

    /// Takes the value out of this `OnceLock`, moving it back to an uninitialized state.
    ///
    /// Has no effect and returns `None` if the `OnceLock` was uninitialized.
    ///
    /// Since this method borrows the `OnceLock` mutably, it is statically guaranteed that
    /// no active borrows to the `OnceLock` exist, including from other threads.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::OnceLock;
    ///
    /// let mut cell: OnceLock<String> = OnceLock::new();
    /// assert_eq!(cell.take(), None);
    ///
    /// let mut cell = OnceLock::new();
    /// cell.set("hello".to_string()).unwrap();
    /// assert_eq!(cell.take(), Some("hello".to_string()));
    /// assert_eq!(cell.get(), None);
    /// ```
    #[inline]
    #[stable(feature = "once_cell", since = "1.70.0")]
    pub fn take(&mut self) -> Option<T> {
        if self.initialized_mut() {
            self.once = Once::new();
            // SAFETY: `self.value` is initialized and contains a valid `T`.
            // `self.once` is reset, so `initialized()` will be false again
            // which prevents the value from being read twice.
            unsafe { Some(self.value.get_mut().assume_init_read()) }
        } else {
            None
        }
    }

    // Shared-reference initialization check; performs the acquire load that
    // synchronizes with the write in `initialize`.
    #[inline]
    fn initialized(&self) -> bool {
        self.once.is_completed()
    }

    // Exclusive-reference initialization check; avoids the atomic load.
    #[inline]
    fn initialized_mut(&mut self) -> bool {
        // `state()` does not perform an atomic load, so prefer it over `is_complete()`.
        let state = self.once.state();
        match state {
            OnceExclusiveState::Complete => true,
            _ => false,
        }
    }

    // Slow path: run `f` under the `Once` and store its result on success.
    #[cold]
    #[optimize(size)]
    fn initialize<F, E>(&self, f: F) -> Result<(), E>
    where
        F: FnOnce() -> Result<T, E>,
    {
        let mut res: Result<(), E> = Ok(());
        let slot = &self.value;

        // Ignore poisoning from other threads
        // If another thread panics, then we'll be able to run our closure
        self.once.call_once_force(|p| {
            match f() {
                Ok(value) => {
                    // SAFETY: `call_once_force` guarantees this closure runs
                    // at most once and exclusively, so no other reference to
                    // the slot can exist while we write.
                    unsafe { (&mut *slot.get()).write(value) };
                }
                Err(e) => {
                    res = Err(e);

                    // Treat the underlying `Once` as poisoned since we
                    // failed to initialize our value.
                    p.poison();
                }
            }
        });
        res
    }

    /// # Safety
    ///
    /// The cell must be initialized
    #[inline]
    unsafe fn get_unchecked(&self) -> &T {
        debug_assert!(self.initialized());
        unsafe { (&*self.value.get()).assume_init_ref() }
    }

    /// # Safety
    ///
    /// The cell must be initialized
    #[inline]
    unsafe fn get_unchecked_mut(&mut self) -> &mut T {
        debug_assert!(self.initialized_mut());
        unsafe { self.value.get_mut().assume_init_mut() }
    }
}
|
||||
|
||||
// Why do we need `T: Send`?
// Thread A creates a `OnceLock` and shares it with
// scoped thread B, which fills the cell, which is
// then destroyed by A. That is, destructor observes
// a sent value.
// (`T: Sync` is additionally required for `Sync` because `get()` hands out
// `&T` to multiple threads at once.)
#[stable(feature = "once_cell", since = "1.70.0")]
unsafe impl<T: Sync + Send> Sync for OnceLock<T> {}
#[stable(feature = "once_cell", since = "1.70.0")]
unsafe impl<T: Send> Send for OnceLock<T> {}
|
||||
|
||||
// `OnceLock` adds no unwind-unsafe state of its own (it holds only the
// `Once` and the value slot), so unwind safety follows that of `T`.
#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: RefUnwindSafe + UnwindSafe> RefUnwindSafe for OnceLock<T> {}
#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: UnwindSafe> UnwindSafe for OnceLock<T> {}
|
||||
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
#[rustc_const_unstable(feature = "const_default", issue = "143894")]
|
||||
impl<T> const Default for OnceLock<T> {
|
||||
/// Creates a new uninitialized cell.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// fn main() {
|
||||
/// assert_eq!(OnceLock::<()>::new(), OnceLock::default());
|
||||
/// }
|
||||
/// ```
|
||||
#[inline]
|
||||
fn default() -> OnceLock<T> {
|
||||
OnceLock::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
impl<T: fmt::Debug> fmt::Debug for OnceLock<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let mut d = f.debug_tuple("OnceLock");
|
||||
match self.get() {
|
||||
Some(v) => d.field(v),
|
||||
None => d.field(&format_args!("<uninit>")),
|
||||
};
|
||||
d.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
impl<T: Clone> Clone for OnceLock<T> {
|
||||
#[inline]
|
||||
fn clone(&self) -> OnceLock<T> {
|
||||
let cell = Self::new();
|
||||
if let Some(value) = self.get() {
|
||||
match cell.set(value.clone()) {
|
||||
Ok(()) => (),
|
||||
Err(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
cell
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
impl<T> From<T> for OnceLock<T> {
|
||||
/// Creates a new cell with its contents set to `value`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// # fn main() -> Result<(), i32> {
|
||||
/// let a = OnceLock::from(3);
|
||||
/// let b = OnceLock::new();
|
||||
/// b.set(3)?;
|
||||
/// assert_eq!(a, b);
|
||||
/// Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
#[inline]
|
||||
fn from(value: T) -> Self {
|
||||
let cell = Self::new();
|
||||
match cell.set(value) {
|
||||
Ok(()) => cell,
|
||||
Err(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
impl<T: PartialEq> PartialEq for OnceLock<T> {
|
||||
/// Equality for two `OnceLock`s.
|
||||
///
|
||||
/// Two `OnceLock`s are equal if they either both contain values and their
|
||||
/// values are equal, or if neither contains a value.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// let five = OnceLock::new();
|
||||
/// five.set(5).unwrap();
|
||||
///
|
||||
/// let also_five = OnceLock::new();
|
||||
/// also_five.set(5).unwrap();
|
||||
///
|
||||
/// assert!(five == also_five);
|
||||
///
|
||||
/// assert!(OnceLock::<u32>::new() == OnceLock::<u32>::new());
|
||||
/// ```
|
||||
#[inline]
|
||||
fn eq(&self, other: &OnceLock<T>) -> bool {
|
||||
self.get() == other.get()
|
||||
}
|
||||
}
|
||||
|
||||
// The `PartialEq` impl above compares `Option<&T>` values, which is a total
// equivalence relation whenever `T: Eq`.
#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: Eq> Eq for OnceLock<T> {}
|
||||
|
||||
// Drops the contained value, if any. `#[may_dangle]` lets dropck accept a
// `OnceLock<T>` outliving borrows inside `T` (see the `PhantomData` field).
#[stable(feature = "once_cell", since = "1.70.0")]
unsafe impl<#[may_dangle] T> Drop for OnceLock<T> {
    #[inline]
    fn drop(&mut self) {
        if self.initialized_mut() {
            // SAFETY: The cell is initialized and being dropped, so it can't
            // be accessed again. We also don't touch the `T` other than
            // dropping it, which validates our usage of #[may_dangle].
            unsafe { self.value.get_mut().assume_init_drop() };
        }
    }
}
|
||||
389
crates/std/src/sync/poison.rs
Normal file
389
crates/std/src/sync/poison.rs
Normal file
@@ -0,0 +1,389 @@
|
||||
//! Synchronization objects that employ poisoning.
|
||||
//!
|
||||
//! # Poisoning
|
||||
//!
|
||||
//! All synchronization objects in this module implement a strategy called
|
||||
//! "poisoning" where a primitive becomes poisoned if it recognizes that some
|
||||
//! thread has panicked while holding the exclusive access granted by the
|
||||
//! primitive. This information is then propagated to all other threads
|
||||
//! to signify that the data protected by this primitive is likely tainted
|
||||
//! (some invariant is not being upheld).
|
||||
//!
|
||||
//! The specifics of how this "poisoned" state affects other threads and whether
|
||||
//! the panics are recognized reliably or on a best-effort basis depend on the
|
||||
//! primitive. See [Overview](#overview) below.
|
||||
//!
|
||||
//! The synchronization objects in this module have alternative implementations that do not employ
|
||||
//! poisoning in the [`std::sync::nonpoison`] module.
|
||||
//!
|
||||
//! [`std::sync::nonpoison`]: crate::sync::nonpoison
|
||||
//!
|
||||
//! # Overview
|
||||
//!
|
||||
//! Below is a list of synchronization objects provided by this module
|
||||
//! with a high-level overview for each object and a description
|
||||
//! of how it employs "poisoning".
|
||||
//!
|
||||
//! - [`Condvar`]: Condition Variable, providing the ability to block
|
||||
//! a thread while waiting for an event to occur.
|
||||
//!
|
||||
//! Condition variables are typically associated with
|
||||
//! a boolean predicate (a condition) and a mutex.
|
||||
//! This implementation is associated with [`poison::Mutex`](Mutex),
|
||||
//! which employs poisoning.
|
||||
//! For this reason, [`Condvar::wait()`] will return a [`LockResult`],
|
||||
//! just like [`poison::Mutex::lock()`](Mutex::lock) does.
|
||||
//!
|
||||
//! - [`Mutex`]: Mutual Exclusion mechanism, which ensures that at
|
||||
//! most one thread at a time is able to access some data.
|
||||
//!
|
||||
//! Panicking while holding the lock typically poisons the mutex, but it is
|
||||
//! not guaranteed to detect this condition in all circumstances.
|
||||
//! [`Mutex::lock()`] returns a [`LockResult`], providing a way to deal with
|
||||
//! the poisoned state. See [`Mutex`'s documentation](Mutex#poisoning) for more.
|
||||
//!
|
||||
//! - [`RwLock`]: Provides a mutual exclusion mechanism which allows
|
||||
//! multiple readers at the same time, while allowing only one
|
||||
//! writer at a time. In some cases, this can be more efficient than
|
||||
//! a mutex.
|
||||
//!
|
||||
//! This implementation, like [`Mutex`], usually becomes poisoned on a panic.
|
||||
//! Note, however, that an `RwLock` may only be poisoned if a panic occurs
|
||||
//! while it is locked exclusively (write mode). If a panic occurs in any reader,
|
||||
//! then the lock will not be poisoned.
|
||||
//!
|
||||
//! Note that the [`Once`] type also employs poisoning, but since it has non-poisoning `force`
|
||||
//! methods available on it, there is no separate `nonpoison` and `poison` version.
|
||||
//!
|
||||
//! [`Once`]: crate::sync::Once
|
||||
|
||||
// If we are not unwinding, `PoisonError` is uninhabited.
|
||||
#![cfg_attr(not(panic = "unwind"), expect(unreachable_code))]
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use self::condvar::Condvar;
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
pub use self::mutex::MappedMutexGuard;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use self::mutex::{Mutex, MutexGuard};
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
pub use self::rwlock::{MappedRwLockReadGuard, MappedRwLockWriteGuard};
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
|
||||
use crate::error::Error;
|
||||
use crate::fmt;
|
||||
#[cfg(panic = "unwind")]
|
||||
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
|
||||
#[cfg(panic = "unwind")]
|
||||
use crate::thread;
|
||||
|
||||
mod condvar;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
mod mutex;
|
||||
mod rwlock;
|
||||
|
||||
/// Poison flag shared by the poisoning lock types.
///
/// When unwinding is disabled, a panic aborts the process, so poisoning can
/// never be observed and the flag carries no state at all.
pub(crate) struct Flag {
    #[cfg(panic = "unwind")]
    failed: Atomic<bool>,
}
|
||||
|
||||
// Note that the `Ordering` used to access the `failed` field of `Flag` below is
// always `Relaxed`, and that's because this isn't actually protecting any data,
// it's just a flag whether we've panicked or not.
|
||||
//
|
||||
// The actual location that this matters is when a mutex is **locked** which is
|
||||
// where we have external synchronization ensuring that we see memory
|
||||
// reads/writes to this flag.
|
||||
//
|
||||
// As a result, if it matters, we should see the correct value for `failed` in
|
||||
// all cases.
|
||||
|
||||
impl Flag {
    /// Creates a flag in the unpoisoned state.
    #[inline]
    pub const fn new() -> Flag {
        Flag {
            #[cfg(panic = "unwind")]
            failed: AtomicBool::new(false),
        }
    }

    /// Checks the flag for an unguarded borrow, where we only care about existing poison.
    #[inline]
    pub fn borrow(&self) -> LockResult<()> {
        if self.get() { Err(PoisonError::new(())) } else { Ok(()) }
    }

    /// Checks the flag for a guarded borrow, where we may also set poison when `done`.
    #[inline]
    pub fn guard(&self) -> LockResult<Guard> {
        // Remember whether this thread was already panicking at acquisition,
        // so `done` can tell a new panic apart from a pre-existing one.
        let ret = Guard {
            #[cfg(panic = "unwind")]
            panicking: thread::panicking(),
        };
        if self.get() { Err(PoisonError::new(ret)) } else { Ok(ret) }
    }

    /// Marks the flag as poisoned if a *new* panic started while the guard
    /// was held (i.e. the thread was not already panicking at acquisition).
    #[inline]
    #[cfg(panic = "unwind")]
    pub fn done(&self, guard: &Guard) {
        if !guard.panicking && thread::panicking() {
            self.failed.store(true, Ordering::Relaxed);
        }
    }

    // Without unwinding a panic aborts, so there is nothing to record.
    #[inline]
    #[cfg(not(panic = "unwind"))]
    pub fn done(&self, _guard: &Guard) {}

    /// Returns whether the flag is poisoned.
    #[inline]
    #[cfg(panic = "unwind")]
    pub fn get(&self) -> bool {
        self.failed.load(Ordering::Relaxed)
    }

    // Without unwinding the flag can never be set, so this is constant.
    #[inline(always)]
    #[cfg(not(panic = "unwind"))]
    pub fn get(&self) -> bool {
        false
    }

    /// Resets the flag to the unpoisoned state.
    #[inline]
    pub fn clear(&self) {
        #[cfg(panic = "unwind")]
        self.failed.store(false, Ordering::Relaxed)
    }
}
|
||||
|
||||
/// Snapshot of the thread's panicking state taken when a lock is acquired.
///
/// `Flag::done` compares this snapshot against `thread::panicking()` at
/// release time to detect a panic that began while the lock was held.
#[derive(Clone)]
pub(crate) struct Guard {
    #[cfg(panic = "unwind")]
    panicking: bool,
}
|
||||
|
||||
/// A type of error which can be returned whenever a lock is acquired.
///
/// Both [`Mutex`]es and [`RwLock`]s are poisoned whenever a thread fails while the lock
/// is held. The precise semantics for when a lock is poisoned is documented on
/// each lock. For a lock in the poisoned state, unless the state is cleared manually,
/// all future acquisitions will return this error.
///
/// # Examples
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let mutex = Arc::new(Mutex::new(1));
///
/// // poison the mutex
/// let c_mutex = Arc::clone(&mutex);
/// let _ = thread::spawn(move || {
///     let mut data = c_mutex.lock().unwrap();
///     *data = 2;
///     panic!();
/// }).join();
///
/// match mutex.lock() {
///     Ok(_) => unreachable!(),
///     Err(p_err) => {
///         let data = p_err.get_ref();
///         println!("recovered: {data}");
///     }
/// };
/// ```
/// [`Mutex`]: crate::sync::Mutex
/// [`RwLock`]: crate::sync::RwLock
#[stable(feature = "rust1", since = "1.0.0")]
pub struct PoisonError<T> {
    // The guard or data that was being acquired when poison was observed.
    data: T,
    // Without unwinding a lock can never be poisoned, so this field makes the
    // type uninhabited: `PoisonError` values cannot exist at all in that case.
    #[cfg(not(panic = "unwind"))]
    _never: !,
}
|
||||
|
||||
/// An enumeration of possible errors associated with a [`TryLockResult`] which
/// can occur while trying to acquire a lock, from the [`try_lock`] method on a
/// [`Mutex`] or the [`try_read`] and [`try_write`] methods on an [`RwLock`].
///
/// [`try_lock`]: crate::sync::Mutex::try_lock
/// [`try_read`]: crate::sync::RwLock::try_read
/// [`try_write`]: crate::sync::RwLock::try_write
/// [`Mutex`]: crate::sync::Mutex
/// [`RwLock`]: crate::sync::RwLock
#[stable(feature = "rust1", since = "1.0.0")]
pub enum TryLockError<T> {
    /// The lock could not be acquired because another thread failed while holding
    /// the lock.
    #[stable(feature = "rust1", since = "1.0.0")]
    Poisoned(#[stable(feature = "rust1", since = "1.0.0")] PoisonError<T>),
    /// The lock could not be acquired at this time because the operation would
    /// otherwise block.
    #[stable(feature = "rust1", since = "1.0.0")]
    WouldBlock,
}
|
||||
|
||||
/// A type alias for the result of a lock method which can be poisoned.
///
/// The [`Ok`] variant of this result indicates that the primitive was not
/// poisoned, and the operation result is contained within. The [`Err`] variant indicates
/// that the primitive was poisoned. Note that the [`Err`] variant *also* carries
/// an associated value assigned by the lock method, and it can be acquired through the
/// [`into_inner`] method. The semantics of the associated value depends on the corresponding
/// lock method.
///
/// [`into_inner`]: PoisonError::into_inner
#[stable(feature = "rust1", since = "1.0.0")]
pub type LockResult<T> = Result<T, PoisonError<T>>;
|
||||
|
||||
/// A type alias for the result of a nonblocking locking method.
///
/// For more information, see [`LockResult`]. A `TryLockResult` doesn't
/// necessarily hold the associated guard in the [`Err`] type as the lock might not
/// have been acquired for other reasons.
#[stable(feature = "rust1", since = "1.0.0")]
pub type TryLockResult<Guard> = Result<Guard, TryLockError<Guard>>;
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
impl<T> fmt::Debug for PoisonError<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The protected `data` is deliberately not printed; only the type
        // name is shown (renders as `PoisonError { .. }`).
        f.debug_struct("PoisonError").finish_non_exhaustive()
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Display for PoisonError<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to `str`'s Display impl so formatter flags (width,
        // alignment, …) are honored.
        "poisoned lock: another task failed inside".fmt(f)
    }
}
|
||||
|
||||
// Marker impl: `PoisonError` has no underlying cause, so the trait defaults suffice.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Error for PoisonError<T> {}
|
||||
|
||||
impl<T> PoisonError<T> {
    /// Creates a `PoisonError`.
    ///
    /// This is generally created by methods like [`Mutex::lock`](crate::sync::Mutex::lock)
    /// or [`RwLock::read`](crate::sync::RwLock::read).
    ///
    /// This method may panic if std was built with `panic="abort"`.
    #[cfg(panic = "unwind")]
    #[stable(feature = "sync_poison", since = "1.2.0")]
    pub fn new(data: T) -> PoisonError<T> {
        PoisonError { data }
    }

    /// Creates a `PoisonError`.
    ///
    /// This is generally created by methods like [`Mutex::lock`](crate::sync::Mutex::lock)
    /// or [`RwLock::read`](crate::sync::RwLock::read).
    ///
    /// This method may panic if std was built with `panic="abort"`.
    // With `panic = "abort"` the type is uninhabited (`_never: !`), so a value
    // can never legitimately be constructed; reaching this is a library bug.
    #[cfg(not(panic = "unwind"))]
    #[stable(feature = "sync_poison", since = "1.2.0")]
    #[track_caller]
    pub fn new(_data: T) -> PoisonError<T> {
        panic!("PoisonError created in a libstd built with panic=\"abort\"")
    }

    /// Consumes this error indicating that a lock is poisoned, returning the
    /// associated data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashSet;
    /// use std::sync::{Arc, Mutex};
    /// use std::thread;
    ///
    /// let mutex = Arc::new(Mutex::new(HashSet::new()));
    ///
    /// // poison the mutex
    /// let c_mutex = Arc::clone(&mutex);
    /// let _ = thread::spawn(move || {
    ///     let mut data = c_mutex.lock().unwrap();
    ///     data.insert(10);
    ///     panic!();
    /// }).join();
    ///
    /// let p_err = mutex.lock().unwrap_err();
    /// let data = p_err.into_inner();
    /// println!("recovered {} items", data.len());
    /// ```
    #[stable(feature = "sync_poison", since = "1.2.0")]
    pub fn into_inner(self) -> T {
        self.data
    }

    /// Reaches into this error indicating that a lock is poisoned, returning a
    /// reference to the associated data.
    #[stable(feature = "sync_poison", since = "1.2.0")]
    pub fn get_ref(&self) -> &T {
        &self.data
    }

    /// Reaches into this error indicating that a lock is poisoned, returning a
    /// mutable reference to the associated data.
    #[stable(feature = "sync_poison", since = "1.2.0")]
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.data
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<T> From<PoisonError<T>> for TryLockError<T> {
|
||||
fn from(err: PoisonError<T>) -> TryLockError<T> {
|
||||
TryLockError::Poisoned(err)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Debug for TryLockError<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            #[cfg(panic = "unwind")]
            TryLockError::Poisoned(..) => "Poisoned(..)".fmt(f),
            // With `panic = "abort"`, `PoisonError` is uninhabited; matching on
            // its never-typed field proves this arm unreachable while keeping
            // the match exhaustive.
            #[cfg(not(panic = "unwind"))]
            TryLockError::Poisoned(ref p) => match p._never {},
            TryLockError::WouldBlock => "WouldBlock".fmt(f),
        }
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Display for TryLockError<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Select the message, then format once through `str::fmt` so
        // formatter flags are honored.
        match *self {
            #[cfg(panic = "unwind")]
            TryLockError::Poisoned(..) => "poisoned lock: another task failed inside",
            // Unreachable with `panic = "abort"`: `PoisonError` is uninhabited.
            #[cfg(not(panic = "unwind"))]
            TryLockError::Poisoned(ref p) => match p._never {},
            TryLockError::WouldBlock => "try_lock failed because the operation would block",
        }
        .fmt(f)
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Error for TryLockError<T> {
    // Implements the deprecated `cause` (rather than `source`) to preserve the
    // historical behavior of this impl.
    #[allow(deprecated)]
    fn cause(&self) -> Option<&dyn Error> {
        match *self {
            #[cfg(panic = "unwind")]
            TryLockError::Poisoned(ref p) => Some(p),
            // Unreachable with `panic = "abort"`: `PoisonError` is uninhabited.
            #[cfg(not(panic = "unwind"))]
            TryLockError::Poisoned(ref p) => match p._never {},
            _ => None,
        }
    }
}
|
||||
|
||||
/// Maps the success value of a [`LockResult`] with `f`, re-wrapping the
/// transformed data in a fresh `PoisonError` when the input was poisoned.
pub(crate) fn map_result<T, U, F>(result: LockResult<T>, f: F) -> LockResult<U>
where
    F: FnOnce(T) -> U,
{
    match result {
        Ok(t) => Ok(f(t)),
        // With `panic = "abort"`, `PoisonError` is uninhabited, so the `Err`
        // arm is compiled out and the match stays exhaustive without it.
        #[cfg(panic = "unwind")]
        Err(PoisonError { data }) => Err(PoisonError::new(f(data))),
    }
}
|
||||
510
crates/std/src/sync/poison/condvar.rs
Normal file
510
crates/std/src/sync/poison/condvar.rs
Normal file
@@ -0,0 +1,510 @@
|
||||
use crate::fmt;
|
||||
use crate::sync::WaitTimeoutResult;
|
||||
use crate::sync::poison::{self, LockResult, MutexGuard, PoisonError, mutex};
|
||||
use crate::sys::sync as sys;
|
||||
use crate::time::{Duration, Instant};
|
||||
|
||||
/// A Condition Variable
///
/// Condition variables represent the ability to block a thread such that it
/// consumes no CPU time while waiting for an event to occur. Condition
/// variables are typically associated with a boolean predicate (a condition)
/// and a mutex. The predicate is always verified inside of the mutex before
/// determining that a thread must block.
///
/// Functions in this module will block the current **thread** of execution.
/// Note that any attempt to use multiple mutexes on the same condition
/// variable may result in a runtime panic.
///
/// # Examples
///
/// ```
/// use std::sync::{Arc, Mutex, Condvar};
/// use std::thread;
///
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
/// let pair2 = Arc::clone(&pair);
///
/// // Inside of our lock, spawn a new thread, and then wait for it to start.
/// thread::spawn(move || {
///     let (lock, cvar) = &*pair2;
///     let mut started = lock.lock().unwrap();
///     *started = true;
///     // We notify the condvar that the value has changed.
///     cvar.notify_one();
/// });
///
/// // Wait for the thread to start up.
/// let (lock, cvar) = &*pair;
/// let mut started = lock.lock().unwrap();
/// while !*started {
///     started = cvar.wait(started).unwrap();
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Condvar {
    // Platform-specific condition-variable implementation.
    inner: sys::Condvar,
}
|
||||
|
||||
impl Condvar {
    /// Creates a new condition variable which is ready to be waited on and
    /// notified.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Condvar;
    ///
    /// let condvar = Condvar::new();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
    #[must_use]
    #[inline]
    pub const fn new() -> Condvar {
        Condvar { inner: sys::Condvar::new() }
    }

    /// Blocks the current thread until this condition variable receives a
    /// notification.
    ///
    /// This function will atomically unlock the mutex specified (represented by
    /// `guard`) and block the current thread. This means that any calls
    /// to [`notify_one`] or [`notify_all`] which happen logically after the
    /// mutex is unlocked are candidates to wake this thread up. When this
    /// function call returns, the lock specified will have been re-acquired.
    ///
    /// Note that this function is susceptible to spurious wakeups. Condition
    /// variables normally have a boolean predicate associated with them, and
    /// the predicate must always be checked each time this function returns to
    /// protect against spurious wakeups.
    ///
    /// # Errors
    ///
    /// This function will return an error if the mutex being waited on is
    /// poisoned when this thread re-acquires the lock. For more information,
    /// see information about [poisoning] on the [`Mutex`] type.
    ///
    /// # Panics
    ///
    /// This function may [`panic!`] if it is used with more than one mutex
    /// over time.
    ///
    /// [`notify_one`]: Self::notify_one
    /// [`notify_all`]: Self::notify_all
    /// [poisoning]: super::Mutex#poisoning
    /// [`Mutex`]: super::Mutex
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    ///
    /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut started = lock.lock().unwrap();
    ///     *started = true;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_one();
    /// });
    ///
    /// // Wait for the thread to start up.
    /// let (lock, cvar) = &*pair;
    /// let mut started = lock.lock().unwrap();
    /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
    /// while !*started {
    ///     started = cvar.wait(started).unwrap();
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
        // SAFETY: `guard` proves the mutex is locked by the current thread,
        // which is what `sys::Condvar::wait` requires.
        let poisoned = unsafe {
            let lock = mutex::guard_lock(&guard);
            self.inner.wait(lock);
            // Re-check the poison flag after re-acquiring the lock: another
            // thread may have panicked while we were blocked.
            mutex::guard_poison(&guard).get()
        };
        if poisoned { Err(PoisonError::new(guard)) } else { Ok(guard) }
    }

    /// Blocks the current thread until the provided condition becomes false.
    ///
    /// `condition` is checked immediately; if not met (returns `true`), this
    /// will [`wait`] for the next notification then check again. This repeats
    /// until `condition` returns `false`, in which case this function returns.
    ///
    /// This function will atomically unlock the mutex specified (represented by
    /// `guard`) and block the current thread. This means that any calls
    /// to [`notify_one`] or [`notify_all`] which happen logically after the
    /// mutex is unlocked are candidates to wake this thread up. When this
    /// function call returns, the lock specified will have been re-acquired.
    ///
    /// # Errors
    ///
    /// This function will return an error if the mutex being waited on is
    /// poisoned when this thread re-acquires the lock. For more information,
    /// see information about [poisoning] on the [`Mutex`] type.
    ///
    /// [`wait`]: Self::wait
    /// [`notify_one`]: Self::notify_one
    /// [`notify_all`]: Self::notify_all
    /// [poisoning]: super::Mutex#poisoning
    /// [`Mutex`]: super::Mutex
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    ///
    /// let pair = Arc::new((Mutex::new(true), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut pending = lock.lock().unwrap();
    ///     *pending = false;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_one();
    /// });
    ///
    /// // Wait for the thread to start up.
    /// let (lock, cvar) = &*pair;
    /// // As long as the value inside the `Mutex<bool>` is `true`, we wait.
    /// let _guard = cvar.wait_while(lock.lock().unwrap(), |pending| { *pending }).unwrap();
    /// ```
    #[stable(feature = "wait_until", since = "1.42.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn wait_while<'a, T, F>(
        &self,
        mut guard: MutexGuard<'a, T>,
        mut condition: F,
    ) -> LockResult<MutexGuard<'a, T>>
    where
        F: FnMut(&mut T) -> bool,
    {
        // Re-check the predicate after every wakeup to guard against spurious
        // wakeups; `?` propagates poison as soon as it is observed.
        while condition(&mut *guard) {
            guard = self.wait(guard)?;
        }
        Ok(guard)
    }

    /// Waits on this condition variable for a notification, timing out after a
    /// specified duration.
    ///
    /// The semantics of this function are equivalent to [`wait`]
    /// except that the thread will be blocked for roughly no longer
    /// than `ms` milliseconds. This method should not be used for
    /// precise timing due to anomalies such as preemption or platform
    /// differences that might not cause the maximum amount of time
    /// waited to be precisely `ms`.
    ///
    /// Note that the best effort is made to ensure that the time waited is
    /// measured with a monotonic clock, and not affected by the changes made to
    /// the system time.
    ///
    /// The returned boolean is `false` only if the timeout is known
    /// to have elapsed.
    ///
    /// Like [`wait`], the lock specified will be re-acquired when this function
    /// returns, regardless of whether the timeout elapsed or not.
    ///
    /// [`wait`]: Self::wait
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    ///
    /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut started = lock.lock().unwrap();
    ///     *started = true;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_one();
    /// });
    ///
    /// // Wait for the thread to start up.
    /// let (lock, cvar) = &*pair;
    /// let mut started = lock.lock().unwrap();
    /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
    /// loop {
    ///     let result = cvar.wait_timeout_ms(started, 10).unwrap();
    ///     // 10 milliseconds have passed, or maybe the value changed!
    ///     started = result.0;
    ///     if *started == true {
    ///         // We received the notification and the value has been updated, we can leave.
    ///         break
    ///     }
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_should_not_be_called_on_const_items]
    #[deprecated(since = "1.6.0", note = "replaced by `std::sync::Condvar::wait_timeout`")]
    pub fn wait_timeout_ms<'a, T>(
        &self,
        guard: MutexGuard<'a, T>,
        ms: u32,
    ) -> LockResult<(MutexGuard<'a, T>, bool)> {
        let res = self.wait_timeout(guard, Duration::from_millis(ms as u64));
        // Legacy API: the boolean is `true` when NOT timed out, hence the `!`.
        poison::map_result(res, |(a, b)| (a, !b.timed_out()))
    }

    /// Waits on this condition variable for a notification, timing out after a
    /// specified duration.
    ///
    /// The semantics of this function are equivalent to [`wait`] except that
    /// the thread will be blocked for roughly no longer than `dur`. This
    /// method should not be used for precise timing due to anomalies such as
    /// preemption or platform differences that might not cause the maximum
    /// amount of time waited to be precisely `dur`.
    ///
    /// Note that the best effort is made to ensure that the time waited is
    /// measured with a monotonic clock, and not affected by the changes made to
    /// the system time. This function is susceptible to spurious wakeups.
    /// Condition variables normally have a boolean predicate associated with
    /// them, and the predicate must always be checked each time this function
    /// returns to protect against spurious wakeups. Furthermore, since the timeout
    /// is given relative to the moment this function is called, it needs to be adjusted
    /// when this function is called in a loop. The [`wait_timeout_while`] method
    /// lets you wait with a timeout while a predicate is true, taking care of all these concerns.
    ///
    /// The returned [`WaitTimeoutResult`] value indicates if the timeout is
    /// known to have elapsed.
    ///
    /// Like [`wait`], the lock specified will be re-acquired when this function
    /// returns, regardless of whether the timeout elapsed or not.
    ///
    /// [`wait`]: Self::wait
    /// [`wait_timeout_while`]: Self::wait_timeout_while
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut started = lock.lock().unwrap();
    ///     *started = true;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_one();
    /// });
    ///
    /// // wait for the thread to start up
    /// let (lock, cvar) = &*pair;
    /// let mut started = lock.lock().unwrap();
    /// // as long as the value inside the `Mutex<bool>` is `false`, we wait
    /// loop {
    ///     let result = cvar.wait_timeout(started, Duration::from_millis(10)).unwrap();
    ///     // 10 milliseconds have passed, or maybe the value changed!
    ///     started = result.0;
    ///     if *started == true {
    ///         // We received the notification and the value has been updated, we can leave.
    ///         break
    ///     }
    /// }
    /// ```
    #[stable(feature = "wait_timeout", since = "1.5.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn wait_timeout<'a, T>(
        &self,
        guard: MutexGuard<'a, T>,
        dur: Duration,
    ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
        // SAFETY: `guard` proves the mutex is locked by the current thread,
        // which is what `sys::Condvar::wait_timeout` requires.
        let (poisoned, result) = unsafe {
            let lock = mutex::guard_lock(&guard);
            // `success` is `true` when woken by a notification; invert it to
            // express "timed out".
            let success = self.inner.wait_timeout(lock, dur);
            (mutex::guard_poison(&guard).get(), WaitTimeoutResult(!success))
        };
        if poisoned { Err(PoisonError::new((guard, result))) } else { Ok((guard, result)) }
    }

    /// Waits on this condition variable for a notification, timing out after a
    /// specified duration.
    ///
    /// The semantics of this function are equivalent to [`wait_while`] except
    /// that the thread will be blocked for roughly no longer than `dur`. This
    /// method should not be used for precise timing due to anomalies such as
    /// preemption or platform differences that might not cause the maximum
    /// amount of time waited to be precisely `dur`.
    ///
    /// Note that the best effort is made to ensure that the time waited is
    /// measured with a monotonic clock, and not affected by the changes made to
    /// the system time.
    ///
    /// The returned [`WaitTimeoutResult`] value indicates if the timeout is
    /// known to have elapsed without the condition being met.
    ///
    /// Like [`wait_while`], the lock specified will be re-acquired when this
    /// function returns, regardless of whether the timeout elapsed or not.
    ///
    /// [`wait_while`]: Self::wait_while
    /// [`wait_timeout`]: Self::wait_timeout
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let pair = Arc::new((Mutex::new(true), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut pending = lock.lock().unwrap();
    ///     *pending = false;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_one();
    /// });
    ///
    /// // wait for the thread to start up
    /// let (lock, cvar) = &*pair;
    /// let result = cvar.wait_timeout_while(
    ///     lock.lock().unwrap(),
    ///     Duration::from_millis(100),
    ///     |&mut pending| pending,
    /// ).unwrap();
    /// if result.1.timed_out() {
    ///     // timed-out without the condition ever evaluating to false.
    /// }
    /// // access the locked mutex via result.0
    /// ```
    #[stable(feature = "wait_timeout_until", since = "1.42.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn wait_timeout_while<'a, T, F>(
        &self,
        mut guard: MutexGuard<'a, T>,
        dur: Duration,
        mut condition: F,
    ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)>
    where
        F: FnMut(&mut T) -> bool,
    {
        let start = Instant::now();
        loop {
            if !condition(&mut *guard) {
                return Ok((guard, WaitTimeoutResult(false)));
            }
            // Shrink the timeout by the time already spent so the total wait
            // across loop iterations stays bounded by `dur`.
            let timeout = match dur.checked_sub(start.elapsed()) {
                Some(timeout) => timeout,
                None => return Ok((guard, WaitTimeoutResult(true))),
            };
            guard = self.wait_timeout(guard, timeout)?.0;
        }
    }

    /// Wakes up one blocked thread on this condvar.
    ///
    /// If there is a blocked thread on this condition variable, then it will
    /// be woken up from its call to [`wait`] or [`wait_timeout`]. Calls to
    /// `notify_one` are not buffered in any way.
    ///
    /// To wake up all threads, see [`notify_all`].
    ///
    /// [`wait`]: Self::wait
    /// [`wait_timeout`]: Self::wait_timeout
    /// [`notify_all`]: Self::notify_all
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    ///
    /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut started = lock.lock().unwrap();
    ///     *started = true;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_one();
    /// });
    ///
    /// // Wait for the thread to start up.
    /// let (lock, cvar) = &*pair;
    /// let mut started = lock.lock().unwrap();
    /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
    /// while !*started {
    ///     started = cvar.wait(started).unwrap();
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn notify_one(&self) {
        self.inner.notify_one()
    }

    /// Wakes up all blocked threads on this condvar.
    ///
    /// This method will ensure that any current waiters on the condition
    /// variable are awoken. Calls to `notify_all()` are not buffered in any
    /// way.
    ///
    /// To wake up only one thread, see [`notify_one`].
    ///
    /// [`notify_one`]: Self::notify_one
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    ///
    /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut started = lock.lock().unwrap();
    ///     *started = true;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_all();
    /// });
    ///
    /// // Wait for the thread to start up.
    /// let (lock, cvar) = &*pair;
    /// let mut started = lock.lock().unwrap();
    /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
    /// while !*started {
    ///     started = cvar.wait(started).unwrap();
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn notify_all(&self) {
        self.inner.notify_all()
    }
}
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Condvar {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The platform-specific inner state is opaque; render as `Condvar { .. }`.
        f.debug_struct("Condvar").finish_non_exhaustive()
    }
}
|
||||
|
||||
#[stable(feature = "condvar_default", since = "1.10.0")]
|
||||
impl Default for Condvar {
|
||||
/// Creates a `Condvar` which is ready to be waited on and notified.
|
||||
fn default() -> Condvar {
|
||||
Condvar::new()
|
||||
}
|
||||
}
|
||||
946
crates/std/src/sync/poison/mutex.rs
Normal file
946
crates/std/src/sync/poison/mutex.rs
Normal file
@@ -0,0 +1,946 @@
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::fmt;
|
||||
use crate::marker::PhantomData;
|
||||
use crate::mem::{self, ManuallyDrop};
|
||||
use crate::ops::{Deref, DerefMut};
|
||||
use crate::ptr::NonNull;
|
||||
use crate::sync::{LockResult, PoisonError, TryLockError, TryLockResult, poison};
|
||||
use crate::sys::sync as sys;
|
||||
|
||||
/// A mutual exclusion primitive useful for protecting shared data
|
||||
///
|
||||
/// This mutex will block threads waiting for the lock to become available. The
|
||||
/// mutex can be created via a [`new`] constructor. Each mutex has a type parameter
|
||||
/// which represents the data that it is protecting. The data can only be accessed
|
||||
/// through the RAII guards returned from [`lock`] and [`try_lock`], which
|
||||
/// guarantees that the data is only ever accessed when the mutex is locked.
|
||||
///
|
||||
/// # Poisoning
|
||||
///
|
||||
/// The mutexes in this module implement a strategy called "poisoning" where a
|
||||
/// mutex becomes poisoned if it recognizes that the thread holding it has
|
||||
/// panicked.
|
||||
///
|
||||
/// Once a mutex is poisoned, all other threads are unable to access the data by
|
||||
/// default as it is likely tainted (some invariant is not being upheld). For a
|
||||
/// mutex, this means that the [`lock`] and [`try_lock`] methods return a
|
||||
/// [`Result`] which indicates whether a mutex has been poisoned or not. Most
|
||||
/// usage of a mutex will simply [`unwrap()`] these results, propagating panics
|
||||
/// among threads to ensure that a possibly invalid invariant is not witnessed.
|
||||
///
|
||||
/// Poisoning is only advisory: the [`PoisonError`] type has an [`into_inner`]
|
||||
/// method which will return the guard that would have otherwise been returned
|
||||
/// on a successful lock. This allows access to the data, despite the lock being
|
||||
/// poisoned.
|
||||
///
|
||||
/// In addition, the panic detection is not ideal, so even unpoisoned mutexes
|
||||
/// need to be handled with care, since certain panics may have been skipped.
|
||||
/// Here is a non-exhaustive list of situations where this might occur:
|
||||
///
|
||||
/// - If a mutex is locked while a panic is underway, e.g. within a [`Drop`]
|
||||
/// implementation or a [panic hook], panicking for the second time while the
|
||||
/// lock is held will leave the mutex unpoisoned. Note that while double panic
|
||||
/// usually aborts the program, [`catch_unwind`] can prevent this.
|
||||
///
|
||||
/// - Locking and unlocking the mutex across different panic contexts, e.g. by
|
||||
/// storing the guard to a [`Cell`] within [`Drop::drop`] and accessing it
|
||||
/// outside, or vice versa, can affect poisoning status in an unexpected way.
|
||||
///
|
||||
/// - Foreign exceptions do not currently trigger poisoning even in absence of
|
||||
/// other panics.
|
||||
///
|
||||
/// While this rarely happens in realistic code, `unsafe` code cannot rely on
|
||||
/// poisoning for soundness, since the behavior of poisoning can depend on
|
||||
/// outside context. Here's an example of **incorrect** use of poisoning:
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::sync::Mutex;
|
||||
///
|
||||
/// struct MutexBox<T> {
|
||||
/// data: Mutex<*mut T>,
|
||||
/// }
|
||||
///
|
||||
/// impl<T> MutexBox<T> {
|
||||
/// pub fn new(value: T) -> Self {
|
||||
/// Self {
|
||||
/// data: Mutex::new(Box::into_raw(Box::new(value))),
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// pub fn replace_with(&self, f: impl FnOnce(T) -> T) {
|
||||
/// let ptr = self.data.lock().expect("poisoned");
|
||||
/// // While `f` is running, the data is moved out of `*ptr`. If `f`
|
||||
/// // panics, `*ptr` keeps pointing at a dropped value. The intention
|
||||
/// // is that this will poison the mutex, so the following calls to
|
||||
/// // `replace_with` will panic without reading `*ptr`. But since
|
||||
/// // poisoning is not guaranteed to occur if this is run from a panic
|
||||
/// // hook, this can lead to use-after-free.
|
||||
/// unsafe {
|
||||
/// (*ptr).write(f((*ptr).read()));
|
||||
/// }
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// [`new`]: Self::new
|
||||
/// [`lock`]: Self::lock
|
||||
/// [`try_lock`]: Self::try_lock
|
||||
/// [`unwrap()`]: Result::unwrap
|
||||
/// [`PoisonError`]: super::PoisonError
|
||||
/// [`into_inner`]: super::PoisonError::into_inner
|
||||
/// [panic hook]: core::panic::set_hook
|
||||
/// [`catch_unwind`]: core::panic::catch_unwind
|
||||
/// [`Cell`]: crate::cell::Cell
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{Arc, Mutex};
|
||||
/// use std::thread;
|
||||
/// use std::sync::mpsc::channel;
|
||||
///
|
||||
/// const N: usize = 10;
|
||||
///
|
||||
/// // Spawn a few threads to increment a shared variable (non-atomically), and
|
||||
/// // let the main thread know once all increments are done.
|
||||
/// //
|
||||
/// // Here we're using an Arc to share memory among threads, and the data inside
|
||||
/// // the Arc is protected with a mutex.
|
||||
/// let data = Arc::new(Mutex::new(0));
|
||||
///
|
||||
/// let (tx, rx) = channel();
|
||||
/// for _ in 0..N {
|
||||
/// let (data, tx) = (Arc::clone(&data), tx.clone());
|
||||
/// thread::spawn(move || {
|
||||
/// // The shared state can only be accessed once the lock is held.
|
||||
/// // Our non-atomic increment is safe because we're the only thread
|
||||
/// // which can access the shared state when the lock is held.
|
||||
/// //
|
||||
/// // We unwrap() the return value to assert that we are not expecting
|
||||
/// // threads to ever fail while holding the lock.
|
||||
/// let mut data = data.lock().unwrap();
|
||||
/// *data += 1;
|
||||
/// if *data == N {
|
||||
/// tx.send(()).unwrap();
|
||||
/// }
|
||||
/// // the lock is unlocked here when `data` goes out of scope.
|
||||
/// });
|
||||
/// }
|
||||
///
|
||||
/// rx.recv().unwrap();
|
||||
/// ```
|
||||
///
|
||||
/// To recover from a poisoned mutex:
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{Arc, Mutex};
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let lock = Arc::new(Mutex::new(0_u32));
|
||||
/// let lock2 = Arc::clone(&lock);
|
||||
///
|
||||
/// let _ = thread::spawn(move || -> () {
|
||||
/// // This thread will acquire the mutex first, unwrapping the result of
|
||||
/// // `lock` because the lock has not been poisoned.
|
||||
/// let _guard = lock2.lock().unwrap();
|
||||
///
|
||||
/// // This panic while holding the lock (`_guard` is in scope) will poison
|
||||
/// // the mutex.
|
||||
/// panic!();
|
||||
/// }).join();
|
||||
///
|
||||
/// // The lock is poisoned by this point, but the returned result can be
|
||||
/// // pattern matched on to return the underlying guard on both branches.
|
||||
/// let mut guard = match lock.lock() {
|
||||
/// Ok(guard) => guard,
|
||||
/// Err(poisoned) => poisoned.into_inner(),
|
||||
/// };
|
||||
///
|
||||
/// *guard += 1;
|
||||
/// ```
|
||||
///
|
||||
/// To unlock a mutex guard sooner than the end of the enclosing scope,
|
||||
/// either create an inner scope or drop the guard manually.
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{Arc, Mutex};
|
||||
/// use std::thread;
|
||||
///
|
||||
/// const N: usize = 3;
|
||||
///
|
||||
/// let data_mutex = Arc::new(Mutex::new(vec![1, 2, 3, 4]));
|
||||
/// let res_mutex = Arc::new(Mutex::new(0));
|
||||
///
|
||||
/// let mut threads = Vec::with_capacity(N);
|
||||
/// (0..N).for_each(|_| {
|
||||
/// let data_mutex_clone = Arc::clone(&data_mutex);
|
||||
/// let res_mutex_clone = Arc::clone(&res_mutex);
|
||||
///
|
||||
/// threads.push(thread::spawn(move || {
|
||||
/// // Here we use a block to limit the lifetime of the lock guard.
|
||||
/// let result = {
|
||||
/// let mut data = data_mutex_clone.lock().unwrap();
|
||||
/// // This is the result of some important and long-ish work.
|
||||
/// let result = data.iter().fold(0, |acc, x| acc + x * 2);
|
||||
/// data.push(result);
|
||||
/// result
|
||||
/// // The mutex guard gets dropped here, together with any other values
|
||||
/// // created in the critical section.
|
||||
/// };
|
||||
/// // The guard created here is a temporary dropped at the end of the statement, i.e.
|
||||
/// // the lock would not remain being held even if the thread did some additional work.
|
||||
/// *res_mutex_clone.lock().unwrap() += result;
|
||||
/// }));
|
||||
/// });
|
||||
///
|
||||
/// let mut data = data_mutex.lock().unwrap();
|
||||
/// // This is the result of some important and long-ish work.
|
||||
/// let result = data.iter().fold(0, |acc, x| acc + x * 2);
|
||||
/// data.push(result);
|
||||
/// // We drop the `data` explicitly because it's not necessary anymore and the
|
||||
/// // thread still has work to do. This allows other threads to start working on
|
||||
/// // the data immediately, without waiting for the rest of the unrelated work
|
||||
/// // to be done here.
|
||||
/// //
|
||||
/// // It's even more important here than in the threads because we `.join` the
|
||||
/// // threads after that. If we had not dropped the mutex guard, a thread could
|
||||
/// // be waiting forever for it, causing a deadlock.
|
||||
/// // As in the threads, a block could have been used instead of calling the
|
||||
/// // `drop` function.
|
||||
/// drop(data);
|
||||
/// // Here the mutex guard is not assigned to a variable and so, even if the
|
||||
/// // scope does not end after this line, the mutex is still released: there is
|
||||
/// // no deadlock.
|
||||
/// *res_mutex.lock().unwrap() += result;
|
||||
///
|
||||
/// threads.into_iter().for_each(|thread| {
|
||||
/// thread
|
||||
/// .join()
|
||||
///             .expect("The thread creation or execution failed!")
|
||||
/// });
|
||||
///
|
||||
/// assert_eq!(*res_mutex.lock().unwrap(), 800);
|
||||
/// ```
|
||||
///
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "Mutex")]
pub struct Mutex<T: ?Sized> {
    /// Platform-specific mutex primitive that performs the actual locking.
    inner: sys::Mutex,
    /// Flag recording whether a thread panicked while holding the lock
    /// ("poisoning"); read by `lock`/`try_lock` and `is_poisoned`.
    poison: poison::Flag,
    /// The protected value. `UnsafeCell` permits mutation through `&Mutex<T>`
    /// once the lock is held. This field must stay last because `T: ?Sized`
    /// (an unsized field must be the final one in a struct).
    data: UnsafeCell<T>,
}
|
||||
|
||||
/// `T` must be `Send` for a [`Mutex`] to be `Send` because it is possible to acquire
/// the owned `T` from the `Mutex` via [`into_inner`].
///
/// [`into_inner`]: Mutex::into_inner
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: the mutex only ever grants access to the inner `T` to one thread at
// a time, so moving the `Mutex` (and therefore eventually `T`) to another
// thread is sound exactly when `T: Send`.
unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
|
||||
|
||||
/// `T` must be `Send` for [`Mutex`] to be `Sync`.
/// This ensures that the protected data can be accessed safely from multiple threads
/// without causing data races or other unsafe behavior.
///
/// [`Mutex<T>`] provides mutable access to `T` to one thread at a time. However, it's essential
/// for `T` to be `Send` because it's not safe for non-`Send` structures to be accessed in
/// this manner. For instance, consider [`Rc`], a non-atomic reference counted smart pointer,
/// which is not `Send`. With `Rc`, we can have multiple copies pointing to the same heap
/// allocation with a non-atomic reference count. If we were to use `Mutex<Rc<_>>`, it would
/// only protect one instance of `Rc` from shared access, leaving other copies vulnerable
/// to potential data races.
///
/// Also note that it is not necessary for `T` to be `Sync` as `&T` is only made available
/// to one thread at a time if `T` is not `Sync`.
///
/// [`Rc`]: crate::rc::Rc
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: exclusive access to `T` is serialized by the lock, so sharing the
// `Mutex` between threads only requires `T: Send` (see the explanation above).
unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
|
||||
|
||||
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// [`Deref`] and [`DerefMut`] implementations.
///
/// This structure is created by the [`lock`] and [`try_lock`] methods on
/// [`Mutex`].
///
/// [`lock`]: Mutex::lock
/// [`try_lock`]: Mutex::try_lock
#[must_use = "if unused the Mutex will immediately unlock"]
#[must_not_suspend = "holding a MutexGuard across suspend \
                      points can cause deadlocks, delays, \
                      and cause Futures to not implement `Send`"]
#[stable(feature = "rust1", since = "1.0.0")]
#[clippy::has_significant_drop]
#[cfg_attr(not(test), rustc_diagnostic_item = "MutexGuard")]
pub struct MutexGuard<'a, T: ?Sized + 'a> {
    /// The mutex this guard keeps locked; also grants access to its data.
    lock: &'a Mutex<T>,
    /// Per-acquisition poison state, handed back to `lock.poison` on drop so
    /// a panic that began while the guard was held poisons the mutex.
    poison: poison::Guard,
}
|
||||
|
||||
/// A [`MutexGuard`] is not `Send` to maximize platform portability.
///
/// On platforms that use POSIX threads (commonly referred to as pthreads) there is a requirement to
/// release mutex locks on the same thread they were acquired.
/// For this reason, [`MutexGuard`] must not implement `Send` to prevent it being dropped from
/// another thread.
#[stable(feature = "rust1", since = "1.0.0")]
// Negative impl: explicitly opts the guard out of the `Send` auto trait.
impl<T: ?Sized> !Send for MutexGuard<'_, T> {}
|
||||
|
||||
/// `T` must be `Sync` for a [`MutexGuard<T>`] to be `Sync`
/// because it is possible to get a `&T` from `&MutexGuard` (via `Deref`).
#[stable(feature = "mutexguard", since = "1.19.0")]
// SAFETY: sharing `&MutexGuard` only exposes `&T`, which is safe iff `T: Sync`.
unsafe impl<T: ?Sized + Sync> Sync for MutexGuard<'_, T> {}
|
||||
|
||||
/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
/// subfield of the protected data. When this structure is dropped (falls out
/// of scope), the lock will be unlocked.
///
/// The main difference between `MappedMutexGuard` and [`MutexGuard`] is that the
/// former cannot be used with [`Condvar`], since that
/// could introduce soundness issues if the locked object is modified by another
/// thread while the `Mutex` is unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// [`Deref`] and [`DerefMut`] implementations.
///
/// This structure is created by the [`map`] and [`filter_map`] methods on
/// [`MutexGuard`].
///
/// [`map`]: MutexGuard::map
/// [`filter_map`]: MutexGuard::filter_map
/// [`Condvar`]: crate::sync::Condvar
#[must_use = "if unused the Mutex will immediately unlock"]
#[must_not_suspend = "holding a MappedMutexGuard across suspend \
                      points can cause deadlocks, delays, \
                      and cause Futures to not implement `Send`"]
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
#[clippy::has_significant_drop]
pub struct MappedMutexGuard<'a, T: ?Sized + 'a> {
    // NB: we use a pointer instead of `&'a mut T` to avoid `noalias` violations, because a
    // `MappedMutexGuard` argument doesn't hold uniqueness for its whole scope, only until it drops.
    // `NonNull` is covariant over `T`, so we add a `PhantomData<&'a mut T>` field
    // below for the correct variance over `T` (invariance).
    /// Pointer to the mapped component inside the locked data.
    data: NonNull<T>,
    /// The system mutex to unlock when this guard drops.
    inner: &'a sys::Mutex,
    /// Poison flag of the originating [`Mutex`], updated on drop.
    poison_flag: &'a poison::Flag,
    /// Per-acquisition poison state handed back to `poison_flag` on drop.
    poison: poison::Guard,
    /// Forces invariance over `T` (see the NB comment above).
    _variance: PhantomData<&'a mut T>,
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
// Like `MutexGuard`: must be unlocked on the acquiring thread, so not `Send`.
impl<T: ?Sized> !Send for MappedMutexGuard<'_, T> {}
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
// SAFETY: sharing the guard only exposes `&T`, which is safe iff `T: Sync`.
unsafe impl<T: ?Sized + Sync> Sync for MappedMutexGuard<'_, T> {}
|
||||
|
||||
impl<T> Mutex<T> {
|
||||
/// Creates a new mutex in an unlocked state ready for use.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Mutex;
|
||||
///
|
||||
/// let mutex = Mutex::new(0);
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
|
||||
#[inline]
|
||||
pub const fn new(t: T) -> Mutex<T> {
|
||||
Mutex { inner: sys::Mutex::new(), poison: poison::Flag::new(), data: UnsafeCell::new(t) }
|
||||
}
|
||||
|
||||
/// Returns the contained value by cloning it.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// If another user of this mutex panicked while holding the mutex, then
|
||||
/// this call will return an error instead.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(lock_value_accessors)]
|
||||
///
|
||||
/// use std::sync::Mutex;
|
||||
///
|
||||
/// let mut mutex = Mutex::new(7);
|
||||
///
|
||||
/// assert_eq!(mutex.get_cloned().unwrap(), 7);
|
||||
/// ```
|
||||
#[unstable(feature = "lock_value_accessors", issue = "133407")]
|
||||
pub fn get_cloned(&self) -> Result<T, PoisonError<()>>
|
||||
where
|
||||
T: Clone,
|
||||
{
|
||||
match self.lock() {
|
||||
Ok(guard) => Ok((*guard).clone()),
|
||||
Err(_) => Err(PoisonError::new(())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the contained value.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// If another user of this mutex panicked while holding the mutex, then
|
||||
/// this call will return an error containing the provided `value` instead.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(lock_value_accessors)]
|
||||
///
|
||||
/// use std::sync::Mutex;
|
||||
///
|
||||
/// let mut mutex = Mutex::new(7);
|
||||
///
|
||||
/// assert_eq!(mutex.get_cloned().unwrap(), 7);
|
||||
/// mutex.set(11).unwrap();
|
||||
/// assert_eq!(mutex.get_cloned().unwrap(), 11);
|
||||
/// ```
|
||||
#[unstable(feature = "lock_value_accessors", issue = "133407")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn set(&self, value: T) -> Result<(), PoisonError<T>> {
|
||||
if mem::needs_drop::<T>() {
|
||||
// If the contained value has non-trivial destructor, we
|
||||
// call that destructor after the lock being released.
|
||||
self.replace(value).map(drop)
|
||||
} else {
|
||||
match self.lock() {
|
||||
Ok(mut guard) => {
|
||||
*guard = value;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
Err(_) => Err(PoisonError::new(value)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Replaces the contained value with `value`, and returns the old contained value.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// If another user of this mutex panicked while holding the mutex, then
|
||||
/// this call will return an error containing the provided `value` instead.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(lock_value_accessors)]
|
||||
///
|
||||
/// use std::sync::Mutex;
|
||||
///
|
||||
/// let mut mutex = Mutex::new(7);
|
||||
///
|
||||
/// assert_eq!(mutex.replace(11).unwrap(), 7);
|
||||
/// assert_eq!(mutex.get_cloned().unwrap(), 11);
|
||||
/// ```
|
||||
#[unstable(feature = "lock_value_accessors", issue = "133407")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn replace(&self, value: T) -> LockResult<T> {
|
||||
match self.lock() {
|
||||
Ok(mut guard) => Ok(mem::replace(&mut *guard, value)),
|
||||
Err(_) => Err(PoisonError::new(value)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ?Sized> Mutex<T> {
|
||||
/// Acquires a mutex, blocking the current thread until it is able to do so.
|
||||
///
|
||||
/// This function will block the local thread until it is available to acquire
|
||||
/// the mutex. Upon returning, the thread is the only thread with the lock
|
||||
/// held. An RAII guard is returned to allow scoped unlock of the lock. When
|
||||
/// the guard goes out of scope, the mutex will be unlocked.
|
||||
///
|
||||
/// The exact behavior on locking a mutex in the thread which already holds
|
||||
/// the lock is left unspecified. However, this function will not return on
|
||||
/// the second call (it might panic or deadlock, for example).
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// If another user of this mutex panicked while holding the mutex, then
|
||||
/// this call will return an error once the mutex is acquired. The acquired
|
||||
/// mutex guard will be contained in the returned error.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function might panic when called if the lock is already held by
|
||||
/// the current thread.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{Arc, Mutex};
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let mutex = Arc::new(Mutex::new(0));
|
||||
/// let c_mutex = Arc::clone(&mutex);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// *c_mutex.lock().unwrap() = 10;
|
||||
/// }).join().expect("thread::spawn failed");
|
||||
/// assert_eq!(*mutex.lock().unwrap(), 10);
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn lock(&self) -> LockResult<MutexGuard<'_, T>> {
|
||||
unsafe {
|
||||
self.inner.lock();
|
||||
MutexGuard::new(self)
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempts to acquire this lock.
|
||||
///
|
||||
/// If the lock could not be acquired at this time, then [`Err`] is returned.
|
||||
/// Otherwise, an RAII guard is returned. The lock will be unlocked when the
|
||||
/// guard is dropped.
|
||||
///
|
||||
/// This function does not block.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// If another user of this mutex panicked while holding the mutex, then
|
||||
/// this call will return the [`Poisoned`] error if the mutex would
|
||||
/// otherwise be acquired. An acquired lock guard will be contained
|
||||
/// in the returned error.
|
||||
///
|
||||
/// If the mutex could not be acquired because it is already locked, then
|
||||
/// this call will return the [`WouldBlock`] error.
|
||||
///
|
||||
/// [`Poisoned`]: TryLockError::Poisoned
|
||||
/// [`WouldBlock`]: TryLockError::WouldBlock
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{Arc, Mutex};
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let mutex = Arc::new(Mutex::new(0));
|
||||
/// let c_mutex = Arc::clone(&mutex);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let mut lock = c_mutex.try_lock();
|
||||
/// if let Ok(ref mut mutex) = lock {
|
||||
/// **mutex = 10;
|
||||
/// } else {
|
||||
/// println!("try_lock failed");
|
||||
/// }
|
||||
/// }).join().expect("thread::spawn failed");
|
||||
/// assert_eq!(*mutex.lock().unwrap(), 10);
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn try_lock(&self) -> TryLockResult<MutexGuard<'_, T>> {
|
||||
unsafe {
|
||||
if self.inner.try_lock() {
|
||||
Ok(MutexGuard::new(self)?)
|
||||
} else {
|
||||
Err(TryLockError::WouldBlock)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Determines whether the mutex is poisoned.
|
||||
///
|
||||
/// If another thread is active, the mutex can still become poisoned at any
|
||||
/// time. You should not trust a `false` value for program correctness
|
||||
/// without additional synchronization.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{Arc, Mutex};
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let mutex = Arc::new(Mutex::new(0));
|
||||
/// let c_mutex = Arc::clone(&mutex);
|
||||
///
|
||||
/// let _ = thread::spawn(move || {
|
||||
/// let _lock = c_mutex.lock().unwrap();
|
||||
/// panic!(); // the mutex gets poisoned
|
||||
/// }).join();
|
||||
/// assert_eq!(mutex.is_poisoned(), true);
|
||||
/// ```
|
||||
#[inline]
|
||||
#[stable(feature = "sync_poison", since = "1.2.0")]
|
||||
pub fn is_poisoned(&self) -> bool {
|
||||
self.poison.get()
|
||||
}
|
||||
|
||||
/// Clear the poisoned state from a mutex.
|
||||
///
|
||||
/// If the mutex is poisoned, it will remain poisoned until this function is called. This
|
||||
/// allows recovering from a poisoned state and marking that it has recovered. For example, if
|
||||
/// the value is overwritten by a known-good value, then the mutex can be marked as
|
||||
/// un-poisoned. Or possibly, the value could be inspected to determine if it is in a
|
||||
/// consistent state, and if so the poison is removed.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{Arc, Mutex};
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let mutex = Arc::new(Mutex::new(0));
|
||||
/// let c_mutex = Arc::clone(&mutex);
|
||||
///
|
||||
/// let _ = thread::spawn(move || {
|
||||
/// let _lock = c_mutex.lock().unwrap();
|
||||
/// panic!(); // the mutex gets poisoned
|
||||
/// }).join();
|
||||
///
|
||||
/// assert_eq!(mutex.is_poisoned(), true);
|
||||
/// let x = mutex.lock().unwrap_or_else(|mut e| {
|
||||
/// **e.get_mut() = 1;
|
||||
/// mutex.clear_poison();
|
||||
/// e.into_inner()
|
||||
/// });
|
||||
/// assert_eq!(mutex.is_poisoned(), false);
|
||||
/// assert_eq!(*x, 1);
|
||||
/// ```
|
||||
#[inline]
|
||||
#[stable(feature = "mutex_unpoison", since = "1.77.0")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn clear_poison(&self) {
|
||||
self.poison.clear();
|
||||
}
|
||||
|
||||
/// Consumes this mutex, returning the underlying data.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// If another user of this mutex panicked while holding the mutex, then
|
||||
/// this call will return an error containing the underlying data
|
||||
/// instead.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Mutex;
|
||||
///
|
||||
/// let mutex = Mutex::new(0);
|
||||
/// assert_eq!(mutex.into_inner().unwrap(), 0);
|
||||
/// ```
|
||||
#[stable(feature = "mutex_into_inner", since = "1.6.0")]
|
||||
pub fn into_inner(self) -> LockResult<T>
|
||||
where
|
||||
T: Sized,
|
||||
{
|
||||
let data = self.data.into_inner();
|
||||
poison::map_result(self.poison.borrow(), |()| data)
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying data.
|
||||
///
|
||||
/// Since this call borrows the `Mutex` mutably, no actual locking needs to
|
||||
/// take place -- the mutable borrow statically guarantees no new locks can be acquired
|
||||
/// while this reference exists. Note that this method does not clear any previous abandoned locks
|
||||
/// (e.g., via [`forget()`] on a [`MutexGuard`]).
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// If another user of this mutex panicked while holding the mutex, then
|
||||
/// this call will return an error containing a mutable reference to the
|
||||
/// underlying data instead.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Mutex;
|
||||
///
|
||||
/// let mut mutex = Mutex::new(0);
|
||||
/// *mutex.get_mut().unwrap() = 10;
|
||||
/// assert_eq!(*mutex.lock().unwrap(), 10);
|
||||
/// ```
|
||||
///
|
||||
/// [`forget()`]: mem::forget
|
||||
#[stable(feature = "mutex_get_mut", since = "1.6.0")]
|
||||
pub fn get_mut(&mut self) -> LockResult<&mut T> {
|
||||
let data = self.data.get_mut();
|
||||
poison::map_result(self.poison.borrow(), |()| data)
|
||||
}
|
||||
|
||||
/// Returns a raw pointer to the underlying data.
|
||||
///
|
||||
/// The returned pointer is always non-null and properly aligned, but it is
|
||||
/// the user's responsibility to ensure that any reads and writes through it
|
||||
/// are properly synchronized to avoid data races, and that it is not read
|
||||
/// or written through after the mutex is dropped.
|
||||
#[unstable(feature = "mutex_data_ptr", issue = "140368")]
|
||||
pub const fn data_ptr(&self) -> *mut T {
|
||||
self.data.get()
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "mutex_from", since = "1.24.0")]
|
||||
impl<T> From<T> for Mutex<T> {
|
||||
/// Creates a new mutex in an unlocked state ready for use.
|
||||
/// This is equivalent to [`Mutex::new`].
|
||||
fn from(t: T) -> Self {
|
||||
Mutex::new(t)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "mutex_default", since = "1.10.0")]
|
||||
impl<T: Default> Default for Mutex<T> {
|
||||
/// Creates a `Mutex<T>`, with the `Default` value for T.
|
||||
fn default() -> Mutex<T> {
|
||||
Mutex::new(Default::default())
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let mut d = f.debug_struct("Mutex");
|
||||
match self.try_lock() {
|
||||
Ok(guard) => {
|
||||
d.field("data", &&*guard);
|
||||
}
|
||||
Err(TryLockError::Poisoned(err)) => {
|
||||
d.field("data", &&**err.get_ref());
|
||||
}
|
||||
Err(TryLockError::WouldBlock) => {
|
||||
d.field("data", &"<locked>");
|
||||
}
|
||||
}
|
||||
d.field("poisoned", &self.poison.get());
|
||||
d.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'mutex, T: ?Sized> MutexGuard<'mutex, T> {
    /// Creates a guard for a mutex that the caller has already locked.
    ///
    /// Consults the poison flag: returns the guard wrapped in `Err` if the
    /// mutex is poisoned, in `Ok` otherwise.
    ///
    /// # Safety
    ///
    /// The caller must hold `lock`'s inner system mutex, since dropping the
    /// returned guard unlocks it.
    unsafe fn new(lock: &'mutex Mutex<T>) -> LockResult<MutexGuard<'mutex, T>> {
        poison::map_result(lock.poison.guard(), |guard| MutexGuard { lock, poison: guard })
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for MutexGuard<'_, T> {
    type Target = T;

    /// Borrows the protected data.
    fn deref(&self) -> &T {
        // SAFETY: this guard's existence means the mutex is locked by the
        // current thread, so no other access to the `UnsafeCell` can occur.
        unsafe { &*self.lock.data.get() }
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
    /// Mutably borrows the protected data.
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: this guard's existence means the mutex is locked by the
        // current thread, so we have exclusive access to the data.
        unsafe { &mut *self.lock.data.get() }
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for MutexGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: this guard owns the lock, so unlocking here is sound.
        unsafe {
            // Record the poison state first, then release the system mutex, so
            // the flag is settled before another thread can acquire the lock.
            self.lock.poison.done(&self.poison);
            self.lock.inner.unlock();
        }
    }
}
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
|
||||
impl<T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt::Debug::fmt(&**self, f)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "std_guard_impls", since = "1.20.0")]
|
||||
impl<T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
(**self).fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
/// Exposes the system mutex behind a guard, for use in the sibling
/// [`condvar`](super::condvar) module.
// NOTE(review): the original doc said `nonpoison::condvar`, but this guard
// belongs to the poisoning-aware mutex (it carries a `poison::Flag`) —
// confirm which condvar module is the intended consumer.
pub(super) fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
    &guard.lock.inner
}
|
||||
|
||||
/// Exposes the poison flag behind a guard, for use in the sibling
/// [`condvar`](super::condvar) module.
// NOTE(review): the original doc said `nonpoison::condvar`, yet this returns
// a `poison::Flag` — a nonpoison condvar should not need it; confirm.
pub(super) fn guard_poison<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a poison::Flag {
    &guard.lock.poison
}
|
||||
|
||||
impl<'a, T: ?Sized> MutexGuard<'a, T> {
    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data, e.g.
    /// an enum variant.
    ///
    /// The `Mutex` is already locked, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MutexGuard::map(...)`. A method would interfere with methods of the
    /// same name on the contents of the `MutexGuard` used through `Deref`.
    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
    pub fn map<U, F>(orig: Self, f: F) -> MappedMutexGuard<'a, U>
    where
        F: FnOnce(&mut T) -> &mut U,
        U: ?Sized,
    {
        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `filter_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        let data = NonNull::from(f(unsafe { &mut *orig.lock.data.get() }));
        // Suppress `orig`'s destructor: the mapped guard constructed below
        // takes over the unlock and poison-reporting duties that `orig`'s
        // `Drop` would otherwise perform.
        let orig = ManuallyDrop::new(orig);
        MappedMutexGuard {
            data,
            inner: &orig.lock.inner,
            poison_flag: &orig.lock.poison,
            poison: orig.poison.clone(),
            _variance: PhantomData,
        }
    }

    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data. The
    /// original guard is returned as an `Err(...)` if the closure returns
    /// `None`.
    ///
    /// The `Mutex` is already locked, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MutexGuard::filter_map(...)`. A method would interfere with methods of the
    /// same name on the contents of the `MutexGuard` used through `Deref`.
    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
    pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedMutexGuard<'a, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
        U: ?Sized,
    {
        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `filter_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        match f(unsafe { &mut *orig.lock.data.get() }) {
            Some(data) => {
                let data = NonNull::from(data);
                // Suppress `orig`'s destructor; the mapped guard takes over
                // unlocking and poison reporting (see `map` above).
                let orig = ManuallyDrop::new(orig);
                Ok(MappedMutexGuard {
                    data,
                    inner: &orig.lock.inner,
                    poison_flag: &orig.lock.poison,
                    poison: orig.poison.clone(),
                    _variance: PhantomData,
                })
            }
            // Closure declined: hand the untouched guard back to the caller.
            None => Err(orig),
        }
    }
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
impl<T: ?Sized> Deref for MappedMutexGuard<'_, T> {
    type Target = T;

    /// Borrows the mapped component of the protected data.
    fn deref(&self) -> &T {
        // SAFETY: `self.data` was derived from a guard of a mutex that stays
        // locked for this guard's whole lifetime, so the pointee is valid and
        // not accessed by other threads.
        unsafe { self.data.as_ref() }
    }
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
impl<T: ?Sized> DerefMut for MappedMutexGuard<'_, T> {
    /// Mutably borrows the mapped component of the protected data.
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: the mutex stays locked for this guard's lifetime, giving us
        // exclusive access to the pointee.
        unsafe { self.data.as_mut() }
    }
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
impl<T: ?Sized> Drop for MappedMutexGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: this guard took over lock ownership from the `MutexGuard`
        // it was mapped from, so unlocking here is sound.
        unsafe {
            // Record the poison state first, then release the system mutex
            // (same ordering as `MutexGuard`'s `Drop`).
            self.poison_flag.done(&self.poison);
            self.inner.unlock();
        }
    }
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedMutexGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt::Debug::fmt(&**self, f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
impl<T: ?Sized + fmt::Display> fmt::Display for MappedMutexGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
(**self).fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: ?Sized> MappedMutexGuard<'a, T> {
    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data, e.g.
    /// an enum variant.
    ///
    /// The `Mutex` is already locked, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MappedMutexGuard::map(...)`. A method would interfere with methods of the
    /// same name on the contents of the `MutexGuard` used through `Deref`.
    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
    pub fn map<U, F>(mut orig: Self, f: F) -> MappedMutexGuard<'a, U>
    where
        F: FnOnce(&mut T) -> &mut U,
        U: ?Sized,
    {
        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `filter_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        let data = NonNull::from(f(unsafe { orig.data.as_mut() }));
        // Suppress `orig`'s destructor: responsibility for updating the poison
        // flag and unlocking transfers to the new guard returned below.
        let orig = ManuallyDrop::new(orig);
        MappedMutexGuard {
            data,
            inner: orig.inner,
            poison_flag: orig.poison_flag,
            poison: orig.poison.clone(),
            _variance: PhantomData,
        }
    }

    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data. The
    /// original guard is returned as an `Err(...)` if the closure returns
    /// `None`.
    ///
    /// The `Mutex` is already locked, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MappedMutexGuard::filter_map(...)`. A method would interfere with methods of the
    /// same name on the contents of the `MutexGuard` used through `Deref`.
    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
    pub fn filter_map<U, F>(mut orig: Self, f: F) -> Result<MappedMutexGuard<'a, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
        U: ?Sized,
    {
        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `filter_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        match f(unsafe { orig.data.as_mut() }) {
            Some(data) => {
                let data = NonNull::from(data);
                // As in `map`: suppress `orig`'s destructor so the new guard
                // becomes the sole owner of the lock.
                let orig = ManuallyDrop::new(orig);
                Ok(MappedMutexGuard {
                    data,
                    inner: orig.inner,
                    poison_flag: orig.poison_flag,
                    poison: orig.poison.clone(),
                    _variance: PhantomData,
                })
            }
            // On this path `orig` was never wrapped in `ManuallyDrop`, so the
            // returned guard still unlocks the mutex normally when dropped.
            None => Err(orig),
        }
    }
}
|
||||
1274
crates/std/src/sync/poison/rwlock.rs
Normal file
1274
crates/std/src/sync/poison/rwlock.rs
Normal file
File diff suppressed because it is too large
Load Diff
432
crates/std/src/sync/reentrant_lock.rs
Normal file
432
crates/std/src/sync/reentrant_lock.rs
Normal file
@@ -0,0 +1,432 @@
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::fmt;
|
||||
use crate::ops::Deref;
|
||||
use core::panic::{RefUnwindSafe, UnwindSafe};
|
||||
use crate::sys::sync as sys;
|
||||
use crate::thread::{ThreadId, current_id};
|
||||
|
||||
/// A re-entrant mutual exclusion lock
|
||||
///
|
||||
/// This lock will block *other* threads waiting for the lock to become
|
||||
/// available. The thread which has already locked the mutex can lock it
|
||||
/// multiple times without blocking, preventing a common source of deadlocks.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// Allow recursively calling a function needing synchronization from within
|
||||
/// a callback (this is how [`StdoutLock`](crate::io::StdoutLock) is currently
|
||||
/// implemented):
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(reentrant_lock)]
|
||||
///
|
||||
/// use std::cell::RefCell;
|
||||
/// use std::sync::ReentrantLock;
|
||||
///
|
||||
/// pub struct Log {
|
||||
/// data: RefCell<String>,
|
||||
/// }
|
||||
///
|
||||
/// impl Log {
|
||||
/// pub fn append(&self, msg: &str) {
|
||||
/// self.data.borrow_mut().push_str(msg);
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// static LOG: ReentrantLock<Log> = ReentrantLock::new(Log { data: RefCell::new(String::new()) });
|
||||
///
|
||||
/// pub fn with_log<R>(f: impl FnOnce(&Log) -> R) -> R {
|
||||
/// let log = LOG.lock();
|
||||
/// f(&*log)
|
||||
/// }
|
||||
///
|
||||
/// with_log(|log| {
|
||||
/// log.append("Hello");
|
||||
/// with_log(|log| log.append(" there!"));
|
||||
/// });
|
||||
/// ```
|
||||
///
|
||||
// # Implementation details
|
||||
//
|
||||
// The 'owner' field tracks which thread has locked the mutex.
|
||||
//
|
||||
// We use thread::current_id() as the thread identifier, which is just the
|
||||
// current thread's ThreadId, so it's unique across the process lifetime.
|
||||
//
|
||||
// If `owner` is set to the identifier of the current thread,
|
||||
// we assume the mutex is already locked and instead of locking it again,
|
||||
// we increment `lock_count`.
|
||||
//
|
||||
// When unlocking, we decrement `lock_count`, and only unlock the mutex when
|
||||
// it reaches zero.
|
||||
//
|
||||
// `lock_count` is protected by the mutex and only accessed by the thread that has
|
||||
// locked the mutex, so needs no synchronization.
|
||||
//
|
||||
// `owner` can be checked by other threads that want to see if they already
|
||||
// hold the lock, so needs to be atomic. If it compares equal, we're on the
|
||||
// same thread that holds the mutex and memory access can use relaxed ordering
|
||||
// since we're not dealing with multiple threads. If it's not equal,
|
||||
// synchronization is left to the mutex, making relaxed memory ordering for
|
||||
// the `owner` field fine in all cases.
|
||||
//
|
||||
// On systems without 64 bit atomics we also store the address of a TLS variable
|
||||
// along the 64-bit TID. We then first check that address against the address
|
||||
// of that variable on the current thread, and only if they compare equal do we
|
||||
// compare the actual TIDs. Because we only ever read the TID on the same thread
|
||||
// that it was written on (or a thread sharing the TLS block with that writer thread),
|
||||
// we don't need to further synchronize the TID accesses, so they can be regular 64-bit
|
||||
// non-atomic accesses.
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
pub struct ReentrantLock<T: ?Sized> {
    // The non-reentrant OS-level mutex that provides the actual blocking.
    mutex: sys::Mutex,
    // Identifier of the thread currently holding `mutex`; cleared on the
    // final unlock (see the implementation notes above).
    owner: Tid,
    // How many times the owning thread has acquired the lock. Only accessed
    // while `mutex` is held, so it needs no synchronization of its own.
    lock_count: UnsafeCell<u32>,
    // The protected data. Must be the last field because `T` may be unsized.
    data: T,
}
|
||||
|
||||
// Platform-dependent storage for the owning thread's ID: a single atomic on
// targets with native 64-bit atomics, and a TLS-address + plain-u64 scheme
// elsewhere (see the comments in the fallback branch).
cfg_select!(
    target_has_atomic = "64" => {
        use crate::sync::atomic::{Atomic, AtomicU64, Ordering::Relaxed};

        struct Tid(Atomic<u64>);

        impl Tid {
            const fn new() -> Self {
                Self(AtomicU64::new(0))
            }

            #[inline]
            fn contains(&self, owner: ThreadId) -> bool {
                // 0 is never a valid ThreadId, so an unset owner never matches.
                owner.as_u64().get() == self.0.load(Relaxed)
            }

            #[inline]
            // This is just unsafe to match the API of the Tid type below.
            unsafe fn set(&self, tid: Option<ThreadId>) {
                let value = tid.map_or(0, |tid| tid.as_u64().get());
                self.0.store(value, Relaxed);
            }
        }
    }
    _ => {
        /// Returns the address of a TLS variable. This is guaranteed to
        /// be unique across all currently alive threads.
        fn tls_addr() -> usize {
            thread_local! { static X: u8 = const { 0u8 } };

            X.with(|p| <*const u8>::addr(p))
        }

        use crate::sync::atomic::{
            Atomic,
            AtomicUsize,
            Ordering,
        };

        struct Tid {
            // When a thread calls `set()`, this value gets updated to
            // the address of a thread local on that thread. This is
            // used as a first check in `contains()`; if the `tls_addr`
            // doesn't match the TLS address of the current thread, then
            // the ThreadId also can't match. Only if the TLS addresses do
            // match do we read out the actual TID.
            // Note also that we can use relaxed atomic operations here, because
            // we only ever read from the tid if `tls_addr` matches the current
            // TLS address. In that case, either the tid has been set by
            // the current thread, or by a thread that has terminated before
            // the current thread's `tls_addr` was allocated. In either case, no further
            // synchronization is needed (as per <https://github.com/rust-lang/miri/issues/3450>)
            tls_addr: Atomic<usize>,
            tid: UnsafeCell<u64>,
        }

        // SAFETY: access to `tid` is guarded by the `tls_addr` check described
        // on the struct above; see that comment for the full argument.
        unsafe impl Send for Tid {}
        unsafe impl Sync for Tid {}

        impl Tid {
            const fn new() -> Self {
                Self { tls_addr: AtomicUsize::new(0), tid: UnsafeCell::new(0) }
            }

            #[inline]
            // NOTE: This assumes that `owner` is the ID of the current
            // thread, and may spuriously return `false` if that's not the case.
            fn contains(&self, owner: ThreadId) -> bool {
                // We must call `tls_addr()` *before* doing the load to ensure that if we reuse an
                // earlier thread's address, the `tls_addr.load()` below happens-after everything
                // that thread did.
                let tls_addr = tls_addr();
                // SAFETY: See the comments in the struct definition.
                self.tls_addr.load(Ordering::Relaxed) == tls_addr
                    && unsafe { *self.tid.get() } == owner.as_u64().get()
            }

            #[inline]
            // This may only be called by one thread at a time, and can lead to
            // race conditions otherwise.
            unsafe fn set(&self, tid: Option<ThreadId>) {
                // It's important that we set `self.tls_addr` to 0 if the tid is
                // cleared. Otherwise, there might be race conditions between
                // `set()` and `get()`.
                let tls_addr = if tid.is_some() { tls_addr() } else { 0 };
                let value = tid.map_or(0, |tid| tid.as_u64().get());
                self.tls_addr.store(tls_addr, Ordering::Relaxed);
                unsafe { *self.tid.get() = value };
            }
        }
    }
);
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
// SAFETY: moving the lock moves the contained `T` to another thread, so
// `T: Send` is required; the remaining fields are already thread-safe.
unsafe impl<T: Send + ?Sized> Send for ReentrantLock<T> {}
#[unstable(feature = "reentrant_lock", issue = "121440")]
// SAFETY: `T: Send` (rather than `Sync`) suffices because the inner mutex
// serializes access — only the thread currently holding the lock can reach
// the data, which is equivalent to sending `T` between threads.
unsafe impl<T: Send + ?Sized> Sync for ReentrantLock<T> {}
|
||||
|
||||
// Because of the `UnsafeCell`, these traits are not implemented automatically,
// so we opt back in with the same bounds `T` itself would impose.
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: UnwindSafe + ?Sized> UnwindSafe for ReentrantLock<T> {}
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: RefUnwindSafe + ?Sized> RefUnwindSafe for ReentrantLock<T> {}
|
||||
|
||||
/// An RAII implementation of a "scoped lock" of a re-entrant lock. When this
|
||||
/// structure is dropped (falls out of scope), the lock will be unlocked.
|
||||
///
|
||||
/// The data protected by the mutex can be accessed through this guard via its
|
||||
/// [`Deref`] implementation.
|
||||
///
|
||||
/// This structure is created by the [`lock`](ReentrantLock::lock) method on
|
||||
/// [`ReentrantLock`].
|
||||
///
|
||||
/// # Mutability
|
||||
///
|
||||
/// Unlike [`MutexGuard`](super::MutexGuard), `ReentrantLockGuard` does not
|
||||
/// implement [`DerefMut`](crate::ops::DerefMut), because implementation of
|
||||
/// the trait would violate Rust’s reference aliasing rules. Use interior
|
||||
/// mutability (usually [`RefCell`](crate::cell::RefCell)) in order to mutate
|
||||
/// the guarded data.
|
||||
#[must_use = "if unused the ReentrantLock will immediately unlock"]
#[unstable(feature = "reentrant_lock", issue = "121440")]
pub struct ReentrantLockGuard<'a, T: ?Sized + 'a> {
    // Borrow of the lock being held; `Drop` decrements its `lock_count` and
    // releases the inner mutex once the count reaches zero.
    lock: &'a ReentrantLock<T>,
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
// The guard must stay on the thread that acquired the lock: its `Drop`
// updates `lock_count` and `owner`, which is only sound from the owner.
impl<T: ?Sized> !Send for ReentrantLockGuard<'_, T> {}

#[unstable(feature = "reentrant_lock", issue = "121440")]
// SAFETY: the guard only hands out `&T` (no `DerefMut`), so sharing it across
// threads is sound exactly when `&T` is, i.e. when `T: Sync`.
unsafe impl<T: ?Sized + Sync> Sync for ReentrantLockGuard<'_, T> {}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T> ReentrantLock<T> {
|
||||
/// Creates a new re-entrant lock in an unlocked state ready for use.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(reentrant_lock)]
|
||||
/// use std::sync::ReentrantLock;
|
||||
///
|
||||
/// let lock = ReentrantLock::new(0);
|
||||
/// ```
|
||||
pub const fn new(t: T) -> ReentrantLock<T> {
|
||||
ReentrantLock {
|
||||
mutex: sys::Mutex::new(),
|
||||
owner: Tid::new(),
|
||||
lock_count: UnsafeCell::new(0),
|
||||
data: t,
|
||||
}
|
||||
}
|
||||
|
||||
/// Consumes this lock, returning the underlying data.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(reentrant_lock)]
|
||||
///
|
||||
/// use std::sync::ReentrantLock;
|
||||
///
|
||||
/// let lock = ReentrantLock::new(0);
|
||||
/// assert_eq!(lock.into_inner(), 0);
|
||||
/// ```
|
||||
pub fn into_inner(self) -> T {
|
||||
self.data
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> ReentrantLock<T> {
    /// Acquires the lock, blocking the current thread until it is able to do
    /// so.
    ///
    /// This function will block the caller until it is available to acquire
    /// the lock. Upon returning, the thread is the only thread with the lock
    /// held. When the thread calling this method already holds the lock, the
    /// call succeeds without blocking.
    ///
    /// # Panics
    ///
    /// Panics if the lock count overflows `u32::MAX` re-entrant acquisitions.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    /// use std::cell::Cell;
    /// use std::sync::{Arc, ReentrantLock};
    /// use std::thread;
    ///
    /// let lock = Arc::new(ReentrantLock::new(Cell::new(0)));
    /// let c_lock = Arc::clone(&lock);
    ///
    /// thread::spawn(move || {
    ///     c_lock.lock().set(10);
    /// }).join().expect("thread::spawn failed");
    /// assert_eq!(lock.lock().get(), 10);
    /// ```
    pub fn lock(&self) -> ReentrantLockGuard<'_, T> {
        let this_thread = current_id();
        // Safety: We only touch lock_count when we own the inner mutex.
        // Additionally, we only call `self.owner.set()` while holding
        // the inner mutex, so no two threads can call it concurrently.
        unsafe {
            if self.owner.contains(this_thread) {
                // Re-entrant path: this thread already holds the mutex, so
                // just bump the count instead of locking again.
                self.increment_lock_count().expect("lock count overflow in reentrant mutex");
            } else {
                // First acquisition by this thread: take the mutex, then
                // record ownership and initialize the count.
                self.mutex.lock();
                self.owner.set(Some(this_thread));
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
            }
        }
        ReentrantLockGuard { lock: self }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `ReentrantLock` mutably, no actual locking
    /// needs to take place -- the mutable borrow statically guarantees no locks
    /// exist.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    /// use std::sync::ReentrantLock;
    ///
    /// let mut lock = ReentrantLock::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.lock(), 10);
    /// ```
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.data
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned.
    ///
    /// This function does not block.
    // FIXME maybe make it a public part of the API?
    #[unstable(issue = "none", feature = "std_internals")]
    #[doc(hidden)]
    pub fn try_lock(&self) -> Option<ReentrantLockGuard<'_, T>> {
        let this_thread = current_id();
        // Safety: We only touch lock_count when we own the inner mutex.
        // Additionally, we only call `self.owner.set()` while holding
        // the inner mutex, so no two threads can call it concurrently.
        unsafe {
            if self.owner.contains(this_thread) {
                // Re-entrant path: fails (returns `None`) only on count overflow.
                self.increment_lock_count()?;
                Some(ReentrantLockGuard { lock: self })
            } else if self.mutex.try_lock() {
                self.owner.set(Some(this_thread));
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
                Some(ReentrantLockGuard { lock: self })
            } else {
                // Another thread holds the mutex.
                None
            }
        }
    }

    /// Returns a raw pointer to the underlying data.
    ///
    /// The returned pointer is always non-null and properly aligned, but it is
    /// the user's responsibility to ensure that any reads through it are
    /// properly synchronized to avoid data races, and that it is not read
    /// through after the lock is dropped.
    #[unstable(feature = "reentrant_lock_data_ptr", issue = "140368")]
    pub const fn data_ptr(&self) -> *const T {
        &raw const self.data
    }

    // Bumps `lock_count`, returning `None` on overflow instead of wrapping.
    //
    // SAFETY (caller contract): the current thread must own the inner mutex.
    unsafe fn increment_lock_count(&self) -> Option<()> {
        unsafe {
            *self.lock_count.get() = (*self.lock_count.get()).checked_add(1)?;
        }
        Some(())
    }
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T: fmt::Debug + ?Sized> fmt::Debug for ReentrantLock<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let mut d = f.debug_struct("ReentrantLock");
|
||||
match self.try_lock() {
|
||||
Some(v) => d.field("data", &&*v),
|
||||
None => d.field("data", &format_args!("<locked>")),
|
||||
};
|
||||
d.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T: Default> Default for ReentrantLock<T> {
|
||||
fn default() -> Self {
|
||||
Self::new(T::default())
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T> From<T> for ReentrantLock<T> {
|
||||
fn from(t: T) -> Self {
|
||||
Self::new(t)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T: ?Sized> Deref for ReentrantLockGuard<'_, T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
&self.lock.data
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T: fmt::Debug + ?Sized> fmt::Debug for ReentrantLockGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
(**self).fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T: fmt::Display + ?Sized> fmt::Display for ReentrantLockGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
(**self).fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> Drop for ReentrantLockGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: We own the lock.
        unsafe {
            // Only the outermost guard releases the mutex; inner (re-entrant)
            // guards merely decrement the count.
            *self.lock.lock_count.get() -= 1;
            if *self.lock.lock_count.get() == 0 {
                // Clear `owner` before unlocking so that once the mutex is
                // available, no thread can observe a stale owner id.
                self.lock.owner.set(None);
                self.lock.mutex.unlock();
            }
        }
    }
}
|
||||
Reference in New Issue
Block a user