Compare commits
8 Commits
51780b3a78
...
387e586086
| Author | SHA1 | Date | |
|---|---|---|---|
| 387e586086 | |||
| a134536d2f | |||
| 48a75485b6 | |||
| 3121c0b68b | |||
| 45d23efe77 | |||
| fc3a04a20e | |||
| 9b8afd2c5c | |||
| a087bdd523 |
@@ -3,10 +3,9 @@ target = "riscv64.json"
|
||||
|
||||
[unstable]
|
||||
json-target-spec = true
|
||||
build-std = ["core", "compiler_builtins", "alloc"]
|
||||
build-std-features = ["compiler-builtins-mem"]
|
||||
|
||||
[target.riscv64]
|
||||
rustflags = [
|
||||
"-C", "link-arg=-Tilm.ld",
|
||||
"--sysroot", "sysroot"
|
||||
]
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -7,3 +7,5 @@
|
||||
disk.img
|
||||
**/*.mem
|
||||
mnt
|
||||
|
||||
sysroot/lib/rustlib/riscv64
|
||||
|
||||
3
.gitmodules
vendored
Normal file
3
.gitmodules
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
[submodule "library/backtrace"]
|
||||
path = library/backtrace
|
||||
url = https://github.com/rust-lang/backtrace-rs.git
|
||||
@@ -1,6 +1,7 @@
|
||||
[workspace]
|
||||
resolver = "3"
|
||||
members = ["crates/bytes-struct","crates/io","crates/std", "crates/shared", "user/*"]
|
||||
members = [ "user/*"]
|
||||
exclude = ["library"]
|
||||
|
||||
[package]
|
||||
name = "kernel-rust"
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#![feature(iterator_try_collect, iter_order_by)]
|
||||
#![allow(unused_features)]
|
||||
#![cfg_attr(any(not(feature = "std"), target_arch = "riscv64"), no_std)]
|
||||
|
||||
use core::cell::RefCell;
|
||||
|
||||
@@ -7,6 +7,6 @@ edition = "2024"
|
||||
proc-macro = true
|
||||
|
||||
[dependencies]
|
||||
image = "0.25"
|
||||
image = { version = "0.25", default-features = false, features = ["png"] }
|
||||
syn = { version = "2", features = ["full"] }
|
||||
zyn = "0.5"
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
[package]
|
||||
name = "os-std-macros"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
|
||||
[lib]
|
||||
proc-macro = true
|
||||
|
||||
[dependencies]
|
||||
proc-macro2 = "1"
|
||||
quote = "1"
|
||||
syn = { version = "2", features = ["full"] }
|
||||
@@ -1 +0,0 @@
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
[package]
|
||||
name = "std"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
|
||||
[dependencies]
|
||||
hashbrown = "0.16"
|
||||
os-std-macros = { path = "../os-std-macros" }
|
||||
shared = { path = "../shared", features = ["user"] }
|
||||
io = { path = "../io", features = ["alloc"] }
|
||||
@@ -1,490 +0,0 @@
|
||||
//! Memory allocation APIs.
|
||||
//!
|
||||
//! In a given program, the standard library has one “global” memory allocator
|
||||
//! that is used for example by `Box<T>` and `Vec<T>`.
|
||||
//!
|
||||
//! Currently the default global allocator is unspecified. Libraries, however,
|
||||
//! like `cdylib`s and `staticlib`s are guaranteed to use the [`System`] by
|
||||
//! default.
|
||||
//!
|
||||
//! # The `#[global_allocator]` attribute
|
||||
//!
|
||||
//! This attribute allows configuring the choice of global allocator.
|
||||
//! You can use this to implement a completely custom global allocator
|
||||
//! to route all[^system-alloc] default allocation requests to a custom object.
|
||||
//!
|
||||
//! ```rust
|
||||
//! use std::alloc::{GlobalAlloc, System, Layout};
|
||||
//!
|
||||
//! struct MyAllocator;
|
||||
//!
|
||||
//! unsafe impl GlobalAlloc for MyAllocator {
|
||||
//! unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
|
||||
//! unsafe { System.alloc(layout) }
|
||||
//! }
|
||||
//!
|
||||
//! unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
|
||||
//! unsafe { System.dealloc(ptr, layout) }
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! #[global_allocator]
|
||||
//! static GLOBAL: MyAllocator = MyAllocator;
|
||||
//!
|
||||
//! fn main() {
|
||||
//! // This `Vec` will allocate memory through `GLOBAL` above
|
||||
//! let mut v = Vec::new();
|
||||
//! v.push(1);
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! The attribute is used on a `static` item whose type implements the
|
||||
//! [`GlobalAlloc`] trait. This type can be provided by an external library:
|
||||
//!
|
||||
//! ```rust,ignore (demonstrates crates.io usage)
|
||||
//! use jemallocator::Jemalloc;
|
||||
//!
|
||||
//! #[global_allocator]
|
||||
//! static GLOBAL: Jemalloc = Jemalloc;
|
||||
//!
|
||||
//! fn main() {}
|
||||
//! ```
|
||||
//!
|
||||
//! The `#[global_allocator]` can only be used once in a crate
|
||||
//! or its recursive dependencies.
|
||||
//!
|
||||
//! [^system-alloc]: Note that the Rust standard library internals may still
|
||||
//! directly call [`System`] when necessary (for example for the runtime
|
||||
//! support typically required to implement a global allocator, see [re-entrance] on [`GlobalAlloc`]
|
||||
//! for more details).
|
||||
//!
|
||||
//! [re-entrance]: trait.GlobalAlloc.html#re-entrance
|
||||
|
||||
#![deny(unsafe_op_in_unsafe_fn)]
|
||||
#![stable(feature = "alloc_module", since = "1.28.0")]
|
||||
|
||||
use core::ptr::NonNull;
|
||||
use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
|
||||
use core::{hint, mem, ptr};
|
||||
|
||||
#[stable(feature = "alloc_module", since = "1.28.0")]
|
||||
#[doc(inline)]
|
||||
pub use alloc_crate::alloc::*;
|
||||
|
||||
/// The default memory allocator provided by the operating system.
|
||||
///
|
||||
/// This is based on `malloc` on Unix platforms and `HeapAlloc` on Windows,
|
||||
/// plus related functions. However, it is not valid to mix use of the backing
|
||||
/// system allocator with `System`, as this implementation may include extra
|
||||
/// work, such as to serve alignment requests greater than the alignment
|
||||
/// provided directly by the backing system allocator.
|
||||
///
|
||||
/// This type implements the [`GlobalAlloc`] trait. Currently the default
|
||||
/// global allocator is unspecified. Libraries, however, like `cdylib`s and
|
||||
/// `staticlib`s are guaranteed to use the [`System`] by default and as such
|
||||
/// work as if they had this definition:
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::alloc::System;
|
||||
///
|
||||
/// #[global_allocator]
|
||||
/// static A: System = System;
|
||||
///
|
||||
/// fn main() {
|
||||
/// let a = Box::new(4); // Allocates from the system allocator.
|
||||
/// println!("{a}");
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// You can also define your own wrapper around `System` if you'd like, such as
|
||||
/// keeping track of the number of all bytes allocated:
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::alloc::{System, GlobalAlloc, Layout};
|
||||
/// use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
|
||||
///
|
||||
/// struct Counter;
|
||||
///
|
||||
/// static ALLOCATED: AtomicUsize = AtomicUsize::new(0);
|
||||
///
|
||||
/// unsafe impl GlobalAlloc for Counter {
|
||||
/// unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
|
||||
/// let ret = unsafe { System.alloc(layout) };
|
||||
/// if !ret.is_null() {
|
||||
/// ALLOCATED.fetch_add(layout.size(), Relaxed);
|
||||
/// }
|
||||
/// ret
|
||||
/// }
|
||||
///
|
||||
/// unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
|
||||
/// unsafe { System.dealloc(ptr, layout); }
|
||||
/// ALLOCATED.fetch_sub(layout.size(), Relaxed);
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// #[global_allocator]
|
||||
/// static A: Counter = Counter;
|
||||
///
|
||||
/// fn main() {
|
||||
/// println!("allocated bytes before main: {}", ALLOCATED.load(Relaxed));
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// It can also be used directly to allocate memory independently of whatever
|
||||
/// global allocator has been selected for a Rust program. For example if a Rust
|
||||
/// program opts in to using jemalloc as the global allocator, `System` will
|
||||
/// still allocate memory using `malloc` and `HeapAlloc`.
|
||||
#[stable(feature = "alloc_system_type", since = "1.28.0")]
|
||||
#[derive(Debug, Default, Copy, Clone)]
|
||||
pub struct System;
|
||||
|
||||
impl System {
|
||||
#[inline]
|
||||
fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
|
||||
match layout.size() {
|
||||
0 => Ok(NonNull::slice_from_raw_parts(layout.dangling_ptr(), 0)),
|
||||
// SAFETY: `layout` is non-zero in size,
|
||||
size => unsafe {
|
||||
let raw_ptr = if zeroed {
|
||||
GlobalAlloc::alloc_zeroed(self, layout)
|
||||
} else {
|
||||
GlobalAlloc::alloc(self, layout)
|
||||
};
|
||||
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
|
||||
Ok(NonNull::slice_from_raw_parts(ptr, size))
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// SAFETY: Same as `Allocator::grow`
|
||||
#[inline]
|
||||
unsafe fn grow_impl(
|
||||
&self,
|
||||
ptr: NonNull<u8>,
|
||||
old_layout: Layout,
|
||||
new_layout: Layout,
|
||||
zeroed: bool,
|
||||
) -> Result<NonNull<[u8]>, AllocError> {
|
||||
debug_assert!(
|
||||
new_layout.size() >= old_layout.size(),
|
||||
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
|
||||
);
|
||||
|
||||
match old_layout.size() {
|
||||
0 => self.alloc_impl(new_layout, zeroed),
|
||||
|
||||
// SAFETY: `new_size` is non-zero as `new_size` is greater than or equal to `old_size`
|
||||
// as required by safety conditions and the `old_size == 0` case was handled in the
|
||||
// previous match arm. Other conditions must be upheld by the caller
|
||||
old_size if old_layout.align() == new_layout.align() => unsafe {
|
||||
let new_size = new_layout.size();
|
||||
|
||||
// `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
|
||||
hint::assert_unchecked(new_size >= old_layout.size());
|
||||
|
||||
let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size);
|
||||
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
|
||||
if zeroed {
|
||||
raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
|
||||
}
|
||||
Ok(NonNull::slice_from_raw_parts(ptr, new_size))
|
||||
},
|
||||
|
||||
// SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
|
||||
// both the old and new memory allocation are valid for reads and writes for `old_size`
|
||||
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
|
||||
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
|
||||
// for `dealloc` must be upheld by the caller.
|
||||
old_size => unsafe {
|
||||
let new_ptr = self.alloc_impl(new_layout, zeroed)?;
|
||||
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
|
||||
Allocator::deallocate(self, ptr, old_layout);
|
||||
Ok(new_ptr)
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The Allocator impl checks the layout size to be non-zero and forwards to the GlobalAlloc impl,
|
||||
// which is in `std::sys::*::alloc`.
|
||||
#[unstable(feature = "allocator_api", issue = "32838")]
|
||||
unsafe impl Allocator for System {
|
||||
#[inline]
|
||||
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
|
||||
self.alloc_impl(layout, false)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
|
||||
self.alloc_impl(layout, true)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
|
||||
if layout.size() != 0 {
|
||||
// SAFETY: `layout` is non-zero in size,
|
||||
// other conditions must be upheld by the caller
|
||||
unsafe { GlobalAlloc::dealloc(self, ptr.as_ptr(), layout) }
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn grow(
|
||||
&self,
|
||||
ptr: NonNull<u8>,
|
||||
old_layout: Layout,
|
||||
new_layout: Layout,
|
||||
) -> Result<NonNull<[u8]>, AllocError> {
|
||||
// SAFETY: all conditions must be upheld by the caller
|
||||
unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn grow_zeroed(
|
||||
&self,
|
||||
ptr: NonNull<u8>,
|
||||
old_layout: Layout,
|
||||
new_layout: Layout,
|
||||
) -> Result<NonNull<[u8]>, AllocError> {
|
||||
// SAFETY: all conditions must be upheld by the caller
|
||||
unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn shrink(
|
||||
&self,
|
||||
ptr: NonNull<u8>,
|
||||
old_layout: Layout,
|
||||
new_layout: Layout,
|
||||
) -> Result<NonNull<[u8]>, AllocError> {
|
||||
debug_assert!(
|
||||
new_layout.size() <= old_layout.size(),
|
||||
"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
|
||||
);
|
||||
|
||||
match new_layout.size() {
|
||||
// SAFETY: conditions must be upheld by the caller
|
||||
0 => unsafe {
|
||||
Allocator::deallocate(self, ptr, old_layout);
|
||||
Ok(NonNull::slice_from_raw_parts(new_layout.dangling_ptr(), 0))
|
||||
},
|
||||
|
||||
// SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
|
||||
new_size if old_layout.align() == new_layout.align() => unsafe {
|
||||
// `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
|
||||
hint::assert_unchecked(new_size <= old_layout.size());
|
||||
|
||||
let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size);
|
||||
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
|
||||
Ok(NonNull::slice_from_raw_parts(ptr, new_size))
|
||||
},
|
||||
|
||||
// SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
|
||||
// both the old and new memory allocation are valid for reads and writes for `new_size`
|
||||
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
|
||||
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
|
||||
// for `dealloc` must be upheld by the caller.
|
||||
new_size => unsafe {
|
||||
let new_ptr = Allocator::allocate(self, new_layout)?;
|
||||
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
|
||||
Allocator::deallocate(self, ptr, old_layout);
|
||||
Ok(new_ptr)
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
|
||||
|
||||
/// Registers a custom allocation error hook, replacing any that was previously registered.
|
||||
///
|
||||
/// The allocation error hook is invoked when an infallible memory allocation fails — that is,
|
||||
/// as a consequence of calling [`handle_alloc_error`] — before the runtime aborts.
|
||||
///
|
||||
/// The allocation error hook is a global resource. [`take_alloc_error_hook`] may be used to
|
||||
/// retrieve a previously registered hook and wrap or discard it.
|
||||
///
|
||||
/// # What the provided `hook` function should expect
|
||||
///
|
||||
/// The hook function is provided with a [`Layout`] struct which contains information
|
||||
/// about the allocation that failed.
|
||||
///
|
||||
/// The hook function may choose to panic or abort; in the event that it returns normally, this
|
||||
/// will cause an immediate abort.
|
||||
///
|
||||
/// Since [`take_alloc_error_hook`] is a safe function that allows retrieving the hook, the hook
|
||||
/// function must be _sound_ to call even if no memory allocations were attempted.
|
||||
///
|
||||
/// # The default hook
|
||||
///
|
||||
/// The default hook, used if [`set_alloc_error_hook`] is never called, prints a message to
|
||||
/// standard error (and then returns, causing the runtime to abort the process).
|
||||
/// Compiler options may cause it to panic instead, and the default behavior may be changed
|
||||
/// to panicking in future versions of Rust.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(alloc_error_hook)]
|
||||
///
|
||||
/// use std::alloc::{Layout, set_alloc_error_hook};
|
||||
///
|
||||
/// fn custom_alloc_error_hook(layout: Layout) {
|
||||
/// panic!("memory allocation of {} bytes failed", layout.size());
|
||||
/// }
|
||||
///
|
||||
/// set_alloc_error_hook(custom_alloc_error_hook);
|
||||
/// ```
|
||||
#[unstable(feature = "alloc_error_hook", issue = "51245")]
|
||||
pub fn set_alloc_error_hook(hook: fn(Layout)) {
|
||||
HOOK.store(hook as *mut (), Ordering::Release);
|
||||
}
|
||||
|
||||
// /// Unregisters the current allocation error hook, returning it.
|
||||
// ///
|
||||
// /// *See also the function [`set_alloc_error_hook`].*
|
||||
// ///
|
||||
// /// If no custom hook is registered, the default hook will be returned.
|
||||
// #[unstable(feature = "alloc_error_hook", issue = "51245")]
|
||||
// pub fn take_alloc_error_hook() -> fn(Layout) {
|
||||
// let hook = HOOK.swap(ptr::null_mut(), Ordering::Acquire);
|
||||
// if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } }
|
||||
// }
|
||||
|
||||
// #[optimize(size)]
|
||||
// fn default_alloc_error_hook(layout: Layout) {
|
||||
// if cfg!(panic = "immediate-abort") {
|
||||
// return;
|
||||
// }
|
||||
|
||||
// // This is the default path taken on OOM, and the only path taken on stable with std.
|
||||
// // Crucially, it does *not* call any user-defined code, and therefore users do not have to
|
||||
// // worry about allocation failure causing reentrancy issues. That makes it different from
|
||||
// // the default `__rdl_alloc_error_handler` defined in alloc (i.e., the default alloc error
|
||||
// // handler that is called when there is no `#[alloc_error_handler]`), which triggers a
|
||||
// // regular panic and thus can invoke a user-defined panic hook, executing arbitrary
|
||||
// // user-defined code.
|
||||
|
||||
// static PREV_ALLOC_FAILURE: AtomicBool = AtomicBool::new(false);
|
||||
// if PREV_ALLOC_FAILURE.swap(true, Ordering::Relaxed) {
|
||||
// // Don't try to print a backtrace if a previous alloc error happened. This likely means
|
||||
// // there is not enough memory to print a backtrace, although it could also mean that two
|
||||
// // threads concurrently run out of memory.
|
||||
// rtprintpanic!(
|
||||
// "memory allocation of {} bytes failed\nskipping backtrace printing to avoid potential recursion\n",
|
||||
// layout.size()
|
||||
// );
|
||||
// return;
|
||||
// } else {
|
||||
// rtprintpanic!("memory allocation of {} bytes failed\n", layout.size());
|
||||
// }
|
||||
|
||||
// let Some(mut out) = crate::sys::stdio::panic_output() else {
|
||||
// return;
|
||||
// };
|
||||
|
||||
// // Use a lock to prevent mixed output in multithreading context.
|
||||
// // Some platforms also require it when printing a backtrace, like `SymFromAddr` on Windows.
|
||||
// // Make sure to not take this lock until after checking PREV_ALLOC_FAILURE to avoid deadlocks
|
||||
// // when there is too little memory to print a backtrace.
|
||||
// let mut lock = crate::sys::backtrace::lock();
|
||||
|
||||
// match crate::panic::get_backtrace_style() {
|
||||
// Some(crate::panic::BacktraceStyle::Short) => {
|
||||
// drop(lock.print(&mut out, crate::backtrace_rs::PrintFmt::Short))
|
||||
// }
|
||||
// Some(crate::panic::BacktraceStyle::Full) => {
|
||||
// drop(lock.print(&mut out, crate::backtrace_rs::PrintFmt::Full))
|
||||
// }
|
||||
// Some(crate::panic::BacktraceStyle::Off) => {
|
||||
// use crate::io::Write;
|
||||
// let _ = writeln!(
|
||||
// out,
|
||||
// "note: run with `RUST_BACKTRACE=1` environment variable to display a \
|
||||
// backtrace"
|
||||
// );
|
||||
// if cfg!(miri) {
|
||||
// let _ = writeln!(
|
||||
// out,
|
||||
// "note: in Miri, you may have to set `MIRIFLAGS=-Zmiri-env-forward=RUST_BACKTRACE` \
|
||||
// for the environment variable to have an effect"
|
||||
// );
|
||||
// }
|
||||
// }
|
||||
// // If backtraces aren't supported or are forced-off, do nothing.
|
||||
// None => {}
|
||||
// }
|
||||
// }
|
||||
|
||||
// #[cfg(not(test))]
|
||||
// #[doc(hidden)]
|
||||
// #[alloc_error_handler]
|
||||
// #[unstable(feature = "alloc_internals", issue = "none")]
|
||||
// pub fn rust_oom(layout: Layout) -> ! {
|
||||
// crate::sys::backtrace::__rust_end_short_backtrace(|| {
|
||||
// let hook = HOOK.load(Ordering::Acquire);
|
||||
// let hook: fn(Layout) =
|
||||
// if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } };
|
||||
// hook(layout);
|
||||
// crate::process::abort()
|
||||
// })
|
||||
// }
|
||||
|
||||
#[cfg(not(test))]
|
||||
#[doc(hidden)]
|
||||
#[allow(unused_attributes)]
|
||||
#[unstable(feature = "alloc_internals", issue = "none")]
|
||||
pub mod __default_lib_allocator {
|
||||
use super::{GlobalAlloc, Layout, System};
|
||||
// These magic symbol names are used as a fallback for implementing the
|
||||
// `__rust_alloc` etc symbols (see `src/liballoc/alloc.rs`) when there is
|
||||
// no `#[global_allocator]` attribute.
|
||||
|
||||
// for symbol names src/librustc_ast/expand/allocator.rs
|
||||
// for signatures src/librustc_allocator/lib.rs
|
||||
|
||||
// linkage directives are provided as part of the current compiler allocator
|
||||
// ABI
|
||||
|
||||
#[rustc_std_internal_symbol]
|
||||
pub unsafe extern "C" fn __rdl_alloc(size: usize, align: usize) -> *mut u8 {
|
||||
// SAFETY: see the guarantees expected by `Layout::from_size_align` and
|
||||
// `GlobalAlloc::alloc`.
|
||||
unsafe {
|
||||
let layout = Layout::from_size_align_unchecked(size, align);
|
||||
System.alloc(layout)
|
||||
}
|
||||
}
|
||||
|
||||
#[rustc_std_internal_symbol]
|
||||
pub unsafe extern "C" fn __rdl_dealloc(ptr: *mut u8, size: usize, align: usize) {
|
||||
// SAFETY: see the guarantees expected by `Layout::from_size_align` and
|
||||
// `GlobalAlloc::dealloc`.
|
||||
unsafe { System.dealloc(ptr, Layout::from_size_align_unchecked(size, align)) }
|
||||
}
|
||||
|
||||
#[rustc_std_internal_symbol]
|
||||
pub unsafe extern "C" fn __rdl_realloc(
|
||||
ptr: *mut u8,
|
||||
old_size: usize,
|
||||
align: usize,
|
||||
new_size: usize,
|
||||
) -> *mut u8 {
|
||||
// SAFETY: see the guarantees expected by `Layout::from_size_align` and
|
||||
// `GlobalAlloc::realloc`.
|
||||
unsafe {
|
||||
let old_layout = Layout::from_size_align_unchecked(old_size, align);
|
||||
System.realloc(ptr, old_layout, new_size)
|
||||
}
|
||||
}
|
||||
|
||||
#[rustc_std_internal_symbol]
|
||||
pub unsafe extern "C" fn __rdl_alloc_zeroed(size: usize, align: usize) -> *mut u8 {
|
||||
// SAFETY: see the guarantees expected by `Layout::from_size_align` and
|
||||
// `GlobalAlloc::alloc_zeroed`.
|
||||
unsafe {
|
||||
let layout = Layout::from_size_align_unchecked(size, align);
|
||||
System.alloc_zeroed(layout)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
//! The `ByteStr` and `ByteString` types and trait implementations.
|
||||
|
||||
#[unstable(feature = "bstr", issue = "134915")]
|
||||
pub use alloc_crate::bstr::{ByteStr, ByteString};
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,4 +0,0 @@
|
||||
//! Unordered containers, implemented as hash-tables
|
||||
|
||||
pub mod map;
|
||||
pub mod set;
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,6 +0,0 @@
|
||||
use core::marker::PhantomData;
|
||||
|
||||
pub struct HashMap<K, V, T> {
|
||||
_phantom: PhantomData<(K, V, T)>,
|
||||
}
|
||||
pub use alloc_crate::collections;
|
||||
@@ -1,5 +0,0 @@
|
||||
use crate::ffi::OsString;
|
||||
|
||||
pub fn var_os(s: &str) -> Option<OsString> {
|
||||
None
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use core::error::Error;
|
||||
#[unstable(feature = "error_generic_member_access", issue = "99301")]
|
||||
pub use core::error::{Request, request_ref, request_value};
|
||||
@@ -1,14 +0,0 @@
|
||||
//! [`CStr`], [`CString`], and related types.
|
||||
|
||||
#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
|
||||
pub use alloc_crate::ffi::c_str::FromVecWithNulError;
|
||||
#[stable(feature = "cstring_into", since = "1.7.0")]
|
||||
pub use alloc_crate::ffi::c_str::IntoStringError;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use alloc_crate::ffi::c_str::{CString, NulError};
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use core::ffi::c_str::CStr;
|
||||
#[stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")]
|
||||
pub use core::ffi::c_str::FromBytesUntilNulError;
|
||||
#[stable(feature = "cstr_from_bytes", since = "1.10.0")]
|
||||
pub use core::ffi::c_str::FromBytesWithNulError;
|
||||
@@ -1,207 +0,0 @@
|
||||
//! Utilities related to FFI bindings.
|
||||
//!
|
||||
//! This module provides utilities to handle data across non-Rust
|
||||
//! interfaces, like other programming languages and the underlying
|
||||
//! operating system. It is mainly of use for FFI (Foreign Function
|
||||
//! Interface) bindings and code that needs to exchange C-like strings
|
||||
//! with other languages.
|
||||
//!
|
||||
//! # Overview
|
||||
//!
|
||||
//! Rust represents owned strings with the [`String`] type, and
|
||||
//! borrowed slices of strings with the [`str`] primitive. Both are
|
||||
//! always in UTF-8 encoding, and may contain nul bytes in the middle,
|
||||
//! i.e., if you look at the bytes that make up the string, there may
|
||||
//! be a `\0` among them. Both `String` and `str` store their length
|
||||
//! explicitly; there are no nul terminators at the end of strings
|
||||
//! like in C.
|
||||
//!
|
||||
//! C strings are different from Rust strings:
|
||||
//!
|
||||
//! * **Encodings** - Rust strings are UTF-8, but C strings may use
|
||||
//! other encodings. If you are using a string from C, you should
|
||||
//! check its encoding explicitly, rather than just assuming that it
|
||||
//! is UTF-8 like you can do in Rust.
|
||||
//!
|
||||
//! * **Character size** - C strings may use `char` or `wchar_t`-sized
|
||||
//! characters; please **note** that C's `char` is different from Rust's.
|
||||
//! The C standard leaves the actual sizes of those types open to
|
||||
//! interpretation, but defines different APIs for strings made up of
|
||||
//! each character type. Rust strings are always UTF-8, so different
|
||||
//! Unicode characters will be encoded in a variable number of bytes
|
||||
//! each. The Rust type [`char`] represents a '[Unicode scalar
|
||||
//! value]', which is similar to, but not the same as, a '[Unicode
|
||||
//! code point]'.
|
||||
//!
|
||||
//! * **Nul terminators and implicit string lengths** - Often, C
|
||||
//! strings are nul-terminated, i.e., they have a `\0` character at the
|
||||
//! end. The length of a string buffer is not stored, but has to be
|
||||
//! calculated; to compute the length of a string, C code must
|
||||
//! manually call a function like `strlen()` for `char`-based strings,
|
||||
//! or `wcslen()` for `wchar_t`-based ones. Those functions return
|
||||
//! the number of characters in the string excluding the nul
|
||||
//! terminator, so the buffer length is really `len+1` characters.
|
||||
//! Rust strings don't have a nul terminator; their length is always
|
||||
//! stored and does not need to be calculated. While in Rust
|
||||
//! accessing a string's length is an *O*(1) operation (because the
|
||||
//! length is stored); in C it is an *O*(*n*) operation because the
|
||||
//! length needs to be computed by scanning the string for the nul
|
||||
//! terminator.
|
||||
//!
|
||||
//! * **Internal nul characters** - When C strings have a nul
|
||||
//! terminator character, this usually means that they cannot have nul
|
||||
//! characters in the middle — a nul character would essentially
|
||||
//! truncate the string. Rust strings *can* have nul characters in
|
||||
//! the middle, because nul does not have to mark the end of the
|
||||
//! string in Rust.
|
||||
//!
|
||||
//! # Representations of non-Rust strings
|
||||
//!
|
||||
//! [`CString`] and [`CStr`] are useful when you need to transfer
|
||||
//! UTF-8 strings to and from languages with a C ABI, like Python.
|
||||
//!
|
||||
//! * **From Rust to C:** [`CString`] represents an owned, C-friendly
|
||||
//! string: it is nul-terminated, and has no internal nul characters.
|
||||
//! Rust code can create a [`CString`] out of a normal string (provided
|
||||
//! that the string doesn't have nul characters in the middle), and
|
||||
//! then use a variety of methods to obtain a raw <code>\*mut [u8]</code> that can
|
||||
//! then be passed as an argument to functions which use the C
|
||||
//! conventions for strings.
|
||||
//!
|
||||
//! * **From C to Rust:** [`CStr`] represents a borrowed C string; it
|
||||
//! is what you would use to wrap a raw <code>\*const [u8]</code> that you got from
|
||||
//! a C function. A [`CStr`] is guaranteed to be a nul-terminated array
|
||||
//! of bytes. Once you have a [`CStr`], you can convert it to a Rust
|
||||
//! <code>&[str]</code> if it's valid UTF-8, or lossily convert it by adding
|
||||
//! replacement characters.
|
||||
//!
|
||||
//! [`OsString`] and [`OsStr`] are useful when you need to transfer
|
||||
//! strings to and from the operating system itself, or when capturing
|
||||
//! the output of external commands. Conversions between [`OsString`],
|
||||
//! [`OsStr`] and Rust strings work similarly to those for [`CString`]
|
||||
//! and [`CStr`].
|
||||
//!
|
||||
//! * [`OsString`] losslessly represents an owned platform string. However, this
|
||||
//! representation is not necessarily in a form native to the platform.
|
||||
//! In the Rust standard library, various APIs that transfer strings to/from the operating
|
||||
//! system use [`OsString`] instead of plain strings. For example,
|
||||
//! [`env::var_os()`] is used to query environment variables; it
|
||||
//! returns an <code>[Option]<[OsString]></code>. If the environment variable
|
||||
//! exists you will get a <code>[Some]\(os_string)</code>, which you can
|
||||
//! *then* try to convert to a Rust string. This yields a [`Result`], so that
|
||||
//! your code can detect errors in case the environment variable did
|
||||
//! not in fact contain valid Unicode data.
|
||||
//!
|
||||
//! * [`OsStr`] losslessly represents a borrowed reference to a platform string.
|
||||
//! However, this representation is not necessarily in a form native to the platform.
|
||||
//! It can be converted into a UTF-8 Rust string slice in a similar way to
|
||||
//! [`OsString`].
|
||||
//!
|
||||
//! # Conversions
|
||||
//!
|
||||
//! ## On Unix
|
||||
//!
|
||||
//! On Unix, [`OsStr`] implements the
|
||||
//! <code>std::os::unix::ffi::[OsStrExt][unix.OsStrExt]</code> trait, which
|
||||
//! augments it with two methods, [`from_bytes`] and [`as_bytes`].
|
||||
//! These do inexpensive conversions from and to byte slices.
|
||||
//!
|
||||
//! Additionally, on Unix [`OsString`] implements the
|
||||
//! <code>std::os::unix::ffi::[OsStringExt][unix.OsStringExt]</code> trait,
|
||||
//! which provides [`from_vec`] and [`into_vec`] methods that consume
|
||||
//! their arguments, and take or produce vectors of [`u8`].
|
||||
//!
|
||||
//! ## On Windows
|
||||
//!
|
||||
//! An [`OsStr`] can be losslessly converted to a native Windows string. And
|
||||
//! a native Windows string can be losslessly converted to an [`OsString`].
|
||||
//!
|
||||
//! On Windows, [`OsStr`] implements the
|
||||
//! <code>std::os::windows::ffi::[OsStrExt][windows.OsStrExt]</code> trait,
|
||||
//! which provides an [`encode_wide`] method. This provides an
|
||||
//! iterator that can be [`collect`]ed into a vector of [`u16`]. After a nul
|
||||
//! characters is appended, this is the same as a native Windows string.
|
||||
//!
|
||||
//! Additionally, on Windows [`OsString`] implements the
|
||||
//! <code>std::os::windows:ffi::[OsStringExt][windows.OsStringExt]</code>
|
||||
//! trait, which provides a [`from_wide`] method to convert a native Windows
|
||||
//! string (without the terminating nul character) to an [`OsString`].
|
||||
//!
|
||||
//! ## Other platforms
|
||||
//!
|
||||
//! Many other platforms provide their own extension traits in a
|
||||
//! `std::os::*::ffi` module.
|
||||
//!
|
||||
//! ## On all platforms
|
||||
//!
|
||||
//! On all platforms, [`OsStr`] consists of a sequence of bytes that is encoded as a superset of
|
||||
//! UTF-8; see [`OsString`] for more details on its encoding on different platforms.
|
||||
//!
|
||||
//! For limited, inexpensive conversions from and to bytes, see [`OsStr::as_encoded_bytes`] and
|
||||
//! [`OsStr::from_encoded_bytes_unchecked`].
|
||||
//!
|
||||
//! For basic string processing, see [`OsStr::slice_encoded_bytes`].
|
||||
//!
|
||||
//! [Unicode scalar value]: https://www.unicode.org/glossary/#unicode_scalar_value
|
||||
//! [Unicode code point]: https://www.unicode.org/glossary/#code_point
|
||||
//! [`env::set_var()`]: crate::env::set_var "env::set_var"
|
||||
//! [`env::var_os()`]: crate::env::var_os "env::var_os"
|
||||
//! [unix.OsStringExt]: crate::os::unix::ffi::OsStringExt "os::unix::ffi::OsStringExt"
|
||||
//! [`from_vec`]: crate::os::unix::ffi::OsStringExt::from_vec "os::unix::ffi::OsStringExt::from_vec"
|
||||
//! [`into_vec`]: crate::os::unix::ffi::OsStringExt::into_vec "os::unix::ffi::OsStringExt::into_vec"
|
||||
//! [unix.OsStrExt]: crate::os::unix::ffi::OsStrExt "os::unix::ffi::OsStrExt"
|
||||
//! [`from_bytes`]: crate::os::unix::ffi::OsStrExt::from_bytes "os::unix::ffi::OsStrExt::from_bytes"
|
||||
//! [`as_bytes`]: crate::os::unix::ffi::OsStrExt::as_bytes "os::unix::ffi::OsStrExt::as_bytes"
|
||||
//! [`OsStrExt`]: crate::os::unix::ffi::OsStrExt "os::unix::ffi::OsStrExt"
|
||||
//! [windows.OsStrExt]: crate::os::windows::ffi::OsStrExt "os::windows::ffi::OsStrExt"
|
||||
//! [`encode_wide`]: crate::os::windows::ffi::OsStrExt::encode_wide "os::windows::ffi::OsStrExt::encode_wide"
|
||||
//! [`collect`]: crate::iter::Iterator::collect "iter::Iterator::collect"
|
||||
//! [windows.OsStringExt]: crate::os::windows::ffi::OsStringExt "os::windows::ffi::OsStringExt"
|
||||
//! [`from_wide`]: crate::os::windows::ffi::OsStringExt::from_wide "os::windows::ffi::OsStringExt::from_wide"
|
||||
|
||||
#![stable(feature = "rust1", since = "1.0.0")]
|
||||
|
||||
#[stable(feature = "c_str_module", since = "1.88.0")]
|
||||
pub mod c_str;
|
||||
|
||||
#[stable(feature = "core_c_void", since = "1.30.0")]
|
||||
pub use core::ffi::c_void;
|
||||
#[unstable(
|
||||
feature = "c_variadic",
|
||||
reason = "the `c_variadic` feature has not been properly tested on \
|
||||
all supported platforms",
|
||||
issue = "44930"
|
||||
)]
|
||||
pub use core::ffi::{VaArgSafe, VaList};
|
||||
#[stable(feature = "core_ffi_c", since = "1.64.0")]
|
||||
pub use core::ffi::{
|
||||
c_char, c_double, c_float, c_int, c_long, c_longlong, c_schar, c_short, c_uchar, c_uint,
|
||||
c_ulong, c_ulonglong, c_ushort,
|
||||
};
|
||||
#[unstable(feature = "c_size_t", issue = "88345")]
|
||||
pub use core::ffi::{c_ptrdiff_t, c_size_t, c_ssize_t};
|
||||
|
||||
#[doc(inline)]
|
||||
#[stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")]
|
||||
pub use self::c_str::FromBytesUntilNulError;
|
||||
#[doc(inline)]
|
||||
#[stable(feature = "cstr_from_bytes", since = "1.10.0")]
|
||||
pub use self::c_str::FromBytesWithNulError;
|
||||
#[doc(inline)]
|
||||
#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
|
||||
pub use self::c_str::FromVecWithNulError;
|
||||
#[doc(inline)]
|
||||
#[stable(feature = "cstring_into", since = "1.7.0")]
|
||||
pub use self::c_str::IntoStringError;
|
||||
#[doc(inline)]
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use self::c_str::NulError;
|
||||
#[doc(inline)]
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use self::c_str::{CStr, CString};
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[doc(inline)]
|
||||
pub use self::os_str::{OsStr, OsString};
|
||||
|
||||
#[stable(feature = "os_str_display", since = "1.87.0")]
|
||||
pub mod os_str;
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,311 +0,0 @@
|
||||
use super::*;
|
||||
use crate::mem::MaybeUninit;
|
||||
use crate::ptr;
|
||||
|
||||
#[test]
|
||||
fn test_os_string_with_capacity() {
|
||||
let os_string = OsString::with_capacity(0);
|
||||
assert_eq!(0, os_string.inner.into_inner().capacity());
|
||||
|
||||
let os_string = OsString::with_capacity(10);
|
||||
assert_eq!(10, os_string.inner.into_inner().capacity());
|
||||
|
||||
let mut os_string = OsString::with_capacity(0);
|
||||
os_string.push("abc");
|
||||
assert!(os_string.inner.into_inner().capacity() >= 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_os_string_clear() {
|
||||
let mut os_string = OsString::from("abc");
|
||||
assert_eq!(3, os_string.inner.as_inner().len());
|
||||
|
||||
os_string.clear();
|
||||
assert_eq!(&os_string, "");
|
||||
assert_eq!(0, os_string.inner.as_inner().len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_os_string_leak() {
|
||||
let os_string = OsString::from("have a cake");
|
||||
let (len, cap) = (os_string.len(), os_string.capacity());
|
||||
let leaked = os_string.leak();
|
||||
assert_eq!(leaked.as_encoded_bytes(), b"have a cake");
|
||||
unsafe { drop(String::from_raw_parts(leaked as *mut OsStr as _, len, cap)) }
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_os_string_capacity() {
|
||||
let os_string = OsString::with_capacity(0);
|
||||
assert_eq!(0, os_string.capacity());
|
||||
|
||||
let os_string = OsString::with_capacity(10);
|
||||
assert_eq!(10, os_string.capacity());
|
||||
|
||||
let mut os_string = OsString::with_capacity(0);
|
||||
os_string.push("abc");
|
||||
assert!(os_string.capacity() >= 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_os_string_reserve() {
|
||||
let mut os_string = OsString::new();
|
||||
assert_eq!(os_string.capacity(), 0);
|
||||
|
||||
os_string.reserve(2);
|
||||
assert!(os_string.capacity() >= 2);
|
||||
|
||||
for _ in 0..16 {
|
||||
os_string.push("a");
|
||||
}
|
||||
|
||||
assert!(os_string.capacity() >= 16);
|
||||
os_string.reserve(16);
|
||||
assert!(os_string.capacity() >= 32);
|
||||
|
||||
os_string.push("a");
|
||||
|
||||
os_string.reserve(16);
|
||||
assert!(os_string.capacity() >= 33)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_os_string_reserve_exact() {
|
||||
let mut os_string = OsString::new();
|
||||
assert_eq!(os_string.capacity(), 0);
|
||||
|
||||
os_string.reserve_exact(2);
|
||||
assert!(os_string.capacity() >= 2);
|
||||
|
||||
for _ in 0..16 {
|
||||
os_string.push("a");
|
||||
}
|
||||
|
||||
assert!(os_string.capacity() >= 16);
|
||||
os_string.reserve_exact(16);
|
||||
assert!(os_string.capacity() >= 32);
|
||||
|
||||
os_string.push("a");
|
||||
|
||||
os_string.reserve_exact(16);
|
||||
assert!(os_string.capacity() >= 33)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_os_string_join() {
|
||||
let strings = [OsStr::new("hello"), OsStr::new("dear"), OsStr::new("world")];
|
||||
assert_eq!("hello", strings[..1].join(OsStr::new(" ")));
|
||||
assert_eq!("hello dear world", strings.join(OsStr::new(" ")));
|
||||
assert_eq!("hellodearworld", strings.join(OsStr::new("")));
|
||||
assert_eq!("hello.\n dear.\n world", strings.join(OsStr::new(".\n ")));
|
||||
|
||||
assert_eq!("dear world", strings[1..].join(&OsString::from(" ")));
|
||||
|
||||
let strings_abc = [OsString::from("a"), OsString::from("b"), OsString::from("c")];
|
||||
assert_eq!("a b c", strings_abc.join(OsStr::new(" ")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_os_string_default() {
|
||||
let os_string: OsString = Default::default();
|
||||
assert_eq!("", &os_string);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_os_str_is_empty() {
|
||||
let mut os_string = OsString::new();
|
||||
assert!(os_string.is_empty());
|
||||
|
||||
os_string.push("abc");
|
||||
assert!(!os_string.is_empty());
|
||||
|
||||
os_string.clear();
|
||||
assert!(os_string.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_os_str_len() {
|
||||
let mut os_string = OsString::new();
|
||||
assert_eq!(0, os_string.len());
|
||||
|
||||
os_string.push("abc");
|
||||
assert_eq!(3, os_string.len());
|
||||
|
||||
os_string.clear();
|
||||
assert_eq!(0, os_string.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_os_str_default() {
|
||||
let os_str: &OsStr = Default::default();
|
||||
assert_eq!("", os_str);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn into_boxed() {
|
||||
let orig = "Hello, world!";
|
||||
let os_str = OsStr::new(orig);
|
||||
let boxed: Box<OsStr> = Box::from(os_str);
|
||||
let os_string = os_str.to_owned().into_boxed_os_str().into_os_string();
|
||||
assert_eq!(os_str, &*boxed);
|
||||
assert_eq!(&*boxed, &*os_string);
|
||||
assert_eq!(&*os_string, os_str);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn boxed_default() {
|
||||
let boxed = <Box<OsStr>>::default();
|
||||
assert!(boxed.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_os_str_clone_into() {
|
||||
let mut os_string = OsString::with_capacity(123);
|
||||
os_string.push("hello");
|
||||
let os_str = OsStr::new("bonjour");
|
||||
os_str.clone_into(&mut os_string);
|
||||
assert_eq!(os_str, os_string);
|
||||
assert!(os_string.capacity() >= 123);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn into_rc() {
|
||||
let orig = "Hello, world!";
|
||||
let os_str = OsStr::new(orig);
|
||||
let rc: Rc<OsStr> = Rc::from(os_str);
|
||||
let arc: Arc<OsStr> = Arc::from(os_str);
|
||||
|
||||
assert_eq!(&*rc, os_str);
|
||||
assert_eq!(&*arc, os_str);
|
||||
|
||||
let rc2: Rc<OsStr> = Rc::from(os_str.to_owned());
|
||||
let arc2: Arc<OsStr> = Arc::from(os_str.to_owned());
|
||||
|
||||
assert_eq!(&*rc2, os_str);
|
||||
assert_eq!(&*arc2, os_str);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn slice_encoded_bytes() {
|
||||
let os_str = OsStr::new("123θგ🦀");
|
||||
// ASCII
|
||||
let digits = os_str.slice_encoded_bytes(..3);
|
||||
assert_eq!(digits, "123");
|
||||
let three = os_str.slice_encoded_bytes(2..3);
|
||||
assert_eq!(three, "3");
|
||||
// 2-byte UTF-8
|
||||
let theta = os_str.slice_encoded_bytes(3..5);
|
||||
assert_eq!(theta, "θ");
|
||||
// 3-byte UTF-8
|
||||
let gani = os_str.slice_encoded_bytes(5..8);
|
||||
assert_eq!(gani, "გ");
|
||||
// 4-byte UTF-8
|
||||
let crab = os_str.slice_encoded_bytes(8..);
|
||||
assert_eq!(crab, "🦀");
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn slice_out_of_bounds() {
|
||||
let crab = OsStr::new("🦀");
|
||||
let _ = crab.slice_encoded_bytes(..5);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn slice_mid_char() {
|
||||
let crab = OsStr::new("🦀");
|
||||
let _ = crab.slice_encoded_bytes(..2);
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
#[test]
|
||||
#[should_panic(expected = "byte index 1 is not an OsStr boundary")]
|
||||
fn slice_invalid_data() {
|
||||
use crate::os::unix::ffi::OsStrExt;
|
||||
|
||||
let os_string = OsStr::from_bytes(b"\xFF\xFF");
|
||||
let _ = os_string.slice_encoded_bytes(1..);
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
#[test]
|
||||
#[should_panic(expected = "byte index 1 is not an OsStr boundary")]
|
||||
fn slice_partial_utf8() {
|
||||
use crate::os::unix::ffi::{OsStrExt, OsStringExt};
|
||||
|
||||
let part_crab = OsStr::from_bytes(&"🦀".as_bytes()[..3]);
|
||||
let mut os_string = OsString::from_vec(vec![0xFF]);
|
||||
os_string.push(part_crab);
|
||||
let _ = os_string.slice_encoded_bytes(1..);
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
#[test]
|
||||
fn slice_invalid_edge() {
|
||||
use crate::os::unix::ffi::{OsStrExt, OsStringExt};
|
||||
|
||||
let os_string = OsStr::from_bytes(b"a\xFFa");
|
||||
assert_eq!(os_string.slice_encoded_bytes(..1), "a");
|
||||
assert_eq!(os_string.slice_encoded_bytes(1..), OsStr::from_bytes(b"\xFFa"));
|
||||
assert_eq!(os_string.slice_encoded_bytes(..2), OsStr::from_bytes(b"a\xFF"));
|
||||
assert_eq!(os_string.slice_encoded_bytes(2..), "a");
|
||||
|
||||
let os_string = OsStr::from_bytes(&"abc🦀".as_bytes()[..6]);
|
||||
assert_eq!(os_string.slice_encoded_bytes(..3), "abc");
|
||||
assert_eq!(os_string.slice_encoded_bytes(3..), OsStr::from_bytes(b"\xF0\x9F\xA6"));
|
||||
|
||||
let mut os_string = OsString::from_vec(vec![0xFF]);
|
||||
os_string.push("🦀");
|
||||
assert_eq!(os_string.slice_encoded_bytes(..1), OsStr::from_bytes(b"\xFF"));
|
||||
assert_eq!(os_string.slice_encoded_bytes(1..), "🦀");
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
#[test]
|
||||
#[should_panic(expected = "byte index 3 lies between surrogate codepoints")]
|
||||
fn slice_between_surrogates() {
|
||||
use crate::os::windows::ffi::OsStringExt;
|
||||
|
||||
let os_string = OsString::from_wide(&[0xD800, 0xD800]);
|
||||
assert_eq!(os_string.as_encoded_bytes(), &[0xED, 0xA0, 0x80, 0xED, 0xA0, 0x80]);
|
||||
let _ = os_string.slice_encoded_bytes(..3);
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
#[test]
|
||||
fn slice_surrogate_edge() {
|
||||
use crate::os::windows::ffi::OsStringExt;
|
||||
|
||||
let surrogate = OsString::from_wide(&[0xD800]);
|
||||
let mut pre_crab = surrogate.clone();
|
||||
pre_crab.push("🦀");
|
||||
assert_eq!(pre_crab.slice_encoded_bytes(..3), surrogate);
|
||||
assert_eq!(pre_crab.slice_encoded_bytes(3..), "🦀");
|
||||
|
||||
let mut post_crab = OsString::from("🦀");
|
||||
post_crab.push(&surrogate);
|
||||
assert_eq!(post_crab.slice_encoded_bytes(..4), "🦀");
|
||||
assert_eq!(post_crab.slice_encoded_bytes(4..), surrogate);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn clone_to_uninit() {
|
||||
let a = OsStr::new("hello.txt");
|
||||
|
||||
let mut storage = vec![MaybeUninit::<u8>::uninit(); size_of_val::<OsStr>(a)];
|
||||
unsafe { a.clone_to_uninit(ptr::from_mut::<[_]>(storage.as_mut_slice()).cast()) };
|
||||
assert_eq!(a.as_encoded_bytes(), unsafe { storage.assume_init_ref() });
|
||||
|
||||
let mut b: Box<OsStr> = OsStr::new("world.exe").into();
|
||||
assert_eq!(size_of_val::<OsStr>(a), size_of_val::<OsStr>(&b));
|
||||
assert_ne!(a, &*b);
|
||||
unsafe { a.clone_to_uninit(ptr::from_mut::<OsStr>(&mut b).cast()) };
|
||||
assert_eq!(a, &*b);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn debug() {
|
||||
let s = "'single quotes'";
|
||||
assert_eq!(format!("{:?}", OsStr::new(s)), format!("{:?}", s));
|
||||
}
|
||||
3532
crates/std/src/fs.rs
3532
crates/std/src/fs.rs
File diff suppressed because it is too large
Load Diff
@@ -1,91 +0,0 @@
|
||||
//! Generic hashing support.
|
||||
//!
|
||||
//! This module provides a generic way to compute the [hash] of a value.
|
||||
//! Hashes are most commonly used with [`HashMap`] and [`HashSet`].
|
||||
//!
|
||||
//! [hash]: https://en.wikipedia.org/wiki/Hash_function
|
||||
//! [`HashMap`]: ../../std/collections/struct.HashMap.html
|
||||
//! [`HashSet`]: ../../std/collections/struct.HashSet.html
|
||||
//!
|
||||
//! The simplest way to make a type hashable is to use `#[derive(Hash)]`:
|
||||
//!
|
||||
//! # Examples
|
||||
//!
|
||||
//! ```rust
|
||||
//! use std::hash::{DefaultHasher, Hash, Hasher};
|
||||
//!
|
||||
//! #[derive(Hash)]
|
||||
//! struct Person {
|
||||
//! id: u32,
|
||||
//! name: String,
|
||||
//! phone: u64,
|
||||
//! }
|
||||
//!
|
||||
//! let person1 = Person {
|
||||
//! id: 5,
|
||||
//! name: "Janet".to_string(),
|
||||
//! phone: 555_666_7777,
|
||||
//! };
|
||||
//! let person2 = Person {
|
||||
//! id: 5,
|
||||
//! name: "Bob".to_string(),
|
||||
//! phone: 555_666_7777,
|
||||
//! };
|
||||
//!
|
||||
//! assert!(calculate_hash(&person1) != calculate_hash(&person2));
|
||||
//!
|
||||
//! fn calculate_hash<T: Hash>(t: &T) -> u64 {
|
||||
//! let mut s = DefaultHasher::new();
|
||||
//! t.hash(&mut s);
|
||||
//! s.finish()
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! If you need more control over how a value is hashed, you need to implement
|
||||
//! the [`Hash`] trait:
|
||||
//!
|
||||
//! ```rust
|
||||
//! use std::hash::{DefaultHasher, Hash, Hasher};
|
||||
//!
|
||||
//! struct Person {
|
||||
//! id: u32,
|
||||
//! # #[allow(dead_code)]
|
||||
//! name: String,
|
||||
//! phone: u64,
|
||||
//! }
|
||||
//!
|
||||
//! impl Hash for Person {
|
||||
//! fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
//! self.id.hash(state);
|
||||
//! self.phone.hash(state);
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! let person1 = Person {
|
||||
//! id: 5,
|
||||
//! name: "Janet".to_string(),
|
||||
//! phone: 555_666_7777,
|
||||
//! };
|
||||
//! let person2 = Person {
|
||||
//! id: 5,
|
||||
//! name: "Bob".to_string(),
|
||||
//! phone: 555_666_7777,
|
||||
//! };
|
||||
//!
|
||||
//! assert_eq!(calculate_hash(&person1), calculate_hash(&person2));
|
||||
//!
|
||||
//! fn calculate_hash<T: Hash>(t: &T) -> u64 {
|
||||
//! let mut s = DefaultHasher::new();
|
||||
//! t.hash(&mut s);
|
||||
//! s.finish()
|
||||
//! }
|
||||
//! ```
|
||||
#![stable(feature = "rust1", since = "1.0.0")]
|
||||
|
||||
pub(crate) mod random;
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use core::hash::*;
|
||||
|
||||
#[stable(feature = "std_hash_exports", since = "1.76.0")]
|
||||
pub use self::random::{DefaultHasher, RandomState};
|
||||
@@ -1,159 +0,0 @@
|
||||
//! This module exists to isolate [`RandomState`] and [`DefaultHasher`] outside of the
|
||||
//! [`collections`] module without actually publicly exporting them, so that parts of that
|
||||
//! implementation can more easily be moved to the [`alloc`] crate.
|
||||
//!
|
||||
//! Although its items are public and contain stability attributes, they can't actually be accessed
|
||||
//! outside this crate.
|
||||
//!
|
||||
//! [`collections`]: crate::collections
|
||||
|
||||
use super::{BuildHasher, Hasher, SipHasher13};
|
||||
use crate::cell::Cell;
|
||||
use crate::fmt;
|
||||
use crate::sys::random::hashmap_random_keys;
|
||||
|
||||
/// `RandomState` is the default state for [`HashMap`] types.
|
||||
///
|
||||
/// A particular instance `RandomState` will create the same instances of
|
||||
/// [`Hasher`], but the hashers created by two different `RandomState`
|
||||
/// instances are unlikely to produce the same result for the same values.
|
||||
///
|
||||
/// [`HashMap`]: crate::collections::HashMap
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::collections::HashMap;
|
||||
/// use std::hash::RandomState;
|
||||
///
|
||||
/// let s = RandomState::new();
|
||||
/// let mut map = HashMap::with_hasher(s);
|
||||
/// map.insert(1, 2);
|
||||
/// ```
|
||||
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
|
||||
#[derive(Clone)]
|
||||
pub struct RandomState {
|
||||
k0: u64,
|
||||
k1: u64,
|
||||
}
|
||||
|
||||
impl RandomState {
|
||||
/// Constructs a new `RandomState` that is initialized with random keys.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::hash::RandomState;
|
||||
///
|
||||
/// let s = RandomState::new();
|
||||
/// ```
|
||||
#[inline]
|
||||
#[allow(deprecated)]
|
||||
// rand
|
||||
#[must_use]
|
||||
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
|
||||
pub fn new() -> RandomState {
|
||||
// Historically this function did not cache keys from the OS and instead
|
||||
// simply always called `rand::thread_rng().gen()` twice. In #31356 it
|
||||
// was discovered, however, that because we re-seed the thread-local RNG
|
||||
// from the OS periodically that this can cause excessive slowdown when
|
||||
// many hash maps are created on a thread. To solve this performance
|
||||
// trap we cache the first set of randomly generated keys per-thread.
|
||||
//
|
||||
// Later in #36481 it was discovered that exposing a deterministic
|
||||
// iteration order allows a form of DOS attack. To counter that we
|
||||
// increment one of the seeds on every RandomState creation, giving
|
||||
// every corresponding HashMap a different iteration order.
|
||||
thread_local!(static KEYS: Cell<(u64, u64)> = {
|
||||
Cell::new(hashmap_random_keys())
|
||||
});
|
||||
|
||||
KEYS.with(|keys| {
|
||||
let (k0, k1) = keys.get();
|
||||
keys.set((k0.wrapping_add(1), k1));
|
||||
RandomState { k0, k1 }
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
|
||||
impl BuildHasher for RandomState {
|
||||
type Hasher = DefaultHasher;
|
||||
#[inline]
|
||||
fn build_hasher(&self) -> DefaultHasher {
|
||||
DefaultHasher(SipHasher13::new_with_keys(self.k0, self.k1))
|
||||
}
|
||||
}
|
||||
|
||||
/// The default [`Hasher`] used by [`RandomState`].
|
||||
///
|
||||
/// The internal algorithm is not specified, and so it and its hashes should
|
||||
/// not be relied upon over releases.
|
||||
#[derive(Clone, Debug)]
|
||||
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
|
||||
pub struct DefaultHasher(SipHasher13);
|
||||
|
||||
impl DefaultHasher {
|
||||
/// Creates a new `DefaultHasher`.
|
||||
///
|
||||
/// This hasher is not guaranteed to be the same as all other
|
||||
/// `DefaultHasher` instances, but is the same as all other `DefaultHasher`
|
||||
/// instances created through `new` or `default`.
|
||||
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
|
||||
#[inline]
|
||||
#[rustc_const_unstable(feature = "const_default", issue = "143894")]
|
||||
#[must_use]
|
||||
pub const fn new() -> DefaultHasher {
|
||||
DefaultHasher(SipHasher13::new_with_keys(0, 0))
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
|
||||
#[rustc_const_unstable(feature = "const_default", issue = "143894")]
|
||||
impl const Default for DefaultHasher {
|
||||
/// Creates a new `DefaultHasher` using [`new`].
|
||||
/// See its documentation for more.
|
||||
///
|
||||
/// [`new`]: DefaultHasher::new
|
||||
#[inline]
|
||||
fn default() -> DefaultHasher {
|
||||
DefaultHasher::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
|
||||
impl Hasher for DefaultHasher {
|
||||
// The underlying `SipHasher13` doesn't override the other
|
||||
// `write_*` methods, so it's ok not to forward them here.
|
||||
|
||||
#[inline]
|
||||
fn write(&mut self, msg: &[u8]) {
|
||||
self.0.write(msg)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_str(&mut self, s: &str) {
|
||||
self.0.write_str(s);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn finish(&self) -> u64 {
|
||||
self.0.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
|
||||
impl Default for RandomState {
|
||||
/// Constructs a new `RandomState`.
|
||||
#[inline]
|
||||
fn default() -> RandomState {
|
||||
RandomState::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
|
||||
impl fmt::Debug for RandomState {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("RandomState").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
3017
crates/std/src/io.rs
3017
crates/std/src/io.rs
File diff suppressed because it is too large
Load Diff
@@ -1,592 +0,0 @@
|
||||
mod buffer;
|
||||
|
||||
use buffer::Buffer;
|
||||
|
||||
use crate::fmt;
|
||||
use crate::io::{
|
||||
self, BorrowedCursor, BufRead, DEFAULT_BUF_SIZE, IoSliceMut, Read, Seek, SeekFrom, SizeHint,
|
||||
SpecReadByte, uninlined_slow_read_byte,
|
||||
};
|
||||
|
||||
/// The `BufReader<R>` struct adds buffering to any reader.
|
||||
///
|
||||
/// It can be excessively inefficient to work directly with a [`Read`] instance.
|
||||
/// For example, every call to [`read`][`TcpStream::read`] on [`TcpStream`]
|
||||
/// results in a system call. A `BufReader<R>` performs large, infrequent reads on
|
||||
/// the underlying [`Read`] and maintains an in-memory buffer of the results.
|
||||
///
|
||||
/// `BufReader<R>` can improve the speed of programs that make *small* and
|
||||
/// *repeated* read calls to the same file or network socket. It does not
|
||||
/// help when reading very large amounts at once, or reading just one or a few
|
||||
/// times. It also provides no advantage when reading from a source that is
|
||||
/// already in memory, like a <code>[Vec]\<u8></code>.
|
||||
///
|
||||
/// When the `BufReader<R>` is dropped, the contents of its buffer will be
|
||||
/// discarded. Creating multiple instances of a `BufReader<R>` on the same
|
||||
/// stream can cause data loss. Reading from the underlying reader after
|
||||
/// unwrapping the `BufReader<R>` with [`BufReader::into_inner`] can also cause
|
||||
/// data loss.
|
||||
///
|
||||
/// [`TcpStream::read`]: crate::net::TcpStream::read
|
||||
/// [`TcpStream`]: crate::net::TcpStream
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::prelude::*;
|
||||
/// use std::io::BufReader;
|
||||
/// use std::fs::File;
|
||||
///
|
||||
/// fn main() -> std::io::Result<()> {
|
||||
/// let f = File::open("log.txt")?;
|
||||
/// let mut reader = BufReader::new(f);
|
||||
///
|
||||
/// let mut line = String::new();
|
||||
/// let len = reader.read_line(&mut line)?;
|
||||
/// println!("First line is {len} bytes long");
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub struct BufReader<R: ?Sized> {
|
||||
buf: Buffer,
|
||||
inner: R,
|
||||
}
|
||||
|
||||
impl<R: Read> BufReader<R> {
|
||||
/// Creates a new `BufReader<R>` with a default buffer capacity. The default is currently 8 KiB,
|
||||
/// but may change in the future.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufReader;
|
||||
/// use std::fs::File;
|
||||
///
|
||||
/// fn main() -> std::io::Result<()> {
|
||||
/// let f = File::open("log.txt")?;
|
||||
/// let reader = BufReader::new(f);
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn new(inner: R) -> BufReader<R> {
|
||||
BufReader::with_capacity(DEFAULT_BUF_SIZE, inner)
|
||||
}
|
||||
|
||||
pub(crate) fn try_new_buffer() -> io::Result<Buffer> {
|
||||
Buffer::try_with_capacity(DEFAULT_BUF_SIZE)
|
||||
}
|
||||
|
||||
pub(crate) fn with_buffer(inner: R, buf: Buffer) -> Self {
|
||||
Self { inner, buf }
|
||||
}
|
||||
|
||||
/// Creates a new `BufReader<R>` with the specified buffer capacity.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// Creating a buffer with ten bytes of capacity:
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufReader;
|
||||
/// use std::fs::File;
|
||||
///
|
||||
/// fn main() -> std::io::Result<()> {
|
||||
/// let f = File::open("log.txt")?;
|
||||
/// let reader = BufReader::with_capacity(10, f);
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn with_capacity(capacity: usize, inner: R) -> BufReader<R> {
|
||||
BufReader { inner, buf: Buffer::with_capacity(capacity) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: Read + ?Sized> BufReader<R> {
|
||||
/// Attempt to look ahead `n` bytes.
|
||||
///
|
||||
/// `n` must be less than or equal to `capacity`.
|
||||
///
|
||||
/// The returned slice may be less than `n` bytes long if
|
||||
/// end of file is reached.
|
||||
///
|
||||
/// After calling this method, you may call [`consume`](BufRead::consume)
|
||||
/// with a value less than or equal to `n` to advance over some or all of
|
||||
/// the returned bytes.
|
||||
///
|
||||
/// ## Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// #![feature(bufreader_peek)]
|
||||
/// use std::io::{Read, BufReader};
|
||||
///
|
||||
/// let mut bytes = &b"oh, hello there"[..];
|
||||
/// let mut rdr = BufReader::with_capacity(6, &mut bytes);
|
||||
/// assert_eq!(rdr.peek(2).unwrap(), b"oh");
|
||||
/// let mut buf = [0; 4];
|
||||
/// rdr.read(&mut buf[..]).unwrap();
|
||||
/// assert_eq!(&buf, b"oh, ");
|
||||
/// assert_eq!(rdr.peek(5).unwrap(), b"hello");
|
||||
/// let mut s = String::new();
|
||||
/// rdr.read_to_string(&mut s).unwrap();
|
||||
/// assert_eq!(&s, "hello there");
|
||||
/// assert_eq!(rdr.peek(1).unwrap().len(), 0);
|
||||
/// ```
|
||||
#[unstable(feature = "bufreader_peek", issue = "128405")]
|
||||
pub fn peek(&mut self, n: usize) -> io::Result<&[u8]> {
|
||||
assert!(n <= self.capacity());
|
||||
while n > self.buf.buffer().len() {
|
||||
if self.buf.pos() > 0 {
|
||||
self.buf.backshift();
|
||||
}
|
||||
let new = self.buf.read_more(&mut self.inner)?;
|
||||
if new == 0 {
|
||||
// end of file, no more bytes to read
|
||||
return Ok(&self.buf.buffer()[..]);
|
||||
}
|
||||
debug_assert_eq!(self.buf.pos(), 0);
|
||||
}
|
||||
Ok(&self.buf.buffer()[..n])
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: ?Sized> BufReader<R> {
|
||||
/// Gets a reference to the underlying reader.
|
||||
///
|
||||
/// It is inadvisable to directly read from the underlying reader.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufReader;
|
||||
/// use std::fs::File;
|
||||
///
|
||||
/// fn main() -> std::io::Result<()> {
|
||||
/// let f1 = File::open("log.txt")?;
|
||||
/// let reader = BufReader::new(f1);
|
||||
///
|
||||
/// let f2 = reader.get_ref();
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn get_ref(&self) -> &R {
|
||||
&self.inner
|
||||
}
|
||||
|
||||
/// Gets a mutable reference to the underlying reader.
|
||||
///
|
||||
/// It is inadvisable to directly read from the underlying reader.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufReader;
|
||||
/// use std::fs::File;
|
||||
///
|
||||
/// fn main() -> std::io::Result<()> {
|
||||
/// let f1 = File::open("log.txt")?;
|
||||
/// let mut reader = BufReader::new(f1);
|
||||
///
|
||||
/// let f2 = reader.get_mut();
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn get_mut(&mut self) -> &mut R {
|
||||
&mut self.inner
|
||||
}
|
||||
|
||||
/// Returns a reference to the internally buffered data.
|
||||
///
|
||||
/// Unlike [`fill_buf`], this will not attempt to fill the buffer if it is empty.
|
||||
///
|
||||
/// [`fill_buf`]: BufRead::fill_buf
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::{BufReader, BufRead};
|
||||
/// use std::fs::File;
|
||||
///
|
||||
/// fn main() -> std::io::Result<()> {
|
||||
/// let f = File::open("log.txt")?;
|
||||
/// let mut reader = BufReader::new(f);
|
||||
/// assert!(reader.buffer().is_empty());
|
||||
///
|
||||
/// if reader.fill_buf()?.len() > 0 {
|
||||
/// assert!(!reader.buffer().is_empty());
|
||||
/// }
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
|
||||
#[stable(feature = "bufreader_buffer", since = "1.37.0")]
|
||||
pub fn buffer(&self) -> &[u8] {
|
||||
self.buf.buffer()
|
||||
}
|
||||
|
||||
/// Returns the number of bytes the internal buffer can hold at once.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::{BufReader, BufRead};
|
||||
/// use std::fs::File;
|
||||
///
|
||||
/// fn main() -> std::io::Result<()> {
|
||||
/// let f = File::open("log.txt")?;
|
||||
/// let mut reader = BufReader::new(f);
|
||||
///
|
||||
/// let capacity = reader.capacity();
|
||||
/// let buffer = reader.fill_buf()?;
|
||||
/// assert!(buffer.len() <= capacity);
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
|
||||
#[stable(feature = "buffered_io_capacity", since = "1.46.0")]
|
||||
pub fn capacity(&self) -> usize {
|
||||
self.buf.capacity()
|
||||
}
|
||||
|
||||
/// Unwraps this `BufReader<R>`, returning the underlying reader.
|
||||
///
|
||||
/// Note that any leftover data in the internal buffer is lost. Therefore,
|
||||
/// a following read from the underlying reader may lead to data loss.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufReader;
|
||||
/// use std::fs::File;
|
||||
///
|
||||
/// fn main() -> std::io::Result<()> {
|
||||
/// let f1 = File::open("log.txt")?;
|
||||
/// let reader = BufReader::new(f1);
|
||||
///
|
||||
/// let f2 = reader.into_inner();
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn into_inner(self) -> R
|
||||
where
|
||||
R: Sized,
|
||||
{
|
||||
self.inner
|
||||
}
|
||||
|
||||
/// Invalidates all data in the internal buffer.
|
||||
#[inline]
|
||||
pub(in crate::io) fn discard_buffer(&mut self) {
|
||||
self.buf.discard_buffer()
|
||||
}
|
||||
}
|
||||
|
||||
// This is only used by a test which asserts that the initialization-tracking is correct.
|
||||
#[cfg(test)]
|
||||
impl<R: ?Sized> BufReader<R> {
|
||||
#[allow(missing_docs)]
|
||||
pub fn initialized(&self) -> usize {
|
||||
self.buf.initialized()
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: ?Sized + Seek> BufReader<R> {
|
||||
/// Seeks relative to the current position. If the new position lies within the buffer,
|
||||
/// the buffer will not be flushed, allowing for more efficient seeks.
|
||||
/// This method does not return the location of the underlying reader, so the caller
|
||||
/// must track this information themselves if it is required.
|
||||
#[stable(feature = "bufreader_seek_relative", since = "1.53.0")]
|
||||
pub fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
|
||||
let pos = self.buf.pos() as u64;
|
||||
if offset < 0 {
|
||||
if let Some(_) = pos.checked_sub((-offset) as u64) {
|
||||
self.buf.unconsume((-offset) as usize);
|
||||
return Ok(());
|
||||
}
|
||||
} else if let Some(new_pos) = pos.checked_add(offset as u64) {
|
||||
if new_pos <= self.buf.filled() as u64 {
|
||||
self.buf.consume(offset as usize);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
self.seek(SeekFrom::Current(offset)).map(drop)
|
||||
}
|
||||
}
|
||||
|
||||
impl<R> SpecReadByte for BufReader<R>
|
||||
where
|
||||
Self: Read,
|
||||
{
|
||||
#[inline]
|
||||
fn spec_read_byte(&mut self) -> Option<io::Result<u8>> {
|
||||
let mut byte = 0;
|
||||
if self.buf.consume_with(1, |claimed| byte = claimed[0]) {
|
||||
return Some(Ok(byte));
|
||||
}
|
||||
|
||||
// Fallback case, only reached once per buffer refill.
|
||||
uninlined_slow_read_byte(self)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<R: ?Sized + Read> Read for BufReader<R> {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
// If we don't have any buffered data and we're doing a massive read
|
||||
// (larger than our internal buffer), bypass our internal buffer
|
||||
// entirely.
|
||||
if self.buf.pos() == self.buf.filled() && buf.len() >= self.capacity() {
|
||||
self.discard_buffer();
|
||||
return self.inner.read(buf);
|
||||
}
|
||||
let mut rem = self.fill_buf()?;
|
||||
let nread = rem.read(buf)?;
|
||||
self.consume(nread);
|
||||
Ok(nread)
|
||||
}
|
||||
|
||||
fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
// If we don't have any buffered data and we're doing a massive read
|
||||
// (larger than our internal buffer), bypass our internal buffer
|
||||
// entirely.
|
||||
if self.buf.pos() == self.buf.filled() && cursor.capacity() >= self.capacity() {
|
||||
self.discard_buffer();
|
||||
return self.inner.read_buf(cursor);
|
||||
}
|
||||
|
||||
let prev = cursor.written();
|
||||
|
||||
let mut rem = self.fill_buf()?;
|
||||
rem.read_buf(cursor.reborrow())?; // actually never fails
|
||||
|
||||
self.consume(cursor.written() - prev); //slice impl of read_buf known to never unfill buf
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Small read_exacts from a BufReader are extremely common when used with a deserializer.
|
||||
// The default implementation calls read in a loop, which results in surprisingly poor code
|
||||
// generation for the common path where the buffer has enough bytes to fill the passed-in
|
||||
// buffer.
|
||||
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
|
||||
if self.buf.consume_with(buf.len(), |claimed| buf.copy_from_slice(claimed)) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
crate::io::default_read_exact(self, buf)
|
||||
}
|
||||
|
||||
fn read_buf_exact(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
if self.buf.consume_with(cursor.capacity(), |claimed| cursor.append(claimed)) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
crate::io::default_read_buf_exact(self, cursor)
|
||||
}
|
||||
|
||||
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
|
||||
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
|
||||
if self.buf.pos() == self.buf.filled() && total_len >= self.capacity() {
|
||||
self.discard_buffer();
|
||||
return self.inner.read_vectored(bufs);
|
||||
}
|
||||
let mut rem = self.fill_buf()?;
|
||||
let nread = rem.read_vectored(bufs)?;
|
||||
|
||||
self.consume(nread);
|
||||
Ok(nread)
|
||||
}
|
||||
|
||||
fn is_read_vectored(&self) -> bool {
|
||||
self.inner.is_read_vectored()
|
||||
}
|
||||
|
||||
// The inner reader might have an optimized `read_to_end`. Drain our buffer and then
|
||||
// delegate to the inner implementation.
|
||||
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
let inner_buf = self.buffer();
|
||||
buf.try_reserve(inner_buf.len())?;
|
||||
buf.extend_from_slice(inner_buf);
|
||||
let nread = inner_buf.len();
|
||||
self.discard_buffer();
|
||||
Ok(nread + self.inner.read_to_end(buf)?)
|
||||
}
|
||||
|
||||
// The inner reader might have an optimized `read_to_end`. Drain our buffer and then
|
||||
// delegate to the inner implementation.
|
||||
fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
|
||||
// In the general `else` case below we must read bytes into a side buffer, check
|
||||
// that they are valid UTF-8, and then append them to `buf`. This requires a
|
||||
// potentially large memcpy.
|
||||
//
|
||||
// If `buf` is empty--the most common case--we can leverage `append_to_string`
|
||||
// to read directly into `buf`'s internal byte buffer, saving an allocation and
|
||||
// a memcpy.
|
||||
if buf.is_empty() {
|
||||
// `append_to_string`'s safety relies on the buffer only being appended to since
|
||||
// it only checks the UTF-8 validity of new data. If there were existing content in
|
||||
// `buf` then an untrustworthy reader (i.e. `self.inner`) could not only append
|
||||
// bytes but also modify existing bytes and render them invalid. On the other hand,
|
||||
// if `buf` is empty then by definition any writes must be appends and
|
||||
// `append_to_string` will validate all of the new bytes.
|
||||
unsafe { crate::io::append_to_string(buf, |b| self.read_to_end(b)) }
|
||||
} else {
|
||||
// We cannot append our byte buffer directly onto the `buf` String as there could
|
||||
// be an incomplete UTF-8 sequence that has only been partially read. We must read
|
||||
// everything into a side buffer first and then call `from_utf8` on the complete
|
||||
// buffer.
|
||||
let mut bytes = Vec::new();
|
||||
self.read_to_end(&mut bytes)?;
|
||||
let string = crate::str::from_utf8(&bytes).map_err(|_| io::Error::INVALID_UTF8)?;
|
||||
*buf += string;
|
||||
Ok(string.len())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<R: ?Sized + Read> BufRead for BufReader<R> {
|
||||
fn fill_buf(&mut self) -> io::Result<&[u8]> {
|
||||
self.buf.fill_buf(&mut self.inner)
|
||||
}
|
||||
|
||||
fn consume(&mut self, amt: usize) {
|
||||
self.buf.consume(amt)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<R> fmt::Debug for BufReader<R>
|
||||
where
|
||||
R: ?Sized + fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt.debug_struct("BufReader")
|
||||
.field("reader", &&self.inner)
|
||||
.field(
|
||||
"buffer",
|
||||
&format_args!("{}/{}", self.buf.filled() - self.buf.pos(), self.capacity()),
|
||||
)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<R: ?Sized + Seek> Seek for BufReader<R> {
|
||||
/// Seek to an offset, in bytes, in the underlying reader.
|
||||
///
|
||||
/// The position used for seeking with <code>[SeekFrom::Current]\(_)</code> is the
|
||||
/// position the underlying reader would be at if the `BufReader<R>` had no
|
||||
/// internal buffer.
|
||||
///
|
||||
/// Seeking always discards the internal buffer, even if the seek position
|
||||
/// would otherwise fall within it. This guarantees that calling
|
||||
/// [`BufReader::into_inner()`] immediately after a seek yields the underlying reader
|
||||
/// at the same position.
|
||||
///
|
||||
/// To seek without discarding the internal buffer, use [`BufReader::seek_relative`].
|
||||
///
|
||||
/// See [`std::io::Seek`] for more details.
|
||||
///
|
||||
/// Note: In the edge case where you're seeking with <code>[SeekFrom::Current]\(n)</code>
|
||||
/// where `n` minus the internal buffer length overflows an `i64`, two
|
||||
/// seeks will be performed instead of one. If the second seek returns
|
||||
/// [`Err`], the underlying reader will be left at the same position it would
|
||||
/// have if you called `seek` with <code>[SeekFrom::Current]\(0)</code>.
|
||||
///
|
||||
/// [`std::io::Seek`]: Seek
|
||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||
let result: u64;
|
||||
if let SeekFrom::Current(n) = pos {
|
||||
let remainder = (self.buf.filled() - self.buf.pos()) as i64;
|
||||
// it should be safe to assume that remainder fits within an i64 as the alternative
|
||||
// means we managed to allocate 8 exbibytes and that's absurd.
|
||||
// But it's not out of the realm of possibility for some weird underlying reader to
|
||||
// support seeking by i64::MIN so we need to handle underflow when subtracting
|
||||
// remainder.
|
||||
if let Some(offset) = n.checked_sub(remainder) {
|
||||
result = self.inner.seek(SeekFrom::Current(offset))?;
|
||||
} else {
|
||||
// seek backwards by our remainder, and then by the offset
|
||||
self.inner.seek(SeekFrom::Current(-remainder))?;
|
||||
self.discard_buffer();
|
||||
result = self.inner.seek(SeekFrom::Current(n))?;
|
||||
}
|
||||
} else {
|
||||
// Seeking with Start/End doesn't care about our buffer length.
|
||||
result = self.inner.seek(pos)?;
|
||||
}
|
||||
self.discard_buffer();
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Returns the current seek position from the start of the stream.
|
||||
///
|
||||
/// The value returned is equivalent to `self.seek(SeekFrom::Current(0))`
|
||||
/// but does not flush the internal buffer. Due to this optimization the
|
||||
/// function does not guarantee that calling `.into_inner()` immediately
|
||||
/// afterwards will yield the underlying reader at the same position. Use
|
||||
/// [`BufReader::seek`] instead if you require that guarantee.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if the position of the inner reader is smaller
|
||||
/// than the amount of buffered data. That can happen if the inner reader
|
||||
/// has an incorrect implementation of [`Seek::stream_position`], or if the
|
||||
/// position has gone out of sync due to calling [`Seek::seek`] directly on
|
||||
/// the underlying reader.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::{
|
||||
/// io::{self, BufRead, BufReader, Seek},
|
||||
/// fs::File,
|
||||
/// };
|
||||
///
|
||||
/// fn main() -> io::Result<()> {
|
||||
/// let mut f = BufReader::new(File::open("foo.txt")?);
|
||||
///
|
||||
/// let before = f.stream_position()?;
|
||||
/// f.read_line(&mut String::new())?;
|
||||
/// let after = f.stream_position()?;
|
||||
///
|
||||
/// println!("The first line was {} bytes long", after - before);
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
|
||||
fn stream_position(&mut self) -> io::Result<u64> {
|
||||
let remainder = (self.buf.filled() - self.buf.pos()) as u64;
|
||||
self.inner.stream_position().map(|pos| {
|
||||
pos.checked_sub(remainder).expect(
|
||||
"overflow when subtracting remaining buffer size from inner stream position",
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/// Seeks relative to the current position.
|
||||
///
|
||||
/// If the new position lies within the buffer, the buffer will not be
|
||||
/// flushed, allowing for more efficient seeks. This method does not return
|
||||
/// the location of the underlying reader, so the caller must track this
|
||||
/// information themselves if it is required.
|
||||
fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
|
||||
self.seek_relative(offset)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ?Sized> SizeHint for BufReader<T> {
|
||||
#[inline]
|
||||
fn lower_bound(&self) -> usize {
|
||||
SizeHint::lower_bound(self.get_ref()) + self.buffer().len()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn upper_bound(&self) -> Option<usize> {
|
||||
SizeHint::upper_bound(self.get_ref()).and_then(|up| self.buffer().len().checked_add(up))
|
||||
}
|
||||
}
|
||||
@@ -1,155 +0,0 @@
|
||||
//! An encapsulation of `BufReader`'s buffer management logic.
|
||||
//!
|
||||
//! This module factors out the basic functionality of `BufReader` in order to protect two core
|
||||
//! invariants:
|
||||
//! * `filled` bytes of `buf` are always initialized
|
||||
//! * `pos` is always <= `filled`
|
||||
//! Since this module encapsulates the buffer management logic, we can ensure that the range
|
||||
//! `pos..filled` is always a valid index into the initialized region of the buffer. This means
|
||||
//! that user code which wants to do reads from a `BufReader` via `buffer` + `consume` can do so
|
||||
//! without encountering any runtime bounds checks.
|
||||
|
||||
use crate::cmp;
|
||||
use crate::io::{self, BorrowedBuf, ErrorKind, Read};
|
||||
use crate::mem::MaybeUninit;
|
||||
|
||||
pub struct Buffer {
|
||||
// The buffer.
|
||||
buf: Box<[MaybeUninit<u8>]>,
|
||||
// The current seek offset into `buf`, must always be <= `filled`.
|
||||
pos: usize,
|
||||
// Each call to `fill_buf` sets `filled` to indicate how many bytes at the start of `buf` are
|
||||
// initialized with bytes from a read.
|
||||
filled: usize,
|
||||
// This is the max number of bytes returned across all `fill_buf` calls. We track this so that we
|
||||
// can accurately tell `read_buf` how many bytes of buf are initialized, to bypass as much of its
|
||||
// defensive initialization as possible. Note that while this often the same as `filled`, it
|
||||
// doesn't need to be. Calls to `fill_buf` are not required to actually fill the buffer, and
|
||||
// omitting this is a huge perf regression for `Read` impls that do not.
|
||||
initialized: usize,
|
||||
}
|
||||
|
||||
impl Buffer {
|
||||
#[inline]
|
||||
pub fn with_capacity(capacity: usize) -> Self {
|
||||
let buf = Box::new_uninit_slice(capacity);
|
||||
Self { buf, pos: 0, filled: 0, initialized: 0 }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn try_with_capacity(capacity: usize) -> io::Result<Self> {
|
||||
match Box::try_new_uninit_slice(capacity) {
|
||||
Ok(buf) => Ok(Self { buf, pos: 0, filled: 0, initialized: 0 }),
|
||||
Err(_) => {
|
||||
Err(io::const_error!(ErrorKind::OutOfMemory, "failed to allocate read buffer"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn buffer(&self) -> &[u8] {
|
||||
// SAFETY: self.pos and self.filled are valid, and self.filled >= self.pos, and
|
||||
// that region is initialized because those are all invariants of this type.
|
||||
unsafe { self.buf.get_unchecked(self.pos..self.filled).assume_init_ref() }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn capacity(&self) -> usize {
|
||||
self.buf.len()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn filled(&self) -> usize {
|
||||
self.filled
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn pos(&self) -> usize {
|
||||
self.pos
|
||||
}
|
||||
|
||||
// This is only used by a test which asserts that the initialization-tracking is correct.
|
||||
#[cfg(test)]
|
||||
pub fn initialized(&self) -> usize {
|
||||
self.initialized
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn discard_buffer(&mut self) {
|
||||
self.pos = 0;
|
||||
self.filled = 0;
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn consume(&mut self, amt: usize) {
|
||||
self.pos = cmp::min(self.pos + amt, self.filled);
|
||||
}
|
||||
|
||||
/// If there are `amt` bytes available in the buffer, pass a slice containing those bytes to
|
||||
/// `visitor` and return true. If there are not enough bytes available, return false.
|
||||
#[inline]
|
||||
pub fn consume_with<V>(&mut self, amt: usize, mut visitor: V) -> bool
|
||||
where
|
||||
V: FnMut(&[u8]),
|
||||
{
|
||||
if let Some(claimed) = self.buffer().get(..amt) {
|
||||
visitor(claimed);
|
||||
// If the indexing into self.buffer() succeeds, amt must be a valid increment.
|
||||
self.pos += amt;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn unconsume(&mut self, amt: usize) {
|
||||
self.pos = self.pos.saturating_sub(amt);
|
||||
}
|
||||
|
||||
/// Read more bytes into the buffer without discarding any of its contents
|
||||
pub fn read_more(&mut self, mut reader: impl Read) -> io::Result<usize> {
|
||||
let mut buf = BorrowedBuf::from(&mut self.buf[self.filled..]);
|
||||
let old_init = self.initialized - self.filled;
|
||||
unsafe {
|
||||
buf.set_init(old_init);
|
||||
}
|
||||
reader.read_buf(buf.unfilled())?;
|
||||
self.filled += buf.len();
|
||||
self.initialized += buf.init_len() - old_init;
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
/// Remove bytes that have already been read from the buffer.
|
||||
pub fn backshift(&mut self) {
|
||||
self.buf.copy_within(self.pos..self.filled, 0);
|
||||
self.filled -= self.pos;
|
||||
self.pos = 0;
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn fill_buf(&mut self, mut reader: impl Read) -> io::Result<&[u8]> {
|
||||
// If we've reached the end of our internal buffer then we need to fetch
|
||||
// some more data from the reader.
|
||||
// Branch using `>=` instead of the more correct `==`
|
||||
// to tell the compiler that the pos..cap slice is always valid.
|
||||
if self.pos >= self.filled {
|
||||
debug_assert!(self.pos == self.filled);
|
||||
|
||||
let mut buf = BorrowedBuf::from(&mut *self.buf);
|
||||
// SAFETY: `self.filled` bytes will always have been initialized.
|
||||
unsafe {
|
||||
buf.set_init(self.initialized);
|
||||
}
|
||||
|
||||
let result = reader.read_buf(buf.unfilled());
|
||||
|
||||
self.pos = 0;
|
||||
self.filled = buf.len();
|
||||
self.initialized = buf.init_len();
|
||||
|
||||
result?;
|
||||
}
|
||||
Ok(self.buffer())
|
||||
}
|
||||
}
|
||||
@@ -1,680 +0,0 @@
|
||||
use crate::io::{
|
||||
self, DEFAULT_BUF_SIZE, ErrorKind, IntoInnerError, IoSlice, Seek, SeekFrom, Write,
|
||||
};
|
||||
use crate::mem::{self, ManuallyDrop};
|
||||
use crate::{error, fmt, ptr};
|
||||
|
||||
/// Wraps a writer and buffers its output.
|
||||
///
|
||||
/// It can be excessively inefficient to work directly with something that
|
||||
/// implements [`Write`]. For example, every call to
|
||||
/// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A
|
||||
/// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying
|
||||
/// writer in large, infrequent batches.
|
||||
///
|
||||
/// `BufWriter<W>` can improve the speed of programs that make *small* and
|
||||
/// *repeated* write calls to the same file or network socket. It does not
|
||||
/// help when writing very large amounts at once, or writing just one or a few
|
||||
/// times. It also provides no advantage when writing to a destination that is
|
||||
/// in memory, like a <code>[Vec]\<u8></code>.
|
||||
///
|
||||
/// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
|
||||
/// dropping will attempt to flush the contents of the buffer, any errors
|
||||
/// that happen in the process of dropping will be ignored. Calling [`flush`]
|
||||
/// ensures that the buffer is empty and thus dropping will not even attempt
|
||||
/// file operations.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// Let's write the numbers one through ten to a [`TcpStream`]:
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::prelude::*;
|
||||
/// use std::net::TcpStream;
|
||||
///
|
||||
/// let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap();
|
||||
///
|
||||
/// for i in 0..10 {
|
||||
/// stream.write(&[i+1]).unwrap();
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// Because we're not buffering, we write each one in turn, incurring the
|
||||
/// overhead of a system call per byte written. We can fix this with a
|
||||
/// `BufWriter<W>`:
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::prelude::*;
|
||||
/// use std::io::BufWriter;
|
||||
/// use std::net::TcpStream;
|
||||
///
|
||||
/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
|
||||
///
|
||||
/// for i in 0..10 {
|
||||
/// stream.write(&[i+1]).unwrap();
|
||||
/// }
|
||||
/// stream.flush().unwrap();
|
||||
/// ```
|
||||
///
|
||||
/// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped
|
||||
/// together by the buffer and will all be written out in one system call when
|
||||
/// the `stream` is flushed.
|
||||
///
|
||||
/// [`TcpStream::write`]: crate::net::TcpStream::write
|
||||
/// [`TcpStream`]: crate::net::TcpStream
|
||||
/// [`flush`]: BufWriter::flush
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub struct BufWriter<W: ?Sized + Write> {
|
||||
// The buffer. Avoid using this like a normal `Vec` in common code paths.
|
||||
// That is, don't use `buf.push`, `buf.extend_from_slice`, or any other
|
||||
// methods that require bounds checking or the like. This makes an enormous
|
||||
// difference to performance (we may want to stop using a `Vec` entirely).
|
||||
buf: Vec<u8>,
|
||||
// #30888: If the inner writer panics in a call to write, we don't want to
|
||||
// write the buffered data a second time in BufWriter's destructor. This
|
||||
// flag tells the Drop impl if it should skip the flush.
|
||||
panicked: bool,
|
||||
inner: W,
|
||||
}
|
||||
|
||||
impl<W: Write> BufWriter<W> {
|
||||
/// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KiB,
|
||||
/// but may change in the future.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufWriter;
|
||||
/// use std::net::TcpStream;
|
||||
///
|
||||
/// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn new(inner: W) -> BufWriter<W> {
|
||||
BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
|
||||
}
|
||||
|
||||
pub(crate) fn try_new_buffer() -> io::Result<Vec<u8>> {
|
||||
Vec::try_with_capacity(DEFAULT_BUF_SIZE).map_err(|_| {
|
||||
io::const_error!(ErrorKind::OutOfMemory, "failed to allocate write buffer")
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn with_buffer(inner: W, buf: Vec<u8>) -> Self {
|
||||
Self { inner, buf, panicked: false }
|
||||
}
|
||||
|
||||
/// Creates a new `BufWriter<W>` with at least the specified buffer capacity.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// Creating a buffer with a buffer of at least a hundred bytes.
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufWriter;
|
||||
/// use std::net::TcpStream;
|
||||
///
|
||||
/// let stream = TcpStream::connect("127.0.0.1:34254").unwrap();
|
||||
/// let mut buffer = BufWriter::with_capacity(100, stream);
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
|
||||
BufWriter { inner, buf: Vec::with_capacity(capacity), panicked: false }
|
||||
}
|
||||
|
||||
/// Unwraps this `BufWriter<W>`, returning the underlying writer.
|
||||
///
|
||||
/// The buffer is written out before returning the writer.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// An [`Err`] will be returned if an error occurs while flushing the buffer.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufWriter;
|
||||
/// use std::net::TcpStream;
|
||||
///
|
||||
/// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
|
||||
///
|
||||
/// // unwrap the TcpStream and flush the buffer
|
||||
/// let stream = buffer.into_inner().unwrap();
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
|
||||
match self.flush_buf() {
|
||||
Err(e) => Err(IntoInnerError::new(self, e)),
|
||||
Ok(()) => Ok(self.into_parts().0),
|
||||
}
|
||||
}
|
||||
|
||||
/// Disassembles this `BufWriter<W>`, returning the underlying writer, and any buffered but
|
||||
/// unwritten data.
|
||||
///
|
||||
/// If the underlying writer panicked, it is not known what portion of the data was written.
|
||||
/// In this case, we return `WriterPanicked` for the buffered data (from which the buffer
|
||||
/// contents can still be recovered).
|
||||
///
|
||||
/// `into_parts` makes no attempt to flush data and cannot fail.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::io::{BufWriter, Write};
|
||||
///
|
||||
/// let mut buffer = [0u8; 10];
|
||||
/// let mut stream = BufWriter::new(buffer.as_mut());
|
||||
/// write!(stream, "too much data").unwrap();
|
||||
/// stream.flush().expect_err("it doesn't fit");
|
||||
/// let (recovered_writer, buffered_data) = stream.into_parts();
|
||||
/// assert_eq!(recovered_writer.len(), 0);
|
||||
/// assert_eq!(&buffered_data.unwrap(), b"ata");
|
||||
/// ```
|
||||
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
|
||||
pub fn into_parts(self) -> (W, Result<Vec<u8>, WriterPanicked>) {
|
||||
let mut this = ManuallyDrop::new(self);
|
||||
let buf = mem::take(&mut this.buf);
|
||||
let buf = if !this.panicked { Ok(buf) } else { Err(WriterPanicked { buf }) };
|
||||
|
||||
// SAFETY: double-drops are prevented by putting `this` in a ManuallyDrop that is never dropped
|
||||
let inner = unsafe { ptr::read(&this.inner) };
|
||||
|
||||
(inner, buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<W: ?Sized + Write> BufWriter<W> {
|
||||
/// Send data in our local buffer into the inner writer, looping as
|
||||
/// necessary until either it's all been sent or an error occurs.
|
||||
///
|
||||
/// Because all the data in the buffer has been reported to our owner as
|
||||
/// "successfully written" (by returning nonzero success values from
|
||||
/// `write`), any 0-length writes from `inner` must be reported as i/o
|
||||
/// errors from this method.
|
||||
pub(in crate::io) fn flush_buf(&mut self) -> io::Result<()> {
|
||||
/// Helper struct to ensure the buffer is updated after all the writes
|
||||
/// are complete. It tracks the number of written bytes and drains them
|
||||
/// all from the front of the buffer when dropped.
|
||||
struct BufGuard<'a> {
|
||||
buffer: &'a mut Vec<u8>,
|
||||
written: usize,
|
||||
}
|
||||
|
||||
impl<'a> BufGuard<'a> {
|
||||
fn new(buffer: &'a mut Vec<u8>) -> Self {
|
||||
Self { buffer, written: 0 }
|
||||
}
|
||||
|
||||
/// The unwritten part of the buffer
|
||||
fn remaining(&self) -> &[u8] {
|
||||
&self.buffer[self.written..]
|
||||
}
|
||||
|
||||
/// Flag some bytes as removed from the front of the buffer
|
||||
fn consume(&mut self, amt: usize) {
|
||||
self.written += amt;
|
||||
}
|
||||
|
||||
/// true if all of the bytes have been written
|
||||
fn done(&self) -> bool {
|
||||
self.written >= self.buffer.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for BufGuard<'_> {
|
||||
fn drop(&mut self) {
|
||||
if self.written > 0 {
|
||||
self.buffer.drain(..self.written);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut guard = BufGuard::new(&mut self.buf);
|
||||
while !guard.done() {
|
||||
self.panicked = true;
|
||||
let r = self.inner.write(guard.remaining());
|
||||
self.panicked = false;
|
||||
|
||||
match r {
|
||||
Ok(0) => {
|
||||
return Err(io::const_error!(
|
||||
ErrorKind::WriteZero,
|
||||
"failed to write the buffered data",
|
||||
));
|
||||
}
|
||||
Ok(n) => guard.consume(n),
|
||||
Err(ref e) if e.is_interrupted() => {}
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Buffer some data without flushing it, regardless of the size of the
|
||||
/// data. Writes as much as possible without exceeding capacity. Returns
|
||||
/// the number of bytes written.
|
||||
pub(super) fn write_to_buf(&mut self, buf: &[u8]) -> usize {
|
||||
let available = self.spare_capacity();
|
||||
let amt_to_buffer = available.min(buf.len());
|
||||
|
||||
// SAFETY: `amt_to_buffer` is <= buffer's spare capacity by construction.
|
||||
unsafe {
|
||||
self.write_to_buffer_unchecked(&buf[..amt_to_buffer]);
|
||||
}
|
||||
|
||||
amt_to_buffer
|
||||
}
|
||||
|
||||
/// Gets a reference to the underlying writer.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufWriter;
|
||||
/// use std::net::TcpStream;
|
||||
///
|
||||
/// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
|
||||
///
|
||||
/// // we can use reference just like buffer
|
||||
/// let reference = buffer.get_ref();
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn get_ref(&self) -> &W {
|
||||
&self.inner
|
||||
}
|
||||
|
||||
/// Gets a mutable reference to the underlying writer.
|
||||
///
|
||||
/// It is inadvisable to directly write to the underlying writer.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufWriter;
|
||||
/// use std::net::TcpStream;
|
||||
///
|
||||
/// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
|
||||
///
|
||||
/// // we can use reference just like buffer
|
||||
/// let reference = buffer.get_mut();
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn get_mut(&mut self) -> &mut W {
|
||||
&mut self.inner
|
||||
}
|
||||
|
||||
/// Returns a reference to the internally buffered data.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufWriter;
|
||||
/// use std::net::TcpStream;
|
||||
///
|
||||
/// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
|
||||
///
|
||||
/// // See how many bytes are currently buffered
|
||||
/// let bytes_buffered = buf_writer.buffer().len();
|
||||
/// ```
|
||||
#[stable(feature = "bufreader_buffer", since = "1.37.0")]
|
||||
pub fn buffer(&self) -> &[u8] {
|
||||
&self.buf
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the internal buffer.
|
||||
///
|
||||
/// This can be used to write data directly into the buffer without triggering writers
|
||||
/// to the underlying writer.
|
||||
///
|
||||
/// That the buffer is a `Vec` is an implementation detail.
|
||||
/// Callers should not modify the capacity as there currently is no public API to do so
|
||||
/// and thus any capacity changes would be unexpected by the user.
|
||||
pub(in crate::io) fn buffer_mut(&mut self) -> &mut Vec<u8> {
|
||||
&mut self.buf
|
||||
}
|
||||
|
||||
/// Returns the number of bytes the internal buffer can hold without flushing.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufWriter;
|
||||
/// use std::net::TcpStream;
|
||||
///
|
||||
/// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
|
||||
///
|
||||
/// // Check the capacity of the inner buffer
|
||||
/// let capacity = buf_writer.capacity();
|
||||
/// // Calculate how many bytes can be written without flushing
|
||||
/// let without_flush = capacity - buf_writer.buffer().len();
|
||||
/// ```
|
||||
#[stable(feature = "buffered_io_capacity", since = "1.46.0")]
|
||||
pub fn capacity(&self) -> usize {
|
||||
self.buf.capacity()
|
||||
}
|
||||
|
||||
// Ensure this function does not get inlined into `write`, so that it
// remains inlineable and its common path remains as short as possible.
// If this function ends up being called frequently relative to `write`,
// it's likely a sign that the client is using an improperly sized buffer
// or their write patterns are somewhat pathological.
#[cold]
#[inline(never)]
fn write_cold(&mut self, buf: &[u8]) -> io::Result<usize> {
    // Make room first; after a flush the entire capacity is spare.
    if buf.len() > self.spare_capacity() {
        self.flush_buf()?;
    }

    // Why not len > capacity? To avoid a needless trip through the buffer when the input
    // exactly fills it. We'd just need to flush it to the underlying writer anyway.
    if buf.len() >= self.buf.capacity() {
        // `panicked` guards `Drop` from re-flushing buffered data whose state is
        // unknown if the inner writer panics during this call.
        self.panicked = true;
        let r = self.get_mut().write(buf);
        self.panicked = false;
        r
    } else {
        // Write to the buffer. In this case, we write to the buffer even if it fills it
        // exactly. Doing otherwise would mean flushing the buffer, then writing this
        // input to the inner writer, which in many cases would be a worse strategy.

        // SAFETY: There was either enough spare capacity already, or there wasn't and we
        // flushed the buffer to ensure that there is. In the latter case, we know that there
        // is because flushing ensured that our entire buffer is spare capacity, and we entered
        // this block because the input buffer length is less than that capacity. In either
        // case, it's safe to write the input buffer to our buffer.
        unsafe {
            self.write_to_buffer_unchecked(buf);
        }

        Ok(buf.len())
    }
}
|
||||
|
||||
// Ensure this function does not get inlined into `write_all`, so that it
// remains inlineable and its common path remains as short as possible.
// If this function ends up being called frequently relative to `write_all`,
// it's likely a sign that the client is using an improperly sized buffer
// or their write patterns are somewhat pathological.
#[cold]
#[inline(never)]
fn write_all_cold(&mut self, buf: &[u8]) -> io::Result<()> {
    // Normally, `write_all` just calls `write` in a loop. We can do better
    // by calling `self.get_mut().write_all()` directly, which avoids
    // round trips through the buffer in the event of a series of partial
    // writes in some circumstances.

    if buf.len() > self.spare_capacity() {
        self.flush_buf()?;
    }

    // Why not len > capacity? To avoid a needless trip through the buffer when the input
    // exactly fills it. We'd just need to flush it to the underlying writer anyway.
    if buf.len() >= self.buf.capacity() {
        // `panicked` guards `Drop` from re-flushing buffered data whose state is
        // unknown if the inner writer panics during this call.
        self.panicked = true;
        let r = self.get_mut().write_all(buf);
        self.panicked = false;
        r
    } else {
        // Write to the buffer. In this case, we write to the buffer even if it fills it
        // exactly. Doing otherwise would mean flushing the buffer, then writing this
        // input to the inner writer, which in many cases would be a worse strategy.

        // SAFETY: There was either enough spare capacity already, or there wasn't and we
        // flushed the buffer to ensure that there is. In the latter case, we know that there
        // is because flushing ensured that our entire buffer is spare capacity, and we entered
        // this block because the input buffer length is less than that capacity. In either
        // case, it's safe to write the input buffer to our buffer.
        unsafe {
            self.write_to_buffer_unchecked(buf);
        }

        Ok(())
    }
}
|
||||
|
||||
// SAFETY: Requires `buf.len() <= self.buf.capacity() - self.buf.len()`,
// i.e., that input buffer length is less than or equal to spare capacity.
#[inline]
unsafe fn write_to_buffer_unchecked(&mut self, buf: &[u8]) {
    debug_assert!(buf.len() <= self.spare_capacity());
    let old_len = self.buf.len();
    let buf_len = buf.len();
    let src = buf.as_ptr();
    unsafe {
        // SAFETY: per the caller contract above, `old_len + buf_len` does not
        // exceed the Vec's capacity, so `dst..dst + buf_len` lies inside the
        // allocation; `src` and `dst` cannot overlap because `buf` is borrowed
        // from outside `self` while `self` is exclusively borrowed; and the
        // length passed to `set_len` covers only bytes just initialized.
        let dst = self.buf.as_mut_ptr().add(old_len);
        ptr::copy_nonoverlapping(src, dst, buf_len);
        self.buf.set_len(old_len + buf_len);
    }
}
|
||||
|
||||
/// Number of bytes that can still be appended to the buffer before it is full.
#[inline]
fn spare_capacity(&self) -> usize {
    self.buf.capacity() - self.buf.len()
}
|
||||
}
|
||||
|
||||
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
/// Error returned for the buffered data from `BufWriter::into_parts`, when the underlying
/// writer has previously panicked. Contains the (possibly partly written) buffered data.
///
/// # Example
///
/// ```
/// use std::io::{self, BufWriter, Write};
/// use std::panic::{catch_unwind, AssertUnwindSafe};
///
/// struct PanickingWriter;
/// impl Write for PanickingWriter {
///     fn write(&mut self, buf: &[u8]) -> io::Result<usize> { panic!() }
///     fn flush(&mut self) -> io::Result<()> { panic!() }
/// }
///
/// let mut stream = BufWriter::new(PanickingWriter);
/// write!(stream, "some data").unwrap();
/// let result = catch_unwind(AssertUnwindSafe(|| {
///     stream.flush().unwrap()
/// }));
/// assert!(result.is_err());
/// let (recovered_writer, buffered_data) = stream.into_parts();
/// assert!(matches!(recovered_writer, PanickingWriter));
/// assert_eq!(buffered_data.unwrap_err().into_inner(), b"some data");
/// ```
pub struct WriterPanicked {
    // Bytes that sat in the BufWriter's buffer when the panic occurred; an
    // unknown prefix of them may already have reached the underlying writer.
    buf: Vec<u8>,
}
|
||||
|
||||
impl WriterPanicked {
    /// Returns the perhaps-unwritten data. Some of this data may have been written by the
    /// panicking call(s) to the underlying writer, so simply writing it again is not a good idea.
    #[must_use = "`self` will be dropped if the result is not used"]
    #[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
    pub fn into_inner(self) -> Vec<u8> {
        self.buf
    }
}
|
||||
|
||||
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
// No underlying source error to expose, so the default `Error` methods suffice.
impl error::Error for WriterPanicked {}
|
||||
|
||||
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
|
||||
impl fmt::Display for WriterPanicked {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
"BufWriter inner writer panicked, what data remains unwritten is not known".fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
|
||||
impl fmt::Debug for WriterPanicked {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("WriterPanicked")
|
||||
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: ?Sized + Write> Write for BufWriter<W> {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Use < instead of <= to avoid a needless trip through the buffer in some cases.
        // See `write_cold` for details.
        if buf.len() < self.spare_capacity() {
            // SAFETY: safe by above conditional.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(buf.len())
        } else {
            self.write_cold(buf)
        }
    }

    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        // Use < instead of <= to avoid a needless trip through the buffer in some cases.
        // See `write_all_cold` for details.
        if buf.len() < self.spare_capacity() {
            // SAFETY: safe by above conditional.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(())
        } else {
            self.write_all_cold(buf)
        }
    }

    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        // FIXME: Consider applying `#[inline]` / `#[inline(never)]` optimizations already applied
        // to `write` and `write_all`. The performance benefits can be significant. See #79930.
        if self.get_ref().is_write_vectored() {
            // We have to handle the possibility that the total length of the buffers overflows
            // `usize` (even though this can only happen if multiple `IoSlice`s reference the
            // same underlying buffer, as otherwise the buffers wouldn't fit in memory). If the
            // computation overflows, then surely the input cannot fit in our buffer, so we forward
            // to the inner writer's `write_vectored` method to let it handle it appropriately.
            let mut saturated_total_len: usize = 0;

            for buf in bufs {
                saturated_total_len = saturated_total_len.saturating_add(buf.len());

                if saturated_total_len > self.spare_capacity() && !self.buf.is_empty() {
                    // Flush if the total length of the input exceeds our buffer's spare capacity.
                    // If we would have overflowed, this condition also holds, and we need to flush.
                    self.flush_buf()?;
                }

                if saturated_total_len >= self.buf.capacity() {
                    // Forward to our inner writer if the total length of the input is greater than or
                    // equal to our buffer capacity. If we would have overflowed, this condition also
                    // holds, and we punt to the inner writer.
                    self.panicked = true;
                    let r = self.get_mut().write_vectored(bufs);
                    self.panicked = false;
                    return r;
                }
            }

            // `saturated_total_len < self.buf.capacity()` implies that we did not saturate.

            // SAFETY: We checked whether or not the spare capacity was large enough above. If
            // it was, then we're safe already. If it wasn't, we flushed, making sufficient
            // room for any input <= the buffer size, which includes this input.
            unsafe {
                bufs.iter().for_each(|b| self.write_to_buffer_unchecked(b));
            };

            Ok(saturated_total_len)
        } else {
            let mut iter = bufs.iter();
            let mut total_written = if let Some(buf) = iter.by_ref().find(|&buf| !buf.is_empty()) {
                // This is the first non-empty slice to write, so if it does
                // not fit in the buffer, we still get to flush and proceed.
                if buf.len() > self.spare_capacity() {
                    self.flush_buf()?;
                }
                if buf.len() >= self.buf.capacity() {
                    // The slice is at least as large as the buffering capacity,
                    // so it's better to write it directly, bypassing the buffer.
                    self.panicked = true;
                    let r = self.get_mut().write(buf);
                    self.panicked = false;
                    return r;
                } else {
                    // SAFETY: We checked whether or not the spare capacity was large enough above.
                    // If it was, then we're safe already. If it wasn't, we flushed, making
                    // sufficient room for any input <= the buffer size, which includes this input.
                    unsafe {
                        self.write_to_buffer_unchecked(buf);
                    }

                    buf.len()
                }
            } else {
                return Ok(0);
            };
            debug_assert!(total_written != 0);
            for buf in iter {
                if buf.len() <= self.spare_capacity() {
                    // SAFETY: safe by above conditional.
                    unsafe {
                        self.write_to_buffer_unchecked(buf);
                    }

                    // This cannot overflow `usize`. If we are here, we've written all of the bytes
                    // so far to our buffer, and we've ensured that we never exceed the buffer's
                    // capacity. Therefore, `total_written` <= `self.buf.capacity()` <= `usize::MAX`.
                    total_written += buf.len();
                } else {
                    break;
                }
            }
            Ok(total_written)
        }
    }

    fn is_write_vectored(&self) -> bool {
        // BufWriter supplies its own vectored implementation above, which handles
        // both vectored and non-vectored inner writers, so always advertise support.
        true
    }

    fn flush(&mut self) -> io::Result<()> {
        // Push our buffered bytes to the inner writer, then let it flush itself.
        self.flush_buf().and_then(|()| self.get_mut().flush())
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<W: ?Sized + Write> fmt::Debug for BufWriter<W>
|
||||
where
|
||||
W: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt.debug_struct("BufWriter")
|
||||
.field("writer", &&self.inner)
|
||||
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<W: ?Sized + Write + Seek> Seek for BufWriter<W> {
|
||||
/// Seek to the offset, in bytes, in the underlying writer.
|
||||
///
|
||||
/// Seeking always writes out the internal buffer before seeking.
|
||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||
self.flush_buf()?;
|
||||
self.get_mut().seek(pos)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: ?Sized + Write> Drop for BufWriter<W> {
    fn drop(&mut self) {
        // If the inner writer panicked during an earlier write, the buffer's
        // contents are in an unknown state (`panicked` is set around every call
        // into the inner writer), so do not attempt to write them again.
        if !self.panicked {
            // dtors should not panic, so we ignore a failed flush
            let _r = self.flush_buf();
        }
    }
}
|
||||
@@ -1,235 +0,0 @@
|
||||
use crate::fmt;
|
||||
use crate::io::buffered::LineWriterShim;
|
||||
use crate::io::{self, BufWriter, IntoInnerError, IoSlice, Write};
|
||||
|
||||
/// Wraps a writer and buffers output to it, flushing whenever a newline
/// (`0x0a`, `'\n'`) is detected.
///
/// The [`BufWriter`] struct wraps a writer and buffers its output.
/// But it only does this batched write when it goes out of scope, or when the
/// internal buffer is full. Sometimes, you'd prefer to write each line as it's
/// completed, rather than the entire buffer at once. Enter `LineWriter`. It
/// does exactly that.
///
/// Like [`BufWriter`], a `LineWriter`’s buffer will also be flushed when the
/// `LineWriter` goes out of scope or when its internal buffer is full.
///
/// If there's still a partial line in the buffer when the `LineWriter` is
/// dropped, it will flush those contents.
///
/// # Examples
///
/// We can use `LineWriter` to write one line at a time, significantly
/// reducing the number of actual writes to the file.
///
/// ```no_run
/// use std::fs::{self, File};
/// use std::io::prelude::*;
/// use std::io::LineWriter;
///
/// fn main() -> std::io::Result<()> {
///     let road_not_taken = b"I shall be telling this with a sigh
/// Somewhere ages and ages hence:
/// Two roads diverged in a wood, and I -
/// I took the one less traveled by,
/// And that has made all the difference.";
///
///     let file = File::create("poem.txt")?;
///     let mut file = LineWriter::new(file);
///
///     file.write_all(b"I shall be telling this with a sigh")?;
///
///     // No bytes are written until a newline is encountered (or
///     // the internal buffer is filled).
///     assert_eq!(fs::read_to_string("poem.txt")?, "");
///     file.write_all(b"\n")?;
///     assert_eq!(
///         fs::read_to_string("poem.txt")?,
///         "I shall be telling this with a sigh\n",
///     );
///
///     // Write the rest of the poem.
///     file.write_all(b"Somewhere ages and ages hence:
/// Two roads diverged in a wood, and I -
/// I took the one less traveled by,
/// And that has made all the difference.")?;
///
///     // The last line of the poem doesn't end in a newline, so
///     // we have to flush or drop the `LineWriter` to finish
///     // writing.
///     file.flush()?;
///
///     // Confirm the whole poem was written.
///     assert_eq!(fs::read("poem.txt")?, &road_not_taken[..]);
///     Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct LineWriter<W: ?Sized + Write> {
    // All buffering/flushing is delegated to this BufWriter; the newline-driven
    // flushing policy is applied on top of it by the Write impl below.
    inner: BufWriter<W>,
}
|
||||
|
||||
impl<W: Write> LineWriter<W> {
    /// Creates a new `LineWriter`.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::LineWriter;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let file = File::create("poem.txt")?;
    ///     let file = LineWriter::new(file);
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(inner: W) -> LineWriter<W> {
        // Lines typically aren't that long, don't use a giant buffer
        LineWriter::with_capacity(1024, inner)
    }

    /// Creates a new `LineWriter` with at least the specified capacity for the
    /// internal buffer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::LineWriter;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let file = File::create("poem.txt")?;
    ///     let file = LineWriter::with_capacity(100, file);
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> {
        LineWriter { inner: BufWriter::with_capacity(capacity, inner) }
    }

    /// Gets a mutable reference to the underlying writer.
    ///
    /// Caution must be taken when calling methods on the mutable reference
    /// returned as extra writes could corrupt the output stream.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::LineWriter;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let file = File::create("poem.txt")?;
    ///     let mut file = LineWriter::new(file);
    ///
    ///     // we can use reference just like file
    ///     let reference = file.get_mut();
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut(&mut self) -> &mut W {
        self.inner.get_mut()
    }

    /// Unwraps this `LineWriter`, returning the underlying writer.
    ///
    /// The internal buffer is written out before returning the writer.
    ///
    /// # Errors
    ///
    /// An [`Err`] will be returned if an error occurs while flushing the buffer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::LineWriter;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let file = File::create("poem.txt")?;
    ///
    ///     let writer: LineWriter<File> = LineWriter::new(file);
    ///
    ///     let file: File = writer.into_inner()?;
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> {
        // On flush failure, re-wrap the recovered BufWriter in a LineWriter so
        // the caller gets back the same type they started with.
        self.inner.into_inner().map_err(|err| err.new_wrapped(|inner| LineWriter { inner }))
    }
}
|
||||
|
||||
impl<W: ?Sized + Write> LineWriter<W> {
    /// Gets a reference to the underlying writer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::LineWriter;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let file = File::create("poem.txt")?;
    ///     let file = LineWriter::new(file);
    ///
    ///     let reference = file.get_ref();
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_ref(&self) -> &W {
        self.inner.get_ref()
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<W: ?Sized + Write> Write for LineWriter<W> {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
LineWriterShim::new(&mut self.inner).write(buf)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
self.inner.flush()
|
||||
}
|
||||
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
LineWriterShim::new(&mut self.inner).write_vectored(bufs)
|
||||
}
|
||||
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
self.inner.is_write_vectored()
|
||||
}
|
||||
|
||||
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
|
||||
LineWriterShim::new(&mut self.inner).write_all(buf)
|
||||
}
|
||||
|
||||
fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
LineWriterShim::new(&mut self.inner).write_all_vectored(bufs)
|
||||
}
|
||||
|
||||
fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
|
||||
LineWriterShim::new(&mut self.inner).write_fmt(fmt)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<W: ?Sized + Write> fmt::Debug for LineWriter<W>
|
||||
where
|
||||
W: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt.debug_struct("LineWriter")
|
||||
.field("writer", &self.get_ref())
|
||||
.field(
|
||||
"buffer",
|
||||
&format_args!("{}/{}", self.inner.buffer().len(), self.inner.capacity()),
|
||||
)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
@@ -1,297 +0,0 @@
|
||||
use core::slice::memchr;
|
||||
|
||||
use crate::io::{self, BufWriter, IoSlice, Write};
|
||||
|
||||
/// Private helper struct for implementing the line-buffered writing logic.
///
/// This shim temporarily wraps a BufWriter, and uses its internals to
/// implement a line-buffered writer (specifically by using the internal
/// methods like write_to_buf and flush_buf). In this way, a more
/// efficient abstraction can be created than one that only had access to
/// `write` and `flush`, without needlessly duplicating a lot of the
/// implementation details of BufWriter. This also allows existing
/// `BufWriters` to be temporarily given line-buffering logic; this is what
/// enables Stdout to be alternately in line-buffered or block-buffered mode.
#[derive(Debug)]
pub struct LineWriterShim<'a, W: ?Sized + Write> {
    // Exclusive borrow of the BufWriter whose buffer/flush internals we drive.
    buffer: &'a mut BufWriter<W>,
}
|
||||
|
||||
impl<'a, W: ?Sized + Write> LineWriterShim<'a, W> {
|
||||
pub fn new(buffer: &'a mut BufWriter<W>) -> Self {
|
||||
Self { buffer }
|
||||
}
|
||||
|
||||
/// Gets a reference to the inner writer (that is, the writer
|
||||
/// wrapped by the BufWriter).
|
||||
fn inner(&self) -> &W {
|
||||
self.buffer.get_ref()
|
||||
}
|
||||
|
||||
/// Gets a mutable reference to the inner writer (that is, the writer
|
||||
/// wrapped by the BufWriter). Be careful with this writer, as writes to
|
||||
/// it will bypass the buffer.
|
||||
fn inner_mut(&mut self) -> &mut W {
|
||||
self.buffer.get_mut()
|
||||
}
|
||||
|
||||
/// Gets the content currently buffered in self.buffer
|
||||
fn buffered(&self) -> &[u8] {
|
||||
self.buffer.buffer()
|
||||
}
|
||||
|
||||
/// Flushes the buffer iff the last byte is a newline (indicating that an
|
||||
/// earlier write only succeeded partially, and we want to retry flushing
|
||||
/// the buffered line before continuing with a subsequent write).
|
||||
fn flush_if_completed_line(&mut self) -> io::Result<()> {
|
||||
match self.buffered().last().copied() {
|
||||
Some(b'\n') => self.buffer.flush_buf(),
|
||||
_ => Ok(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, W: ?Sized + Write> Write for LineWriterShim<'a, W> {
|
||||
/// Writes some data into this BufWriter with line buffering.
///
/// This means that, if any newlines are present in the data, the data up to
/// the last newline is sent directly to the underlying writer, and data
/// after it is buffered. Returns the number of bytes written.
///
/// This function operates on a "best effort basis"; in keeping with the
/// convention of `Write::write`, it makes at most one attempt to write
/// new data to the underlying writer. If that write only reports a partial
/// success, the remaining data will be buffered.
///
/// Because this function attempts to send completed lines to the underlying
/// writer, it will also flush the existing buffer if it ends with a
/// newline, even if the incoming data does not contain any newlines.
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
    let newline_idx = match memchr::memrchr(b'\n', buf) {
        // If there are no new newlines (that is, if this write is less than
        // one line), just do a regular buffered write (which may flush if
        // we exceed the inner buffer's size)
        None => {
            self.flush_if_completed_line()?;
            return self.buffer.write(buf);
        }
        // Otherwise, arrange for the lines to be written directly to the
        // inner writer.
        Some(newline_idx) => newline_idx + 1,
    };

    // Flush existing content to prepare for our write. We have to do this
    // before attempting to write `buf` in order to maintain consistency;
    // if we add `buf` to the buffer then try to flush it all at once,
    // we're obligated to return Ok(), which would mean suppressing any
    // errors that occur during flush.
    self.buffer.flush_buf()?;

    // This is what we're going to try to write directly to the inner
    // writer. The rest will be buffered, if nothing goes wrong.
    let lines = &buf[..newline_idx];

    // Write `lines` directly to the inner writer. In keeping with the
    // `write` convention, make at most one attempt to add new (unbuffered)
    // data. Because this write doesn't touch the BufWriter state directly,
    // and the buffer is known to be empty, we don't need to worry about
    // self.buffer.panicked here.
    let flushed = self.inner_mut().write(lines)?;

    // If buffer returns Ok(0), propagate that to the caller without
    // doing additional buffering; otherwise we're just guaranteeing
    // an "ErrorKind::WriteZero" later.
    if flushed == 0 {
        return Ok(0);
    }

    // Now that the write has succeeded, buffer the rest (or as much of
    // the rest as possible). If there were any unwritten newlines, we
    // only buffer out to the last unwritten newline that fits in the
    // buffer; this helps prevent flushing partial lines on subsequent
    // calls to LineWriterShim::write.

    // Handle the cases in order of most-common to least-common, under
    // the presumption that most writes succeed in totality, and that most
    // writes are smaller than the buffer.
    // - Is this a partial line (ie, no newlines left in the unwritten tail)
    // - If not, does the data out to the last unwritten newline fit in
    //   the buffer?
    // - If not, scan for the last newline that *does* fit in the buffer
    let tail = if flushed >= newline_idx {
        let tail = &buf[flushed..];
        // Avoid unnecessary short writes by not splitting the remaining
        // bytes if they're larger than the buffer.
        // They can be written in full by the next call to write.
        if tail.len() >= self.buffer.capacity() {
            return Ok(flushed);
        }
        tail
    } else if newline_idx - flushed <= self.buffer.capacity() {
        &buf[flushed..newline_idx]
    } else {
        let scan_area = &buf[flushed..];
        let scan_area = &scan_area[..self.buffer.capacity()];
        match memchr::memrchr(b'\n', scan_area) {
            Some(newline_idx) => &scan_area[..newline_idx + 1],
            None => scan_area,
        }
    };

    let buffered = self.buffer.write_to_buf(tail);
    Ok(flushed + buffered)
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
    // Flush everything — including any partial line still sitting in the
    // buffer — by delegating to the BufWriter's own flush.
    self.buffer.flush()
}
|
||||
|
||||
/// Writes some vectored data into this BufWriter with line buffering.
///
/// This means that, if any newlines are present in the data, the data up to
/// and including the buffer containing the last newline is sent directly to
/// the inner writer, and the data after it is buffered. Returns the number
/// of bytes written.
///
/// This function operates on a "best effort basis"; in keeping with the
/// convention of `Write::write`, it makes at most one attempt to write
/// new data to the underlying writer.
///
/// Because this function attempts to send completed lines to the underlying
/// writer, it will also flush the existing buffer if it contains any
/// newlines.
///
/// Because sorting through an array of `IoSlice` can be a bit convoluted,
/// this method differs from write in the following ways:
///
/// - It attempts to write the full content of all the buffers up to and
///   including the one containing the last newline. This means that it
///   may attempt to write a partial line, if that buffer has data past the
///   newline.
/// - If the write only reports partial success, it does not attempt to
///   find the precise location of the written bytes and buffer the rest.
///
/// If the underlying vector doesn't support vectored writing, we instead
/// simply write the first non-empty buffer with `write`. This way, we
/// get the benefits of more granular partial-line handling without losing
/// anything in efficiency
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
    // If there's no specialized behavior for write_vectored, just use
    // write. This has the benefit of more granular partial-line handling.
    if !self.is_write_vectored() {
        return match bufs.iter().find(|buf| !buf.is_empty()) {
            Some(buf) => self.write(buf),
            None => Ok(0),
        };
    }

    // Find the buffer containing the last newline
    // FIXME: This is overly slow if there are very many bufs and none contain
    // newlines. e.g. writev() on Linux only writes up to 1024 slices, so
    // scanning the rest is wasted effort. This makes write_all_vectored()
    // quadratic.
    let last_newline_buf_idx = bufs
        .iter()
        .enumerate()
        .rev()
        .find_map(|(i, buf)| memchr::memchr(b'\n', buf).map(|_| i));

    // If there are no new newlines (that is, if this write is less than
    // one line), just do a regular buffered write
    let last_newline_buf_idx = match last_newline_buf_idx {
        // No newlines; just do a normal buffered write
        None => {
            self.flush_if_completed_line()?;
            return self.buffer.write_vectored(bufs);
        }
        Some(i) => i,
    };

    // Flush existing content to prepare for our write
    self.buffer.flush_buf()?;

    // This is what we're going to try to write directly to the inner
    // writer. The rest will be buffered, if nothing goes wrong.
    let (lines, tail) = bufs.split_at(last_newline_buf_idx + 1);

    // Write `lines` directly to the inner writer. In keeping with the
    // `write` convention, make at most one attempt to add new (unbuffered)
    // data. Because this write doesn't touch the BufWriter state directly,
    // and the buffer is known to be empty, we don't need to worry about
    // self.panicked here.
    let flushed = self.inner_mut().write_vectored(lines)?;

    // If inner returns Ok(0), propagate that to the caller without
    // doing additional buffering; otherwise we're just guaranteeing
    // an "ErrorKind::WriteZero" later.
    if flushed == 0 {
        return Ok(0);
    }

    // Don't try to reconstruct the exact amount written; just bail
    // in the event of a partial write
    let mut lines_len: usize = 0;
    for buf in lines {
        // With overlapping/duplicate slices the total length may in theory
        // exceed usize::MAX
        lines_len = lines_len.saturating_add(buf.len());
        if flushed < lines_len {
            return Ok(flushed);
        }
    }

    // Now that the write has succeeded, buffer the rest (or as much of the
    // rest as possible)
    let buffered: usize = tail
        .iter()
        .filter(|buf| !buf.is_empty())
        .map(|buf| self.buffer.write_to_buf(buf))
        .take_while(|&n| n > 0)
        .sum();

    Ok(flushed + buffered)
}
|
||||
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
self.inner().is_write_vectored()
|
||||
}
|
||||
|
||||
/// Writes some data into this BufWriter with line buffering.
|
||||
///
|
||||
/// This means that, if any newlines are present in the data, the data up to
|
||||
/// the last newline is sent directly to the underlying writer, and data
|
||||
/// after it is buffered.
|
||||
///
|
||||
/// Because this function attempts to send completed lines to the underlying
|
||||
/// writer, it will also flush the existing buffer if it contains any
|
||||
/// newlines, even if the incoming data does not contain any newlines.
|
||||
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
|
||||
match memchr::memrchr(b'\n', buf) {
|
||||
// If there are no new newlines (that is, if this write is less than
|
||||
// one line), just do a regular buffered write (which may flush if
|
||||
// we exceed the inner buffer's size)
|
||||
None => {
|
||||
self.flush_if_completed_line()?;
|
||||
self.buffer.write_all(buf)
|
||||
}
|
||||
Some(newline_idx) => {
|
||||
let (lines, tail) = buf.split_at(newline_idx + 1);
|
||||
|
||||
if self.buffered().is_empty() {
|
||||
self.inner_mut().write_all(lines)?;
|
||||
} else {
|
||||
// If there is any buffered data, we add the incoming lines
|
||||
// to that buffer before flushing, which saves us at least
|
||||
// one write call. We can't really do this with `write`,
|
||||
// since we can't do this *and* not suppress errors *and*
|
||||
// report a consistent state to the caller in a return
|
||||
// value, but here in write_all it's fine.
|
||||
self.buffer.write_all(lines)?;
|
||||
self.buffer.flush_buf()?;
|
||||
}
|
||||
|
||||
self.buffer.write_all(tail)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,189 +0,0 @@
|
||||
//! Buffering wrappers for I/O traits
|
||||
|
||||
mod bufreader;
|
||||
mod bufwriter;
|
||||
mod linewriter;
|
||||
mod linewritershim;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
|
||||
pub use bufwriter::WriterPanicked;
|
||||
use linewritershim::LineWriterShim;
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use self::{bufreader::BufReader, bufwriter::BufWriter, linewriter::LineWriter};
|
||||
use crate::io::Error;
|
||||
use crate::{error, fmt};
|
||||
|
||||
/// An error returned by [`BufWriter::into_inner`] which combines an error that
|
||||
/// happened while writing out the buffer, and the buffered writer object
|
||||
/// which may be used to recover from the condition.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufWriter;
|
||||
/// use std::net::TcpStream;
|
||||
///
|
||||
/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
|
||||
///
|
||||
/// // do stuff with the stream
|
||||
///
|
||||
/// // we want to get our `TcpStream` back, so let's try:
|
||||
///
|
||||
/// let stream = match stream.into_inner() {
|
||||
/// Ok(s) => s,
|
||||
/// Err(e) => {
|
||||
/// // Here, e is an IntoInnerError
|
||||
/// panic!("An error occurred");
|
||||
/// }
|
||||
/// };
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub struct IntoInnerError<W>(W, Error);
|
||||
|
||||
impl<W> IntoInnerError<W> {
|
||||
/// Constructs a new IntoInnerError
|
||||
fn new(writer: W, error: Error) -> Self {
|
||||
Self(writer, error)
|
||||
}
|
||||
|
||||
/// Helper to construct a new IntoInnerError; intended to help with
|
||||
/// adapters that wrap other adapters
|
||||
fn new_wrapped<W2>(self, f: impl FnOnce(W) -> W2) -> IntoInnerError<W2> {
|
||||
let Self(writer, error) = self;
|
||||
IntoInnerError::new(f(writer), error)
|
||||
}
|
||||
|
||||
/// Returns the error which caused the call to [`BufWriter::into_inner()`]
|
||||
/// to fail.
|
||||
///
|
||||
/// This error was returned when attempting to write the internal buffer.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufWriter;
|
||||
/// use std::net::TcpStream;
|
||||
///
|
||||
/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
|
||||
///
|
||||
/// // do stuff with the stream
|
||||
///
|
||||
/// // we want to get our `TcpStream` back, so let's try:
|
||||
///
|
||||
/// let stream = match stream.into_inner() {
|
||||
/// Ok(s) => s,
|
||||
/// Err(e) => {
|
||||
/// // Here, e is an IntoInnerError, let's log the inner error.
|
||||
/// //
|
||||
/// // We'll just 'log' to stdout for this example.
|
||||
/// println!("{}", e.error());
|
||||
///
|
||||
/// panic!("An unexpected error occurred.");
|
||||
/// }
|
||||
/// };
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn error(&self) -> &Error {
|
||||
&self.1
|
||||
}
|
||||
|
||||
/// Returns the buffered writer instance which generated the error.
|
||||
///
|
||||
/// The returned object can be used for error recovery, such as
|
||||
/// re-inspecting the buffer.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::BufWriter;
|
||||
/// use std::net::TcpStream;
|
||||
///
|
||||
/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
|
||||
///
|
||||
/// // do stuff with the stream
|
||||
///
|
||||
/// // we want to get our `TcpStream` back, so let's try:
|
||||
///
|
||||
/// let stream = match stream.into_inner() {
|
||||
/// Ok(s) => s,
|
||||
/// Err(e) => {
|
||||
/// // Here, e is an IntoInnerError, let's re-examine the buffer:
|
||||
/// let buffer = e.into_inner();
|
||||
///
|
||||
/// // do stuff to try to recover
|
||||
///
|
||||
/// // afterwards, let's just return the stream
|
||||
/// buffer.into_inner().unwrap()
|
||||
/// }
|
||||
/// };
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn into_inner(self) -> W {
|
||||
self.0
|
||||
}
|
||||
|
||||
/// Consumes the [`IntoInnerError`] and returns the error which caused the call to
|
||||
/// [`BufWriter::into_inner()`] to fail. Unlike `error`, this can be used to
|
||||
/// obtain ownership of the underlying error.
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use std::io::{BufWriter, ErrorKind, Write};
|
||||
///
|
||||
/// let mut not_enough_space = [0u8; 10];
|
||||
/// let mut stream = BufWriter::new(not_enough_space.as_mut());
|
||||
/// write!(stream, "this cannot be actually written").unwrap();
|
||||
/// let into_inner_err = stream.into_inner().expect_err("now we discover it's too small");
|
||||
/// let err = into_inner_err.into_error();
|
||||
/// assert_eq!(err.kind(), ErrorKind::WriteZero);
|
||||
/// ```
|
||||
#[stable(feature = "io_into_inner_error_parts", since = "1.55.0")]
|
||||
pub fn into_error(self) -> Error {
|
||||
self.1
|
||||
}
|
||||
|
||||
/// Consumes the [`IntoInnerError`] and returns the error which caused the call to
|
||||
/// [`BufWriter::into_inner()`] to fail, and the underlying writer.
|
||||
///
|
||||
/// This can be used to simply obtain ownership of the underlying error; it can also be used for
|
||||
/// advanced error recovery.
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use std::io::{BufWriter, ErrorKind, Write};
|
||||
///
|
||||
/// let mut not_enough_space = [0u8; 10];
|
||||
/// let mut stream = BufWriter::new(not_enough_space.as_mut());
|
||||
/// write!(stream, "this cannot be actually written").unwrap();
|
||||
/// let into_inner_err = stream.into_inner().expect_err("now we discover it's too small");
|
||||
/// let (err, recovered_writer) = into_inner_err.into_parts();
|
||||
/// assert_eq!(err.kind(), ErrorKind::WriteZero);
|
||||
/// assert_eq!(recovered_writer.buffer(), b"t be actually written");
|
||||
/// ```
|
||||
#[stable(feature = "io_into_inner_error_parts", since = "1.55.0")]
|
||||
pub fn into_parts(self) -> (Error, W) {
|
||||
(self.1, self.0)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<W> From<IntoInnerError<W>> for Error {
|
||||
fn from(iie: IntoInnerError<W>) -> Error {
|
||||
iie.1
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> {}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<W> fmt::Display for IntoInnerError<W> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
self.error().fmt(f)
|
||||
}
|
||||
}
|
||||
@@ -1,297 +0,0 @@
|
||||
use super::{BorrowedBuf, BufReader, BufWriter, DEFAULT_BUF_SIZE, Read, Result, Write};
|
||||
use crate::alloc::Allocator;
|
||||
use crate::cmp;
|
||||
use alloc_crate::collections::VecDeque;
|
||||
use crate::io::IoSlice;
|
||||
use crate::mem::MaybeUninit;
|
||||
use crate::sys::io::{CopyState, kernel_copy};
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
/// Copies the entire contents of a reader into a writer.
|
||||
///
|
||||
/// This function will continuously read data from `reader` and then
|
||||
/// write it into `writer` in a streaming fashion until `reader`
|
||||
/// returns EOF.
|
||||
///
|
||||
/// On success, the total number of bytes that were copied from
|
||||
/// `reader` to `writer` is returned.
|
||||
///
|
||||
/// If you want to copy the contents of one file to another and you’re
|
||||
/// working with filesystem paths, see the [`fs::copy`] function.
|
||||
///
|
||||
/// [`fs::copy`]: crate::fs::copy
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// This function will return an error immediately if any call to [`read`] or
|
||||
/// [`write`] returns an error. All instances of [`ErrorKind::Interrupted`] are
|
||||
/// handled by this function and the underlying operation is retried.
|
||||
///
|
||||
/// [`read`]: Read::read
|
||||
/// [`write`]: Write::write
|
||||
/// [`ErrorKind::Interrupted`]: crate::io::ErrorKind::Interrupted
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::io;
|
||||
///
|
||||
/// fn main() -> io::Result<()> {
|
||||
/// let mut reader: &[u8] = b"hello";
|
||||
/// let mut writer: Vec<u8> = vec![];
|
||||
///
|
||||
/// io::copy(&mut reader, &mut writer)?;
|
||||
///
|
||||
/// assert_eq!(&b"hello"[..], &writer[..]);
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// # Platform-specific behavior
|
||||
///
|
||||
/// On Linux (including Android), this function uses `copy_file_range(2)`,
|
||||
/// `sendfile(2)` or `splice(2)` syscalls to move data directly between file
|
||||
/// descriptors if possible.
|
||||
///
|
||||
/// Note that platform-specific behavior [may change in the future][changes].
|
||||
///
|
||||
/// [changes]: crate::io#platform-specific-behavior
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> Result<u64>
|
||||
where
|
||||
R: Read,
|
||||
W: Write,
|
||||
{
|
||||
match kernel_copy(reader, writer)? {
|
||||
CopyState::Ended(copied) => Ok(copied),
|
||||
CopyState::Fallback(copied) => {
|
||||
generic_copy(reader, writer).map(|additional| copied + additional)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The userspace read-write-loop implementation of `io::copy` that is used when
|
||||
/// OS-specific specializations for copy offloading are not available or not applicable.
|
||||
fn generic_copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> Result<u64>
|
||||
where
|
||||
R: Read,
|
||||
W: Write,
|
||||
{
|
||||
let read_buf = BufferedReaderSpec::buffer_size(reader);
|
||||
let write_buf = BufferedWriterSpec::buffer_size(writer);
|
||||
|
||||
if read_buf >= DEFAULT_BUF_SIZE && read_buf >= write_buf {
|
||||
return BufferedReaderSpec::copy_to(reader, writer);
|
||||
}
|
||||
|
||||
BufferedWriterSpec::copy_from(writer, reader)
|
||||
}
|
||||
|
||||
/// Specialization of the read-write loop that reuses the internal
|
||||
/// buffer of a BufReader. If there's no buffer then the writer side
|
||||
/// should be used instead.
|
||||
trait BufferedReaderSpec {
|
||||
fn buffer_size(&self) -> usize;
|
||||
|
||||
fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64>;
|
||||
}
|
||||
|
||||
impl<T> BufferedReaderSpec for T
|
||||
where
|
||||
Self: Read,
|
||||
T: ?Sized,
|
||||
{
|
||||
#[inline]
|
||||
default fn buffer_size(&self) -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
default fn copy_to(&mut self, _to: &mut (impl Write + ?Sized)) -> Result<u64> {
|
||||
unreachable!("only called from specializations")
|
||||
}
|
||||
}
|
||||
|
||||
impl BufferedReaderSpec for &[u8] {
|
||||
fn buffer_size(&self) -> usize {
|
||||
// prefer this specialization since the source "buffer" is all we'll ever need,
|
||||
// even if it's small
|
||||
usize::MAX
|
||||
}
|
||||
|
||||
fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64> {
|
||||
let len = self.len();
|
||||
to.write_all(self)?;
|
||||
*self = &self[len..];
|
||||
Ok(len as u64)
|
||||
}
|
||||
}
|
||||
|
||||
impl<A: Allocator> BufferedReaderSpec for VecDeque<u8, A> {
|
||||
fn buffer_size(&self) -> usize {
|
||||
// prefer this specialization since the source "buffer" is all we'll ever need,
|
||||
// even if it's small
|
||||
usize::MAX
|
||||
}
|
||||
|
||||
fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64> {
|
||||
let len = self.len();
|
||||
let (front, back) = self.as_slices();
|
||||
let bufs = &mut [IoSlice::new(front), IoSlice::new(back)];
|
||||
to.write_all_vectored(bufs)?;
|
||||
self.clear();
|
||||
Ok(len as u64)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I> BufferedReaderSpec for BufReader<I>
|
||||
where
|
||||
Self: Read,
|
||||
I: ?Sized,
|
||||
{
|
||||
fn buffer_size(&self) -> usize {
|
||||
self.capacity()
|
||||
}
|
||||
|
||||
fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64> {
|
||||
let mut len = 0;
|
||||
|
||||
loop {
|
||||
// Hack: this relies on `impl Read for BufReader` always calling fill_buf
|
||||
// if the buffer is empty, even for empty slices.
|
||||
// It can't be called directly here since specialization prevents us
|
||||
// from adding I: Read
|
||||
match self.read(&mut []) {
|
||||
Ok(_) => {}
|
||||
Err(e) if e.is_interrupted() => continue,
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
let buf = self.buffer();
|
||||
if self.buffer().len() == 0 {
|
||||
return Ok(len);
|
||||
}
|
||||
|
||||
// In case the writer side is a BufWriter then its write_all
|
||||
// implements an optimization that passes through large
|
||||
// buffers to the underlying writer. That code path is #[cold]
|
||||
// but we're still avoiding redundant memcopies when doing
|
||||
// a copy between buffered inputs and outputs.
|
||||
to.write_all(buf)?;
|
||||
len += buf.len() as u64;
|
||||
self.discard_buffer();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Specialization of the read-write loop that either uses a stack buffer
|
||||
/// or reuses the internal buffer of a BufWriter
|
||||
trait BufferedWriterSpec: Write {
|
||||
fn buffer_size(&self) -> usize;
|
||||
|
||||
fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64>;
|
||||
}
|
||||
|
||||
impl<W: Write + ?Sized> BufferedWriterSpec for W {
|
||||
#[inline]
|
||||
default fn buffer_size(&self) -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
default fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64> {
|
||||
stack_buffer_copy(reader, self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: Write + ?Sized> BufferedWriterSpec for BufWriter<I> {
|
||||
fn buffer_size(&self) -> usize {
|
||||
self.capacity()
|
||||
}
|
||||
|
||||
fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64> {
|
||||
if self.capacity() < DEFAULT_BUF_SIZE {
|
||||
return stack_buffer_copy(reader, self);
|
||||
}
|
||||
|
||||
let mut len = 0;
|
||||
let mut init = 0;
|
||||
|
||||
loop {
|
||||
let buf = self.buffer_mut();
|
||||
let mut read_buf: BorrowedBuf<'_> = buf.spare_capacity_mut().into();
|
||||
|
||||
unsafe {
|
||||
// SAFETY: init is either 0 or the init_len from the previous iteration.
|
||||
read_buf.set_init(init);
|
||||
}
|
||||
|
||||
if read_buf.capacity() >= DEFAULT_BUF_SIZE {
|
||||
let mut cursor = read_buf.unfilled();
|
||||
match reader.read_buf(cursor.reborrow()) {
|
||||
Ok(()) => {
|
||||
let bytes_read = cursor.written();
|
||||
|
||||
if bytes_read == 0 {
|
||||
return Ok(len);
|
||||
}
|
||||
|
||||
init = read_buf.init_len() - bytes_read;
|
||||
len += bytes_read as u64;
|
||||
|
||||
// SAFETY: BorrowedBuf guarantees all of its filled bytes are init
|
||||
unsafe { buf.set_len(buf.len() + bytes_read) };
|
||||
|
||||
// Read again if the buffer still has enough capacity, as BufWriter itself would do
|
||||
// This will occur if the reader returns short reads
|
||||
}
|
||||
Err(ref e) if e.is_interrupted() => {}
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
} else {
|
||||
// All the bytes that were already in the buffer are initialized,
|
||||
// treat them as such when the buffer is flushed.
|
||||
init += buf.len();
|
||||
|
||||
self.flush_buf()?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BufferedWriterSpec for Vec<u8> {
|
||||
fn buffer_size(&self) -> usize {
|
||||
cmp::max(DEFAULT_BUF_SIZE, self.capacity() - self.len())
|
||||
}
|
||||
|
||||
fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64> {
|
||||
reader.read_to_end(self).map(|bytes| u64::try_from(bytes).expect("usize overflowed u64"))
|
||||
}
|
||||
}
|
||||
|
||||
fn stack_buffer_copy<R: Read + ?Sized, W: Write + ?Sized>(
|
||||
reader: &mut R,
|
||||
writer: &mut W,
|
||||
) -> Result<u64> {
|
||||
let buf: &mut [_] = &mut [MaybeUninit::uninit(); DEFAULT_BUF_SIZE];
|
||||
let mut buf: BorrowedBuf<'_> = buf.into();
|
||||
|
||||
let mut len = 0;
|
||||
|
||||
loop {
|
||||
match reader.read_buf(buf.unfilled()) {
|
||||
Ok(()) => {}
|
||||
Err(e) if e.is_interrupted() => continue,
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
|
||||
if buf.filled().is_empty() {
|
||||
break;
|
||||
}
|
||||
|
||||
len += buf.filled().len() as u64;
|
||||
writer.write_all(buf.filled())?;
|
||||
buf.clear();
|
||||
}
|
||||
|
||||
Ok(len)
|
||||
}
|
||||
@@ -1,147 +0,0 @@
|
||||
use crate::cmp::{max, min};
|
||||
use alloc_crate::collections::VecDeque;
|
||||
use crate::io;
|
||||
use crate::io::*;
|
||||
|
||||
#[test]
|
||||
fn copy_copies() {
|
||||
let mut r = repeat(0).take(4);
|
||||
let mut w = sink();
|
||||
assert_eq!(copy(&mut r, &mut w).unwrap(), 4);
|
||||
|
||||
let mut r = repeat(0).take(1 << 17);
|
||||
assert_eq!(copy(&mut r as &mut dyn Read, &mut w as &mut dyn Write).unwrap(), 1 << 17);
|
||||
}
|
||||
|
||||
struct ShortReader {
|
||||
cap: usize,
|
||||
read_size: usize,
|
||||
observed_buffer: usize,
|
||||
}
|
||||
|
||||
impl Read for ShortReader {
|
||||
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
|
||||
let bytes = min(self.cap, self.read_size).min(buf.len());
|
||||
self.cap -= bytes;
|
||||
self.observed_buffer = max(self.observed_buffer, buf.len());
|
||||
Ok(bytes)
|
||||
}
|
||||
}
|
||||
|
||||
struct WriteObserver {
|
||||
observed_buffer: usize,
|
||||
}
|
||||
|
||||
impl Write for WriteObserver {
|
||||
fn write(&mut self, buf: &[u8]) -> Result<usize> {
|
||||
self.observed_buffer = max(self.observed_buffer, buf.len());
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn copy_specializes_bufwriter() {
|
||||
let cap = 117 * 1024;
|
||||
let buf_sz = 16 * 1024;
|
||||
let mut r = ShortReader { cap, observed_buffer: 0, read_size: 1337 };
|
||||
let mut w = BufWriter::with_capacity(buf_sz, WriteObserver { observed_buffer: 0 });
|
||||
assert_eq!(
|
||||
copy(&mut r, &mut w).unwrap(),
|
||||
cap as u64,
|
||||
"expected the whole capacity to be copied"
|
||||
);
|
||||
assert_eq!(r.observed_buffer, buf_sz, "expected a large buffer to be provided to the reader");
|
||||
assert!(w.get_mut().observed_buffer > DEFAULT_BUF_SIZE, "expected coalesced writes");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn copy_specializes_bufreader() {
|
||||
let mut source = vec![0; 768 * 1024];
|
||||
source[1] = 42;
|
||||
let mut buffered = BufReader::with_capacity(256 * 1024, Cursor::new(&mut source));
|
||||
|
||||
let mut sink = Vec::new();
|
||||
assert_eq!(crate::io::copy(&mut buffered, &mut sink).unwrap(), source.len() as u64);
|
||||
assert_eq!(source.as_slice(), sink.as_slice());
|
||||
|
||||
let buf_sz = 71 * 1024;
|
||||
assert!(buf_sz > DEFAULT_BUF_SIZE, "test precondition");
|
||||
|
||||
let mut buffered = BufReader::with_capacity(buf_sz, Cursor::new(&mut source));
|
||||
let mut sink = WriteObserver { observed_buffer: 0 };
|
||||
assert_eq!(crate::io::copy(&mut buffered, &mut sink).unwrap(), source.len() as u64);
|
||||
assert_eq!(
|
||||
sink.observed_buffer, buf_sz,
|
||||
"expected a large buffer to be provided to the writer"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn copy_specializes_to_vec() {
|
||||
let cap = DEFAULT_BUF_SIZE * 10;
|
||||
let mut source = ShortReader { cap, observed_buffer: 0, read_size: DEFAULT_BUF_SIZE };
|
||||
let mut sink = Vec::new();
|
||||
let copied = io::copy(&mut source, &mut sink).unwrap();
|
||||
assert_eq!(cap as u64, copied);
|
||||
assert_eq!(sink.len() as u64, copied);
|
||||
assert!(
|
||||
source.observed_buffer > DEFAULT_BUF_SIZE,
|
||||
"expected a large buffer to be provided to the reader, got {}",
|
||||
source.observed_buffer
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn copy_specializes_from_vecdeque() {
|
||||
let mut source = VecDeque::with_capacity(100 * 1024);
|
||||
for _ in 0..20 * 1024 {
|
||||
source.push_front(0);
|
||||
}
|
||||
for _ in 0..20 * 1024 {
|
||||
source.push_back(0);
|
||||
}
|
||||
let mut sink = WriteObserver { observed_buffer: 0 };
|
||||
assert_eq!(40 * 1024u64, io::copy(&mut source, &mut sink).unwrap());
|
||||
assert_eq!(20 * 1024, sink.observed_buffer);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn copy_specializes_from_slice() {
|
||||
let mut source = [1; 60 * 1024].as_slice();
|
||||
let mut sink = WriteObserver { observed_buffer: 0 };
|
||||
assert_eq!(60 * 1024u64, io::copy(&mut source, &mut sink).unwrap());
|
||||
assert_eq!(60 * 1024, sink.observed_buffer);
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
mod io_benches {
|
||||
use test::Bencher;
|
||||
|
||||
use crate::fs::{File, OpenOptions};
|
||||
use crate::io::BufReader;
|
||||
use crate::io::prelude::*;
|
||||
|
||||
#[bench]
|
||||
#[cfg_attr(target_os = "emscripten", ignore)] // no /dev
|
||||
fn bench_copy_buf_reader(b: &mut Bencher) {
|
||||
let mut file_in = File::open("/dev/zero").expect("opening /dev/zero failed");
|
||||
// use dyn to avoid specializations unrelated to readbuf
|
||||
let dyn_in = &mut file_in as &mut dyn Read;
|
||||
let mut reader = BufReader::with_capacity(256 * 1024, dyn_in.take(0));
|
||||
let mut writer =
|
||||
OpenOptions::new().write(true).open("/dev/null").expect("opening /dev/null failed");
|
||||
|
||||
const BYTES: u64 = 1024 * 1024;
|
||||
|
||||
b.bytes = BYTES;
|
||||
|
||||
b.iter(|| {
|
||||
reader.get_mut().set_limit(BYTES);
|
||||
crate::io::copy(&mut reader, &mut writer).unwrap()
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,757 +0,0 @@
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
use crate::alloc::Allocator;
|
||||
use crate::cmp;
|
||||
use crate::io::prelude::*;
|
||||
use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, SeekFrom};
|
||||
|
||||
/// A `Cursor` wraps an in-memory buffer and provides it with a
|
||||
/// [`Seek`] implementation.
|
||||
///
|
||||
/// `Cursor`s are used with in-memory buffers, anything implementing
|
||||
/// <code>[AsRef]<\[u8]></code>, to allow them to implement [`Read`] and/or [`Write`],
|
||||
/// allowing these buffers to be used anywhere you might use a reader or writer
|
||||
/// that does actual I/O.
|
||||
///
|
||||
/// The standard library implements some I/O traits on various types which
|
||||
/// are commonly used as a buffer, like <code>Cursor<[Vec]\<u8>></code> and
|
||||
/// <code>Cursor<[&\[u8\]][bytes]></code>.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// We may want to write bytes to a [`File`] in our production
|
||||
/// code, but use an in-memory buffer in our tests. We can do this with
|
||||
/// `Cursor`:
|
||||
///
|
||||
/// [bytes]: crate::slice "slice"
|
||||
/// [`File`]: crate::fs::File
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io::prelude::*;
|
||||
/// use std::io::{self, SeekFrom};
|
||||
/// use std::fs::File;
|
||||
///
|
||||
/// // a library function we've written
|
||||
/// fn write_ten_bytes_at_end<W: Write + Seek>(mut writer: W) -> io::Result<()> {
|
||||
/// writer.seek(SeekFrom::End(-10))?;
|
||||
///
|
||||
/// for i in 0..10 {
|
||||
/// writer.write(&[i])?;
|
||||
/// }
|
||||
///
|
||||
/// // all went well
|
||||
/// Ok(())
|
||||
/// }
|
||||
///
|
||||
/// # fn foo() -> io::Result<()> {
|
||||
/// // Here's some code that uses this library function.
|
||||
/// //
|
||||
/// // We might want to use a BufReader here for efficiency, but let's
|
||||
/// // keep this example focused.
|
||||
/// let mut file = File::create("foo.txt")?;
|
||||
/// // First, we need to allocate 10 bytes to be able to write into.
|
||||
/// file.set_len(10)?;
|
||||
///
|
||||
/// write_ten_bytes_at_end(&mut file)?;
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
///
|
||||
/// // now let's write a test
|
||||
/// #[test]
|
||||
/// fn test_writes_bytes() {
|
||||
/// // setting up a real File is much slower than an in-memory buffer,
|
||||
/// // let's use a cursor instead
|
||||
/// use std::io::Cursor;
|
||||
/// let mut buff = Cursor::new(vec![0; 15]);
|
||||
///
|
||||
/// write_ten_bytes_at_end(&mut buff).unwrap();
|
||||
///
|
||||
/// assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
|
||||
/// }
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[derive(Debug, Default, Eq, PartialEq)]
|
||||
pub struct Cursor<T> {
|
||||
inner: T,
|
||||
pos: u64,
|
||||
}
|
||||
|
||||
impl<T> Cursor<T> {
|
||||
/// Creates a new cursor wrapping the provided underlying in-memory buffer.
|
||||
///
|
||||
/// Cursor initial position is `0` even if underlying buffer (e.g., [`Vec`])
|
||||
/// is not empty. So writing to cursor starts with overwriting [`Vec`]
|
||||
/// content, not with appending to it.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let buff = Cursor::new(Vec::new());
|
||||
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
|
||||
/// # force_inference(&buff);
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[rustc_const_stable(feature = "const_io_structs", since = "1.79.0")]
|
||||
pub const fn new(inner: T) -> Cursor<T> {
|
||||
Cursor { pos: 0, inner }
|
||||
}
|
||||
|
||||
/// Consumes this cursor, returning the underlying value.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let buff = Cursor::new(Vec::new());
|
||||
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
|
||||
/// # force_inference(&buff);
|
||||
///
|
||||
/// let vec = buff.into_inner();
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn into_inner(self) -> T {
|
||||
self.inner
|
||||
}
|
||||
|
||||
/// Gets a reference to the underlying value in this cursor.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let buff = Cursor::new(Vec::new());
|
||||
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
|
||||
/// # force_inference(&buff);
|
||||
///
|
||||
/// let reference = buff.get_ref();
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[rustc_const_stable(feature = "const_io_structs", since = "1.79.0")]
|
||||
pub const fn get_ref(&self) -> &T {
|
||||
&self.inner
|
||||
}
|
||||
|
||||
/// Gets a mutable reference to the underlying value in this cursor.
|
||||
///
|
||||
/// Care should be taken to avoid modifying the internal I/O state of the
|
||||
/// underlying value as it may corrupt this cursor's position.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buff = Cursor::new(Vec::new());
|
||||
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
|
||||
/// # force_inference(&buff);
|
||||
///
|
||||
/// let reference = buff.get_mut();
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[rustc_const_stable(feature = "const_mut_cursor", since = "1.86.0")]
|
||||
pub const fn get_mut(&mut self) -> &mut T {
|
||||
&mut self.inner
|
||||
}
|
||||
|
||||
/// Returns the current position of this cursor.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::io::Cursor;
|
||||
/// use std::io::prelude::*;
|
||||
/// use std::io::SeekFrom;
|
||||
///
|
||||
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
|
||||
///
|
||||
/// assert_eq!(buff.position(), 0);
|
||||
///
|
||||
/// buff.seek(SeekFrom::Current(2)).unwrap();
|
||||
/// assert_eq!(buff.position(), 2);
|
||||
///
|
||||
/// buff.seek(SeekFrom::Current(-1)).unwrap();
|
||||
/// assert_eq!(buff.position(), 1);
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[rustc_const_stable(feature = "const_io_structs", since = "1.79.0")]
|
||||
pub const fn position(&self) -> u64 {
|
||||
self.pos
|
||||
}
|
||||
|
||||
/// Sets the position of this cursor.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
|
||||
///
|
||||
/// assert_eq!(buff.position(), 0);
|
||||
///
|
||||
/// buff.set_position(2);
|
||||
/// assert_eq!(buff.position(), 2);
|
||||
///
|
||||
/// buff.set_position(4);
|
||||
/// assert_eq!(buff.position(), 4);
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[rustc_const_stable(feature = "const_mut_cursor", since = "1.86.0")]
|
||||
pub const fn set_position(&mut self, pos: u64) {
|
||||
self.pos = pos;
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Cursor<T>
|
||||
where
|
||||
T: AsRef<[u8]>,
|
||||
{
|
||||
/// Splits the underlying slice at the cursor position and returns them.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(cursor_split)]
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
|
||||
///
|
||||
/// assert_eq!(buff.split(), ([].as_slice(), [1, 2, 3, 4, 5].as_slice()));
|
||||
///
|
||||
/// buff.set_position(2);
|
||||
/// assert_eq!(buff.split(), ([1, 2].as_slice(), [3, 4, 5].as_slice()));
|
||||
///
|
||||
/// buff.set_position(6);
|
||||
/// assert_eq!(buff.split(), ([1, 2, 3, 4, 5].as_slice(), [].as_slice()));
|
||||
/// ```
|
||||
#[unstable(feature = "cursor_split", issue = "86369")]
|
||||
pub fn split(&self) -> (&[u8], &[u8]) {
|
||||
let slice = self.inner.as_ref();
|
||||
let pos = self.pos.min(slice.len() as u64);
|
||||
slice.split_at(pos as usize)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Cursor<T>
|
||||
where
|
||||
T: AsMut<[u8]>,
|
||||
{
|
||||
/// Splits the underlying slice at the cursor position and returns them
|
||||
/// mutably.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(cursor_split)]
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
|
||||
///
|
||||
/// assert_eq!(buff.split_mut(), ([].as_mut_slice(), [1, 2, 3, 4, 5].as_mut_slice()));
|
||||
///
|
||||
/// buff.set_position(2);
|
||||
/// assert_eq!(buff.split_mut(), ([1, 2].as_mut_slice(), [3, 4, 5].as_mut_slice()));
|
||||
///
|
||||
/// buff.set_position(6);
|
||||
/// assert_eq!(buff.split_mut(), ([1, 2, 3, 4, 5].as_mut_slice(), [].as_mut_slice()));
|
||||
/// ```
|
||||
#[unstable(feature = "cursor_split", issue = "86369")]
|
||||
pub fn split_mut(&mut self) -> (&mut [u8], &mut [u8]) {
|
||||
let slice = self.inner.as_mut();
|
||||
let pos = self.pos.min(slice.len() as u64);
|
||||
slice.split_at_mut(pos as usize)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Cursor<T>
where
    T: Clone,
{
    #[inline]
    fn clone(&self) -> Self {
        Cursor { inner: self.inner.clone(), pos: self.pos }
    }

    #[inline]
    fn clone_from(&mut self, other: &Self) {
        // Clone into the existing `inner` so its allocation can be reused
        // where the underlying type's `clone_from` supports that.
        self.inner.clone_from(&other.inner);
        self.pos = other.pos;
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> io::Seek for Cursor<T>
where
    T: AsRef<[u8]>,
{
    fn seek(&mut self, style: SeekFrom) -> io::Result<u64> {
        let (base_pos, offset) = match style {
            // Seeking from the start cannot go negative or overflow; store
            // it directly. The position is allowed to land past the end.
            SeekFrom::Start(n) => {
                self.pos = n;
                return Ok(n);
            }
            SeekFrom::End(n) => (self.inner.as_ref().len() as u64, n),
            SeekFrom::Current(n) => (self.pos, n),
        };
        // One signed add catches both "before position 0" and u64 overflow.
        match base_pos.checked_add_signed(offset) {
            Some(n) => {
                self.pos = n;
                Ok(self.pos)
            }
            None => Err(io::const_error!(
                ErrorKind::InvalidInput,
                "invalid seek to a negative or overflowing position",
            )),
        }
    }

    fn stream_len(&mut self) -> io::Result<u64> {
        Ok(self.inner.as_ref().len() as u64)
    }

    fn stream_position(&mut self) -> io::Result<u64> {
        Ok(self.pos)
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Read for Cursor<T>
where
    T: AsRef<[u8]>,
{
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Delegate to the `&[u8]` reader over the not-yet-read tail
        // (`split().1`), then advance our own position by what it consumed.
        let n = Read::read(&mut Cursor::split(self).1, buf)?;
        self.pos += n as u64;
        Ok(n)
    }

    fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
        let prev_written = cursor.written();

        Read::read_buf(&mut Cursor::split(self).1, cursor.reborrow())?;

        // Advance by exactly the number of bytes the tail reader filled in.
        self.pos += (cursor.written() - prev_written) as u64;

        Ok(())
    }

    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        let mut nread = 0;
        for buf in bufs {
            let n = self.read(buf)?;
            nread += n;
            // A short read means the data ran out; later buffers would
            // only read 0 bytes, so stop early.
            if n < buf.len() {
                break;
            }
        }
        Ok(nread)
    }

    fn is_read_vectored(&self) -> bool {
        true
    }

    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        let result = Read::read_exact(&mut Cursor::split(self).1, buf);

        match result {
            Ok(_) => self.pos += buf.len() as u64,
            // The only possible error condition is EOF, so place the cursor at "EOF"
            Err(_) => self.pos = self.inner.as_ref().len() as u64,
        }

        result
    }

    fn read_buf_exact(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
        let prev_written = cursor.written();

        let result = Read::read_buf_exact(&mut Cursor::split(self).1, cursor.reborrow());
        // Advance past whatever was written, even when the fill failed.
        self.pos += (cursor.written() - prev_written) as u64;

        result
    }

    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        let content = Cursor::split(self).1;
        let len = content.len();
        // `try_reserve` reports allocation failure as an io::Error
        // instead of aborting the process.
        buf.try_reserve(len)?;
        buf.extend_from_slice(content);
        self.pos += len as u64;

        Ok(len)
    }

    fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
        // Validate the whole remaining tail as UTF-8 before copying any of it.
        let content =
            crate::str::from_utf8(Cursor::split(self).1).map_err(|_| io::Error::INVALID_UTF8)?;
        let len = content.len();
        buf.try_reserve(len)?;
        buf.push_str(content);
        self.pos += len as u64;

        Ok(len)
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> BufRead for Cursor<T>
where
    T: AsRef<[u8]>,
{
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        // The entire unread tail is always "buffered" and available at once.
        Ok(Cursor::split(self).1)
    }
    fn consume(&mut self, amt: usize) {
        self.pos += amt as u64;
    }
}
|
||||
|
||||
/// Non-resizing write implementation: copies as much of `buf` as fits
/// between the (clamped) cursor position and the end of `slice`, and
/// advances the cursor by the number of bytes actually written.
#[inline]
fn slice_write(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<usize> {
    // Clamp so a position past the end behaves as "no room left".
    let start = cmp::min(*pos_mut, slice.len() as u64) as usize;
    let written = (&mut slice[start..]).write(buf)?;
    *pos_mut += written as u64;
    Ok(written)
}
|
||||
|
||||
// Non-resizing vectored write: writes each buffer in order via `slice_write`.
#[inline]
fn slice_write_vectored(
    pos_mut: &mut u64,
    slice: &mut [u8],
    bufs: &[IoSlice<'_>],
) -> io::Result<usize> {
    let mut nwritten = 0;
    for buf in bufs {
        let n = slice_write(pos_mut, slice, buf)?;
        nwritten += n;
        // A short write means the fixed destination is full; stop early.
        if n < buf.len() {
            break;
        }
    }
    Ok(nwritten)
}
|
||||
|
||||
// Non-resizing `write_all`: a short write into the fixed slice is an error.
#[inline]
fn slice_write_all(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<()> {
    let n = slice_write(pos_mut, slice, buf)?;
    if n < buf.len() { Err(io::Error::WRITE_ALL_EOF) } else { Ok(()) }
}
|
||||
|
||||
// Non-resizing vectored `write_all`: every buffer must be written in full.
#[inline]
fn slice_write_all_vectored(
    pos_mut: &mut u64,
    slice: &mut [u8],
    bufs: &[IoSlice<'_>],
) -> io::Result<()> {
    for buf in bufs {
        let n = slice_write(pos_mut, slice, buf)?;
        // Any short write means the fixed-size destination ran out of room.
        if n < buf.len() {
            return Err(io::Error::WRITE_ALL_EOF);
        }
    }
    Ok(())
}
|
||||
|
||||
/// Reserves the required space, and pads the vec with 0s if necessary.
fn reserve_and_pad<A: Allocator>(
    pos_mut: &mut u64,
    vec: &mut Vec<u8, A>,
    buf_len: usize,
) -> io::Result<usize> {
    // A u64 position that does not fit in usize can never be a valid index
    // into the vector, so reject it up front.
    let pos: usize = (*pos_mut).try_into().map_err(|_| {
        io::const_error!(
            ErrorKind::InvalidInput,
            "cursor position exceeds maximum possible vector length",
        )
    })?;

    // For safety reasons, we don't want these numbers to overflow
    // otherwise our allocation won't be enough
    let desired_cap = pos.saturating_add(buf_len);
    if desired_cap > vec.capacity() {
        // We want our vec's total capacity
        // to have room for (pos+buf_len) bytes. Reserve allocates
        // based on additional elements from the length, so we need to
        // reserve the difference
        vec.reserve(desired_cap - vec.len());
    }
    // Pad if pos is above the current len.
    if pos > vec.len() {
        let diff = pos - vec.len();
        // Unfortunately, `resize()` would suffice but the optimiser does not
        // realise the `reserve` it does can be eliminated. So we do it manually
        // to eliminate that extra branch
        let spare = vec.spare_capacity_mut();
        debug_assert!(spare.len() >= diff);
        // Safety: we have allocated enough capacity for this.
        // And we are only writing, not reading
        unsafe {
            spare.get_unchecked_mut(..diff).fill(core::mem::MaybeUninit::new(0));
            vec.set_len(pos);
        }
    }

    // Returns the position as a usize index, now guaranteed <= vec.len().
    Ok(pos)
}
|
||||
|
||||
/// Writes the slice to the vec without allocating.
///
/// # Safety
///
/// `vec` must have `buf.len()` spare capacity.
unsafe fn vec_write_all_unchecked<A>(pos: usize, vec: &mut Vec<u8, A>, buf: &[u8]) -> usize
where
    A: Allocator,
{
    debug_assert!(vec.capacity() >= pos + buf.len());
    // SAFETY: the caller guarantees `pos + buf.len() <= vec.capacity()`,
    // so this raw copy stays inside the allocation.
    unsafe { vec.as_mut_ptr().add(pos).copy_from(buf.as_ptr(), buf.len()) };
    // Returns the write head after the copy, for chained calls.
    pos + buf.len()
}
|
||||
|
||||
/// Resizing `write_all` implementation for [`Cursor`].
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write_all<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
where
    A: Allocator,
{
    let buf_len = buf.len();
    let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;

    // Write the buf then progress the vec forward if necessary
    // Safety: we have ensured that the capacity is available
    // and that all bytes get written up to pos
    unsafe {
        pos = vec_write_all_unchecked(pos, vec, buf);
        // The length only ever grows: writing into the middle keeps the tail.
        if pos > vec.len() {
            vec.set_len(pos);
        }
    };

    // Bump us forward
    *pos_mut += buf_len as u64;
    Ok(buf_len)
}
|
||||
|
||||
/// Resizing `write_all_vectored` implementation for [`Cursor`].
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write_all_vectored<A>(
    pos_mut: &mut u64,
    vec: &mut Vec<u8, A>,
    bufs: &[IoSlice<'_>],
) -> io::Result<usize>
where
    A: Allocator,
{
    // For safety reasons, we don't want this sum to overflow ever.
    // If this saturates, the reserve should panic to avoid any unsound writing.
    let buf_len = bufs.iter().fold(0usize, |a, b| a.saturating_add(b.len()));
    let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;

    // Write the buf then progress the vec forward if necessary
    // Safety: we have ensured that the capacity is available
    // and that all bytes get written up to the last pos
    unsafe {
        for buf in bufs {
            pos = vec_write_all_unchecked(pos, vec, buf);
        }
        if pos > vec.len() {
            vec.set_len(pos);
        }
    }

    // Bump us forward
    *pos_mut += buf_len as u64;
    Ok(buf_len)
}
|
||||
|
||||
// Writer over a borrowed fixed-size slice: writes cannot grow the
// destination, so they come up short at the end of the slice.
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for Cursor<&mut [u8]> {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        slice_write(&mut self.pos, self.inner, buf)
    }

    #[inline]
    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        slice_write_vectored(&mut self.pos, self.inner, bufs)
    }

    #[inline]
    fn is_write_vectored(&self) -> bool {
        true
    }

    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        slice_write_all(&mut self.pos, self.inner, buf)
    }

    #[inline]
    fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
        slice_write_all_vectored(&mut self.pos, self.inner, bufs)
    }

    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        // In-memory writer: there is no buffering to flush.
        Ok(())
    }
}
|
||||
|
||||
#[stable(feature = "cursor_mut_vec", since = "1.25.0")]
|
||||
impl<A> Write for Cursor<&mut Vec<u8, A>>
|
||||
where
|
||||
A: Allocator,
|
||||
{
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
vec_write_all(&mut self.pos, self.inner, buf)
|
||||
}
|
||||
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
vec_write_all_vectored(&mut self.pos, self.inner, bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
|
||||
vec_write_all(&mut self.pos, self.inner, buf)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
vec_write_all_vectored(&mut self.pos, self.inner, bufs)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Writer over an owned Vec: identical to the `&mut Vec` impl, writes
// past the end grow and zero-pad the vector.
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Write for Cursor<Vec<u8, A>>
where
    A: Allocator,
{
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        vec_write_all(&mut self.pos, &mut self.inner, buf)
    }

    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        vec_write_all_vectored(&mut self.pos, &mut self.inner, bufs)
    }

    #[inline]
    fn is_write_vectored(&self) -> bool {
        true
    }

    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        vec_write_all(&mut self.pos, &mut self.inner, buf)?;
        Ok(())
    }

    fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
        vec_write_all_vectored(&mut self.pos, &mut self.inner, bufs)?;
        Ok(())
    }

    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
|
||||
|
||||
#[stable(feature = "cursor_box_slice", since = "1.5.0")]
|
||||
impl<A> Write for Cursor<Box<[u8], A>>
|
||||
where
|
||||
A: Allocator,
|
||||
{
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
slice_write(&mut self.pos, &mut self.inner, buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
|
||||
slice_write_all(&mut self.pos, &mut self.inner, buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
slice_write_all_vectored(&mut self.pos, &mut self.inner, bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "cursor_array", since = "1.61.0")]
|
||||
impl<const N: usize> Write for Cursor<[u8; N]> {
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
slice_write(&mut self.pos, &mut self.inner, buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
|
||||
slice_write_all(&mut self.pos, &mut self.inner, buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
slice_write_all_vectored(&mut self.pos, &mut self.inner, bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,567 +0,0 @@
|
||||
use crate::io::prelude::*;
|
||||
use crate::io::{Cursor, IoSlice, IoSliceMut, SeekFrom};
|
||||
|
||||
// Writing to a plain Vec<u8> always appends and reports the full length.
#[test]
fn test_vec_writer() {
    let mut writer = Vec::new();
    assert_eq!(writer.write(&[0]).unwrap(), 1);
    assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
    assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
    assert_eq!(
        writer
            .write_vectored(&[IoSlice::new(&[]), IoSlice::new(&[8, 9]), IoSlice::new(&[10])],)
            .unwrap(),
        3
    );
    let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    assert_eq!(writer, b);
}

// A Cursor<Vec> positioned past the end zero-pads up to that position
// before the first write lands.
#[test]
fn test_mem_writer() {
    let mut writer = Cursor::new(Vec::new());
    writer.set_position(10);
    assert_eq!(writer.write(&[0]).unwrap(), 1);
    assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
    assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
    assert_eq!(
        writer
            .write_vectored(&[IoSlice::new(&[]), IoSlice::new(&[8, 9]), IoSlice::new(&[10])],)
            .unwrap(),
        3
    );
    let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    assert_eq!(&writer.get_ref()[..10], &[0; 10]);
    assert_eq!(&writer.get_ref()[10..], b);
}

// Writing at position 0 overwrites existing vector contents in place.
#[test]
fn test_mem_writer_preallocated() {
    let mut writer = Cursor::new(vec![0, 0, 0, 0, 0, 0, 0, 0, 8, 9, 10]);
    assert_eq!(writer.write(&[0]).unwrap(), 1);
    assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
    assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
    let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    assert_eq!(&writer.get_ref()[..], b);
}

// Same behaviour through a Cursor<&mut Vec> as through Cursor<Vec>.
#[test]
fn test_mem_mut_writer() {
    let mut vec = Vec::new();
    let mut writer = Cursor::new(&mut vec);
    assert_eq!(writer.write(&[0]).unwrap(), 1);
    assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
    assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
    assert_eq!(
        writer
            .write_vectored(&[IoSlice::new(&[]), IoSlice::new(&[8, 9]), IoSlice::new(&[10])],)
            .unwrap(),
        3
    );
    let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    assert_eq!(&writer.get_ref()[..], b);
}
|
||||
|
||||
// Shared driver: exercises a 9-byte fixed-capacity cursor writer,
// including the short write at the end and the write-after-full case.
fn test_slice_writer<T>(writer: &mut Cursor<T>)
where
    T: AsRef<[u8]>,
    Cursor<T>: Write,
{
    assert_eq!(writer.position(), 0);
    assert_eq!(writer.write(&[0]).unwrap(), 1);
    assert_eq!(writer.position(), 1);
    assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
    assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
    assert_eq!(writer.position(), 8);
    assert_eq!(writer.write(&[]).unwrap(), 0);
    assert_eq!(writer.position(), 8);

    // Only one byte of room remains, then the writer is full.
    assert_eq!(writer.write(&[8, 9]).unwrap(), 1);
    assert_eq!(writer.write(&[10]).unwrap(), 0);
    let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8];
    assert_eq!(writer.get_ref().as_ref(), b);
}

// Vectored counterpart of `test_slice_writer`.
fn test_slice_writer_vectored<T>(writer: &mut Cursor<T>)
where
    T: AsRef<[u8]>,
    Cursor<T>: Write,
{
    assert_eq!(writer.position(), 0);
    assert_eq!(writer.write_vectored(&[IoSlice::new(&[0])]).unwrap(), 1);
    assert_eq!(writer.position(), 1);
    assert_eq!(
        writer.write_vectored(&[IoSlice::new(&[1, 2, 3]), IoSlice::new(&[4, 5, 6, 7]),]).unwrap(),
        7,
    );
    assert_eq!(writer.position(), 8);
    assert_eq!(writer.write_vectored(&[]).unwrap(), 0);
    assert_eq!(writer.position(), 8);

    assert_eq!(writer.write_vectored(&[IoSlice::new(&[8, 9])]).unwrap(), 1);
    assert_eq!(writer.write_vectored(&[IoSlice::new(&[10])]).unwrap(), 0);
    let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8];
    assert_eq!(writer.get_ref().as_ref(), b);
}

#[test]
fn test_box_slice_writer() {
    let mut writer = Cursor::new(vec![0u8; 9].into_boxed_slice());
    test_slice_writer(&mut writer);
}

#[test]
fn test_box_slice_writer_vectored() {
    let mut writer = Cursor::new(vec![0u8; 9].into_boxed_slice());
    test_slice_writer_vectored(&mut writer);
}

#[test]
fn test_array_writer() {
    let mut writer = Cursor::new([0u8; 9]);
    test_slice_writer(&mut writer);
}

#[test]
fn test_array_writer_vectored() {
    let mut writer = Cursor::new([0u8; 9]);
    test_slice_writer_vectored(&mut writer);
}

#[test]
fn test_buf_writer() {
    let mut buf = [0 as u8; 9];
    let mut writer = Cursor::new(&mut buf[..]);
    test_slice_writer(&mut writer);
}

#[test]
fn test_buf_writer_vectored() {
    let mut buf = [0 as u8; 9];
    let mut writer = Cursor::new(&mut buf[..]);
    test_slice_writer_vectored(&mut writer);
}

// Seeking moves the write head; writes land at the seeked position.
#[test]
fn test_buf_writer_seek() {
    let mut buf = [0 as u8; 8];
    {
        let mut writer = Cursor::new(&mut buf[..]);
        assert_eq!(writer.position(), 0);
        assert_eq!(writer.write(&[1]).unwrap(), 1);
        assert_eq!(writer.position(), 1);

        assert_eq!(writer.seek(SeekFrom::Start(2)).unwrap(), 2);
        assert_eq!(writer.position(), 2);
        assert_eq!(writer.write(&[2]).unwrap(), 1);
        assert_eq!(writer.position(), 3);

        assert_eq!(writer.seek(SeekFrom::Current(-2)).unwrap(), 1);
        assert_eq!(writer.position(), 1);
        assert_eq!(writer.write(&[3]).unwrap(), 1);
        assert_eq!(writer.position(), 2);

        assert_eq!(writer.seek(SeekFrom::End(-1)).unwrap(), 7);
        assert_eq!(writer.position(), 7);
        assert_eq!(writer.write(&[4]).unwrap(), 1);
        assert_eq!(writer.position(), 8);
    }
    let b: &[_] = &[1, 3, 2, 0, 0, 0, 0, 4];
    assert_eq!(buf, b);
}

// A full fixed-size writer reports short (then zero) writes, not errors.
#[test]
fn test_buf_writer_error() {
    let mut buf = [0 as u8; 2];
    let mut writer = Cursor::new(&mut buf[..]);
    assert_eq!(writer.write(&[0]).unwrap(), 1);
    assert_eq!(writer.write(&[0, 0]).unwrap(), 1);
    assert_eq!(writer.write(&[0, 0]).unwrap(), 0);
}
|
||||
|
||||
// Reading from a Cursor<Vec> advances the position and stops at EOF.
#[test]
fn test_mem_reader() {
    let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7]);
    let mut buf = [];
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
    assert_eq!(reader.position(), 0);
    let mut buf = [0];
    assert_eq!(reader.read(&mut buf).unwrap(), 1);
    assert_eq!(reader.position(), 1);
    let b: &[_] = &[0];
    assert_eq!(buf, b);
    let mut buf = [0; 4];
    assert_eq!(reader.read(&mut buf).unwrap(), 4);
    assert_eq!(reader.position(), 5);
    let b: &[_] = &[1, 2, 3, 4];
    assert_eq!(buf, b);
    assert_eq!(reader.read(&mut buf).unwrap(), 3);
    let b: &[_] = &[5, 6, 7];
    assert_eq!(&buf[..3], b);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}

// Vectored reads fill buffers in order and skip empty ones.
#[test]
fn test_mem_reader_vectored() {
    let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7]);
    let mut buf = [];
    assert_eq!(reader.read_vectored(&mut [IoSliceMut::new(&mut buf)]).unwrap(), 0);
    assert_eq!(reader.position(), 0);
    let mut buf = [0];
    assert_eq!(
        reader.read_vectored(&mut [IoSliceMut::new(&mut []), IoSliceMut::new(&mut buf),]).unwrap(),
        1,
    );
    assert_eq!(reader.position(), 1);
    let b: &[_] = &[0];
    assert_eq!(buf, b);
    let mut buf1 = [0; 4];
    let mut buf2 = [0; 4];
    assert_eq!(
        reader
            .read_vectored(&mut [IoSliceMut::new(&mut buf1), IoSliceMut::new(&mut buf2),])
            .unwrap(),
        7,
    );
    let b1: &[_] = &[1, 2, 3, 4];
    let b2: &[_] = &[5, 6, 7];
    assert_eq!(buf1, b1);
    assert_eq!(&buf2[..3], b2);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}

// Same read behaviour through a Cursor<Box<[u8]>>.
#[test]
fn test_boxed_slice_reader() {
    let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7].into_boxed_slice());
    let mut buf = [];
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
    assert_eq!(reader.position(), 0);
    let mut buf = [0];
    assert_eq!(reader.read(&mut buf).unwrap(), 1);
    assert_eq!(reader.position(), 1);
    let b: &[_] = &[0];
    assert_eq!(buf, b);
    let mut buf = [0; 4];
    assert_eq!(reader.read(&mut buf).unwrap(), 4);
    assert_eq!(reader.position(), 5);
    let b: &[_] = &[1, 2, 3, 4];
    assert_eq!(buf, b);
    assert_eq!(reader.read(&mut buf).unwrap(), 3);
    let b: &[_] = &[5, 6, 7];
    assert_eq!(&buf[..3], b);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}

#[test]
fn test_boxed_slice_reader_vectored() {
    let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7].into_boxed_slice());
    let mut buf = [];
    assert_eq!(reader.read_vectored(&mut [IoSliceMut::new(&mut buf)]).unwrap(), 0);
    assert_eq!(reader.position(), 0);
    let mut buf = [0];
    assert_eq!(
        reader.read_vectored(&mut [IoSliceMut::new(&mut []), IoSliceMut::new(&mut buf),]).unwrap(),
        1,
    );
    assert_eq!(reader.position(), 1);
    let b: &[_] = &[0];
    assert_eq!(buf, b);
    let mut buf1 = [0; 4];
    let mut buf2 = [0; 4];
    assert_eq!(
        reader
            .read_vectored(&mut [IoSliceMut::new(&mut buf1), IoSliceMut::new(&mut buf2)],)
            .unwrap(),
        7,
    );
    let b1: &[_] = &[1, 2, 3, 4];
    let b2: &[_] = &[5, 6, 7];
    assert_eq!(buf1, b1);
    assert_eq!(&buf2[..3], b2);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}

#[test]
fn read_to_end() {
    let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7]);
    let mut v = Vec::new();
    reader.read_to_end(&mut v).unwrap();
    assert_eq!(v, [0, 1, 2, 3, 4, 5, 6, 7]);
}

// `&[u8]` itself implements Read; the slice shrinks as bytes are consumed.
#[test]
fn test_slice_reader() {
    let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
    let reader = &mut &in_buf[..];
    let mut buf = [];
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
    let mut buf = [0];
    assert_eq!(reader.read(&mut buf).unwrap(), 1);
    assert_eq!(reader.len(), 7);
    let b: &[_] = &[0];
    assert_eq!(&buf[..], b);
    let mut buf = [0; 4];
    assert_eq!(reader.read(&mut buf).unwrap(), 4);
    assert_eq!(reader.len(), 3);
    let b: &[_] = &[1, 2, 3, 4];
    assert_eq!(&buf[..], b);
    assert_eq!(reader.read(&mut buf).unwrap(), 3);
    let b: &[_] = &[5, 6, 7];
    assert_eq!(&buf[..3], b);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}
|
||||
|
||||
// Vectored reads on a bare `&[u8]` reader.
#[test]
fn test_slice_reader_vectored() {
    let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
    let reader = &mut &in_buf[..];
    let mut buf = [];
    assert_eq!(reader.read_vectored(&mut [IoSliceMut::new(&mut buf)]).unwrap(), 0);
    let mut buf = [0];
    assert_eq!(
        reader.read_vectored(&mut [IoSliceMut::new(&mut []), IoSliceMut::new(&mut buf),]).unwrap(),
        1,
    );
    assert_eq!(reader.len(), 7);
    let b: &[_] = &[0];
    assert_eq!(buf, b);
    let mut buf1 = [0; 4];
    let mut buf2 = [0; 4];
    assert_eq!(
        reader
            .read_vectored(&mut [IoSliceMut::new(&mut buf1), IoSliceMut::new(&mut buf2)],)
            .unwrap(),
        7,
    );
    let b1: &[_] = &[1, 2, 3, 4];
    let b2: &[_] = &[5, 6, 7];
    assert_eq!(buf1, b1);
    assert_eq!(&buf2[..3], b2);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}

// read_exact succeeds while data remains, then errors once exhausted.
#[test]
fn test_read_exact() {
    let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
    let reader = &mut &in_buf[..];
    let mut buf = [];
    assert!(reader.read_exact(&mut buf).is_ok());
    let mut buf = [8];
    assert!(reader.read_exact(&mut buf).is_ok());
    assert_eq!(buf[0], 0);
    assert_eq!(reader.len(), 7);
    let mut buf = [0, 0, 0, 0, 0, 0, 0];
    assert!(reader.read_exact(&mut buf).is_ok());
    assert_eq!(buf, [1, 2, 3, 4, 5, 6, 7]);
    assert_eq!(reader.len(), 0);
    let mut buf = [0];
    assert!(reader.read_exact(&mut buf).is_err());
}

// Reading through a Cursor over a borrowed slice tracks `position()`.
#[test]
fn test_buf_reader() {
    let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
    let mut reader = Cursor::new(&in_buf[..]);
    let mut buf = [];
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
    assert_eq!(reader.position(), 0);
    let mut buf = [0];
    assert_eq!(reader.read(&mut buf).unwrap(), 1);
    assert_eq!(reader.position(), 1);
    let b: &[_] = &[0];
    assert_eq!(buf, b);
    let mut buf = [0; 4];
    assert_eq!(reader.read(&mut buf).unwrap(), 4);
    assert_eq!(reader.position(), 5);
    let b: &[_] = &[1, 2, 3, 4];
    assert_eq!(buf, b);
    assert_eq!(reader.read(&mut buf).unwrap(), 3);
    let b: &[_] = &[5, 6, 7];
    assert_eq!(&buf[..3], b);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}
|
||||
|
||||
// Seeking past the end is allowed; subsequent fixed-size reads/writes
// simply report 0 bytes.
#[test]
fn seek_past_end() {
    let buf = [0xff];
    let mut r = Cursor::new(&buf[..]);
    assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
    assert_eq!(r.read(&mut [0]).unwrap(), 0);

    let mut r = Cursor::new(vec![10]);
    assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
    assert_eq!(r.read(&mut [0]).unwrap(), 0);

    let mut buf = [0];
    let mut r = Cursor::new(&mut buf[..]);
    assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
    assert_eq!(r.write(&[3]).unwrap(), 0);

    let mut r = Cursor::new(vec![10].into_boxed_slice());
    assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
    assert_eq!(r.write(&[3]).unwrap(), 0);
}

// Positions beyond i64::MAX are valid u64 positions; only a true u64
// overflow (or going negative) is rejected.
#[test]
fn seek_past_i64() {
    let buf = [0xff];
    let mut r = Cursor::new(&buf[..]);
    assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6);
    assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6);
    assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006);
    assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006);
    assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err());
    assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6);

    let mut r = Cursor::new(vec![10]);
    assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6);
    assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6);
    assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006);
    assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006);
    assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err());
    assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6);

    let mut buf = [0];
    let mut r = Cursor::new(&mut buf[..]);
    assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6);
    assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6);
    assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006);
    assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006);
    assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err());
    assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6);

    let mut r = Cursor::new(vec![10].into_boxed_slice());
    assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6);
    assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6);
    assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006);
    assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006);
    assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err());
    assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6);
}

// Seeking to a negative resolved position is an error for every backing type.
#[test]
fn seek_before_0() {
    let buf = [0xff];
    let mut r = Cursor::new(&buf[..]);
    assert!(r.seek(SeekFrom::End(-2)).is_err());

    let mut r = Cursor::new(vec![10]);
    assert!(r.seek(SeekFrom::End(-2)).is_err());

    let mut buf = [0];
    let mut r = Cursor::new(&mut buf[..]);
    assert!(r.seek(SeekFrom::End(-2)).is_err());

    let mut r = Cursor::new(vec![10].into_boxed_slice());
    assert!(r.seek(SeekFrom::End(-2)).is_err());
}
|
||||
|
||||
#[test]
|
||||
fn test_seekable_mem_writer() {
|
||||
let mut writer = Cursor::new(Vec::<u8>::new());
|
||||
assert_eq!(writer.position(), 0);
|
||||
assert_eq!(writer.write(&[0]).unwrap(), 1);
|
||||
assert_eq!(writer.position(), 1);
|
||||
assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
|
||||
assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
|
||||
assert_eq!(writer.position(), 8);
|
||||
let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7];
|
||||
assert_eq!(&writer.get_ref()[..], b);
|
||||
|
||||
assert_eq!(writer.seek(SeekFrom::Start(0)).unwrap(), 0);
|
||||
assert_eq!(writer.position(), 0);
|
||||
assert_eq!(writer.write(&[3, 4]).unwrap(), 2);
|
||||
let b: &[_] = &[3, 4, 2, 3, 4, 5, 6, 7];
|
||||
assert_eq!(&writer.get_ref()[..], b);
|
||||
|
||||
assert_eq!(writer.seek(SeekFrom::Current(1)).unwrap(), 3);
|
||||
assert_eq!(writer.write(&[0, 1]).unwrap(), 2);
|
||||
let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 7];
|
||||
assert_eq!(&writer.get_ref()[..], b);
|
||||
|
||||
assert_eq!(writer.seek(SeekFrom::End(-1)).unwrap(), 7);
|
||||
assert_eq!(writer.write(&[1, 2]).unwrap(), 2);
|
||||
let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2];
|
||||
assert_eq!(&writer.get_ref()[..], b);
|
||||
|
||||
assert_eq!(writer.seek(SeekFrom::End(1)).unwrap(), 10);
|
||||
assert_eq!(writer.write(&[1]).unwrap(), 1);
|
||||
let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2, 0, 1];
|
||||
assert_eq!(&writer.get_ref()[..], b);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn vec_seek_past_end() {
|
||||
let mut r = Cursor::new(Vec::new());
|
||||
assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
|
||||
assert_eq!(r.write(&[3]).unwrap(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn vec_seek_before_0() {
|
||||
let mut r = Cursor::new(Vec::new());
|
||||
assert!(r.seek(SeekFrom::End(-2)).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(target_pointer_width = "32")]
|
||||
fn vec_seek_and_write_past_usize_max() {
|
||||
let mut c = Cursor::new(Vec::new());
|
||||
c.set_position(usize::MAX as u64 + 1);
|
||||
assert!(c.write_all(&[1, 2, 3]).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_partial_eq() {
|
||||
assert_eq!(Cursor::new(Vec::<u8>::new()), Cursor::new(Vec::<u8>::new()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_eq() {
|
||||
struct AssertEq<T: Eq>(pub T);
|
||||
|
||||
let _: AssertEq<Cursor<Vec<u8>>> = AssertEq(Cursor::new(Vec::new()));
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn const_cursor() {
|
||||
const CURSOR: Cursor<&[u8]> = Cursor::new(&[0]);
|
||||
const _: &&[u8] = CURSOR.get_ref();
|
||||
const _: u64 = CURSOR.position();
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_write_vec(b: &mut test::Bencher) {
|
||||
let slice = &[1; 128];
|
||||
|
||||
b.iter(|| {
|
||||
let mut buf = b"some random data to overwrite".to_vec();
|
||||
let mut cursor = Cursor::new(&mut buf);
|
||||
|
||||
let _ = cursor.write_all(slice);
|
||||
test::black_box(&cursor);
|
||||
})
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_write_vec_vectored(b: &mut test::Bencher) {
|
||||
let slices = [
|
||||
IoSlice::new(&[1; 128]),
|
||||
IoSlice::new(&[2; 256]),
|
||||
IoSlice::new(&[3; 512]),
|
||||
IoSlice::new(&[4; 1024]),
|
||||
IoSlice::new(&[5; 2048]),
|
||||
IoSlice::new(&[6; 4096]),
|
||||
IoSlice::new(&[7; 8192]),
|
||||
IoSlice::new(&[8; 8192 * 2]),
|
||||
];
|
||||
|
||||
b.iter(|| {
|
||||
let mut buf = b"some random data to overwrite".to_vec();
|
||||
let mut cursor = Cursor::new(&mut buf);
|
||||
|
||||
let mut slices = slices;
|
||||
let _ = cursor.write_all_vectored(&mut slices);
|
||||
test::black_box(&cursor);
|
||||
})
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,411 +0,0 @@
|
||||
//! This is a densely packed error representation which is used on targets with
|
||||
//! 64-bit pointers.
|
||||
//!
|
||||
//! (Note that `bitpacked` vs `unpacked` here has no relationship to
|
||||
//! `#[repr(packed)]`, it just refers to attempting to use any available bits in
|
||||
//! a more clever manner than `rustc`'s default layout algorithm would).
|
||||
//!
|
||||
//! Conceptually, it stores the same data as the "unpacked" equivalent we use on
|
||||
//! other targets. Specifically, you can imagine it as an optimized version of
|
||||
//! the following enum (which is roughly equivalent to what's stored by
|
||||
//! `repr_unpacked::Repr`, e.g. `super::ErrorData<Box<Custom>>`):
|
||||
//!
|
||||
//! ```ignore (exposition-only)
|
||||
//! enum ErrorData {
|
||||
//! Os(i32),
|
||||
//! Simple(ErrorKind),
|
||||
//! SimpleMessage(&'static SimpleMessage),
|
||||
//! Custom(Box<Custom>),
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! However, it packs this data into a 64bit non-zero value.
|
||||
//!
|
||||
//! This optimization not only allows `io::Error` to occupy a single pointer,
|
||||
//! but improves `io::Result` as well, especially for situations like
|
||||
//! `io::Result<()>` (which is now 64 bits) or `io::Result<u64>` (which is now
|
||||
//! 128 bits), which are quite common.
|
||||
//!
|
||||
//! # Layout
|
||||
//! Tagged values are 64 bits, with the 2 least significant bits used for the
|
||||
//! tag. This means there are 4 "variants":
|
||||
//!
|
||||
//! - **Tag 0b00**: The first variant is equivalent to
|
||||
//! `ErrorData::SimpleMessage`, and holds a `&'static SimpleMessage` directly.
|
||||
//!
|
||||
//! `SimpleMessage` has an alignment >= 4 (which is requested with
|
||||
//! `#[repr(align)]` and checked statically at the bottom of this file), which
|
||||
//! means every `&'static SimpleMessage` should have the both tag bits as 0,
|
||||
//! meaning its tagged and untagged representation are equivalent.
|
||||
//!
|
||||
//! This means we can skip tagging it, which is necessary as this variant can
|
||||
//! be constructed from a `const fn`, which probably cannot tag pointers (or
|
||||
//! at least it would be difficult).
|
||||
//!
|
||||
//! - **Tag 0b01**: The other pointer variant holds the data for
|
||||
//! `ErrorData::Custom` and the remaining 62 bits are used to store a
|
||||
//! `Box<Custom>`. `Custom` also has alignment >= 4, so the bottom two bits
|
||||
//! are free to use for the tag.
|
||||
//!
|
||||
//! The only important thing to note is that `ptr::wrapping_add` and
|
||||
//! `ptr::wrapping_sub` are used to tag the pointer, rather than bitwise
|
||||
//! operations. This should preserve the pointer's provenance, which would
|
||||
//! otherwise be lost.
|
||||
//!
|
||||
//! - **Tag 0b10**: Holds the data for `ErrorData::Os(i32)`. We store the `i32`
|
||||
//! in the pointer's most significant 32 bits, and don't use the bits `2..32`
|
||||
//! for anything. Using the top 32 bits is just to let us easily recover the
|
||||
//! `i32` code with the correct sign.
|
||||
//!
|
||||
//! - **Tag 0b11**: Holds the data for `ErrorData::Simple(ErrorKind)`. This
|
||||
//! stores the `ErrorKind` in the top 32 bits as well, although it doesn't
|
||||
//! occupy nearly that many. Most of the bits are unused here, but it's not
|
||||
//! like we need them for anything else yet.
|
||||
//!
|
||||
//! # Use of `NonNull<()>`
|
||||
//!
|
||||
//! Everything is stored in a `NonNull<()>`, which is odd, but actually serves a
|
||||
//! purpose.
|
||||
//!
|
||||
//! Conceptually you might think of this more like:
|
||||
//!
|
||||
//! ```ignore (exposition-only)
|
||||
//! union Repr {
|
||||
//! // holds integer (Simple/Os) variants, and
|
||||
//! // provides access to the tag bits.
|
||||
//! bits: NonZero<u64>,
|
||||
//! // Tag is 0, so this is stored untagged.
|
||||
//! msg: &'static SimpleMessage,
|
||||
//! // Tagged (offset) `Box<Custom>` pointer.
|
||||
//! tagged_custom: NonNull<()>,
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! But there are a few problems with this:
|
||||
//!
|
||||
//! 1. Union access is equivalent to a transmute, so this representation would
|
||||
//! require we transmute between integers and pointers in at least one
|
||||
//! direction, which may be UB (and even if not, it is likely harder for a
|
||||
//! compiler to reason about than explicit ptr->int operations).
|
||||
//!
|
||||
//! 2. Even if all fields of a union have a niche, the union itself doesn't,
|
||||
//! although this may change in the future. This would make things like
|
||||
//! `io::Result<()>` and `io::Result<usize>` larger, which defeats part of
|
||||
//! the motivation of this bitpacking.
|
||||
//!
|
||||
//! Storing everything in a `NonZero<usize>` (or some other integer) would be a
|
||||
//! bit more traditional for pointer tagging, but it would lose provenance
|
||||
//! information, couldn't be constructed from a `const fn`, and would probably
|
||||
//! run into other issues as well.
|
||||
//!
|
||||
//! The `NonNull<()>` seems like the only alternative, even if it's fairly odd
|
||||
//! to use a pointer type to store something that may hold an integer, some of
|
||||
//! the time.
|
||||
|
||||
use core::marker::PhantomData;
|
||||
use core::num::NonZeroUsize;
|
||||
use core::ptr::NonNull;
|
||||
|
||||
use super::{Custom, ErrorData, ErrorKind, RawOsError, SimpleMessage};
|
||||
|
||||
// The 2 least-significant bits are used as tag.
|
||||
const TAG_MASK: usize = 0b11;
|
||||
const TAG_SIMPLE_MESSAGE: usize = 0b00;
|
||||
const TAG_CUSTOM: usize = 0b01;
|
||||
const TAG_OS: usize = 0b10;
|
||||
const TAG_SIMPLE: usize = 0b11;
|
||||
|
||||
/// The internal representation.
|
||||
///
|
||||
/// See the module docs for more, this is just a way to hack in a check that we
|
||||
/// indeed are not unwind-safe.
|
||||
///
|
||||
/// ```compile_fail,E0277
|
||||
/// fn is_unwind_safe<T: core::panic::UnwindSafe>() {}
|
||||
/// is_unwind_safe::<std::io::Error>();
|
||||
/// ```
|
||||
#[repr(transparent)]
|
||||
#[rustc_insignificant_dtor]
|
||||
pub(super) struct Repr(NonNull<()>, PhantomData<ErrorData<Box<Custom>>>);
|
||||
|
||||
// All the types `Repr` stores internally are Send + Sync, and so is it.
|
||||
unsafe impl Send for Repr {}
|
||||
unsafe impl Sync for Repr {}
|
||||
|
||||
impl Repr {
|
||||
pub(super) fn new_custom(b: Box<Custom>) -> Self {
|
||||
let p = Box::into_raw(b).cast::<u8>();
|
||||
// Should only be possible if an allocator handed out a pointer with
|
||||
// wrong alignment.
|
||||
debug_assert_eq!(p.addr() & TAG_MASK, 0);
|
||||
// Note: We know `TAG_CUSTOM <= size_of::<Custom>()` (static_assert at
|
||||
// end of file), and both the start and end of the expression must be
|
||||
// valid without address space wraparound due to `Box`'s semantics.
|
||||
//
|
||||
// This means it would be correct to implement this using `ptr::add`
|
||||
// (rather than `ptr::wrapping_add`), but it's unclear this would give
|
||||
// any benefit, so we just use `wrapping_add` instead.
|
||||
let tagged = p.wrapping_add(TAG_CUSTOM).cast::<()>();
|
||||
// Safety: `TAG_CUSTOM + p` is the same as `TAG_CUSTOM | p`,
|
||||
// because `p`'s alignment means it isn't allowed to have any of the
|
||||
// `TAG_BITS` set (you can verify that addition and bitwise-or are the
|
||||
// same when the operands have no bits in common using a truth table).
|
||||
//
|
||||
// Then, `TAG_CUSTOM | p` is not zero, as that would require
|
||||
// `TAG_CUSTOM` and `p` both be zero, and neither is (as `p` came from a
|
||||
// box, and `TAG_CUSTOM` just... isn't zero -- it's `0b01`). Therefore,
|
||||
// `TAG_CUSTOM + p` isn't zero and so `tagged` can't be, and the
|
||||
// `new_unchecked` is safe.
|
||||
let res = Self(unsafe { NonNull::new_unchecked(tagged) }, PhantomData);
|
||||
// quickly smoke-check we encoded the right thing (This generally will
|
||||
// only run in std's tests, unless the user uses -Zbuild-std)
|
||||
debug_assert!(matches!(res.data(), ErrorData::Custom(_)), "repr(custom) encoding failed");
|
||||
res
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(super) fn new_os(code: RawOsError) -> Self {
|
||||
let utagged = ((code as usize) << 32) | TAG_OS;
|
||||
// Safety: `TAG_OS` is not zero, so the result of the `|` is not 0.
|
||||
let res = Self(
|
||||
NonNull::without_provenance(unsafe { NonZeroUsize::new_unchecked(utagged) }),
|
||||
PhantomData,
|
||||
);
|
||||
// quickly smoke-check we encoded the right thing (This generally will
|
||||
// only run in std's tests, unless the user uses -Zbuild-std)
|
||||
debug_assert!(
|
||||
matches!(res.data(), ErrorData::Os(c) if c == code),
|
||||
"repr(os) encoding failed for {code}"
|
||||
);
|
||||
res
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(super) fn new_simple(kind: ErrorKind) -> Self {
|
||||
let utagged = ((kind as usize) << 32) | TAG_SIMPLE;
|
||||
// Safety: `TAG_SIMPLE` is not zero, so the result of the `|` is not 0.
|
||||
let res = Self(
|
||||
NonNull::without_provenance(unsafe { NonZeroUsize::new_unchecked(utagged) }),
|
||||
PhantomData,
|
||||
);
|
||||
// quickly smoke-check we encoded the right thing (This generally will
|
||||
// only run in std's tests, unless the user uses -Zbuild-std)
|
||||
debug_assert!(
|
||||
matches!(res.data(), ErrorData::Simple(k) if k == kind),
|
||||
"repr(simple) encoding failed {:?}",
|
||||
kind,
|
||||
);
|
||||
res
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(super) const fn new_simple_message(m: &'static SimpleMessage) -> Self {
|
||||
// Safety: References are never null.
|
||||
Self(unsafe { NonNull::new_unchecked(m as *const _ as *mut ()) }, PhantomData)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(super) fn data(&self) -> ErrorData<&Custom> {
|
||||
// Safety: We're a Repr, decode_repr is fine.
|
||||
unsafe { decode_repr(self.0, |c| &*c) }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(super) fn data_mut(&mut self) -> ErrorData<&mut Custom> {
|
||||
// Safety: We're a Repr, decode_repr is fine.
|
||||
unsafe { decode_repr(self.0, |c| &mut *c) }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(super) fn into_data(self) -> ErrorData<Box<Custom>> {
|
||||
let this = core::mem::ManuallyDrop::new(self);
|
||||
// Safety: We're a Repr, decode_repr is fine. The `Box::from_raw` is
|
||||
// safe because we prevent double-drop using `ManuallyDrop`.
|
||||
unsafe { decode_repr(this.0, |p| Box::from_raw(p)) }
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Repr {
|
||||
#[inline]
|
||||
fn drop(&mut self) {
|
||||
// Safety: We're a Repr, decode_repr is fine. The `Box::from_raw` is
|
||||
// safe because we're being dropped.
|
||||
unsafe {
|
||||
let _ = decode_repr(self.0, |p| Box::<Custom>::from_raw(p));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Shared helper to decode a `Repr`'s internal pointer into an ErrorData.
|
||||
//
|
||||
// Safety: `ptr`'s bits should be encoded as described in the document at the
|
||||
// top (it should `some_repr.0`)
|
||||
#[inline]
|
||||
unsafe fn decode_repr<C, F>(ptr: NonNull<()>, make_custom: F) -> ErrorData<C>
|
||||
where
|
||||
F: FnOnce(*mut Custom) -> C,
|
||||
{
|
||||
let bits = ptr.as_ptr().addr();
|
||||
match bits & TAG_MASK {
|
||||
TAG_OS => {
|
||||
let code = ((bits as i64) >> 32) as RawOsError;
|
||||
ErrorData::Os(code)
|
||||
}
|
||||
TAG_SIMPLE => {
|
||||
let kind_bits = (bits >> 32) as u32;
|
||||
let kind = kind_from_prim(kind_bits).unwrap_or_else(|| {
|
||||
debug_assert!(false, "Invalid io::error::Repr bits: `Repr({:#018x})`", bits);
|
||||
// This means the `ptr` passed in was not valid, which violates
|
||||
// the unsafe contract of `decode_repr`.
|
||||
//
|
||||
// Using this rather than unwrap meaningfully improves the code
|
||||
// for callers which only care about one variant (usually
|
||||
// `Custom`)
|
||||
unsafe { core::hint::unreachable_unchecked() };
|
||||
});
|
||||
ErrorData::Simple(kind)
|
||||
}
|
||||
TAG_SIMPLE_MESSAGE => {
|
||||
// SAFETY: per tag
|
||||
unsafe { ErrorData::SimpleMessage(&*ptr.cast::<SimpleMessage>().as_ptr()) }
|
||||
}
|
||||
TAG_CUSTOM => {
|
||||
// It would be correct for us to use `ptr::byte_sub` here (see the
|
||||
// comment above the `wrapping_add` call in `new_custom` for why),
|
||||
// but it isn't clear that it makes a difference, so we don't.
|
||||
let custom = ptr.as_ptr().wrapping_byte_sub(TAG_CUSTOM).cast::<Custom>();
|
||||
ErrorData::Custom(make_custom(custom))
|
||||
}
|
||||
_ => {
|
||||
// Can't happen, and compiler can tell
|
||||
unreachable!();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This compiles to the same code as the check+transmute, but doesn't require
|
||||
// unsafe, or to hard-code max ErrorKind or its size in a way the compiler
|
||||
// couldn't verify.
|
||||
#[inline]
|
||||
fn kind_from_prim(ek: u32) -> Option<ErrorKind> {
|
||||
macro_rules! from_prim {
|
||||
($prim:expr => $Enum:ident { $($Variant:ident),* $(,)? }) => {{
|
||||
// Force a compile error if the list gets out of date.
|
||||
const _: fn(e: $Enum) = |e: $Enum| match e {
|
||||
$($Enum::$Variant => ()),*
|
||||
};
|
||||
match $prim {
|
||||
$(v if v == ($Enum::$Variant as _) => Some($Enum::$Variant),)*
|
||||
_ => None,
|
||||
}
|
||||
}}
|
||||
}
|
||||
from_prim!(ek => ErrorKind {
|
||||
NotFound,
|
||||
PermissionDenied,
|
||||
ConnectionRefused,
|
||||
ConnectionReset,
|
||||
HostUnreachable,
|
||||
NetworkUnreachable,
|
||||
ConnectionAborted,
|
||||
NotConnected,
|
||||
AddrInUse,
|
||||
AddrNotAvailable,
|
||||
NetworkDown,
|
||||
BrokenPipe,
|
||||
AlreadyExists,
|
||||
WouldBlock,
|
||||
NotADirectory,
|
||||
IsADirectory,
|
||||
DirectoryNotEmpty,
|
||||
ReadOnlyFilesystem,
|
||||
FilesystemLoop,
|
||||
StaleNetworkFileHandle,
|
||||
InvalidInput,
|
||||
InvalidData,
|
||||
TimedOut,
|
||||
WriteZero,
|
||||
StorageFull,
|
||||
NotSeekable,
|
||||
QuotaExceeded,
|
||||
FileTooLarge,
|
||||
ResourceBusy,
|
||||
ExecutableFileBusy,
|
||||
Deadlock,
|
||||
CrossesDevices,
|
||||
TooManyLinks,
|
||||
InvalidFilename,
|
||||
ArgumentListTooLong,
|
||||
Interrupted,
|
||||
Other,
|
||||
UnexpectedEof,
|
||||
Unsupported,
|
||||
OutOfMemory,
|
||||
InProgress,
|
||||
Uncategorized,
|
||||
})
|
||||
}
|
||||
|
||||
// Some static checking to alert us if a change breaks any of the assumptions
|
||||
// that our encoding relies on for correctness and soundness. (Some of these are
|
||||
// a bit overly thorough/cautious, admittedly)
|
||||
//
|
||||
// If any of these are hit on a platform that std supports, we should likely
|
||||
// just use `repr_unpacked.rs` there instead (unless the fix is easy).
|
||||
macro_rules! static_assert {
|
||||
($condition:expr) => {
|
||||
const _: () = assert!($condition);
|
||||
};
|
||||
(@usize_eq: $lhs:expr, $rhs:expr) => {
|
||||
const _: [(); $lhs] = [(); $rhs];
|
||||
};
|
||||
}
|
||||
|
||||
// The bitpacking we use requires pointers be exactly 64 bits.
|
||||
static_assert!(@usize_eq: size_of::<NonNull<()>>(), 8);
|
||||
|
||||
// We also require pointers and usize be the same size.
|
||||
static_assert!(@usize_eq: size_of::<NonNull<()>>(), size_of::<usize>());
|
||||
|
||||
// `Custom` and `SimpleMessage` need to be thin pointers.
|
||||
static_assert!(@usize_eq: size_of::<&'static SimpleMessage>(), 8);
|
||||
static_assert!(@usize_eq: size_of::<Box<Custom>>(), 8);
|
||||
|
||||
static_assert!((TAG_MASK + 1).is_power_of_two());
|
||||
// And they must have sufficient alignment.
|
||||
static_assert!(align_of::<SimpleMessage>() >= TAG_MASK + 1);
|
||||
static_assert!(align_of::<Custom>() >= TAG_MASK + 1);
|
||||
|
||||
static_assert!(@usize_eq: TAG_MASK & TAG_SIMPLE_MESSAGE, TAG_SIMPLE_MESSAGE);
|
||||
static_assert!(@usize_eq: TAG_MASK & TAG_CUSTOM, TAG_CUSTOM);
|
||||
static_assert!(@usize_eq: TAG_MASK & TAG_OS, TAG_OS);
|
||||
static_assert!(@usize_eq: TAG_MASK & TAG_SIMPLE, TAG_SIMPLE);
|
||||
|
||||
// This is obviously true (`TAG_CUSTOM` is `0b01`), but in `Repr::new_custom` we
|
||||
// offset a pointer by this value, and expect it to both be within the same
|
||||
// object, and to not wrap around the address space. See the comment in that
|
||||
// function for further details.
|
||||
//
|
||||
// Actually, at the moment we use `ptr::wrapping_add`, not `ptr::add`, so this
|
||||
// check isn't needed for that one, although the assertion that we don't
|
||||
// actually wrap around in that wrapping_add does simplify the safety reasoning
|
||||
// elsewhere considerably.
|
||||
static_assert!(size_of::<Custom>() >= TAG_CUSTOM);
|
||||
|
||||
// These two store a payload which is allowed to be zero, so they must be
|
||||
// non-zero to preserve the `NonNull`'s range invariant.
|
||||
static_assert!(TAG_OS != 0);
|
||||
static_assert!(TAG_SIMPLE != 0);
|
||||
// We can't tag `SimpleMessage`s, the tag must be 0.
|
||||
static_assert!(@usize_eq: TAG_SIMPLE_MESSAGE, 0);
|
||||
|
||||
// Check that the point of all of this still holds.
|
||||
//
|
||||
// We'd check against `io::Error`, but *technically* it's allowed to vary,
|
||||
// as it's not `#[repr(transparent)]`/`#[repr(C)]`. We could add that, but
|
||||
// the `#[repr()]` would show up in rustdoc, which might be seen as a stable
|
||||
// commitment.
|
||||
static_assert!(@usize_eq: size_of::<Repr>(), 8);
|
||||
static_assert!(@usize_eq: size_of::<Option<Repr>>(), 8);
|
||||
static_assert!(@usize_eq: size_of::<Result<(), Repr>>(), 8);
|
||||
static_assert!(@usize_eq: size_of::<Result<usize, Repr>>(), 16);
|
||||
@@ -1,50 +0,0 @@
|
||||
//! This is a fairly simple unpacked error representation that's used on
|
||||
//! non-64bit targets, where the packed 64 bit representation wouldn't work, and
|
||||
//! would have no benefit.
|
||||
|
||||
use super::{Custom, ErrorData, ErrorKind, RawOsError, SimpleMessage};
|
||||
|
||||
type Inner = ErrorData<Box<Custom>>;
|
||||
|
||||
pub(super) struct Repr(Inner);
|
||||
|
||||
impl Repr {
|
||||
#[inline]
|
||||
pub(super) fn new_custom(b: Box<Custom>) -> Self {
|
||||
Self(Inner::Custom(b))
|
||||
}
|
||||
#[inline]
|
||||
pub(super) fn new_os(code: RawOsError) -> Self {
|
||||
Self(Inner::Os(code))
|
||||
}
|
||||
#[inline]
|
||||
pub(super) fn new_simple(kind: ErrorKind) -> Self {
|
||||
Self(Inner::Simple(kind))
|
||||
}
|
||||
#[inline]
|
||||
pub(super) const fn new_simple_message(m: &'static SimpleMessage) -> Self {
|
||||
Self(Inner::SimpleMessage(m))
|
||||
}
|
||||
#[inline]
|
||||
pub(super) fn into_data(self) -> ErrorData<Box<Custom>> {
|
||||
self.0
|
||||
}
|
||||
#[inline]
|
||||
pub(super) fn data(&self) -> ErrorData<&Custom> {
|
||||
match &self.0 {
|
||||
Inner::Os(c) => ErrorData::Os(*c),
|
||||
Inner::Simple(k) => ErrorData::Simple(*k),
|
||||
Inner::SimpleMessage(m) => ErrorData::SimpleMessage(*m),
|
||||
Inner::Custom(m) => ErrorData::Custom(&*m),
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
pub(super) fn data_mut(&mut self) -> ErrorData<&mut Custom> {
|
||||
match &mut self.0 {
|
||||
Inner::Os(c) => ErrorData::Os(*c),
|
||||
Inner::Simple(k) => ErrorData::Simple(*k),
|
||||
Inner::SimpleMessage(m) => ErrorData::SimpleMessage(*m),
|
||||
Inner::Custom(m) => ErrorData::Custom(&mut *m),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,191 +0,0 @@
|
||||
use super::{Custom, Error, ErrorData, ErrorKind, Repr, SimpleMessage, const_error};
|
||||
use crate::sys::io::{decode_error_kind, error_string};
|
||||
use crate::{assert_matches, error, fmt};
|
||||
|
||||
#[test]
|
||||
fn test_size() {
|
||||
assert!(size_of::<Error>() <= size_of::<[usize; 2]>());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_debug_error() {
|
||||
let code = 6;
|
||||
let msg = error_string(code);
|
||||
let kind = decode_error_kind(code);
|
||||
let err = Error {
|
||||
repr: Repr::new_custom(Box::new(Custom {
|
||||
kind: ErrorKind::InvalidInput,
|
||||
error: Box::new(Error { repr: super::Repr::new_os(code) }),
|
||||
})),
|
||||
};
|
||||
let expected = format!(
|
||||
"Custom {{ \
|
||||
kind: InvalidInput, \
|
||||
error: Os {{ \
|
||||
code: {:?}, \
|
||||
kind: {:?}, \
|
||||
message: {:?} \
|
||||
}} \
|
||||
}}",
|
||||
code, kind, msg
|
||||
);
|
||||
assert_eq!(format!("{err:?}"), expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_downcasting() {
|
||||
#[derive(Debug)]
|
||||
struct TestError;
|
||||
|
||||
impl fmt::Display for TestError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str("asdf")
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for TestError {}
|
||||
|
||||
// we have to call all of these UFCS style right now since method
|
||||
// resolution won't implicitly drop the Send+Sync bounds
|
||||
let mut err = Error::new(ErrorKind::Other, TestError);
|
||||
assert!(err.get_ref().unwrap().is::<TestError>());
|
||||
assert_eq!("asdf", err.get_ref().unwrap().to_string());
|
||||
assert!(err.get_mut().unwrap().is::<TestError>());
|
||||
let extracted = err.into_inner().unwrap();
|
||||
extracted.downcast::<TestError>().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_const() {
|
||||
const E: Error = const_error!(ErrorKind::NotFound, "hello");
|
||||
|
||||
assert_eq!(E.kind(), ErrorKind::NotFound);
|
||||
assert_eq!(E.to_string(), "hello");
|
||||
assert!(format!("{E:?}").contains("\"hello\""));
|
||||
assert!(format!("{E:?}").contains("NotFound"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_os_packing() {
|
||||
for code in -20..20 {
|
||||
let e = Error::from_raw_os_error(code);
|
||||
assert_eq!(e.raw_os_error(), Some(code));
|
||||
assert_matches!(
|
||||
e.repr.data(),
|
||||
ErrorData::Os(c) if c == code,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_errorkind_packing() {
|
||||
assert_eq!(Error::from(ErrorKind::NotFound).kind(), ErrorKind::NotFound);
|
||||
assert_eq!(Error::from(ErrorKind::PermissionDenied).kind(), ErrorKind::PermissionDenied);
|
||||
assert_eq!(Error::from(ErrorKind::Uncategorized).kind(), ErrorKind::Uncategorized);
|
||||
// Check that the innards look like what we want.
|
||||
assert_matches!(
|
||||
Error::from(ErrorKind::OutOfMemory).repr.data(),
|
||||
ErrorData::Simple(ErrorKind::OutOfMemory),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_message_packing() {
|
||||
use super::ErrorKind::*;
|
||||
use super::SimpleMessage;
|
||||
macro_rules! check_simple_msg {
|
||||
($err:expr, $kind:ident, $msg:literal) => {{
|
||||
let e = &$err;
|
||||
// Check that the public api is right.
|
||||
assert_eq!(e.kind(), $kind);
|
||||
assert!(format!("{e:?}").contains($msg));
|
||||
// and we got what we expected
|
||||
assert_matches!(
|
||||
e.repr.data(),
|
||||
ErrorData::SimpleMessage(SimpleMessage { kind: $kind, message: $msg })
|
||||
);
|
||||
}};
|
||||
}
|
||||
|
||||
let not_static = const_error!(Uncategorized, "not a constant!");
|
||||
check_simple_msg!(not_static, Uncategorized, "not a constant!");
|
||||
|
||||
const CONST: Error = const_error!(NotFound, "definitely a constant!");
|
||||
check_simple_msg!(CONST, NotFound, "definitely a constant!");
|
||||
|
||||
static STATIC: Error = const_error!(BrokenPipe, "a constant, sort of!");
|
||||
check_simple_msg!(STATIC, BrokenPipe, "a constant, sort of!");
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
struct Bojji(bool);
|
||||
impl error::Error for Bojji {}
|
||||
impl fmt::Display for Bojji {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "ah! {:?}", self)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_custom_error_packing() {
|
||||
use super::Custom;
|
||||
let test = Error::new(ErrorKind::Uncategorized, Bojji(true));
|
||||
assert_matches!(
|
||||
test.repr.data(),
|
||||
ErrorData::Custom(Custom {
|
||||
kind: ErrorKind::Uncategorized,
|
||||
error,
|
||||
}) if error.downcast_ref::<Bojji>().as_deref() == Some(&Bojji(true)),
|
||||
);
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct E;
|
||||
|
||||
impl fmt::Display for E {
|
||||
fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for E {}
|
||||
|
||||
#[test]
|
||||
fn test_std_io_error_downcast() {
|
||||
// Case 1: custom error, downcast succeeds
|
||||
let io_error = Error::new(ErrorKind::Other, Bojji(true));
|
||||
let e: Bojji = io_error.downcast().unwrap();
|
||||
assert!(e.0);
|
||||
|
||||
// Case 2: custom error, downcast fails
|
||||
let io_error = Error::new(ErrorKind::Other, Bojji(true));
|
||||
let io_error = io_error.downcast::<E>().unwrap_err();
|
||||
|
||||
// ensures that the custom error is intact
|
||||
assert_eq!(ErrorKind::Other, io_error.kind());
|
||||
let e: Bojji = io_error.downcast().unwrap();
|
||||
assert!(e.0);
|
||||
|
||||
// Case 3: os error
|
||||
let errno = 20;
|
||||
let io_error = Error::from_raw_os_error(errno);
|
||||
let io_error = io_error.downcast::<E>().unwrap_err();
|
||||
|
||||
assert_eq!(errno, io_error.raw_os_error().unwrap());
|
||||
|
||||
// Case 4: simple
|
||||
let kind = ErrorKind::OutOfMemory;
|
||||
let io_error: Error = kind.into();
|
||||
let io_error = io_error.downcast::<E>().unwrap_err();
|
||||
|
||||
assert_eq!(kind, io_error.kind());
|
||||
|
||||
// Case 5: simple message
|
||||
const SIMPLE_MESSAGE: SimpleMessage =
|
||||
SimpleMessage { kind: ErrorKind::Other, message: "simple message error test" };
|
||||
let io_error = Error::from_static_message(&SIMPLE_MESSAGE);
|
||||
let io_error = io_error.downcast::<E>().unwrap_err();
|
||||
|
||||
assert_eq!(SIMPLE_MESSAGE.kind, io_error.kind());
|
||||
assert_eq!(SIMPLE_MESSAGE.message, format!("{io_error}"));
|
||||
}
|
||||
@@ -1,717 +0,0 @@
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
use crate::alloc::Allocator;
|
||||
use alloc_crate::collections::VecDeque;
|
||||
use crate::io::{self, BorrowedCursor, BufRead, IoSlice, IoSliceMut, Read, Seek, SeekFrom, Write};
|
||||
use crate::{cmp, fmt, mem, str};
|
||||
|
||||
// =============================================================================
|
||||
// Forwarding implementations
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<R: Read + ?Sized> Read for &mut R {
|
||||
#[inline]
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
(**self).read(buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
(**self).read_buf(cursor)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
|
||||
(**self).read_vectored(bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_read_vectored(&self) -> bool {
|
||||
(**self).is_read_vectored()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
(**self).read_to_end(buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
|
||||
(**self).read_to_string(buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
|
||||
(**self).read_exact(buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_buf_exact(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
(**self).read_buf_exact(cursor)
|
||||
}
|
||||
}
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<W: Write + ?Sized> Write for &mut W {
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
(**self).write(buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
(**self).write_vectored(bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
(**self).is_write_vectored()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
(**self).flush()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
|
||||
(**self).write_all(buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
(**self).write_all_vectored(bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
|
||||
(**self).write_fmt(fmt)
|
||||
}
|
||||
}
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<S: Seek + ?Sized> Seek for &mut S {
|
||||
#[inline]
|
||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||
(**self).seek(pos)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn rewind(&mut self) -> io::Result<()> {
|
||||
(**self).rewind()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn stream_len(&mut self) -> io::Result<u64> {
|
||||
(**self).stream_len()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn stream_position(&mut self) -> io::Result<u64> {
|
||||
(**self).stream_position()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
|
||||
(**self).seek_relative(offset)
|
||||
}
|
||||
}
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<B: BufRead + ?Sized> BufRead for &mut B {
|
||||
#[inline]
|
||||
fn fill_buf(&mut self) -> io::Result<&[u8]> {
|
||||
(**self).fill_buf()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn consume(&mut self, amt: usize) {
|
||||
(**self).consume(amt)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn has_data_left(&mut self) -> io::Result<bool> {
|
||||
(**self).has_data_left()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
(**self).read_until(byte, buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn skip_until(&mut self, byte: u8) -> io::Result<usize> {
|
||||
(**self).skip_until(byte)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_line(&mut self, buf: &mut String) -> io::Result<usize> {
|
||||
(**self).read_line(buf)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<R: Read + ?Sized> Read for Box<R> {
|
||||
#[inline]
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
(**self).read(buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
(**self).read_buf(cursor)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
|
||||
(**self).read_vectored(bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_read_vectored(&self) -> bool {
|
||||
(**self).is_read_vectored()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
(**self).read_to_end(buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
|
||||
(**self).read_to_string(buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
|
||||
(**self).read_exact(buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_buf_exact(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
(**self).read_buf_exact(cursor)
|
||||
}
|
||||
}
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<W: Write + ?Sized> Write for Box<W> {
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
(**self).write(buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
(**self).write_vectored(bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
(**self).is_write_vectored()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
(**self).flush()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
|
||||
(**self).write_all(buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
(**self).write_all_vectored(bufs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
|
||||
(**self).write_fmt(fmt)
|
||||
}
|
||||
}
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<S: Seek + ?Sized> Seek for Box<S> {
|
||||
#[inline]
|
||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||
(**self).seek(pos)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn rewind(&mut self) -> io::Result<()> {
|
||||
(**self).rewind()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn stream_len(&mut self) -> io::Result<u64> {
|
||||
(**self).stream_len()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn stream_position(&mut self) -> io::Result<u64> {
|
||||
(**self).stream_position()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
|
||||
(**self).seek_relative(offset)
|
||||
}
|
||||
}
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<B: BufRead + ?Sized> BufRead for Box<B> {
|
||||
#[inline]
|
||||
fn fill_buf(&mut self) -> io::Result<&[u8]> {
|
||||
(**self).fill_buf()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn consume(&mut self, amt: usize) {
|
||||
(**self).consume(amt)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn has_data_left(&mut self) -> io::Result<bool> {
|
||||
(**self).has_data_left()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
(**self).read_until(byte, buf)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn skip_until(&mut self, byte: u8) -> io::Result<usize> {
|
||||
(**self).skip_until(byte)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_line(&mut self, buf: &mut String) -> io::Result<usize> {
|
||||
(**self).read_line(buf)
|
||||
}
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// In-memory buffer implementations
|
||||
|
||||
/// Read is implemented for `&[u8]` by copying from the slice.
|
||||
///
|
||||
/// Note that reading updates the slice to point to the yet unread part.
|
||||
/// The slice will be empty when EOF is reached.
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl Read for &[u8] {
|
||||
#[inline]
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
let amt = cmp::min(buf.len(), self.len());
|
||||
let (a, b) = self.split_at(amt);
|
||||
|
||||
// First check if the amount of bytes we want to read is small:
|
||||
// `copy_from_slice` will generally expand to a call to `memcpy`, and
|
||||
// for a single byte the overhead is significant.
|
||||
if amt == 1 {
|
||||
buf[0] = a[0];
|
||||
} else {
|
||||
buf[..amt].copy_from_slice(a);
|
||||
}
|
||||
|
||||
*self = b;
|
||||
Ok(amt)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
let amt = cmp::min(cursor.capacity(), self.len());
|
||||
let (a, b) = self.split_at(amt);
|
||||
|
||||
cursor.append(a);
|
||||
|
||||
*self = b;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
|
||||
let mut nread = 0;
|
||||
for buf in bufs {
|
||||
nread += self.read(buf)?;
|
||||
if self.is_empty() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(nread)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_read_vectored(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
|
||||
if buf.len() > self.len() {
|
||||
// `read_exact` makes no promise about the content of `buf` if it
|
||||
// fails so don't bother about that.
|
||||
*self = &self[self.len()..];
|
||||
return Err(io::Error::READ_EXACT_EOF);
|
||||
}
|
||||
let (a, b) = self.split_at(buf.len());
|
||||
|
||||
// First check if the amount of bytes we want to read is small:
|
||||
// `copy_from_slice` will generally expand to a call to `memcpy`, and
|
||||
// for a single byte the overhead is significant.
|
||||
if buf.len() == 1 {
|
||||
buf[0] = a[0];
|
||||
} else {
|
||||
buf.copy_from_slice(a);
|
||||
}
|
||||
|
||||
*self = b;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_buf_exact(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
if cursor.capacity() > self.len() {
|
||||
// Append everything we can to the cursor.
|
||||
cursor.append(*self);
|
||||
*self = &self[self.len()..];
|
||||
return Err(io::Error::READ_EXACT_EOF);
|
||||
}
|
||||
let (a, b) = self.split_at(cursor.capacity());
|
||||
|
||||
cursor.append(a);
|
||||
|
||||
*self = b;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
let len = self.len();
|
||||
buf.try_reserve(len)?;
|
||||
buf.extend_from_slice(*self);
|
||||
*self = &self[len..];
|
||||
Ok(len)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
|
||||
let content = str::from_utf8(self).map_err(|_| io::Error::INVALID_UTF8)?;
|
||||
let len = self.len();
|
||||
buf.try_reserve(len)?;
|
||||
buf.push_str(content);
|
||||
*self = &self[len..];
|
||||
Ok(len)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl BufRead for &[u8] {
|
||||
#[inline]
|
||||
fn fill_buf(&mut self) -> io::Result<&[u8]> {
|
||||
Ok(*self)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn consume(&mut self, amt: usize) {
|
||||
*self = &self[amt..];
|
||||
}
|
||||
}
|
||||
|
||||
/// Write is implemented for `&mut [u8]` by copying into the slice, overwriting
|
||||
/// its data.
|
||||
///
|
||||
/// Note that writing updates the slice to point to the yet unwritten part.
|
||||
/// The slice will be empty when it has been completely overwritten.
|
||||
///
|
||||
/// If the number of bytes to be written exceeds the size of the slice, write operations will
|
||||
/// return short writes: ultimately, `Ok(0)`; in this situation, `write_all` returns an error of
|
||||
/// kind `ErrorKind::WriteZero`.
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl Write for &mut [u8] {
|
||||
#[inline]
|
||||
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
|
||||
let amt = cmp::min(data.len(), self.len());
|
||||
let (a, b) = mem::take(self).split_at_mut(amt);
|
||||
a.copy_from_slice(&data[..amt]);
|
||||
*self = b;
|
||||
Ok(amt)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
let mut nwritten = 0;
|
||||
for buf in bufs {
|
||||
nwritten += self.write(buf)?;
|
||||
if self.is_empty() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(nwritten)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all(&mut self, data: &[u8]) -> io::Result<()> {
|
||||
if self.write(data)? < data.len() { Err(io::Error::WRITE_ALL_EOF) } else { Ok(()) }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
for buf in bufs {
|
||||
if self.write(buf)? < buf.len() {
|
||||
return Err(io::Error::WRITE_ALL_EOF);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Write is implemented for `Vec<u8>` by appending to the vector.
|
||||
/// The vector will grow as needed.
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<A: Allocator> Write for Vec<u8, A> {
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.extend_from_slice(buf);
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
let len = bufs.iter().map(|b| b.len()).sum();
|
||||
self.reserve(len);
|
||||
for buf in bufs {
|
||||
self.extend_from_slice(buf);
|
||||
}
|
||||
Ok(len)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
|
||||
self.extend_from_slice(buf);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
self.write_vectored(bufs)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Read is implemented for `VecDeque<u8>` by consuming bytes from the front of the `VecDeque`.
|
||||
#[stable(feature = "vecdeque_read_write", since = "1.63.0")]
|
||||
impl<A: Allocator> Read for VecDeque<u8, A> {
|
||||
/// Fill `buf` with the contents of the "front" slice as returned by
|
||||
/// [`as_slices`][`VecDeque::as_slices`]. If the contained byte slices of the `VecDeque` are
|
||||
/// discontiguous, multiple calls to `read` will be needed to read the entire content.
|
||||
#[inline]
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
let (ref mut front, _) = self.as_slices();
|
||||
let n = Read::read(front, buf)?;
|
||||
self.drain(..n);
|
||||
Ok(n)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
|
||||
let (front, back) = self.as_slices();
|
||||
|
||||
// Use only the front buffer if it is big enough to fill `buf`, else use
|
||||
// the back buffer too.
|
||||
match buf.split_at_mut_checked(front.len()) {
|
||||
None => buf.copy_from_slice(&front[..buf.len()]),
|
||||
Some((buf_front, buf_back)) => match back.split_at_checked(buf_back.len()) {
|
||||
Some((back, _)) => {
|
||||
buf_front.copy_from_slice(front);
|
||||
buf_back.copy_from_slice(back);
|
||||
}
|
||||
None => {
|
||||
self.clear();
|
||||
return Err(io::Error::READ_EXACT_EOF);
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
self.drain(..buf.len());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
let (ref mut front, _) = self.as_slices();
|
||||
let n = cmp::min(cursor.capacity(), front.len());
|
||||
Read::read_buf(front, cursor)?;
|
||||
self.drain(..n);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_buf_exact(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
let len = cursor.capacity();
|
||||
let (front, back) = self.as_slices();
|
||||
|
||||
match front.split_at_checked(cursor.capacity()) {
|
||||
Some((front, _)) => cursor.append(front),
|
||||
None => {
|
||||
cursor.append(front);
|
||||
match back.split_at_checked(cursor.capacity()) {
|
||||
Some((back, _)) => cursor.append(back),
|
||||
None => {
|
||||
cursor.append(back);
|
||||
self.clear();
|
||||
return Err(io::Error::READ_EXACT_EOF);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.drain(..len);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
// The total len is known upfront so we can reserve it in a single call.
|
||||
let len = self.len();
|
||||
buf.try_reserve(len)?;
|
||||
|
||||
let (front, back) = self.as_slices();
|
||||
buf.extend_from_slice(front);
|
||||
buf.extend_from_slice(back);
|
||||
self.clear();
|
||||
Ok(len)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
|
||||
// SAFETY: We only append to the buffer
|
||||
unsafe { io::append_to_string(buf, |buf| self.read_to_end(buf)) }
|
||||
}
|
||||
}
|
||||
|
||||
/// BufRead is implemented for `VecDeque<u8>` by reading bytes from the front of the `VecDeque`.
|
||||
#[stable(feature = "vecdeque_buf_read", since = "1.75.0")]
|
||||
impl<A: Allocator> BufRead for VecDeque<u8, A> {
|
||||
/// Returns the contents of the "front" slice as returned by
|
||||
/// [`as_slices`][`VecDeque::as_slices`]. If the contained byte slices of the `VecDeque` are
|
||||
/// discontiguous, multiple calls to `fill_buf` will be needed to read the entire content.
|
||||
#[inline]
|
||||
fn fill_buf(&mut self) -> io::Result<&[u8]> {
|
||||
let (front, _) = self.as_slices();
|
||||
Ok(front)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn consume(&mut self, amt: usize) {
|
||||
self.drain(..amt);
|
||||
}
|
||||
}
|
||||
|
||||
/// Write is implemented for `VecDeque<u8>` by appending to the `VecDeque`, growing it as needed.
|
||||
#[stable(feature = "vecdeque_read_write", since = "1.63.0")]
|
||||
impl<A: Allocator> Write for VecDeque<u8, A> {
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.extend(buf);
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
let len = bufs.iter().map(|b| b.len()).sum();
|
||||
self.reserve(len);
|
||||
for buf in bufs {
|
||||
self.extend(&**buf);
|
||||
}
|
||||
Ok(len)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
|
||||
self.extend(buf);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
self.write_vectored(bufs)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "read_buf", issue = "78485")]
|
||||
impl<'a> io::Write for core::io::BorrowedCursor<'a> {
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
let amt = cmp::min(buf.len(), self.capacity());
|
||||
self.append(&buf[..amt]);
|
||||
Ok(amt)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
let mut nwritten = 0;
|
||||
for buf in bufs {
|
||||
let n = self.write(buf)?;
|
||||
nwritten += n;
|
||||
if n < buf.len() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(nwritten)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
|
||||
if self.write(buf)? < buf.len() { Err(io::Error::WRITE_ALL_EOF) } else { Ok(()) }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
for buf in bufs {
|
||||
if self.write(buf)? < buf.len() {
|
||||
return Err(io::Error::WRITE_ALL_EOF);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
use crate::io::prelude::*;
|
||||
|
||||
#[bench]
|
||||
fn bench_read_slice(b: &mut test::Bencher) {
|
||||
let buf = [5; 1024];
|
||||
let mut dst = [0; 128];
|
||||
|
||||
b.iter(|| {
|
||||
let mut rd = &buf[..];
|
||||
for _ in 0..8 {
|
||||
let _ = rd.read(&mut dst);
|
||||
test::black_box(&dst);
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_write_slice(b: &mut test::Bencher) {
|
||||
let mut buf = [0; 1024];
|
||||
let src = [5; 128];
|
||||
|
||||
b.iter(|| {
|
||||
let mut wr = &mut buf[..];
|
||||
for _ in 0..8 {
|
||||
let _ = wr.write_all(&src);
|
||||
test::black_box(&wr);
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_read_vec(b: &mut test::Bencher) {
|
||||
let buf = vec![5; 1024];
|
||||
let mut dst = [0; 128];
|
||||
|
||||
b.iter(|| {
|
||||
let mut rd = &buf[..];
|
||||
for _ in 0..8 {
|
||||
let _ = rd.read(&mut dst);
|
||||
test::black_box(&dst);
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_write_vec(b: &mut test::Bencher) {
|
||||
let mut buf = Vec::with_capacity(1024);
|
||||
let src = [5; 128];
|
||||
|
||||
b.iter(|| {
|
||||
let mut wr = &mut buf[..];
|
||||
for _ in 0..8 {
|
||||
let _ = wr.write_all(&src);
|
||||
test::black_box(&wr);
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,295 +0,0 @@
|
||||
use crate::io;
|
||||
use crate::sys::{FromInner, IntoInner, pipe as imp};
|
||||
|
||||
/// Creates an anonymous pipe.
|
||||
///
|
||||
/// # Behavior
|
||||
///
|
||||
/// A pipe is a one-way data channel provided by the OS, which works across processes. A pipe is
|
||||
/// typically used to communicate between two or more separate processes, as there are better,
|
||||
/// faster ways to communicate within a single process.
|
||||
///
|
||||
/// In particular:
|
||||
///
|
||||
/// * A read on a [`PipeReader`] blocks until the pipe is non-empty.
|
||||
/// * A write on a [`PipeWriter`] blocks when the pipe is full.
|
||||
/// * When all copies of a [`PipeWriter`] are closed, a read on the corresponding [`PipeReader`]
|
||||
/// returns EOF.
|
||||
/// * [`PipeWriter`] can be shared, and multiple processes or threads can write to it at once, but
|
||||
/// writes (above a target-specific threshold) may have their data interleaved.
|
||||
/// * [`PipeReader`] can be shared, and multiple processes or threads can read it at once. Any
|
||||
/// given byte will only get consumed by one reader. There are no guarantees about data
|
||||
/// interleaving.
|
||||
/// * Portable applications cannot assume any atomicity of messages larger than a single byte.
|
||||
///
|
||||
/// # Platform-specific behavior
|
||||
///
|
||||
/// This function currently corresponds to the `pipe` function on Unix and the
|
||||
/// `CreatePipe` function on Windows.
|
||||
///
|
||||
/// Note that this [may change in the future][changes].
|
||||
///
|
||||
/// # Capacity
|
||||
///
|
||||
/// Pipe capacity is platform dependent. To quote the Linux [man page]:
|
||||
///
|
||||
/// > Different implementations have different limits for the pipe capacity. Applications should
|
||||
/// > not rely on a particular capacity: an application should be designed so that a reading process
|
||||
/// > consumes data as soon as it is available, so that a writing process does not remain blocked.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```no_run
|
||||
/// # #[cfg(miri)] fn main() {}
|
||||
/// # #[cfg(not(miri))]
|
||||
/// # fn main() -> std::io::Result<()> {
|
||||
/// use std::io::{Read, Write, pipe};
|
||||
/// use std::process::Command;
|
||||
/// let (ping_reader, mut ping_writer) = pipe()?;
|
||||
/// let (mut pong_reader, pong_writer) = pipe()?;
|
||||
///
|
||||
/// // Spawn a child process that echoes its input.
|
||||
/// let mut echo_command = Command::new("cat");
|
||||
/// echo_command.stdin(ping_reader);
|
||||
/// echo_command.stdout(pong_writer);
|
||||
/// let mut echo_child = echo_command.spawn()?;
|
||||
///
|
||||
/// // Send input to the child process. Note that because we're writing all the input before we
|
||||
/// // read any output, this could deadlock if the child's input and output pipe buffers both
|
||||
/// // filled up. Those buffers are usually at least a few KB, so "hello" is fine, but for longer
|
||||
/// // inputs we'd need to read and write at the same time, e.g. using threads.
|
||||
/// ping_writer.write_all(b"hello")?;
|
||||
///
|
||||
/// // `cat` exits when it reads EOF from stdin, but that can't happen while any ping writer
|
||||
/// // remains open. We need to drop our ping writer, or read_to_string will deadlock below.
|
||||
/// drop(ping_writer);
|
||||
///
|
||||
/// // The pong reader can't report EOF while any pong writer remains open. Our Command object is
|
||||
/// // holding a pong writer, and again read_to_string will deadlock if we don't drop it.
|
||||
/// drop(echo_command);
|
||||
///
|
||||
/// let mut buf = String::new();
|
||||
/// // Block until `cat` closes its stdout (a pong writer).
|
||||
/// pong_reader.read_to_string(&mut buf)?;
|
||||
/// assert_eq!(&buf, "hello");
|
||||
///
|
||||
/// // At this point we know `cat` has exited, but we still need to wait to clean up the "zombie".
|
||||
/// echo_child.wait()?;
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
/// [changes]: io#platform-specific-behavior
|
||||
/// [man page]: https://man7.org/linux/man-pages/man7/pipe.7.html
|
||||
#[stable(feature = "anonymous_pipe", since = "1.87.0")]
|
||||
#[inline]
|
||||
pub fn pipe() -> io::Result<(PipeReader, PipeWriter)> {
|
||||
imp::pipe().map(|(reader, writer)| (PipeReader(reader), PipeWriter(writer)))
|
||||
}
|
||||
|
||||
/// Read end of an anonymous pipe.
|
||||
#[stable(feature = "anonymous_pipe", since = "1.87.0")]
|
||||
#[derive(Debug)]
|
||||
pub struct PipeReader(pub(crate) imp::Pipe);
|
||||
|
||||
/// Write end of an anonymous pipe.
|
||||
#[stable(feature = "anonymous_pipe", since = "1.87.0")]
|
||||
#[derive(Debug)]
|
||||
pub struct PipeWriter(pub(crate) imp::Pipe);
|
||||
|
||||
impl FromInner<imp::Pipe> for PipeReader {
|
||||
fn from_inner(inner: imp::Pipe) -> Self {
|
||||
Self(inner)
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoInner<imp::Pipe> for PipeReader {
|
||||
fn into_inner(self) -> imp::Pipe {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl FromInner<imp::Pipe> for PipeWriter {
|
||||
fn from_inner(inner: imp::Pipe) -> Self {
|
||||
Self(inner)
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoInner<imp::Pipe> for PipeWriter {
|
||||
fn into_inner(self) -> imp::Pipe {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl PipeReader {
|
||||
/// Creates a new [`PipeReader`] instance that shares the same underlying file description.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// # #[cfg(miri)] fn main() {}
|
||||
/// # #[cfg(not(miri))]
|
||||
/// # fn main() -> std::io::Result<()> {
|
||||
/// use std::fs;
|
||||
/// use std::io::{pipe, Write};
|
||||
/// use std::process::Command;
|
||||
/// const NUM_SLOT: u8 = 2;
|
||||
/// const NUM_PROC: u8 = 5;
|
||||
/// const OUTPUT: &str = "work.txt";
|
||||
///
|
||||
/// let mut jobs = vec![];
|
||||
/// let (reader, mut writer) = pipe()?;
|
||||
///
|
||||
/// // Write NUM_SLOT characters the pipe.
|
||||
/// writer.write_all(&[b'|'; NUM_SLOT as usize])?;
|
||||
///
|
||||
/// // Spawn several processes that read a character from the pipe, do some work, then
|
||||
/// // write back to the pipe. When the pipe is empty, the processes block, so only
|
||||
/// // NUM_SLOT processes can be working at any given time.
|
||||
/// for _ in 0..NUM_PROC {
|
||||
/// jobs.push(
|
||||
/// Command::new("bash")
|
||||
/// .args(["-c",
|
||||
/// &format!(
|
||||
/// "read -n 1\n\
|
||||
/// echo -n 'x' >> '{OUTPUT}'\n\
|
||||
/// echo -n '|'",
|
||||
/// ),
|
||||
/// ])
|
||||
/// .stdin(reader.try_clone()?)
|
||||
/// .stdout(writer.try_clone()?)
|
||||
/// .spawn()?,
|
||||
/// );
|
||||
/// }
|
||||
///
|
||||
/// // Wait for all jobs to finish.
|
||||
/// for mut job in jobs {
|
||||
/// job.wait()?;
|
||||
/// }
|
||||
///
|
||||
/// // Check our work and clean up.
|
||||
/// let xs = fs::read_to_string(OUTPUT)?;
|
||||
/// fs::remove_file(OUTPUT)?;
|
||||
/// assert_eq!(xs, "x".repeat(NUM_PROC.into()));
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
#[stable(feature = "anonymous_pipe", since = "1.87.0")]
|
||||
pub fn try_clone(&self) -> io::Result<Self> {
|
||||
self.0.try_clone().map(Self)
|
||||
}
|
||||
}
|
||||
|
||||
impl PipeWriter {
|
||||
/// Creates a new [`PipeWriter`] instance that shares the same underlying file description.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// # #[cfg(miri)] fn main() {}
|
||||
/// # #[cfg(not(miri))]
|
||||
/// # fn main() -> std::io::Result<()> {
|
||||
/// use std::process::Command;
|
||||
/// use std::io::{pipe, Read};
|
||||
/// let (mut reader, writer) = pipe()?;
|
||||
///
|
||||
/// // Spawn a process that writes to stdout and stderr.
|
||||
/// let mut peer = Command::new("bash")
|
||||
/// .args([
|
||||
/// "-c",
|
||||
/// "echo -n foo\n\
|
||||
/// echo -n bar >&2"
|
||||
/// ])
|
||||
/// .stdout(writer.try_clone()?)
|
||||
/// .stderr(writer)
|
||||
/// .spawn()?;
|
||||
///
|
||||
/// // Read and check the result.
|
||||
/// let mut msg = String::new();
|
||||
/// reader.read_to_string(&mut msg)?;
|
||||
/// assert_eq!(&msg, "foobar");
|
||||
///
|
||||
/// peer.wait()?;
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
#[stable(feature = "anonymous_pipe", since = "1.87.0")]
|
||||
pub fn try_clone(&self) -> io::Result<Self> {
|
||||
self.0.try_clone().map(Self)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "anonymous_pipe", since = "1.87.0")]
|
||||
impl io::Read for &PipeReader {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
self.0.read(buf)
|
||||
}
|
||||
fn read_vectored(&mut self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> {
|
||||
self.0.read_vectored(bufs)
|
||||
}
|
||||
#[inline]
|
||||
fn is_read_vectored(&self) -> bool {
|
||||
self.0.is_read_vectored()
|
||||
}
|
||||
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
self.0.read_to_end(buf)
|
||||
}
|
||||
fn read_buf(&mut self, buf: io::BorrowedCursor<'_>) -> io::Result<()> {
|
||||
self.0.read_buf(buf)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "anonymous_pipe", since = "1.87.0")]
|
||||
impl io::Read for PipeReader {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
self.0.read(buf)
|
||||
}
|
||||
fn read_vectored(&mut self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> {
|
||||
self.0.read_vectored(bufs)
|
||||
}
|
||||
#[inline]
|
||||
fn is_read_vectored(&self) -> bool {
|
||||
self.0.is_read_vectored()
|
||||
}
|
||||
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
self.0.read_to_end(buf)
|
||||
}
|
||||
fn read_buf(&mut self, buf: io::BorrowedCursor<'_>) -> io::Result<()> {
|
||||
self.0.read_buf(buf)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "anonymous_pipe", since = "1.87.0")]
|
||||
impl io::Write for &PipeWriter {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.0.write(buf)
|
||||
}
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
|
||||
self.0.write_vectored(bufs)
|
||||
}
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
self.0.is_write_vectored()
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "anonymous_pipe", since = "1.87.0")]
|
||||
impl io::Write for PipeWriter {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.0.write(buf)
|
||||
}
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
|
||||
self.0.write_vectored(bufs)
|
||||
}
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
self.0.is_write_vectored()
|
||||
}
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
use crate::io::{Read, Write, pipe};
|
||||
|
||||
#[test]
|
||||
#[cfg(all(any(unix, windows), not(miri)))]
|
||||
fn pipe_creation_clone_and_rw() {
|
||||
let (rx, tx) = pipe().unwrap();
|
||||
|
||||
tx.try_clone().unwrap().write_all(b"12345").unwrap();
|
||||
drop(tx);
|
||||
|
||||
let mut rx2 = rx.try_clone().unwrap();
|
||||
drop(rx);
|
||||
|
||||
let mut s = String::new();
|
||||
rx2.read_to_string(&mut s).unwrap();
|
||||
drop(rx2);
|
||||
assert_eq!(s, "12345");
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
//! The I/O Prelude.
|
||||
//!
|
||||
//! The purpose of this module is to alleviate imports of many common I/O traits
|
||||
//! by adding a glob import to the top of I/O heavy modules:
|
||||
//!
|
||||
//! ```
|
||||
//! # #![allow(unused_imports)]
|
||||
//! use std::io::prelude::*;
|
||||
//! ```
|
||||
|
||||
#![stable(feature = "rust1", since = "1.0.0")]
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use super::{BufRead, Read, Seek, Write};
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,947 +0,0 @@
|
||||
use super::{BorrowedBuf, Cursor, SeekFrom, repeat};
|
||||
use crate::cmp::{self, min};
|
||||
use crate::io::{
|
||||
self, BufRead, BufReader, DEFAULT_BUF_SIZE, IoSlice, IoSliceMut, Read, Seek, Write,
|
||||
};
|
||||
use crate::mem::MaybeUninit;
|
||||
use crate::ops::Deref;
|
||||
|
||||
#[test]
|
||||
fn read_until() {
|
||||
let mut buf = Cursor::new(&b"12"[..]);
|
||||
let mut v = Vec::new();
|
||||
assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 2);
|
||||
assert_eq!(v, b"12");
|
||||
|
||||
let mut buf = Cursor::new(&b"1233"[..]);
|
||||
let mut v = Vec::new();
|
||||
assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 3);
|
||||
assert_eq!(v, b"123");
|
||||
v.truncate(0);
|
||||
assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 1);
|
||||
assert_eq!(v, b"3");
|
||||
v.truncate(0);
|
||||
assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 0);
|
||||
assert_eq!(v, []);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn skip_until() {
|
||||
let bytes: &[u8] = b"read\0ignore\0read\0ignore\0read\0ignore\0";
|
||||
let mut reader = BufReader::new(bytes);
|
||||
|
||||
// read from the bytes, alternating between
|
||||
// consuming `read\0`s and skipping `ignore\0`s
|
||||
loop {
|
||||
// consume `read\0`
|
||||
let mut out = Vec::new();
|
||||
let read = reader.read_until(0, &mut out).unwrap();
|
||||
if read == 0 {
|
||||
// eof
|
||||
break;
|
||||
} else {
|
||||
assert_eq!(out, b"read\0");
|
||||
assert_eq!(read, b"read\0".len());
|
||||
}
|
||||
|
||||
// skip past `ignore\0`
|
||||
let skipped = reader.skip_until(0).unwrap();
|
||||
assert_eq!(skipped, b"ignore\0".len());
|
||||
}
|
||||
|
||||
// ensure we are at the end of the byte slice and that we can skip no further
|
||||
// also ensure skip_until matches the behavior of read_until at EOF
|
||||
let skipped = reader.skip_until(0).unwrap();
|
||||
assert_eq!(skipped, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn split() {
|
||||
let buf = Cursor::new(&b"12"[..]);
|
||||
let mut s = buf.split(b'3');
|
||||
assert_eq!(s.next().unwrap().unwrap(), vec![b'1', b'2']);
|
||||
assert!(s.next().is_none());
|
||||
|
||||
let buf = Cursor::new(&b"1233"[..]);
|
||||
let mut s = buf.split(b'3');
|
||||
assert_eq!(s.next().unwrap().unwrap(), vec![b'1', b'2']);
|
||||
assert_eq!(s.next().unwrap().unwrap(), vec![]);
|
||||
assert!(s.next().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_line() {
|
||||
let mut buf = Cursor::new(&b"12"[..]);
|
||||
let mut v = String::new();
|
||||
assert_eq!(buf.read_line(&mut v).unwrap(), 2);
|
||||
assert_eq!(v, "12");
|
||||
|
||||
let mut buf = Cursor::new(&b"12\n\n"[..]);
|
||||
let mut v = String::new();
|
||||
assert_eq!(buf.read_line(&mut v).unwrap(), 3);
|
||||
assert_eq!(v, "12\n");
|
||||
v.truncate(0);
|
||||
assert_eq!(buf.read_line(&mut v).unwrap(), 1);
|
||||
assert_eq!(v, "\n");
|
||||
v.truncate(0);
|
||||
assert_eq!(buf.read_line(&mut v).unwrap(), 0);
|
||||
assert_eq!(v, "");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn lines() {
|
||||
let buf = Cursor::new(&b"12\r"[..]);
|
||||
let mut s = buf.lines();
|
||||
assert_eq!(s.next().unwrap().unwrap(), "12\r".to_string());
|
||||
assert!(s.next().is_none());
|
||||
|
||||
let buf = Cursor::new(&b"12\r\n\n"[..]);
|
||||
let mut s = buf.lines();
|
||||
assert_eq!(s.next().unwrap().unwrap(), "12".to_string());
|
||||
assert_eq!(s.next().unwrap().unwrap(), "".to_string());
|
||||
assert!(s.next().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn buf_read_has_data_left() {
|
||||
let mut buf = Cursor::new(&b"abcd"[..]);
|
||||
assert!(buf.has_data_left().unwrap());
|
||||
buf.read_exact(&mut [0; 2]).unwrap();
|
||||
assert!(buf.has_data_left().unwrap());
|
||||
buf.read_exact(&mut [0; 2]).unwrap();
|
||||
assert!(!buf.has_data_left().unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_to_end() {
|
||||
let mut c = Cursor::new(&b""[..]);
|
||||
let mut v = Vec::new();
|
||||
assert_eq!(c.read_to_end(&mut v).unwrap(), 0);
|
||||
assert_eq!(v, []);
|
||||
|
||||
let mut c = Cursor::new(&b"1"[..]);
|
||||
let mut v = Vec::new();
|
||||
assert_eq!(c.read_to_end(&mut v).unwrap(), 1);
|
||||
assert_eq!(v, b"1");
|
||||
|
||||
let cap = if cfg!(miri) { 1024 } else { 1024 * 1024 };
|
||||
let data = (0..cap).map(|i| (i / 3) as u8).collect::<Vec<_>>();
|
||||
let mut v = Vec::new();
|
||||
let (a, b) = data.split_at(data.len() / 2);
|
||||
assert_eq!(Cursor::new(a).read_to_end(&mut v).unwrap(), a.len());
|
||||
assert_eq!(Cursor::new(b).read_to_end(&mut v).unwrap(), b.len());
|
||||
assert_eq!(v, data);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_to_string() {
|
||||
let mut c = Cursor::new(&b""[..]);
|
||||
let mut v = String::new();
|
||||
assert_eq!(c.read_to_string(&mut v).unwrap(), 0);
|
||||
assert_eq!(v, "");
|
||||
|
||||
let mut c = Cursor::new(&b"1"[..]);
|
||||
let mut v = String::new();
|
||||
assert_eq!(c.read_to_string(&mut v).unwrap(), 1);
|
||||
assert_eq!(v, "1");
|
||||
|
||||
let mut c = Cursor::new(&b"\xff"[..]);
|
||||
let mut v = String::new();
|
||||
assert!(c.read_to_string(&mut v).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_exact() {
|
||||
let mut buf = [0; 4];
|
||||
|
||||
let mut c = Cursor::new(&b""[..]);
|
||||
assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
|
||||
|
||||
let mut c = Cursor::new(&b"123"[..]).chain(Cursor::new(&b"456789"[..]));
|
||||
c.read_exact(&mut buf).unwrap();
|
||||
assert_eq!(&buf, b"1234");
|
||||
c.read_exact(&mut buf).unwrap();
|
||||
assert_eq!(&buf, b"5678");
|
||||
assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_exact_slice() {
|
||||
let mut buf = [0; 4];
|
||||
|
||||
let mut c = &b""[..];
|
||||
assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
|
||||
|
||||
let mut c = &b"123"[..];
|
||||
assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
|
||||
// make sure the optimized (early returning) method is being used
|
||||
assert_eq!(&buf, &[0; 4]);
|
||||
|
||||
let mut c = &b"1234"[..];
|
||||
c.read_exact(&mut buf).unwrap();
|
||||
assert_eq!(&buf, b"1234");
|
||||
|
||||
let mut c = &b"56789"[..];
|
||||
c.read_exact(&mut buf).unwrap();
|
||||
assert_eq!(&buf, b"5678");
|
||||
assert_eq!(c, b"9");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_buf_exact() {
|
||||
let buf: &mut [_] = &mut [0; 4];
|
||||
let mut buf: BorrowedBuf<'_> = buf.into();
|
||||
|
||||
let mut c = Cursor::new(&b""[..]);
|
||||
assert_eq!(c.read_buf_exact(buf.unfilled()).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
|
||||
|
||||
let mut c = Cursor::new(&b"123456789"[..]);
|
||||
c.read_buf_exact(buf.unfilled()).unwrap();
|
||||
assert_eq!(buf.filled(), b"1234");
|
||||
|
||||
buf.clear();
|
||||
|
||||
c.read_buf_exact(buf.unfilled()).unwrap();
|
||||
assert_eq!(buf.filled(), b"5678");
|
||||
|
||||
buf.clear();
|
||||
|
||||
assert_eq!(c.read_buf_exact(buf.unfilled()).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn borrowed_cursor_advance_overflow() {
|
||||
let mut buf = [0; 512];
|
||||
let mut buf = BorrowedBuf::from(&mut buf[..]);
|
||||
buf.unfilled().advance(1);
|
||||
buf.unfilled().advance(usize::MAX);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn take_eof() {
|
||||
struct R;
|
||||
|
||||
impl Read for R {
|
||||
fn read(&mut self, _: &mut [u8]) -> io::Result<usize> {
|
||||
Err(io::const_error!(io::ErrorKind::Other, ""))
|
||||
}
|
||||
}
|
||||
impl BufRead for R {
|
||||
fn fill_buf(&mut self) -> io::Result<&[u8]> {
|
||||
Err(io::const_error!(io::ErrorKind::Other, ""))
|
||||
}
|
||||
fn consume(&mut self, _amt: usize) {}
|
||||
}
|
||||
|
||||
let mut buf = [0; 1];
|
||||
assert_eq!(0, R.take(0).read(&mut buf).unwrap());
|
||||
assert_eq!(b"", R.take(0).fill_buf().unwrap());
|
||||
}
|
||||
|
||||
fn cmp_bufread<Br1: BufRead, Br2: BufRead>(mut br1: Br1, mut br2: Br2, exp: &[u8]) {
|
||||
let mut cat = Vec::new();
|
||||
loop {
|
||||
let consume = {
|
||||
let buf1 = br1.fill_buf().unwrap();
|
||||
let buf2 = br2.fill_buf().unwrap();
|
||||
let minlen = if buf1.len() < buf2.len() { buf1.len() } else { buf2.len() };
|
||||
assert_eq!(buf1[..minlen], buf2[..minlen]);
|
||||
cat.extend_from_slice(&buf1[..minlen]);
|
||||
minlen
|
||||
};
|
||||
if consume == 0 {
|
||||
break;
|
||||
}
|
||||
br1.consume(consume);
|
||||
br2.consume(consume);
|
||||
}
|
||||
assert_eq!(br1.fill_buf().unwrap().len(), 0);
|
||||
assert_eq!(br2.fill_buf().unwrap().len(), 0);
|
||||
assert_eq!(&cat[..], &exp[..])
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn chain_bufread() {
|
||||
let testdata = b"ABCDEFGHIJKL";
|
||||
let chain1 =
|
||||
(&testdata[..3]).chain(&testdata[3..6]).chain(&testdata[6..9]).chain(&testdata[9..]);
|
||||
let chain2 = (&testdata[..4]).chain(&testdata[4..8]).chain(&testdata[8..]);
|
||||
cmp_bufread(chain1, chain2, &testdata[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn chain_splitted_char() {
|
||||
let chain = b"\xc3".chain(b"\xa9".as_slice());
|
||||
assert_eq!(crate::io::read_to_string(chain).unwrap(), "é");
|
||||
|
||||
let mut chain = b"\xc3".chain(b"\xa9\n".as_slice());
|
||||
let mut buf = String::new();
|
||||
assert_eq!(chain.read_line(&mut buf).unwrap(), 3);
|
||||
assert_eq!(buf, "é\n");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bufreader_size_hint() {
|
||||
let testdata = b"ABCDEFGHIJKL";
|
||||
let mut buf_reader = BufReader::new(&testdata[..]);
|
||||
assert_eq!(buf_reader.buffer().len(), 0);
|
||||
|
||||
let buffer_length = testdata.len();
|
||||
buf_reader.fill_buf().unwrap();
|
||||
|
||||
// Check that size hint matches buffer contents
|
||||
let mut buffered_bytes = buf_reader.bytes();
|
||||
let (lower_bound, _upper_bound) = buffered_bytes.size_hint();
|
||||
assert_eq!(lower_bound, buffer_length);
|
||||
|
||||
// Check that size hint matches buffer contents after advancing
|
||||
buffered_bytes.next().unwrap().unwrap();
|
||||
let (lower_bound, _upper_bound) = buffered_bytes.size_hint();
|
||||
assert_eq!(lower_bound, buffer_length - 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_size_hint() {
|
||||
let size_hint = io::empty().bytes().size_hint();
|
||||
assert_eq!(size_hint, (0, Some(0)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn slice_size_hint() {
|
||||
let size_hint = (&[1, 2, 3]).bytes().size_hint();
|
||||
assert_eq!(size_hint, (3, Some(3)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn take_size_hint() {
|
||||
let size_hint = (&[1, 2, 3]).take(2).bytes().size_hint();
|
||||
assert_eq!(size_hint, (2, Some(2)));
|
||||
|
||||
let size_hint = (&[1, 2, 3]).take(4).bytes().size_hint();
|
||||
assert_eq!(size_hint, (3, Some(3)));
|
||||
|
||||
let size_hint = io::repeat(0).take(3).bytes().size_hint();
|
||||
assert_eq!(size_hint, (3, Some(3)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn chain_empty_size_hint() {
|
||||
let chain = io::empty().chain(io::empty());
|
||||
let size_hint = chain.bytes().size_hint();
|
||||
assert_eq!(size_hint, (0, Some(0)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn chain_size_hint() {
|
||||
let testdata = b"ABCDEFGHIJKL";
|
||||
let mut buf_reader_1 = BufReader::new(&testdata[..6]);
|
||||
let mut buf_reader_2 = BufReader::new(&testdata[6..]);
|
||||
|
||||
buf_reader_1.fill_buf().unwrap();
|
||||
buf_reader_2.fill_buf().unwrap();
|
||||
|
||||
let chain = buf_reader_1.chain(buf_reader_2);
|
||||
let size_hint = chain.bytes().size_hint();
|
||||
assert_eq!(size_hint, (testdata.len(), Some(testdata.len())));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn chain_zero_length_read_is_not_eof() {
|
||||
let a = b"A";
|
||||
let b = b"B";
|
||||
let mut s = String::new();
|
||||
let mut chain = (&a[..]).chain(&b[..]);
|
||||
chain.read(&mut []).unwrap();
|
||||
chain.read_to_string(&mut s).unwrap();
|
||||
assert_eq!("AB", s);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
#[cfg_attr(miri, ignore)] // Miri isn't fast...
|
||||
fn bench_read_to_end(b: &mut test::Bencher) {
|
||||
b.iter(|| {
|
||||
let mut lr = repeat(1).take(10000000);
|
||||
let mut vec = Vec::with_capacity(1024);
|
||||
super::default_read_to_end(&mut lr, &mut vec, None)
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn seek_len() -> io::Result<()> {
|
||||
let mut c = Cursor::new(vec![0; 15]);
|
||||
assert_eq!(c.stream_len()?, 15);
|
||||
|
||||
c.seek(SeekFrom::End(0))?;
|
||||
let old_pos = c.stream_position()?;
|
||||
assert_eq!(c.stream_len()?, 15);
|
||||
assert_eq!(c.stream_position()?, old_pos);
|
||||
|
||||
c.seek(SeekFrom::Start(7))?;
|
||||
c.seek(SeekFrom::Current(2))?;
|
||||
let old_pos = c.stream_position()?;
|
||||
assert_eq!(c.stream_len()?, 15);
|
||||
assert_eq!(c.stream_position()?, old_pos);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn seek_position() -> io::Result<()> {
|
||||
// All `asserts` are duplicated here to make sure the method does not
|
||||
// change anything about the seek state.
|
||||
let mut c = Cursor::new(vec![0; 15]);
|
||||
assert_eq!(c.stream_position()?, 0);
|
||||
assert_eq!(c.stream_position()?, 0);
|
||||
|
||||
c.seek(SeekFrom::End(0))?;
|
||||
assert_eq!(c.stream_position()?, 15);
|
||||
assert_eq!(c.stream_position()?, 15);
|
||||
|
||||
c.seek(SeekFrom::Start(7))?;
|
||||
c.seek(SeekFrom::Current(2))?;
|
||||
assert_eq!(c.stream_position()?, 9);
|
||||
assert_eq!(c.stream_position()?, 9);
|
||||
|
||||
c.seek(SeekFrom::End(-3))?;
|
||||
c.seek(SeekFrom::Current(1))?;
|
||||
c.seek(SeekFrom::Current(-5))?;
|
||||
assert_eq!(c.stream_position()?, 8);
|
||||
assert_eq!(c.stream_position()?, 8);
|
||||
|
||||
c.rewind()?;
|
||||
assert_eq!(c.stream_position()?, 0);
|
||||
assert_eq!(c.stream_position()?, 0);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn take_seek() -> io::Result<()> {
|
||||
let mut buf = Cursor::new(b"0123456789");
|
||||
buf.set_position(2);
|
||||
let mut take = buf.by_ref().take(4);
|
||||
let mut buf1 = [0u8; 1];
|
||||
let mut buf2 = [0u8; 2];
|
||||
assert_eq!(take.position(), 0);
|
||||
|
||||
assert_eq!(take.seek(SeekFrom::Start(0))?, 0);
|
||||
take.read_exact(&mut buf2)?;
|
||||
assert_eq!(buf2, [b'2', b'3']);
|
||||
assert_eq!(take.seek(SeekFrom::Start(1))?, 1);
|
||||
take.read_exact(&mut buf2)?;
|
||||
assert_eq!(buf2, [b'3', b'4']);
|
||||
assert_eq!(take.seek(SeekFrom::Start(2))?, 2);
|
||||
take.read_exact(&mut buf2)?;
|
||||
assert_eq!(buf2, [b'4', b'5']);
|
||||
assert_eq!(take.seek(SeekFrom::Start(3))?, 3);
|
||||
take.read_exact(&mut buf1)?;
|
||||
assert_eq!(buf1, [b'5']);
|
||||
assert_eq!(take.seek(SeekFrom::Start(4))?, 4);
|
||||
assert_eq!(take.read(&mut buf1)?, 0);
|
||||
|
||||
assert_eq!(take.seek(SeekFrom::End(0))?, 4);
|
||||
assert_eq!(take.seek(SeekFrom::End(-1))?, 3);
|
||||
take.read_exact(&mut buf1)?;
|
||||
assert_eq!(buf1, [b'5']);
|
||||
assert_eq!(take.seek(SeekFrom::End(-2))?, 2);
|
||||
take.read_exact(&mut buf2)?;
|
||||
assert_eq!(buf2, [b'4', b'5']);
|
||||
assert_eq!(take.seek(SeekFrom::End(-3))?, 1);
|
||||
take.read_exact(&mut buf2)?;
|
||||
assert_eq!(buf2, [b'3', b'4']);
|
||||
assert_eq!(take.seek(SeekFrom::End(-4))?, 0);
|
||||
take.read_exact(&mut buf2)?;
|
||||
assert_eq!(buf2, [b'2', b'3']);
|
||||
|
||||
assert_eq!(take.seek(SeekFrom::Current(0))?, 2);
|
||||
take.read_exact(&mut buf2)?;
|
||||
assert_eq!(buf2, [b'4', b'5']);
|
||||
|
||||
assert_eq!(take.seek(SeekFrom::Current(-3))?, 1);
|
||||
take.read_exact(&mut buf2)?;
|
||||
assert_eq!(buf2, [b'3', b'4']);
|
||||
|
||||
assert_eq!(take.seek(SeekFrom::Current(-1))?, 2);
|
||||
take.read_exact(&mut buf2)?;
|
||||
assert_eq!(buf2, [b'4', b'5']);
|
||||
|
||||
assert_eq!(take.seek(SeekFrom::Current(-4))?, 0);
|
||||
take.read_exact(&mut buf2)?;
|
||||
assert_eq!(buf2, [b'2', b'3']);
|
||||
|
||||
assert_eq!(take.seek(SeekFrom::Current(2))?, 4);
|
||||
assert_eq!(take.read(&mut buf1)?, 0);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn take_seek_error() {
|
||||
let buf = Cursor::new(b"0123456789");
|
||||
let mut take = buf.take(2);
|
||||
assert!(take.seek(SeekFrom::Start(3)).is_err());
|
||||
assert!(take.seek(SeekFrom::End(1)).is_err());
|
||||
assert!(take.seek(SeekFrom::End(-3)).is_err());
|
||||
assert!(take.seek(SeekFrom::Current(-1)).is_err());
|
||||
assert!(take.seek(SeekFrom::Current(3)).is_err());
|
||||
}
|
||||
|
||||
struct ExampleHugeRangeOfZeroes {
|
||||
position: u64,
|
||||
}
|
||||
|
||||
impl Read for ExampleHugeRangeOfZeroes {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
let max = buf.len().min(usize::MAX);
|
||||
for i in 0..max {
|
||||
if self.position == u64::MAX {
|
||||
return Ok(i);
|
||||
}
|
||||
self.position += 1;
|
||||
buf[i] = 0;
|
||||
}
|
||||
Ok(max)
|
||||
}
|
||||
}
|
||||
|
||||
impl Seek for ExampleHugeRangeOfZeroes {
|
||||
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
|
||||
match pos {
|
||||
io::SeekFrom::Start(i) => self.position = i,
|
||||
io::SeekFrom::End(i) if i >= 0 => self.position = u64::MAX,
|
||||
io::SeekFrom::End(i) => self.position = self.position - i.unsigned_abs(),
|
||||
io::SeekFrom::Current(i) => {
|
||||
self.position = if i >= 0 {
|
||||
self.position.saturating_add(i.unsigned_abs())
|
||||
} else {
|
||||
self.position.saturating_sub(i.unsigned_abs())
|
||||
};
|
||||
}
|
||||
}
|
||||
Ok(self.position)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn take_seek_big_offsets() -> io::Result<()> {
|
||||
let inner = ExampleHugeRangeOfZeroes { position: 1 };
|
||||
let mut take = inner.take(u64::MAX - 2);
|
||||
assert_eq!(take.seek(io::SeekFrom::Start(u64::MAX - 2))?, u64::MAX - 2);
|
||||
assert_eq!(take.inner.position, u64::MAX - 1);
|
||||
assert_eq!(take.seek(io::SeekFrom::Start(0))?, 0);
|
||||
assert_eq!(take.inner.position, 1);
|
||||
assert_eq!(take.seek(io::SeekFrom::End(-1))?, u64::MAX - 3);
|
||||
assert_eq!(take.inner.position, u64::MAX - 2);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// A simple example reader which uses the default implementation of
|
||||
// read_to_end.
|
||||
struct ExampleSliceReader<'a> {
|
||||
slice: &'a [u8],
|
||||
}
|
||||
|
||||
impl<'a> Read for ExampleSliceReader<'a> {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
let len = cmp::min(self.slice.len(), buf.len());
|
||||
buf[..len].copy_from_slice(&self.slice[..len]);
|
||||
self.slice = &self.slice[len..];
|
||||
Ok(len)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_to_end_capacity() -> io::Result<()> {
|
||||
let input = &b"foo"[..];
|
||||
|
||||
// read_to_end() takes care not to over-allocate when a buffer is the
|
||||
// exact size needed.
|
||||
let mut vec1 = Vec::with_capacity(input.len());
|
||||
ExampleSliceReader { slice: input }.read_to_end(&mut vec1)?;
|
||||
assert_eq!(vec1.len(), input.len());
|
||||
assert_eq!(vec1.capacity(), input.len(), "did not allocate more");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn io_slice_mut_advance_slices() {
|
||||
let mut buf1 = [1; 8];
|
||||
let mut buf2 = [2; 16];
|
||||
let mut buf3 = [3; 8];
|
||||
let mut bufs = &mut [
|
||||
IoSliceMut::new(&mut buf1),
|
||||
IoSliceMut::new(&mut buf2),
|
||||
IoSliceMut::new(&mut buf3),
|
||||
][..];
|
||||
|
||||
// Only in a single buffer..
|
||||
IoSliceMut::advance_slices(&mut bufs, 1);
|
||||
assert_eq!(bufs[0].deref(), [1; 7].as_ref());
|
||||
assert_eq!(bufs[1].deref(), [2; 16].as_ref());
|
||||
assert_eq!(bufs[2].deref(), [3; 8].as_ref());
|
||||
|
||||
// Removing a buffer, leaving others as is.
|
||||
IoSliceMut::advance_slices(&mut bufs, 7);
|
||||
assert_eq!(bufs[0].deref(), [2; 16].as_ref());
|
||||
assert_eq!(bufs[1].deref(), [3; 8].as_ref());
|
||||
|
||||
// Removing a buffer and removing from the next buffer.
|
||||
IoSliceMut::advance_slices(&mut bufs, 18);
|
||||
assert_eq!(bufs[0].deref(), [3; 6].as_ref());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn io_slice_mut_advance_slices_empty_slice() {
|
||||
let mut empty_bufs = &mut [][..];
|
||||
IoSliceMut::advance_slices(&mut empty_bufs, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn io_slice_mut_advance_slices_beyond_total_length() {
|
||||
let mut buf1 = [1; 8];
|
||||
let mut bufs = &mut [IoSliceMut::new(&mut buf1)][..];
|
||||
|
||||
IoSliceMut::advance_slices(&mut bufs, 9);
|
||||
assert!(bufs.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn io_slice_advance_slices() {
|
||||
let buf1 = [1; 8];
|
||||
let buf2 = [2; 16];
|
||||
let buf3 = [3; 8];
|
||||
let mut bufs = &mut [IoSlice::new(&buf1), IoSlice::new(&buf2), IoSlice::new(&buf3)][..];
|
||||
|
||||
// Only in a single buffer..
|
||||
IoSlice::advance_slices(&mut bufs, 1);
|
||||
assert_eq!(bufs[0].deref(), [1; 7].as_ref());
|
||||
assert_eq!(bufs[1].deref(), [2; 16].as_ref());
|
||||
assert_eq!(bufs[2].deref(), [3; 8].as_ref());
|
||||
|
||||
// Removing a buffer, leaving others as is.
|
||||
IoSlice::advance_slices(&mut bufs, 7);
|
||||
assert_eq!(bufs[0].deref(), [2; 16].as_ref());
|
||||
assert_eq!(bufs[1].deref(), [3; 8].as_ref());
|
||||
|
||||
// Removing a buffer and removing from the next buffer.
|
||||
IoSlice::advance_slices(&mut bufs, 18);
|
||||
assert_eq!(bufs[0].deref(), [3; 6].as_ref());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn io_slice_advance_slices_empty_slice() {
|
||||
let mut empty_bufs = &mut [][..];
|
||||
IoSlice::advance_slices(&mut empty_bufs, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn io_slice_advance_slices_beyond_total_length() {
|
||||
let buf1 = [1; 8];
|
||||
let mut bufs = &mut [IoSlice::new(&buf1)][..];
|
||||
|
||||
IoSlice::advance_slices(&mut bufs, 9);
|
||||
assert!(bufs.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn io_slice_as_slice() {
|
||||
let buf = [1; 8];
|
||||
let slice = IoSlice::new(&buf).as_slice();
|
||||
assert_eq!(slice, buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn io_slice_into_slice() {
|
||||
let mut buf = [1; 8];
|
||||
let slice = IoSliceMut::new(&mut buf).into_slice();
|
||||
assert_eq!(slice, [1; 8]);
|
||||
}
|
||||
|
||||
/// Creates a new writer that reads from at most `n_bufs` and reads
|
||||
/// `per_call` bytes (in total) per call to write.
|
||||
fn test_writer(n_bufs: usize, per_call: usize) -> TestWriter {
|
||||
TestWriter { n_bufs, per_call, written: Vec::new() }
|
||||
}
|
||||
|
||||
struct TestWriter {
|
||||
n_bufs: usize,
|
||||
per_call: usize,
|
||||
written: Vec<u8>,
|
||||
}
|
||||
|
||||
impl Write for TestWriter {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.write_vectored(&[IoSlice::new(buf)])
|
||||
}
|
||||
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
let mut left = self.per_call;
|
||||
let mut written = 0;
|
||||
for buf in bufs.iter().take(self.n_bufs) {
|
||||
let n = min(left, buf.len());
|
||||
self.written.extend_from_slice(&buf[0..n]);
|
||||
left -= n;
|
||||
written += n;
|
||||
}
|
||||
Ok(written)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_writer_read_from_one_buf() {
|
||||
let mut writer = test_writer(1, 2);
|
||||
|
||||
assert_eq!(writer.write(&[]).unwrap(), 0);
|
||||
assert_eq!(writer.write_vectored(&[]).unwrap(), 0);
|
||||
|
||||
// Read at most 2 bytes.
|
||||
assert_eq!(writer.write(&[1, 1, 1]).unwrap(), 2);
|
||||
let bufs = &[IoSlice::new(&[2, 2, 2])];
|
||||
assert_eq!(writer.write_vectored(bufs).unwrap(), 2);
|
||||
|
||||
// Only read from first buf.
|
||||
let bufs = &[IoSlice::new(&[3]), IoSlice::new(&[4, 4])];
|
||||
assert_eq!(writer.write_vectored(bufs).unwrap(), 1);
|
||||
|
||||
assert_eq!(writer.written, &[1, 1, 2, 2, 3]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_writer_read_from_multiple_bufs() {
|
||||
let mut writer = test_writer(3, 3);
|
||||
|
||||
// Read at most 3 bytes from two buffers.
|
||||
let bufs = &[IoSlice::new(&[1]), IoSlice::new(&[2, 2, 2])];
|
||||
assert_eq!(writer.write_vectored(bufs).unwrap(), 3);
|
||||
|
||||
// Read at most 3 bytes from three buffers.
|
||||
let bufs = &[IoSlice::new(&[3]), IoSlice::new(&[4]), IoSlice::new(&[5, 5])];
|
||||
assert_eq!(writer.write_vectored(bufs).unwrap(), 3);
|
||||
|
||||
assert_eq!(writer.written, &[1, 2, 2, 3, 4, 5]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_all_vectored() {
|
||||
#[rustfmt::skip] // Becomes unreadable otherwise.
|
||||
let tests: Vec<(_, &'static [u8])> = vec![
|
||||
(vec![], &[]),
|
||||
(vec![IoSlice::new(&[]), IoSlice::new(&[])], &[]),
|
||||
(vec![IoSlice::new(&[1])], &[1]),
|
||||
(vec![IoSlice::new(&[1, 2])], &[1, 2]),
|
||||
(vec![IoSlice::new(&[1, 2, 3])], &[1, 2, 3]),
|
||||
(vec![IoSlice::new(&[1, 2, 3, 4])], &[1, 2, 3, 4]),
|
||||
(vec![IoSlice::new(&[1, 2, 3, 4, 5])], &[1, 2, 3, 4, 5]),
|
||||
(vec![IoSlice::new(&[1]), IoSlice::new(&[2])], &[1, 2]),
|
||||
(vec![IoSlice::new(&[1]), IoSlice::new(&[2, 2])], &[1, 2, 2]),
|
||||
(vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2])], &[1, 1, 2, 2]),
|
||||
(vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2, 2])], &[1, 1, 2, 2, 2]),
|
||||
(vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2, 2])], &[1, 1, 2, 2, 2]),
|
||||
(vec![IoSlice::new(&[1, 1, 1]), IoSlice::new(&[2, 2, 2])], &[1, 1, 1, 2, 2, 2]),
|
||||
(vec![IoSlice::new(&[1, 1, 1]), IoSlice::new(&[2, 2, 2, 2])], &[1, 1, 1, 2, 2, 2, 2]),
|
||||
(vec![IoSlice::new(&[1, 1, 1, 1]), IoSlice::new(&[2, 2, 2, 2])], &[1, 1, 1, 1, 2, 2, 2, 2]),
|
||||
(vec![IoSlice::new(&[1]), IoSlice::new(&[2]), IoSlice::new(&[3])], &[1, 2, 3]),
|
||||
(vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2]), IoSlice::new(&[3, 3])], &[1, 1, 2, 2, 3, 3]),
|
||||
(vec![IoSlice::new(&[1]), IoSlice::new(&[2, 2]), IoSlice::new(&[3, 3, 3])], &[1, 2, 2, 3, 3, 3]),
|
||||
(vec![IoSlice::new(&[1, 1, 1]), IoSlice::new(&[2, 2, 2]), IoSlice::new(&[3, 3, 3])], &[1, 1, 1, 2, 2, 2, 3, 3, 3]),
|
||||
];
|
||||
|
||||
let writer_configs = &[(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)];
|
||||
|
||||
for (n_bufs, per_call) in writer_configs.iter().copied() {
|
||||
for (mut input, wanted) in tests.clone().into_iter() {
|
||||
let mut writer = test_writer(n_bufs, per_call);
|
||||
assert!(writer.write_all_vectored(&mut *input).is_ok());
|
||||
assert_eq!(&*writer.written, &*wanted);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Issue 94981
|
||||
#[test]
|
||||
#[should_panic = "number of read bytes exceeds limit"]
|
||||
fn test_take_wrong_length() {
|
||||
struct LieAboutSize(bool);
|
||||
|
||||
impl Read for LieAboutSize {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
// Lie about the read size at first time of read.
|
||||
if core::mem::take(&mut self.0) { Ok(buf.len() + 1) } else { Ok(buf.len()) }
|
||||
}
|
||||
}
|
||||
|
||||
let mut buffer = vec![0; 4];
|
||||
let mut reader = LieAboutSize(true).take(4);
|
||||
// Primed the `Limit` by lying about the read size.
|
||||
let _ = reader.read(&mut buffer[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn slice_read_exact_eof() {
|
||||
let slice = &b"123456"[..];
|
||||
|
||||
let mut r = slice;
|
||||
assert!(r.read_exact(&mut [0; 10]).is_err());
|
||||
assert!(r.is_empty());
|
||||
|
||||
let mut r = slice;
|
||||
let buf = &mut [0; 10];
|
||||
let mut buf = BorrowedBuf::from(buf.as_mut_slice());
|
||||
assert!(r.read_buf_exact(buf.unfilled()).is_err());
|
||||
assert!(r.is_empty());
|
||||
assert_eq!(buf.filled(), b"123456");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cursor_read_exact_eof() {
|
||||
let slice = Cursor::new(b"123456");
|
||||
|
||||
let mut r = slice.clone();
|
||||
assert!(r.read_exact(&mut [0; 10]).is_err());
|
||||
assert!(Cursor::split(&r).1.is_empty());
|
||||
|
||||
let mut r = slice;
|
||||
let buf = &mut [0; 10];
|
||||
let mut buf = BorrowedBuf::from(buf.as_mut_slice());
|
||||
assert!(r.read_buf_exact(buf.unfilled()).is_err());
|
||||
assert!(Cursor::split(&r).1.is_empty());
|
||||
assert_eq!(buf.filled(), b"123456");
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_take_read(b: &mut test::Bencher) {
|
||||
b.iter(|| {
|
||||
let mut buf = [0; 64];
|
||||
|
||||
[255; 128].take(64).read(&mut buf).unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_take_read_buf(b: &mut test::Bencher) {
|
||||
b.iter(|| {
|
||||
let buf: &mut [_] = &mut [MaybeUninit::uninit(); 64];
|
||||
|
||||
let mut buf: BorrowedBuf<'_> = buf.into();
|
||||
|
||||
[255; 128].take(64).read_buf(buf.unfilled()).unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
// Issue #120603
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn read_buf_broken_read() {
|
||||
struct MalformedRead;
|
||||
|
||||
impl Read for MalformedRead {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
// broken length calculation
|
||||
Ok(buf.len() + 1)
|
||||
}
|
||||
}
|
||||
|
||||
let _ = BufReader::new(MalformedRead).fill_buf();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_buf_full_read() {
|
||||
struct FullRead;
|
||||
|
||||
impl Read for FullRead {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
Ok(buf.len())
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(BufReader::new(FullRead).fill_buf().unwrap().len(), DEFAULT_BUF_SIZE);
|
||||
}
|
||||
|
||||
struct DataAndErrorReader(&'static [u8]);
|
||||
|
||||
impl Read for DataAndErrorReader {
|
||||
fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
|
||||
panic!("We want tests to use `read_buf`")
|
||||
}
|
||||
|
||||
fn read_buf(&mut self, buf: io::BorrowedCursor<'_>) -> io::Result<()> {
|
||||
self.0.read_buf(buf).unwrap();
|
||||
Err(io::Error::other("error"))
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_buf_data_and_error_take() {
|
||||
let mut buf = [0; 64];
|
||||
let mut buf = io::BorrowedBuf::from(buf.as_mut_slice());
|
||||
|
||||
let mut r = DataAndErrorReader(&[4, 5, 6]).take(1);
|
||||
assert!(r.read_buf(buf.unfilled()).is_err());
|
||||
assert_eq!(buf.filled(), &[4]);
|
||||
|
||||
assert!(r.read_buf(buf.unfilled()).is_ok());
|
||||
assert_eq!(buf.filled(), &[4]);
|
||||
assert_eq!(r.get_ref().0, &[5, 6]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_buf_data_and_error_buf() {
|
||||
let mut r = BufReader::new(DataAndErrorReader(&[4, 5, 6]));
|
||||
|
||||
assert!(r.fill_buf().is_err());
|
||||
assert_eq!(r.fill_buf().unwrap(), &[4, 5, 6]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_buf_data_and_error_read_to_end() {
|
||||
let mut r = DataAndErrorReader(&[4, 5, 6]);
|
||||
|
||||
let mut v = Vec::with_capacity(200);
|
||||
assert!(r.read_to_end(&mut v).is_err());
|
||||
|
||||
assert_eq!(v, &[4, 5, 6]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_to_end_error() {
|
||||
struct ErrorReader;
|
||||
|
||||
impl Read for ErrorReader {
|
||||
fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
|
||||
Err(io::Error::other("error"))
|
||||
}
|
||||
}
|
||||
|
||||
let mut r = [4, 5, 6].chain(ErrorReader);
|
||||
|
||||
let mut v = Vec::with_capacity(200);
|
||||
assert!(r.read_to_end(&mut v).is_err());
|
||||
|
||||
assert_eq!(v, &[4, 5, 6]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn try_oom_error() {
|
||||
use alloc::alloc::Layout;
|
||||
use alloc::collections::{TryReserveError, TryReserveErrorKind};
|
||||
|
||||
// We simulate a `Vec::try_reserve` error rather than attempting a huge size for real. This way
|
||||
// we're not subject to the whims of optimization that might skip the actual allocation, and it
|
||||
// also works for 32-bit targets and miri that might not OOM at all.
|
||||
let layout = Layout::new::<u8>();
|
||||
let kind = TryReserveErrorKind::AllocError { layout, non_exhaustive: () };
|
||||
let reserve_err = TryReserveError::from(kind);
|
||||
|
||||
let io_err = io::Error::from(reserve_err);
|
||||
assert_eq!(io::ErrorKind::OutOfMemory, io_err.kind());
|
||||
}
|
||||
@@ -1,448 +0,0 @@
|
||||
#![allow(missing_copy_implementations)]
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
use crate::fmt;
|
||||
use crate::io::{
|
||||
self, BorrowedCursor, BufRead, IoSlice, IoSliceMut, Read, Seek, SeekFrom, SizeHint, Write,
|
||||
};
|
||||
|
||||
/// `Empty` ignores any data written via [`Write`], and will always be empty
|
||||
/// (returning zero bytes) when read via [`Read`].
|
||||
///
|
||||
/// This struct is generally created by calling [`empty()`]. Please
|
||||
/// see the documentation of [`empty()`] for more details.
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[non_exhaustive]
|
||||
#[derive(Copy, Clone, Debug, Default)]
|
||||
pub struct Empty;
|
||||
|
||||
/// Creates a value that is always at EOF for reads, and ignores all data written.
|
||||
///
|
||||
/// All calls to [`write`] on the returned instance will return [`Ok(buf.len())`]
|
||||
/// and the contents of the buffer will not be inspected.
|
||||
///
|
||||
/// All calls to [`read`] from the returned reader will return [`Ok(0)`].
|
||||
///
|
||||
/// [`Ok(buf.len())`]: Ok
|
||||
/// [`Ok(0)`]: Ok
|
||||
///
|
||||
/// [`write`]: Write::write
|
||||
/// [`read`]: Read::read
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::io::{self, Write};
|
||||
///
|
||||
/// let buffer = vec![1, 2, 3, 5, 8];
|
||||
/// let num_bytes = io::empty().write(&buffer).unwrap();
|
||||
/// assert_eq!(num_bytes, 5);
|
||||
/// ```
|
||||
///
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::io::{self, Read};
|
||||
///
|
||||
/// let mut buffer = String::new();
|
||||
/// io::empty().read_to_string(&mut buffer).unwrap();
|
||||
/// assert!(buffer.is_empty());
|
||||
/// ```
|
||||
#[must_use]
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[rustc_const_stable(feature = "const_io_structs", since = "1.79.0")]
|
||||
pub const fn empty() -> Empty {
|
||||
Empty
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl Read for Empty {
|
||||
#[inline]
|
||||
fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_buf(&mut self, _cursor: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_vectored(&mut self, _bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_read_vectored(&self) -> bool {
|
||||
// Do not force `Chain<Empty, T>` or `Chain<T, Empty>` to use vectored
|
||||
// reads, unless the other reader is vectored.
|
||||
false
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
|
||||
if !buf.is_empty() { Err(io::Error::READ_EXACT_EOF) } else { Ok(()) }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_buf_exact(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
if cursor.capacity() != 0 { Err(io::Error::READ_EXACT_EOF) } else { Ok(()) }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_to_end(&mut self, _buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_to_string(&mut self, _buf: &mut String) -> io::Result<usize> {
|
||||
Ok(0)
|
||||
}
|
||||
}
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl BufRead for Empty {
|
||||
#[inline]
|
||||
fn fill_buf(&mut self) -> io::Result<&[u8]> {
|
||||
Ok(&[])
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn consume(&mut self, _n: usize) {}
|
||||
|
||||
#[inline]
|
||||
fn has_data_left(&mut self) -> io::Result<bool> {
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_until(&mut self, _byte: u8, _buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn skip_until(&mut self, _byte: u8) -> io::Result<usize> {
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_line(&mut self, _buf: &mut String) -> io::Result<usize> {
|
||||
Ok(0)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "empty_seek", since = "1.51.0")]
|
||||
impl Seek for Empty {
|
||||
#[inline]
|
||||
fn seek(&mut self, _pos: SeekFrom) -> io::Result<u64> {
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn stream_len(&mut self) -> io::Result<u64> {
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn stream_position(&mut self) -> io::Result<u64> {
|
||||
Ok(0)
|
||||
}
|
||||
}
|
||||
|
||||
impl SizeHint for Empty {
|
||||
#[inline]
|
||||
fn upper_bound(&self) -> Option<usize> {
|
||||
Some(0)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "empty_write", since = "1.73.0")]
|
||||
impl Write for Empty {
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
let total_len = bufs.iter().map(|b| b.len()).sum();
|
||||
Ok(total_len)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all(&mut self, _buf: &[u8]) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all_vectored(&mut self, _bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_fmt(&mut self, _args: fmt::Arguments<'_>) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "empty_write", since = "1.73.0")]
|
||||
impl Write for &Empty {
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
let total_len = bufs.iter().map(|b| b.len()).sum();
|
||||
Ok(total_len)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all(&mut self, _buf: &[u8]) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all_vectored(&mut self, _bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_fmt(&mut self, _args: fmt::Arguments<'_>) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A reader which yields one byte over and over and over and over and over and...
|
||||
///
|
||||
/// This struct is generally created by calling [`repeat()`]. Please
|
||||
/// see the documentation of [`repeat()`] for more details.
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub struct Repeat {
|
||||
byte: u8,
|
||||
}
|
||||
|
||||
/// Creates an instance of a reader that infinitely repeats one byte.
|
||||
///
|
||||
/// All reads from this reader will succeed by filling the specified buffer with
|
||||
/// the given byte.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::io::{self, Read};
|
||||
///
|
||||
/// let mut buffer = [0; 3];
|
||||
/// io::repeat(0b101).read_exact(&mut buffer).unwrap();
|
||||
/// assert_eq!(buffer, [0b101, 0b101, 0b101]);
|
||||
/// ```
|
||||
#[must_use]
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[rustc_const_stable(feature = "const_io_structs", since = "1.79.0")]
|
||||
pub const fn repeat(byte: u8) -> Repeat {
|
||||
Repeat { byte }
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl Read for Repeat {
|
||||
#[inline]
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
buf.fill(self.byte);
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
|
||||
buf.fill(self.byte);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_buf(&mut self, mut buf: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
// SAFETY: No uninit bytes are being written.
|
||||
unsafe { buf.as_mut() }.write_filled(self.byte);
|
||||
// SAFETY: the entire unfilled portion of buf has been initialized.
|
||||
unsafe { buf.advance_unchecked(buf.capacity()) };
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_buf_exact(&mut self, buf: BorrowedCursor<'_>) -> io::Result<()> {
|
||||
self.read_buf(buf)
|
||||
}
|
||||
|
||||
/// This function is not supported by `io::Repeat`, because there's no end of its data
|
||||
fn read_to_end(&mut self, _: &mut Vec<u8>) -> io::Result<usize> {
|
||||
Err(io::Error::from(io::ErrorKind::OutOfMemory))
|
||||
}
|
||||
|
||||
/// This function is not supported by `io::Repeat`, because there's no end of its data
|
||||
fn read_to_string(&mut self, _: &mut String) -> io::Result<usize> {
|
||||
Err(io::Error::from(io::ErrorKind::OutOfMemory))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
|
||||
let mut nwritten = 0;
|
||||
for buf in bufs {
|
||||
nwritten += self.read(buf)?;
|
||||
}
|
||||
Ok(nwritten)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_read_vectored(&self) -> bool {
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
impl SizeHint for Repeat {
|
||||
#[inline]
|
||||
fn lower_bound(&self) -> usize {
|
||||
usize::MAX
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn upper_bound(&self) -> Option<usize> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
|
||||
impl fmt::Debug for Repeat {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("Repeat").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
/// A writer which will move data into the void.
|
||||
///
|
||||
/// This struct is generally created by calling [`sink()`]. Please
|
||||
/// see the documentation of [`sink()`] for more details.
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[non_exhaustive]
|
||||
#[derive(Copy, Clone, Debug, Default)]
|
||||
pub struct Sink;
|
||||
|
||||
/// Creates an instance of a writer which will successfully consume all data.
|
||||
///
|
||||
/// All calls to [`write`] on the returned instance will return [`Ok(buf.len())`]
|
||||
/// and the contents of the buffer will not be inspected.
|
||||
///
|
||||
/// [`write`]: Write::write
|
||||
/// [`Ok(buf.len())`]: Ok
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::io::{self, Write};
|
||||
///
|
||||
/// let buffer = vec![1, 2, 3, 5, 8];
|
||||
/// let num_bytes = io::sink().write(&buffer).unwrap();
|
||||
/// assert_eq!(num_bytes, 5);
|
||||
/// ```
|
||||
#[must_use]
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[rustc_const_stable(feature = "const_io_structs", since = "1.79.0")]
|
||||
pub const fn sink() -> Sink {
|
||||
Sink
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl Write for Sink {
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
let total_len = bufs.iter().map(|b| b.len()).sum();
|
||||
Ok(total_len)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all(&mut self, _buf: &[u8]) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all_vectored(&mut self, _bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_fmt(&mut self, _args: fmt::Arguments<'_>) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "write_mt", since = "1.48.0")]
|
||||
impl Write for &Sink {
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
|
||||
let total_len = bufs.iter().map(|b| b.len()).sum();
|
||||
Ok(total_len)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all(&mut self, _buf: &[u8]) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_all_vectored(&mut self, _bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_fmt(&mut self, _args: fmt::Arguments<'_>) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,185 +0,0 @@
|
||||
use crate::fmt;
|
||||
use crate::io::prelude::*;
|
||||
use crate::io::{
|
||||
BorrowedBuf, Empty, ErrorKind, IoSlice, IoSliceMut, Repeat, SeekFrom, Sink, empty, repeat, sink,
|
||||
};
|
||||
use crate::mem::MaybeUninit;
|
||||
|
||||
struct ErrorDisplay;
|
||||
|
||||
impl fmt::Display for ErrorDisplay {
|
||||
fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
Err(fmt::Error)
|
||||
}
|
||||
}
|
||||
|
||||
struct PanicDisplay;
|
||||
|
||||
impl fmt::Display for PanicDisplay {
|
||||
fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
panic!()
|
||||
}
|
||||
}
|
||||
|
||||
#[track_caller]
|
||||
fn test_sinking<W: Write>(mut w: W) {
|
||||
assert_eq!(w.write(&[]).unwrap(), 0);
|
||||
assert_eq!(w.write(&[0]).unwrap(), 1);
|
||||
assert_eq!(w.write(&[0; 1024]).unwrap(), 1024);
|
||||
w.write_all(&[]).unwrap();
|
||||
w.write_all(&[0]).unwrap();
|
||||
w.write_all(&[0; 1024]).unwrap();
|
||||
let mut bufs =
|
||||
[IoSlice::new(&[]), IoSlice::new(&[0]), IoSlice::new(&[0; 1024]), IoSlice::new(&[])];
|
||||
assert!(w.is_write_vectored());
|
||||
assert_eq!(w.write_vectored(&[]).unwrap(), 0);
|
||||
assert_eq!(w.write_vectored(&bufs).unwrap(), 1025);
|
||||
w.write_all_vectored(&mut []).unwrap();
|
||||
w.write_all_vectored(&mut bufs).unwrap();
|
||||
assert!(w.flush().is_ok());
|
||||
assert_eq!(w.by_ref().write(&[0; 1024]).unwrap(), 1024);
|
||||
// Ignores fmt arguments
|
||||
w.write_fmt(format_args!("{}", ErrorDisplay)).unwrap();
|
||||
w.write_fmt(format_args!("{}", PanicDisplay)).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn sink_sinks() {
|
||||
test_sinking(sink());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_reads() {
|
||||
let mut e = empty();
|
||||
assert_eq!(e.read(&mut []).unwrap(), 0);
|
||||
assert_eq!(e.read(&mut [0]).unwrap(), 0);
|
||||
assert_eq!(e.read(&mut [0; 1024]).unwrap(), 0);
|
||||
assert_eq!(Read::by_ref(&mut e).read(&mut [0; 1024]).unwrap(), 0);
|
||||
|
||||
e.read_exact(&mut []).unwrap();
|
||||
assert_eq!(e.read_exact(&mut [0]).unwrap_err().kind(), ErrorKind::UnexpectedEof);
|
||||
assert_eq!(e.read_exact(&mut [0; 1024]).unwrap_err().kind(), ErrorKind::UnexpectedEof);
|
||||
|
||||
assert!(!e.is_read_vectored());
|
||||
assert_eq!(e.read_vectored(&mut []).unwrap(), 0);
|
||||
let (mut buf1, mut buf1024) = ([0], [0; 1024]);
|
||||
let bufs = &mut [
|
||||
IoSliceMut::new(&mut []),
|
||||
IoSliceMut::new(&mut buf1),
|
||||
IoSliceMut::new(&mut buf1024),
|
||||
IoSliceMut::new(&mut []),
|
||||
];
|
||||
assert_eq!(e.read_vectored(bufs).unwrap(), 0);
|
||||
|
||||
let buf: &mut [MaybeUninit<_>] = &mut [];
|
||||
let mut buf: BorrowedBuf<'_> = buf.into();
|
||||
e.read_buf(buf.unfilled()).unwrap();
|
||||
assert_eq!(buf.len(), 0);
|
||||
assert_eq!(buf.init_len(), 0);
|
||||
|
||||
let buf: &mut [_] = &mut [MaybeUninit::uninit()];
|
||||
let mut buf: BorrowedBuf<'_> = buf.into();
|
||||
e.read_buf(buf.unfilled()).unwrap();
|
||||
assert_eq!(buf.len(), 0);
|
||||
assert_eq!(buf.init_len(), 0);
|
||||
|
||||
let buf: &mut [_] = &mut [MaybeUninit::uninit(); 1024];
|
||||
let mut buf: BorrowedBuf<'_> = buf.into();
|
||||
e.read_buf(buf.unfilled()).unwrap();
|
||||
assert_eq!(buf.len(), 0);
|
||||
assert_eq!(buf.init_len(), 0);
|
||||
|
||||
let buf: &mut [_] = &mut [MaybeUninit::uninit(); 1024];
|
||||
let mut buf: BorrowedBuf<'_> = buf.into();
|
||||
Read::by_ref(&mut e).read_buf(buf.unfilled()).unwrap();
|
||||
assert_eq!(buf.len(), 0);
|
||||
assert_eq!(buf.init_len(), 0);
|
||||
|
||||
let buf: &mut [MaybeUninit<_>] = &mut [];
|
||||
let mut buf: BorrowedBuf<'_> = buf.into();
|
||||
e.read_buf_exact(buf.unfilled()).unwrap();
|
||||
assert_eq!(buf.len(), 0);
|
||||
assert_eq!(buf.init_len(), 0);
|
||||
|
||||
let buf: &mut [_] = &mut [MaybeUninit::uninit()];
|
||||
let mut buf: BorrowedBuf<'_> = buf.into();
|
||||
assert_eq!(e.read_buf_exact(buf.unfilled()).unwrap_err().kind(), ErrorKind::UnexpectedEof);
|
||||
assert_eq!(buf.len(), 0);
|
||||
assert_eq!(buf.init_len(), 0);
|
||||
|
||||
let buf: &mut [_] = &mut [MaybeUninit::uninit(); 1024];
|
||||
let mut buf: BorrowedBuf<'_> = buf.into();
|
||||
assert_eq!(e.read_buf_exact(buf.unfilled()).unwrap_err().kind(), ErrorKind::UnexpectedEof);
|
||||
assert_eq!(buf.len(), 0);
|
||||
assert_eq!(buf.init_len(), 0);
|
||||
|
||||
let buf: &mut [_] = &mut [MaybeUninit::uninit(); 1024];
|
||||
let mut buf: BorrowedBuf<'_> = buf.into();
|
||||
assert_eq!(
|
||||
Read::by_ref(&mut e).read_buf_exact(buf.unfilled()).unwrap_err().kind(),
|
||||
ErrorKind::UnexpectedEof,
|
||||
);
|
||||
assert_eq!(buf.len(), 0);
|
||||
assert_eq!(buf.init_len(), 0);
|
||||
|
||||
let mut buf = Vec::new();
|
||||
assert_eq!(e.read_to_end(&mut buf).unwrap(), 0);
|
||||
assert_eq!(buf, vec![]);
|
||||
let mut buf = vec![1, 2, 3];
|
||||
assert_eq!(e.read_to_end(&mut buf).unwrap(), 0);
|
||||
assert_eq!(buf, vec![1, 2, 3]);
|
||||
|
||||
let mut buf = String::new();
|
||||
assert_eq!(e.read_to_string(&mut buf).unwrap(), 0);
|
||||
assert_eq!(buf, "");
|
||||
let mut buf = "hello".to_owned();
|
||||
assert_eq!(e.read_to_string(&mut buf).unwrap(), 0);
|
||||
assert_eq!(buf, "hello");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_seeks() {
|
||||
let mut e = empty();
|
||||
assert!(matches!(e.seek(SeekFrom::Start(0)), Ok(0)));
|
||||
assert!(matches!(e.seek(SeekFrom::Start(1)), Ok(0)));
|
||||
assert!(matches!(e.seek(SeekFrom::Start(u64::MAX)), Ok(0)));
|
||||
|
||||
assert!(matches!(e.seek(SeekFrom::End(i64::MIN)), Ok(0)));
|
||||
assert!(matches!(e.seek(SeekFrom::End(-1)), Ok(0)));
|
||||
assert!(matches!(e.seek(SeekFrom::End(0)), Ok(0)));
|
||||
assert!(matches!(e.seek(SeekFrom::End(1)), Ok(0)));
|
||||
assert!(matches!(e.seek(SeekFrom::End(i64::MAX)), Ok(0)));
|
||||
|
||||
assert!(matches!(e.seek(SeekFrom::Current(i64::MIN)), Ok(0)));
|
||||
assert!(matches!(e.seek(SeekFrom::Current(-1)), Ok(0)));
|
||||
assert!(matches!(e.seek(SeekFrom::Current(0)), Ok(0)));
|
||||
assert!(matches!(e.seek(SeekFrom::Current(1)), Ok(0)));
|
||||
assert!(matches!(e.seek(SeekFrom::Current(i64::MAX)), Ok(0)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_sinks() {
|
||||
test_sinking(empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn repeat_repeats() {
|
||||
let mut r = repeat(4);
|
||||
let mut b = [0; 1024];
|
||||
assert_eq!(r.read(&mut b).unwrap(), 1024);
|
||||
assert!(b.iter().all(|b| *b == 4));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn take_some_bytes() {
|
||||
assert_eq!(repeat(4).take(100).bytes().count(), 100);
|
||||
assert_eq!(repeat(4).take(100).bytes().next().unwrap().unwrap(), 4);
|
||||
assert_eq!(repeat(1).take(10).chain(repeat(2).take(10)).bytes().count(), 20);
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn const_utils() {
|
||||
const _: Empty = empty();
|
||||
const _: Repeat = repeat(b'c');
|
||||
const _: Sink = sink();
|
||||
}
|
||||
@@ -1,329 +0,0 @@
|
||||
#![unstable(feature = "custom_std", issue = "none")]
|
||||
#![no_std]
|
||||
//
|
||||
// Lints:
|
||||
#![warn(deprecated_in_future)]
|
||||
// #![warn(missing_docs)]
|
||||
// #![warn(missing_debug_implementations)]
|
||||
#![allow(explicit_outlives_requirements)]
|
||||
#![allow(unused_lifetimes)]
|
||||
#![allow(internal_features)]
|
||||
#![deny(fuzzy_provenance_casts)]
|
||||
#![deny(unsafe_op_in_unsafe_fn)]
|
||||
#![allow(rustdoc::redundant_explicit_links)]
|
||||
#![warn(rustdoc::unescaped_backticks)]
|
||||
// Ensure that std can be linked against panic_abort despite compiled with `-C panic=unwind`
|
||||
#![deny(ffi_unwind_calls)]
|
||||
// std may use features in a platform-specific way
|
||||
#![allow(unused_features)]
|
||||
//
|
||||
// Features:
|
||||
#![cfg_attr(
|
||||
test,
|
||||
feature(internal_output_capture, print_internals, update_panic_count, rt)
|
||||
)]
|
||||
#![cfg_attr(
|
||||
all(target_vendor = "fortanix", target_env = "sgx"),
|
||||
feature(slice_index_methods, coerce_unsized, sgx_platform)
|
||||
)]
|
||||
#![cfg_attr(all(test, target_os = "uefi"), feature(uefi_std))]
|
||||
#![cfg_attr(target_family = "wasm", feature(stdarch_wasm_atomic_wait))]
|
||||
#![cfg_attr(target_arch = "wasm64", feature(simd_wasm64))]
|
||||
//
|
||||
// Language features:
|
||||
// tidy-alphabetical-start
|
||||
#![feature(alloc_error_handler)]
|
||||
#![feature(allocator_internals)]
|
||||
#![feature(allow_internal_unsafe)]
|
||||
#![feature(allow_internal_unstable)]
|
||||
#![feature(asm_experimental_arch)]
|
||||
#![feature(autodiff)]
|
||||
#![feature(cfg_sanitizer_cfi)]
|
||||
#![feature(cfg_target_thread_local)]
|
||||
#![feature(cfi_encoding)]
|
||||
#![feature(const_default)]
|
||||
#![feature(const_trait_impl)]
|
||||
#![feature(core_float_math)]
|
||||
#![feature(decl_macro)]
|
||||
#![feature(deprecated_suggestion)]
|
||||
#![feature(doc_cfg)]
|
||||
#![feature(doc_masked)]
|
||||
#![feature(doc_notable_trait)]
|
||||
#![feature(dropck_eyepatch)]
|
||||
#![feature(f16)]
|
||||
#![feature(f128)]
|
||||
#![feature(ffi_const)]
|
||||
#![feature(formatting_options)]
|
||||
#![feature(funnel_shifts)]
|
||||
#![feature(if_let_guard)]
|
||||
#![feature(intra_doc_pointers)]
|
||||
#![feature(iter_advance_by)]
|
||||
#![feature(iter_next_chunk)]
|
||||
#![feature(lang_items)]
|
||||
#![feature(link_cfg)]
|
||||
#![feature(linkage)]
|
||||
#![feature(macro_metavar_expr_concat)]
|
||||
#![feature(maybe_uninit_fill)]
|
||||
#![feature(min_specialization)]
|
||||
#![feature(must_not_suspend)]
|
||||
#![feature(needs_panic_runtime)]
|
||||
#![feature(negative_impls)]
|
||||
#![feature(never_type)]
|
||||
#![feature(optimize_attribute)]
|
||||
#![feature(prelude_import)]
|
||||
#![feature(rustc_attrs)]
|
||||
#![feature(rustdoc_internals)]
|
||||
#![feature(staged_api)]
|
||||
#![feature(stmt_expr_attributes)]
|
||||
#![feature(strict_provenance_lints)]
|
||||
#![feature(thread_local)]
|
||||
#![feature(try_blocks)]
|
||||
#![feature(try_trait_v2)]
|
||||
#![feature(type_alias_impl_trait)]
|
||||
// tidy-alphabetical-end
|
||||
//
|
||||
// Library features (core):
|
||||
// tidy-alphabetical-start
|
||||
#![feature(bstr)]
|
||||
#![feature(bstr_internals)]
|
||||
#![feature(cast_maybe_uninit)]
|
||||
#![feature(cfg_select)]
|
||||
#![feature(char_internals)]
|
||||
#![feature(clone_to_uninit)]
|
||||
#![feature(const_convert)]
|
||||
#![feature(core_intrinsics)]
|
||||
#![feature(core_io_borrowed_buf)]
|
||||
#![feature(drop_guard)]
|
||||
#![feature(duration_constants)]
|
||||
#![feature(error_generic_member_access)]
|
||||
#![feature(error_iter)]
|
||||
#![feature(exact_size_is_empty)]
|
||||
#![feature(exclusive_wrapper)]
|
||||
#![feature(extend_one)]
|
||||
#![feature(float_algebraic)]
|
||||
// #![feature(float_gamma)]
|
||||
#![feature(float_minimum_maximum)]
|
||||
#![feature(fmt_internals)]
|
||||
#![feature(fn_ptr_trait)]
|
||||
#![feature(generic_atomic)]
|
||||
#![feature(hasher_prefixfree_extras)]
|
||||
#![feature(hashmap_internals)]
|
||||
#![feature(hint_must_use)]
|
||||
#![feature(int_from_ascii)]
|
||||
#![feature(ip)]
|
||||
#![feature(maybe_uninit_array_assume_init)]
|
||||
#![feature(panic_can_unwind)]
|
||||
#![feature(panic_internals)]
|
||||
#![feature(pin_coerce_unsized_trait)]
|
||||
#![feature(pointer_is_aligned_to)]
|
||||
#![feature(portable_simd)]
|
||||
#![feature(ptr_as_uninit)]
|
||||
#![feature(ptr_mask)]
|
||||
#![feature(random)]
|
||||
#![feature(slice_internals)]
|
||||
#![feature(slice_ptr_get)]
|
||||
#![feature(slice_range)]
|
||||
#![feature(slice_split_once)]
|
||||
#![feature(std_internals)]
|
||||
#![feature(str_internals)]
|
||||
#![feature(sync_unsafe_cell)]
|
||||
#![feature(temporary_niche_types)]
|
||||
#![feature(ub_checks)]
|
||||
#![feature(used_with_arg)]
|
||||
// tidy-alphabetical-end
|
||||
//
|
||||
// Library features (alloc):
|
||||
// tidy-alphabetical-start
|
||||
#![feature(alloc_layout_extra)]
|
||||
#![feature(allocator_api)]
|
||||
#![feature(clone_from_ref)]
|
||||
#![feature(get_mut_unchecked)]
|
||||
#![feature(map_try_insert)]
|
||||
#![feature(slice_concat_trait)]
|
||||
#![feature(thin_box)]
|
||||
#![feature(try_reserve_kind)]
|
||||
#![feature(try_with_capacity)]
|
||||
#![feature(unique_rc_arc)]
|
||||
#![feature(wtf8_internals)]
|
||||
// tidy-alphabetical-end
|
||||
//
|
||||
// Library features (unwind):
|
||||
// tidy-alphabetical-start
|
||||
// #![feature(panic_unwind)]
|
||||
// tidy-alphabetical-end
|
||||
//
|
||||
// Library features (std_detect):
|
||||
// tidy-alphabetical-start
|
||||
// #![feature(stdarch_internal)]
|
||||
// tidy-alphabetical-end
|
||||
//
|
||||
// Only for re-exporting:
|
||||
// tidy-alphabetical-start
|
||||
#![feature(assert_matches)]
|
||||
#![feature(async_iterator)]
|
||||
#![feature(c_variadic)]
|
||||
#![feature(cfg_accessible)]
|
||||
#![feature(cfg_eval)]
|
||||
#![feature(concat_bytes)]
|
||||
#![feature(const_format_args)]
|
||||
#![feature(custom_test_frameworks)]
|
||||
#![feature(edition_panic)]
|
||||
#![feature(format_args_nl)]
|
||||
#![feature(log_syntax)]
|
||||
#![feature(test)]
|
||||
#![feature(trace_macros)]
|
||||
// tidy-alphabetical-end
|
||||
//
|
||||
// Only used in tests/benchmarks:
|
||||
//
|
||||
// Only for const-ness:
|
||||
// tidy-alphabetical-start
|
||||
#![feature(io_const_error)]
|
||||
// tidy-alphabetical-end
|
||||
//
|
||||
#![feature(c_size_t, unsafe_binders)]
|
||||
#![allow(clippy::doc_lazy_continuation, clippy::all)]
|
||||
#![allow(
|
||||
stable_features,
|
||||
incomplete_features,
|
||||
unexpected_cfgs,
|
||||
unfulfilled_lint_expectations
|
||||
)]
|
||||
#![allow(unused)]
|
||||
|
||||
#[macro_use]
|
||||
extern crate alloc as alloc_crate;
|
||||
|
||||
pub use core::any;
|
||||
pub use core::array;
|
||||
pub use core::async_iter;
|
||||
pub use core::cell;
|
||||
pub use core::char;
|
||||
pub use core::clone;
|
||||
pub use core::cmp;
|
||||
pub use core::convert;
|
||||
pub use core::default;
|
||||
pub use core::future;
|
||||
pub use core::hint;
|
||||
#[allow(deprecated, deprecated_in_future)]
|
||||
pub use core::i8;
|
||||
#[allow(deprecated, deprecated_in_future)]
|
||||
pub use core::i16;
|
||||
#[allow(deprecated, deprecated_in_future)]
|
||||
pub use core::i32;
|
||||
#[allow(deprecated, deprecated_in_future)]
|
||||
pub use core::i64;
|
||||
#[allow(deprecated, deprecated_in_future)]
|
||||
pub use core::i128;
|
||||
pub use core::intrinsics;
|
||||
#[allow(deprecated, deprecated_in_future)]
|
||||
pub use core::isize;
|
||||
pub use core::iter;
|
||||
pub use core::marker;
|
||||
pub use core::mem;
|
||||
pub use core::ops;
|
||||
pub use core::option;
|
||||
pub use core::pin;
|
||||
pub use core::ptr;
|
||||
pub use core::range;
|
||||
pub use core::result;
|
||||
#[allow(deprecated, deprecated_in_future)]
|
||||
pub use core::u8;
|
||||
#[allow(deprecated, deprecated_in_future)]
|
||||
pub use core::u16;
|
||||
#[allow(deprecated, deprecated_in_future)]
|
||||
pub use core::u32;
|
||||
#[allow(deprecated, deprecated_in_future)]
|
||||
pub use core::u64;
|
||||
#[allow(deprecated, deprecated_in_future)]
|
||||
pub use core::u128;
|
||||
pub use core::unsafe_binder;
|
||||
#[allow(deprecated, deprecated_in_future)]
|
||||
pub use core::usize;
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use alloc_crate::borrow;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use alloc_crate::boxed;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use alloc_crate::fmt;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use alloc_crate::format;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use alloc_crate::rc;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use alloc_crate::slice;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use alloc_crate::str;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use alloc_crate::string;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use alloc_crate::vec;
|
||||
|
||||
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
|
||||
pub use core::{
|
||||
assert, cfg, column, compile_error, concat, const_format_args, env, file, format_args,
|
||||
format_args_nl, include, include_bytes, include_str, line, log_syntax, module_path, option_env,
|
||||
stringify, trace_macros,
|
||||
};
|
||||
|
||||
#[rustc_std_internal_symbol]
|
||||
pub unsafe fn __rust_start_panic(_payload: &mut dyn core::panic::PanicPayload) -> u32 {
|
||||
todo!()
|
||||
}
|
||||
|
||||
pub mod ffi;
|
||||
pub mod hash;
|
||||
pub mod io;
|
||||
// pub mod fs;
|
||||
pub mod error;
|
||||
pub mod num;
|
||||
pub mod path;
|
||||
pub mod prelude;
|
||||
pub mod process;
|
||||
#[macro_use]
|
||||
pub mod rt;
|
||||
pub mod alloc;
|
||||
pub mod bstr;
|
||||
pub mod collections;
|
||||
pub mod env;
|
||||
pub mod panic;
|
||||
pub mod panicking;
|
||||
pub mod sync;
|
||||
pub mod sys;
|
||||
pub mod thread;
|
||||
pub mod time;
|
||||
|
||||
#[prelude_import]
|
||||
#[allow(unused_imports)]
|
||||
pub use prelude::rust_2024::*;
|
||||
|
||||
#[allow(unused)]
|
||||
mod sealed {
|
||||
/// This trait being unreachable from outside the crate
|
||||
/// prevents outside implementations of our extension traits.
|
||||
/// This allows adding more trait methods in the future.
|
||||
#[unstable(feature = "sealed", issue = "none")]
|
||||
pub trait Sealed {}
|
||||
}
|
||||
|
||||
pub use shared::fs;
|
||||
pub use shared::syscall;
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! print {
|
||||
($($args:expr),*) => {
|
||||
$crate::syscall::write_string_temp(&format!($($args),*))
|
||||
};
|
||||
}
|
||||
#[macro_export]
|
||||
macro_rules! println {
|
||||
() => {
|
||||
$crate::print!("");
|
||||
// $crate::print!("\n\r");
|
||||
};
|
||||
($($args:expr),*) => {
|
||||
$crate::print!($($args),*);
|
||||
// $crate::println!();
|
||||
};
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
//! Additional functionality for numerics.
|
||||
//!
|
||||
//! This module provides some extra types that are useful when doing numerical
|
||||
//! work. See the individual documentation for each piece for more information.
|
||||
|
||||
#![stable(feature = "rust1", since = "1.0.0")]
|
||||
#![allow(missing_docs)]
|
||||
|
||||
#[stable(feature = "int_error_matching", since = "1.55.0")]
|
||||
pub use core::num::IntErrorKind;
|
||||
#[stable(feature = "generic_nonzero", since = "1.79.0")]
|
||||
pub use core::num::NonZero;
|
||||
#[stable(feature = "saturating_int_impl", since = "1.74.0")]
|
||||
pub use core::num::Saturating;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use core::num::Wrapping;
|
||||
#[unstable(
|
||||
feature = "nonzero_internals",
|
||||
reason = "implementation detail which may disappear or be replaced at any time",
|
||||
issue = "none"
|
||||
)]
|
||||
pub use core::num::ZeroablePrimitive;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use core::num::{FpCategory, ParseFloatError, ParseIntError, TryFromIntError};
|
||||
#[stable(feature = "signed_nonzero", since = "1.34.0")]
|
||||
pub use core::num::{NonZeroI8, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI128, NonZeroIsize};
|
||||
#[stable(feature = "nonzero", since = "1.28.0")]
|
||||
pub use core::num::{NonZeroU8, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU128, NonZeroUsize};
|
||||
@@ -1,534 +0,0 @@
|
||||
//! Panic support in the standard library.
|
||||
|
||||
#![stable(feature = "std_panic", since = "1.9.0")]
|
||||
|
||||
use crate::any::Any;
|
||||
use crate::sync::atomic::{Atomic, AtomicU8, Ordering};
|
||||
use crate::sync::{Condvar, Mutex, RwLock};
|
||||
use crate::thread::Result;
|
||||
use crate::{collections, fmt, panicking};
|
||||
|
||||
#[stable(feature = "panic_hooks", since = "1.10.0")]
|
||||
#[deprecated(
|
||||
since = "1.82.0",
|
||||
note = "use `PanicHookInfo` instead",
|
||||
suggestion = "std::panic::PanicHookInfo"
|
||||
)]
|
||||
/// A struct providing information about a panic.
|
||||
///
|
||||
/// `PanicInfo` has been renamed to [`PanicHookInfo`] to avoid confusion with
|
||||
/// [`core::panic::PanicInfo`].
|
||||
pub type PanicInfo<'a> = PanicHookInfo<'a>;
|
||||
|
||||
/// A struct providing information about a panic.
|
||||
///
|
||||
/// `PanicHookInfo` structure is passed to a panic hook set by the [`set_hook`] function.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```should_panic
|
||||
/// use std::panic;
|
||||
///
|
||||
/// panic::set_hook(Box::new(|panic_info| {
|
||||
/// println!("panic occurred: {panic_info}");
|
||||
/// }));
|
||||
///
|
||||
/// panic!("critical system failure");
|
||||
/// ```
|
||||
///
|
||||
/// [`set_hook`]: ../../std/panic/fn.set_hook.html
|
||||
#[stable(feature = "panic_hook_info", since = "1.81.0")]
|
||||
#[derive(Debug)]
|
||||
pub struct PanicHookInfo<'a> {
|
||||
payload: &'a (dyn Any + Send),
|
||||
location: &'a Location<'a>,
|
||||
can_unwind: bool,
|
||||
force_no_backtrace: bool,
|
||||
}
|
||||
|
||||
impl<'a> PanicHookInfo<'a> {
|
||||
#[inline]
|
||||
pub(crate) fn new(
|
||||
location: &'a Location<'a>,
|
||||
payload: &'a (dyn Any + Send),
|
||||
can_unwind: bool,
|
||||
force_no_backtrace: bool,
|
||||
) -> Self {
|
||||
PanicHookInfo { payload, location, can_unwind, force_no_backtrace }
|
||||
}
|
||||
|
||||
/// Returns the payload associated with the panic.
|
||||
///
|
||||
/// This will commonly, but not always, be a `&'static str` or [`String`].
|
||||
/// If you only care about such payloads, use [`payload_as_str`] instead.
|
||||
///
|
||||
/// A invocation of the `panic!()` macro in Rust 2021 or later will always result in a
|
||||
/// panic payload of type `&'static str` or `String`.
|
||||
///
|
||||
/// Only an invocation of [`panic_any`]
|
||||
/// (or, in Rust 2018 and earlier, `panic!(x)` where `x` is something other than a string)
|
||||
/// can result in a panic payload other than a `&'static str` or `String`.
|
||||
///
|
||||
/// [`String`]: ../../std/string/struct.String.html
|
||||
/// [`payload_as_str`]: PanicHookInfo::payload_as_str
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```should_panic
|
||||
/// use std::panic;
|
||||
///
|
||||
/// panic::set_hook(Box::new(|panic_info| {
|
||||
/// if let Some(s) = panic_info.payload().downcast_ref::<&str>() {
|
||||
/// println!("panic occurred: {s:?}");
|
||||
/// } else if let Some(s) = panic_info.payload().downcast_ref::<String>() {
|
||||
/// println!("panic occurred: {s:?}");
|
||||
/// } else {
|
||||
/// println!("panic occurred");
|
||||
/// }
|
||||
/// }));
|
||||
///
|
||||
/// panic!("Normal panic");
|
||||
/// ```
|
||||
#[must_use]
|
||||
#[inline]
|
||||
#[stable(feature = "panic_hooks", since = "1.10.0")]
|
||||
pub fn payload(&self) -> &(dyn Any + Send) {
|
||||
self.payload
|
||||
}
|
||||
|
||||
/// Returns the payload associated with the panic, if it is a string.
|
||||
///
|
||||
/// This returns the payload if it is of type `&'static str` or `String`.
|
||||
///
|
||||
/// A invocation of the `panic!()` macro in Rust 2021 or later will always result in a
|
||||
/// panic payload where `payload_as_str` returns `Some`.
|
||||
///
|
||||
/// Only an invocation of [`panic_any`]
|
||||
/// (or, in Rust 2018 and earlier, `panic!(x)` where `x` is something other than a string)
|
||||
/// can result in a panic payload where `payload_as_str` returns `None`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```should_panic
|
||||
/// std::panic::set_hook(Box::new(|panic_info| {
|
||||
/// if let Some(s) = panic_info.payload_as_str() {
|
||||
/// println!("panic occurred: {s:?}");
|
||||
/// } else {
|
||||
/// println!("panic occurred");
|
||||
/// }
|
||||
/// }));
|
||||
///
|
||||
/// panic!("Normal panic");
|
||||
/// ```
|
||||
#[must_use]
|
||||
#[inline]
|
||||
#[stable(feature = "panic_payload_as_str", since = "1.91.0")]
|
||||
pub fn payload_as_str(&self) -> Option<&str> {
|
||||
if let Some(s) = self.payload.downcast_ref::<&str>() {
|
||||
Some(s)
|
||||
} else if let Some(s) = self.payload.downcast_ref::<String>() {
|
||||
Some(s)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns information about the location from which the panic originated,
|
||||
/// if available.
|
||||
///
|
||||
/// This method will currently always return [`Some`], but this may change
|
||||
/// in future versions.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```should_panic
|
||||
/// use std::panic;
|
||||
///
|
||||
/// panic::set_hook(Box::new(|panic_info| {
|
||||
/// if let Some(location) = panic_info.location() {
|
||||
/// println!("panic occurred in file '{}' at line {}",
|
||||
/// location.file(),
|
||||
/// location.line(),
|
||||
/// );
|
||||
/// } else {
|
||||
/// println!("panic occurred but can't get location information...");
|
||||
/// }
|
||||
/// }));
|
||||
///
|
||||
/// panic!("Normal panic");
|
||||
/// ```
|
||||
#[must_use]
|
||||
#[inline]
|
||||
#[stable(feature = "panic_hooks", since = "1.10.0")]
|
||||
pub fn location(&self) -> Option<&Location<'_>> {
|
||||
// NOTE: If this is changed to sometimes return None,
|
||||
// deal with that case in std::panicking::default_hook and core::panicking::panic_fmt.
|
||||
Some(&self.location)
|
||||
}
|
||||
|
||||
/// Returns whether the panic handler is allowed to unwind the stack from
|
||||
/// the point where the panic occurred.
|
||||
///
|
||||
/// This is true for most kinds of panics with the exception of panics
|
||||
/// caused by trying to unwind out of a `Drop` implementation or a function
|
||||
/// whose ABI does not support unwinding.
|
||||
///
|
||||
/// It is safe for a panic handler to unwind even when this function returns
|
||||
/// false, however this will simply cause the panic handler to be called
|
||||
/// again.
|
||||
#[must_use]
|
||||
#[inline]
|
||||
#[unstable(feature = "panic_can_unwind", issue = "92988")]
|
||||
pub fn can_unwind(&self) -> bool {
|
||||
self.can_unwind
|
||||
}
|
||||
|
||||
#[unstable(
|
||||
feature = "panic_internals",
|
||||
reason = "internal details of the implementation of the `panic!` and related macros",
|
||||
issue = "none"
|
||||
)]
|
||||
#[doc(hidden)]
|
||||
#[inline]
|
||||
pub fn force_no_backtrace(&self) -> bool {
|
||||
self.force_no_backtrace
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "panic_hook_display", since = "1.26.0")]
|
||||
impl fmt::Display for PanicHookInfo<'_> {
|
||||
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
formatter.write_str("panicked at ")?;
|
||||
self.location.fmt(formatter)?;
|
||||
if let Some(payload) = self.payload_as_str() {
|
||||
formatter.write_str(":\n")?;
|
||||
formatter.write_str(payload)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[unstable(feature = "edition_panic", issue = "none", reason = "use panic!() instead")]
|
||||
#[allow_internal_unstable(libstd_sys_internals, const_format_args, panic_internals, rt)]
|
||||
#[cfg_attr(not(test), rustc_diagnostic_item = "std_panic_2015_macro")]
|
||||
#[rustc_macro_transparency = "semiopaque"]
|
||||
pub macro panic_2015 {
|
||||
() => ({
|
||||
$crate::rt::begin_panic("explicit panic")
|
||||
}),
|
||||
($msg:expr $(,)?) => ({
|
||||
$crate::rt::begin_panic($msg);
|
||||
}),
|
||||
// Special-case the single-argument case for const_panic.
|
||||
("{}", $arg:expr $(,)?) => ({
|
||||
$crate::rt::panic_display(&$arg);
|
||||
}),
|
||||
($fmt:expr, $($arg:tt)+) => ({
|
||||
// Semicolon to prevent temporaries inside the formatting machinery from
|
||||
// being considered alive in the caller after the panic_fmt call.
|
||||
$crate::rt::panic_fmt($crate::const_format_args!($fmt, $($arg)+));
|
||||
}),
|
||||
}
|
||||
|
||||
#[stable(feature = "panic_hooks", since = "1.10.0")]
|
||||
pub use core::panic::Location;
|
||||
#[doc(hidden)]
|
||||
#[unstable(feature = "edition_panic", issue = "none", reason = "use panic!() instead")]
|
||||
pub use core::panic::panic_2021;
|
||||
#[stable(feature = "catch_unwind", since = "1.9.0")]
|
||||
pub use core::panic::{AssertUnwindSafe, RefUnwindSafe, UnwindSafe};
|
||||
|
||||
#[unstable(feature = "panic_update_hook", issue = "92649")]
|
||||
pub use crate::panicking::update_hook;
|
||||
#[stable(feature = "panic_hooks", since = "1.10.0")]
|
||||
pub use crate::panicking::{set_hook, take_hook};
|
||||
|
||||
/// Panics the current thread with the given message as the panic payload.
|
||||
///
|
||||
/// The message can be of any (`Any + Send`) type, not just strings.
|
||||
///
|
||||
/// The message is wrapped in a `Box<'static + Any + Send>`, which can be
|
||||
/// accessed later using [`PanicHookInfo::payload`].
|
||||
///
|
||||
/// See the [`panic!`] macro for more information about panicking.
|
||||
#[stable(feature = "panic_any", since = "1.51.0")]
|
||||
#[inline]
|
||||
#[track_caller]
|
||||
#[cfg_attr(not(test), rustc_diagnostic_item = "panic_any")]
|
||||
pub fn panic_any<M: 'static + Any + Send>(msg: M) -> ! {
|
||||
crate::panicking::begin_panic(msg);
|
||||
}
|
||||
|
||||
#[stable(feature = "catch_unwind", since = "1.9.0")]
|
||||
impl<T: ?Sized> UnwindSafe for Mutex<T> {}
|
||||
#[stable(feature = "catch_unwind", since = "1.9.0")]
|
||||
impl<T: ?Sized> UnwindSafe for RwLock<T> {}
|
||||
#[stable(feature = "catch_unwind", since = "1.9.0")]
|
||||
impl UnwindSafe for Condvar {}
|
||||
|
||||
#[stable(feature = "unwind_safe_lock_refs", since = "1.12.0")]
|
||||
impl<T: ?Sized> RefUnwindSafe for Mutex<T> {}
|
||||
#[stable(feature = "unwind_safe_lock_refs", since = "1.12.0")]
|
||||
impl<T: ?Sized> RefUnwindSafe for RwLock<T> {}
|
||||
#[stable(feature = "unwind_safe_lock_refs", since = "1.12.0")]
|
||||
impl RefUnwindSafe for Condvar {}
|
||||
|
||||
// https://github.com/rust-lang/rust/issues/62301
|
||||
#[stable(feature = "hashbrown", since = "1.36.0")]
|
||||
impl<K, V, S> UnwindSafe for collections::HashMap<K, V, S>
|
||||
where
|
||||
K: UnwindSafe,
|
||||
V: UnwindSafe,
|
||||
S: UnwindSafe,
|
||||
{
|
||||
}
|
||||
|
||||
#[unstable(feature = "abort_unwind", issue = "130338")]
|
||||
pub use core::panic::abort_unwind;
|
||||
|
||||
/// Invokes a closure, capturing the cause of an unwinding panic if one occurs.
|
||||
///
|
||||
/// This function will return `Ok` with the closure's result if the closure does
|
||||
/// not panic, and will return `Err(cause)` if the closure panics. The `cause`
|
||||
/// returned is the object with which panic was originally invoked.
|
||||
///
|
||||
/// Rust functions that are expected to be called from foreign code that does
|
||||
/// not support unwinding (such as C compiled with `-fno-exceptions`) should be
|
||||
/// defined using `extern "C"`, which ensures that if the Rust code panics, it
|
||||
/// is automatically caught and the process is aborted. If this is the desired
|
||||
/// behavior, it is not necessary to use `catch_unwind` explicitly. This
|
||||
/// function should instead be used when more graceful error-handling is needed.
|
||||
///
|
||||
/// It is **not** recommended to use this function for a general try/catch
|
||||
/// mechanism. The [`Result`] type is more appropriate to use for functions that
|
||||
/// can fail on a regular basis. Additionally, this function is not guaranteed
|
||||
/// to catch all panics, see the "Notes" section below.
|
||||
///
|
||||
/// The closure provided is required to adhere to the [`UnwindSafe`] trait to
|
||||
/// ensure that all captured variables are safe to cross this boundary. The
|
||||
/// purpose of this bound is to encode the concept of [exception safety][rfc] in
|
||||
/// the type system. Most usage of this function should not need to worry about
|
||||
/// this bound as programs are naturally unwind safe without `unsafe` code. If
|
||||
/// it becomes a problem the [`AssertUnwindSafe`] wrapper struct can be used to
|
||||
/// quickly assert that the usage here is indeed unwind safe.
|
||||
///
|
||||
/// [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/1236-stabilize-catch-panic.md
|
||||
///
|
||||
/// # Notes
|
||||
///
|
||||
/// This function **might not catch all Rust panics**. A Rust panic is not
|
||||
/// always implemented via unwinding, but can be implemented by aborting the
|
||||
/// process as well. This function *only* catches unwinding panics, not those
|
||||
/// that abort the process.
|
||||
///
|
||||
/// If a custom panic hook has been set, it will be invoked before the panic is
|
||||
/// caught, before unwinding.
|
||||
///
|
||||
/// Although unwinding into Rust code with a foreign exception (e.g. an
|
||||
/// exception thrown from C++ code, or a `panic!` in Rust code compiled or
|
||||
/// linked with a different runtime) via an appropriate ABI (e.g. `"C-unwind"`)
|
||||
/// is permitted, catching such an exception using this function will have one
|
||||
/// of two behaviors, and it is unspecified which will occur:
|
||||
///
|
||||
/// * The process aborts, after executing all destructors of `f` and the
|
||||
/// functions it called.
|
||||
/// * The function returns a `Result::Err` containing an opaque type.
|
||||
///
|
||||
/// Finally, be **careful in how you drop the result of this function**. If it
|
||||
/// is `Err`, it contains the panic payload, and dropping that may in turn
|
||||
/// panic!
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::panic;
|
||||
///
|
||||
/// let result = panic::catch_unwind(|| {
|
||||
/// println!("hello!");
|
||||
/// });
|
||||
/// assert!(result.is_ok());
|
||||
///
|
||||
/// let result = panic::catch_unwind(|| {
|
||||
/// panic!("oh no!");
|
||||
/// });
|
||||
/// assert!(result.is_err());
|
||||
/// ```
|
||||
#[stable(feature = "catch_unwind", since = "1.9.0")]
|
||||
pub fn catch_unwind<F: FnOnce() -> R + UnwindSafe, R>(f: F) -> Result<R> {
|
||||
unsafe { panicking::catch_unwind(f) }
|
||||
}
|
||||
|
||||
/// Triggers a panic without invoking the panic hook.
|
||||
///
|
||||
/// This is designed to be used in conjunction with [`catch_unwind`] to, for
|
||||
/// example, carry a panic across a layer of C code.
|
||||
///
|
||||
/// # Notes
|
||||
///
|
||||
/// Note that panics in Rust are not always implemented via unwinding, but they
|
||||
/// may be implemented by aborting the process. If this function is called when
|
||||
/// panics are implemented this way then this function will abort the process,
|
||||
/// not trigger an unwind.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```should_panic
|
||||
/// use std::panic;
|
||||
///
|
||||
/// let result = panic::catch_unwind(|| {
|
||||
/// if 1 != 2 {
|
||||
/// panic!("oh no!");
|
||||
/// }
|
||||
/// });
|
||||
///
|
||||
/// if let Err(err) = result {
|
||||
/// panic::resume_unwind(err);
|
||||
/// }
|
||||
/// ```
|
||||
#[stable(feature = "resume_unwind", since = "1.9.0")]
|
||||
pub fn resume_unwind(payload: Box<dyn Any + Send>) -> ! {
|
||||
panicking::resume_unwind(payload)
|
||||
}
|
||||
|
||||
/// Makes all future panics abort directly without running the panic hook or unwinding.
|
||||
///
|
||||
/// There is no way to undo this; the effect lasts until the process exits or
|
||||
/// execs (or the equivalent).
|
||||
///
|
||||
/// # Use after fork
|
||||
///
|
||||
/// This function is particularly useful for calling after `libc::fork`. After `fork`, in a
|
||||
/// multithreaded program it is (on many platforms) not safe to call the allocator. It is also
|
||||
/// generally highly undesirable for an unwind to unwind past the `fork`, because that results in
|
||||
/// the unwind propagating to code that was only ever expecting to run in the parent.
|
||||
///
|
||||
/// `panic::always_abort()` helps avoid both of these. It directly avoids any further unwinding,
|
||||
/// and if there is a panic, the abort will occur without allocating provided that the arguments to
|
||||
/// panic can be formatted without allocating.
|
||||
///
|
||||
/// Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// #![feature(panic_always_abort)]
|
||||
/// use std::panic;
|
||||
///
|
||||
/// panic::always_abort();
|
||||
///
|
||||
/// let _ = panic::catch_unwind(|| {
|
||||
/// panic!("inside the catch");
|
||||
/// });
|
||||
///
|
||||
/// // We will have aborted already, due to the panic.
|
||||
/// unreachable!();
|
||||
/// ```
|
||||
#[unstable(feature = "panic_always_abort", issue = "84438")]
|
||||
pub fn always_abort() {
|
||||
crate::panicking::panic_count::set_always_abort();
|
||||
}
|
||||
|
||||
/// The configuration for whether and how the default panic hook will capture
|
||||
/// and display the backtrace.
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[unstable(feature = "panic_backtrace_config", issue = "93346")]
|
||||
#[non_exhaustive]
|
||||
pub enum BacktraceStyle {
|
||||
/// Prints a terser backtrace which ideally only contains relevant
|
||||
/// information.
|
||||
Short,
|
||||
/// Prints a backtrace with all possible information.
|
||||
Full,
|
||||
/// Disable collecting and displaying backtraces.
|
||||
Off,
|
||||
}
|
||||
|
||||
impl BacktraceStyle {
|
||||
pub(crate) fn full() -> Option<Self> {
|
||||
if cfg!(feature = "backtrace") { Some(BacktraceStyle::Full) } else { None }
|
||||
}
|
||||
|
||||
fn as_u8(self) -> u8 {
|
||||
match self {
|
||||
BacktraceStyle::Short => 1,
|
||||
BacktraceStyle::Full => 2,
|
||||
BacktraceStyle::Off => 3,
|
||||
}
|
||||
}
|
||||
|
||||
fn from_u8(s: u8) -> Option<Self> {
|
||||
match s {
|
||||
1 => Some(BacktraceStyle::Short),
|
||||
2 => Some(BacktraceStyle::Full),
|
||||
3 => Some(BacktraceStyle::Off),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tracks whether we should/can capture a backtrace, and how we should display
|
||||
// that backtrace.
|
||||
//
|
||||
// Internally stores equivalent of an Option<BacktraceStyle>.
|
||||
static SHOULD_CAPTURE: Atomic<u8> = AtomicU8::new(0);
|
||||
|
||||
/// Configures whether the default panic hook will capture and display a
|
||||
/// backtrace.
|
||||
///
|
||||
/// The default value for this setting may be set by the `RUST_BACKTRACE`
|
||||
/// environment variable; see the details in [`get_backtrace_style`].
|
||||
#[unstable(feature = "panic_backtrace_config", issue = "93346")]
|
||||
pub fn set_backtrace_style(style: BacktraceStyle) {
|
||||
if cfg!(feature = "backtrace") {
|
||||
// If the `backtrace` feature of this crate is enabled, set the backtrace style.
|
||||
SHOULD_CAPTURE.store(style.as_u8(), Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks whether the standard library's panic hook will capture and print a
|
||||
/// backtrace.
|
||||
///
|
||||
/// This function will, if a backtrace style has not been set via
|
||||
/// [`set_backtrace_style`], read the environment variable `RUST_BACKTRACE` to
|
||||
/// determine a default value for the backtrace formatting:
|
||||
///
|
||||
/// The first call to `get_backtrace_style` may read the `RUST_BACKTRACE`
|
||||
/// environment variable if `set_backtrace_style` has not been called to
|
||||
/// override the default value. After a call to `set_backtrace_style` or
|
||||
/// `get_backtrace_style`, any changes to `RUST_BACKTRACE` will have no effect.
|
||||
///
|
||||
/// `RUST_BACKTRACE` is read according to these rules:
|
||||
///
|
||||
/// * `0` for `BacktraceStyle::Off`
|
||||
/// * `full` for `BacktraceStyle::Full`
|
||||
/// * `1` for `BacktraceStyle::Short`
|
||||
/// * Other values are currently `BacktraceStyle::Short`, but this may change in
|
||||
/// the future
|
||||
///
|
||||
/// Returns `None` if backtraces aren't currently supported.
|
||||
#[unstable(feature = "panic_backtrace_config", issue = "93346")]
|
||||
pub fn get_backtrace_style() -> Option<BacktraceStyle> {
|
||||
if !cfg!(feature = "backtrace") {
|
||||
// If the `backtrace` feature of this crate isn't enabled quickly return
|
||||
// `Unsupported` so this can be constant propagated all over the place
|
||||
// to optimize away callers.
|
||||
return None;
|
||||
}
|
||||
|
||||
let current = SHOULD_CAPTURE.load(Ordering::Relaxed);
|
||||
if let Some(style) = BacktraceStyle::from_u8(current) {
|
||||
return Some(style);
|
||||
}
|
||||
|
||||
let format = match crate::env::var_os("RUST_BACKTRACE") {
|
||||
Some(x) if &x == "0" => BacktraceStyle::Off,
|
||||
Some(x) if &x == "full" => BacktraceStyle::Full,
|
||||
Some(_) => BacktraceStyle::Short,
|
||||
None if crate::sys::backtrace::FULL_BACKTRACE_DEFAULT => BacktraceStyle::Full,
|
||||
None => BacktraceStyle::Off,
|
||||
};
|
||||
|
||||
match SHOULD_CAPTURE.compare_exchange(0, format.as_u8(), Ordering::Relaxed, Ordering::Relaxed) {
|
||||
Ok(_) => Some(format),
|
||||
Err(new) => BacktraceStyle::from_u8(new),
|
||||
}
|
||||
}
|
||||
@@ -1,894 +0,0 @@
|
||||
//! Implementation of various bits and pieces of the `panic!` macro and
|
||||
//! associated runtime pieces.
|
||||
//!
|
||||
//! Specifically, this module contains the implementation of:
|
||||
//!
|
||||
//! * Panic hooks
|
||||
//! * Executing a panic up to doing the actual implementation
|
||||
//! * Shims around "try"
|
||||
|
||||
#![deny(unsafe_op_in_unsafe_fn)]
|
||||
|
||||
use core::panic::{Location, PanicPayload};
|
||||
|
||||
// make sure to use the stderr output configured
|
||||
// by libtest in the real copy of std
|
||||
#[cfg(test)]
|
||||
use realstd::io::try_set_output_capture;
|
||||
|
||||
use crate::any::Any;
|
||||
#[cfg(not(test))]
|
||||
use crate::io::try_set_output_capture;
|
||||
use crate::mem::{self, ManuallyDrop};
|
||||
use crate::panic::{BacktraceStyle, PanicHookInfo};
|
||||
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
|
||||
use crate::sync::nonpoison::RwLock;
|
||||
use crate::sys::backtrace;
|
||||
use crate::sys::stdio::panic_output;
|
||||
use crate::{fmt, intrinsics, process, thread};
|
||||
|
||||
// This forces codegen of the function called by panic!() inside the std crate, rather than in
|
||||
// downstream crates. Primarily this is useful for rustc's codegen tests, which rely on noticing
|
||||
// complete removal of panic from generated IR. Since begin_panic is inline(never), it's only
|
||||
// codegen'd once per crate-graph so this pushes that to std rather than our codegen test crates.
|
||||
//
|
||||
// (See https://github.com/rust-lang/rust/pull/123244 for more info on why).
|
||||
//
|
||||
// If this is causing problems we can also modify those codegen tests to use a crate type like
|
||||
// cdylib which doesn't export "Rust" symbols to downstream linkage units.
|
||||
#[unstable(feature = "libstd_sys_internals", reason = "used by the panic! macro", issue = "none")]
|
||||
#[doc(hidden)]
|
||||
#[allow(dead_code)]
|
||||
#[used(compiler)]
|
||||
pub static EMPTY_PANIC: fn(&'static str) -> ! =
|
||||
begin_panic::<&'static str> as fn(&'static str) -> !;
|
||||
|
||||
// Binary interface to the panic runtime that the standard library depends on.
|
||||
//
|
||||
// The standard library is tagged with `#![needs_panic_runtime]` (introduced in
|
||||
// RFC 1513) to indicate that it requires some other crate tagged with
|
||||
// `#![panic_runtime]` to exist somewhere. Each panic runtime is intended to
|
||||
// implement these symbols (with the same signatures) so we can get matched up
|
||||
// to them.
|
||||
//
|
||||
// One day this may look a little less ad-hoc with the compiler helping out to
|
||||
// hook up these functions, but it is not this day!
|
||||
#[allow(improper_ctypes)]
|
||||
unsafe extern "C" {
|
||||
#[rustc_std_internal_symbol]
|
||||
fn __rust_panic_cleanup(payload: *mut u8) -> *mut (dyn Any + Send + 'static);
|
||||
}
|
||||
|
||||
unsafe extern "Rust" {
|
||||
/// `PanicPayload` lazily performs allocation only when needed (this avoids
|
||||
/// allocations when using the "abort" panic runtime).
|
||||
#[rustc_std_internal_symbol]
|
||||
fn __rust_start_panic(payload: &mut dyn PanicPayload) -> u32;
|
||||
}
|
||||
|
||||
/// This function is called by the panic runtime if FFI code catches a Rust
|
||||
/// panic but doesn't rethrow it. We don't support this case since it messes
|
||||
/// with our panic count.
|
||||
#[cfg(not(test))]
|
||||
#[rustc_std_internal_symbol]
|
||||
extern "C" fn __rust_drop_panic() -> ! {
|
||||
rtabort!("Rust panics must be rethrown");
|
||||
}
|
||||
|
||||
/// This function is called by the panic runtime if it catches an exception
|
||||
/// object which does not correspond to a Rust panic.
|
||||
#[cfg(not(test))]
|
||||
#[rustc_std_internal_symbol]
|
||||
extern "C" fn __rust_foreign_exception() -> ! {
|
||||
rtabort!("Rust cannot catch foreign exceptions");
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
enum Hook {
|
||||
#[default]
|
||||
Default,
|
||||
Custom(Box<dyn Fn(&PanicHookInfo<'_>) + 'static + Sync + Send>),
|
||||
}
|
||||
|
||||
impl Hook {
|
||||
#[inline]
|
||||
fn into_box(self) -> Box<dyn Fn(&PanicHookInfo<'_>) + 'static + Sync + Send> {
|
||||
match self {
|
||||
Hook::Default => Box::new(default_hook),
|
||||
Hook::Custom(hook) => hook,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static HOOK: RwLock<Hook> = RwLock::new(Hook::Default);
|
||||
|
||||
/// Registers a custom panic hook, replacing the previously registered hook.
|
||||
///
|
||||
/// The panic hook is invoked when a thread panics, but before the panic runtime
|
||||
/// is invoked. As such, the hook will run with both the aborting and unwinding
|
||||
/// runtimes.
|
||||
///
|
||||
/// The default hook, which is registered at startup, prints a message to standard error and
|
||||
/// generates a backtrace if requested. This behavior can be customized using the `set_hook` function.
|
||||
/// The current hook can be retrieved while reinstating the default hook with the [`take_hook`]
|
||||
/// function.
|
||||
///
|
||||
/// [`take_hook`]: ./fn.take_hook.html
|
||||
///
|
||||
/// The hook is provided with a `PanicHookInfo` struct which contains information
|
||||
/// about the origin of the panic, including the payload passed to `panic!` and
|
||||
/// the source code location from which the panic originated.
|
||||
///
|
||||
/// The panic hook is a global resource.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if called from a panicking thread.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// The following will print "Custom panic hook":
|
||||
///
|
||||
/// ```should_panic
|
||||
/// use std::panic;
|
||||
///
|
||||
/// panic::set_hook(Box::new(|_| {
|
||||
/// println!("Custom panic hook");
|
||||
/// }));
|
||||
///
|
||||
/// panic!("Normal panic");
|
||||
/// ```
|
||||
#[stable(feature = "panic_hooks", since = "1.10.0")]
|
||||
pub fn set_hook(hook: Box<dyn Fn(&PanicHookInfo<'_>) + 'static + Sync + Send>) {
|
||||
if thread::panicking() {
|
||||
panic!("cannot modify the panic hook from a panicking thread");
|
||||
}
|
||||
|
||||
// Drop the old hook after changing the hook to avoid deadlocking if its
|
||||
// destructor panics.
|
||||
drop(HOOK.replace(Hook::Custom(hook)));
|
||||
}
|
||||
|
||||
/// Unregisters the current panic hook and returns it, registering the default hook
|
||||
/// in its place.
|
||||
///
|
||||
/// *See also the function [`set_hook`].*
|
||||
///
|
||||
/// [`set_hook`]: ./fn.set_hook.html
|
||||
///
|
||||
/// If the default hook is registered it will be returned, but remain registered.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if called from a panicking thread.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// The following will print "Normal panic":
|
||||
///
|
||||
/// ```should_panic
|
||||
/// use std::panic;
|
||||
///
|
||||
/// panic::set_hook(Box::new(|_| {
|
||||
/// println!("Custom panic hook");
|
||||
/// }));
|
||||
///
|
||||
/// let _ = panic::take_hook();
|
||||
///
|
||||
/// panic!("Normal panic");
|
||||
/// ```
|
||||
#[must_use]
|
||||
#[stable(feature = "panic_hooks", since = "1.10.0")]
|
||||
pub fn take_hook() -> Box<dyn Fn(&PanicHookInfo<'_>) + 'static + Sync + Send> {
|
||||
if thread::panicking() {
|
||||
panic!("cannot modify the panic hook from a panicking thread");
|
||||
}
|
||||
|
||||
HOOK.replace(Hook::Default).into_box()
|
||||
}
|
||||
|
||||
/// Atomic combination of [`take_hook`] and [`set_hook`]. Use this to replace the panic handler with
|
||||
/// a new panic handler that does something and then executes the old handler.
|
||||
///
|
||||
/// [`take_hook`]: ./fn.take_hook.html
|
||||
/// [`set_hook`]: ./fn.set_hook.html
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if called from a panicking thread.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// The following will print the custom message, and then the normal output of panic.
|
||||
///
|
||||
/// ```should_panic
|
||||
/// #![feature(panic_update_hook)]
|
||||
/// use std::panic;
|
||||
///
|
||||
/// // Equivalent to
|
||||
/// // let prev = panic::take_hook();
|
||||
/// // panic::set_hook(Box::new(move |info| {
|
||||
/// // println!("...");
|
||||
/// // prev(info);
|
||||
/// // }));
|
||||
/// panic::update_hook(move |prev, info| {
|
||||
/// println!("Print custom message and execute panic handler as usual");
|
||||
/// prev(info);
|
||||
/// });
|
||||
///
|
||||
/// panic!("Custom and then normal");
|
||||
/// ```
|
||||
#[unstable(feature = "panic_update_hook", issue = "92649")]
|
||||
pub fn update_hook<F>(hook_fn: F)
|
||||
where
|
||||
F: Fn(&(dyn Fn(&PanicHookInfo<'_>) + Send + Sync + 'static), &PanicHookInfo<'_>)
|
||||
+ Sync
|
||||
+ Send
|
||||
+ 'static,
|
||||
{
|
||||
if thread::panicking() {
|
||||
panic!("cannot modify the panic hook from a panicking thread");
|
||||
}
|
||||
|
||||
let mut hook = HOOK.write();
|
||||
let prev = mem::take(&mut *hook).into_box();
|
||||
*hook = Hook::Custom(Box::new(move |info| hook_fn(&prev, info)));
|
||||
}
|
||||
|
||||
/// The default panic handler.
|
||||
#[optimize(size)]
|
||||
fn default_hook(info: &PanicHookInfo<'_>) {
|
||||
// If this is a double panic, make sure that we print a backtrace
|
||||
// for this panic. Otherwise only print it if logging is enabled.
|
||||
let backtrace = if info.force_no_backtrace() {
|
||||
None
|
||||
} else if panic_count::get_count() >= 2 {
|
||||
BacktraceStyle::full()
|
||||
} else {
|
||||
crate::panic::get_backtrace_style()
|
||||
};
|
||||
|
||||
// The current implementation always returns `Some`.
|
||||
let location = info.location().unwrap();
|
||||
|
||||
let msg = payload_as_str(info.payload());
|
||||
|
||||
let write = #[optimize(size)]
|
||||
|err: &mut dyn crate::io::Write| {
|
||||
// Use a lock to prevent mixed output in multithreading context.
|
||||
// Some platforms also require it when printing a backtrace, like `SymFromAddr` on Windows.
|
||||
let mut lock = backtrace::lock();
|
||||
|
||||
thread::with_current_name(|name| {
|
||||
let name = name.unwrap_or("<unnamed>");
|
||||
let tid = thread::current_os_id();
|
||||
|
||||
// Try to write the panic message to a buffer first to prevent other concurrent outputs
|
||||
// interleaving with it.
|
||||
let mut buffer = [0u8; 512];
|
||||
let mut cursor = crate::io::Cursor::new(&mut buffer[..]);
|
||||
|
||||
let write_msg = |dst: &mut dyn crate::io::Write| {
|
||||
// We add a newline to ensure the panic message appears at the start of a line.
|
||||
writeln!(dst, "\nthread '{name}' ({tid}) panicked at {location}:\n{msg}")
|
||||
};
|
||||
|
||||
if write_msg(&mut cursor).is_ok() {
|
||||
let pos = cursor.position() as usize;
|
||||
let _ = err.write_all(&buffer[0..pos]);
|
||||
} else {
|
||||
// The message did not fit into the buffer, write it directly instead.
|
||||
let _ = write_msg(err);
|
||||
};
|
||||
});
|
||||
|
||||
static FIRST_PANIC: Atomic<bool> = AtomicBool::new(true);
|
||||
|
||||
match backtrace {
|
||||
Some(BacktraceStyle::Short) => {
|
||||
todo!()
|
||||
}
|
||||
Some(BacktraceStyle::Full) => {
|
||||
todo!()
|
||||
}
|
||||
Some(BacktraceStyle::Off) => {
|
||||
if FIRST_PANIC.swap(false, Ordering::Relaxed) {
|
||||
let _ = writeln!(
|
||||
err,
|
||||
"note: run with `RUST_BACKTRACE=1` environment variable to display a \
|
||||
backtrace"
|
||||
);
|
||||
if cfg!(miri) {
|
||||
let _ = writeln!(
|
||||
err,
|
||||
"note: in Miri, you may have to set `MIRIFLAGS=-Zmiri-env-forward=RUST_BACKTRACE` \
|
||||
for the environment variable to have an effect"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
// If backtraces aren't supported or are forced-off, do nothing.
|
||||
None => {}
|
||||
}
|
||||
};
|
||||
|
||||
if let Ok(Some(local)) = try_set_output_capture(None) {
|
||||
write(&mut *local.lock().unwrap_or_else(|e| e.into_inner()));
|
||||
try_set_output_capture(Some(local)).ok();
|
||||
} else if let Some(mut out) = panic_output() {
|
||||
write(&mut out);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(test))]
|
||||
#[doc(hidden)]
|
||||
#[cfg(panic = "immediate-abort")]
|
||||
#[unstable(feature = "update_panic_count", issue = "none")]
|
||||
pub mod panic_count {
|
||||
/// A reason for forcing an immediate abort on panic.
|
||||
#[derive(Debug)]
|
||||
pub enum MustAbort {
|
||||
AlwaysAbort,
|
||||
PanicInHook,
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn increase(run_panic_hook: bool) -> Option<MustAbort> {
|
||||
None
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn finished_panic_hook() {}
|
||||
|
||||
#[inline]
|
||||
pub fn decrease() {}
|
||||
|
||||
#[inline]
|
||||
pub fn set_always_abort() {}
|
||||
|
||||
// Disregards ALWAYS_ABORT_FLAG
|
||||
#[inline]
|
||||
#[must_use]
|
||||
pub fn get_count() -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn count_is_zero() -> bool {
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(test))]
|
||||
#[doc(hidden)]
|
||||
#[cfg(not(panic = "immediate-abort"))]
|
||||
#[unstable(feature = "update_panic_count", issue = "none")]
|
||||
pub mod panic_count {
|
||||
use crate::cell::Cell;
|
||||
use crate::sync::atomic::{Atomic, AtomicUsize, Ordering};
|
||||
|
||||
const ALWAYS_ABORT_FLAG: usize = 1 << (usize::BITS - 1);
|
||||
|
||||
/// A reason for forcing an immediate abort on panic.
|
||||
#[derive(Debug)]
|
||||
pub enum MustAbort {
|
||||
AlwaysAbort,
|
||||
PanicInHook,
|
||||
}
|
||||
|
||||
// Panic count for the current thread and whether a panic hook is currently
|
||||
// being executed..
|
||||
thread_local! {
|
||||
static LOCAL_PANIC_COUNT: Cell<(usize, bool)> = const { Cell::new((0, false)) }
|
||||
}
|
||||
|
||||
// Sum of panic counts from all threads. The purpose of this is to have
|
||||
// a fast path in `count_is_zero` (which is used by `panicking`). In any particular
|
||||
// thread, if that thread currently views `GLOBAL_PANIC_COUNT` as being zero,
|
||||
// then `LOCAL_PANIC_COUNT` in that thread is zero. This invariant holds before
|
||||
// and after increase and decrease, but not necessarily during their execution.
|
||||
//
|
||||
// Additionally, the top bit of GLOBAL_PANIC_COUNT (GLOBAL_ALWAYS_ABORT_FLAG)
|
||||
// records whether panic::always_abort() has been called. This can only be
|
||||
// set, never cleared.
|
||||
// panic::always_abort() is usually called to prevent memory allocations done by
|
||||
// the panic handling in the child created by `libc::fork`.
|
||||
// Memory allocations performed in a child created with `libc::fork` are undefined
|
||||
// behavior in most operating systems.
|
||||
// Accessing LOCAL_PANIC_COUNT in a child created by `libc::fork` would lead to a memory
|
||||
// allocation. Only GLOBAL_PANIC_COUNT can be accessed in this situation. This is
|
||||
// sufficient because a child process will always have exactly one thread only.
|
||||
// See also #85261 for details.
|
||||
//
|
||||
// This could be viewed as a struct containing a single bit and an n-1-bit
|
||||
// value, but if we wrote it like that it would be more than a single word,
|
||||
// and even a newtype around usize would be clumsy because we need atomics.
|
||||
// But we use such a tuple for the return type of increase().
|
||||
//
|
||||
// Stealing a bit is fine because it just amounts to assuming that each
|
||||
// panicking thread consumes at least 2 bytes of address space.
|
||||
static GLOBAL_PANIC_COUNT: Atomic<usize> = AtomicUsize::new(0);
|
||||
|
||||
// Increases the global and local panic count, and returns whether an
|
||||
// immediate abort is required.
|
||||
//
|
||||
// This also updates thread-local state to keep track of whether a panic
|
||||
// hook is currently executing.
|
||||
pub fn increase(run_panic_hook: bool) -> Option<MustAbort> {
|
||||
let global_count = GLOBAL_PANIC_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
if global_count & ALWAYS_ABORT_FLAG != 0 {
|
||||
// Do *not* access thread-local state, we might be after a `fork`.
|
||||
return Some(MustAbort::AlwaysAbort);
|
||||
}
|
||||
|
||||
LOCAL_PANIC_COUNT.with(|c| {
|
||||
let (count, in_panic_hook) = c.get();
|
||||
if in_panic_hook {
|
||||
return Some(MustAbort::PanicInHook);
|
||||
}
|
||||
c.set((count + 1, run_panic_hook));
|
||||
None
|
||||
})
|
||||
}
|
||||
|
||||
pub fn finished_panic_hook() {
|
||||
LOCAL_PANIC_COUNT.with(|c| {
|
||||
let (count, _) = c.get();
|
||||
c.set((count, false));
|
||||
});
|
||||
}
|
||||
|
||||
pub fn decrease() {
|
||||
GLOBAL_PANIC_COUNT.fetch_sub(1, Ordering::Relaxed);
|
||||
LOCAL_PANIC_COUNT.with(|c| {
|
||||
let (count, _) = c.get();
|
||||
c.set((count - 1, false));
|
||||
});
|
||||
}
|
||||
|
||||
pub fn set_always_abort() {
|
||||
GLOBAL_PANIC_COUNT.fetch_or(ALWAYS_ABORT_FLAG, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
// Disregards ALWAYS_ABORT_FLAG
|
||||
#[must_use]
|
||||
pub fn get_count() -> usize {
|
||||
LOCAL_PANIC_COUNT.with(|c| c.get().0)
|
||||
}
|
||||
|
||||
// Disregards ALWAYS_ABORT_FLAG
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn count_is_zero() -> bool {
|
||||
if GLOBAL_PANIC_COUNT.load(Ordering::Relaxed) & !ALWAYS_ABORT_FLAG == 0 {
|
||||
// Fast path: if `GLOBAL_PANIC_COUNT` is zero, all threads
|
||||
// (including the current one) will have `LOCAL_PANIC_COUNT`
|
||||
// equal to zero, so TLS access can be avoided.
|
||||
//
|
||||
// In terms of performance, a relaxed atomic load is similar to a normal
|
||||
// aligned memory read (e.g., a mov instruction in x86), but with some
|
||||
// compiler optimization restrictions. On the other hand, a TLS access
|
||||
// might require calling a non-inlinable function (such as `__tls_get_addr`
|
||||
// when using the GD TLS model).
|
||||
true
|
||||
} else {
|
||||
is_zero_slow_path()
|
||||
}
|
||||
}
|
||||
|
||||
// Slow path is in a separate function to reduce the amount of code
|
||||
// inlined from `count_is_zero`.
|
||||
#[inline(never)]
|
||||
#[cold]
|
||||
fn is_zero_slow_path() -> bool {
|
||||
LOCAL_PANIC_COUNT.with(|c| c.get().0 == 0)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub use realstd::rt::panic_count;
|
||||
|
||||
/// Invoke a closure, capturing the cause of an unwinding panic if one occurs.
|
||||
#[cfg(panic = "immediate-abort")]
|
||||
pub unsafe fn catch_unwind<R, F: FnOnce() -> R>(f: F) -> Result<R, Box<dyn Any + Send>> {
|
||||
Ok(f())
|
||||
}
|
||||
|
||||
/// Invoke a closure, capturing the cause of an unwinding panic if one occurs.
|
||||
#[cfg(not(panic = "immediate-abort"))]
|
||||
pub unsafe fn catch_unwind<R, F: FnOnce() -> R>(f: F) -> Result<R, Box<dyn Any + Send>> {
|
||||
union Data<F, R> {
|
||||
f: ManuallyDrop<F>,
|
||||
r: ManuallyDrop<R>,
|
||||
p: ManuallyDrop<Box<dyn Any + Send>>,
|
||||
}
|
||||
|
||||
// We do some sketchy operations with ownership here for the sake of
|
||||
// performance. We can only pass pointers down to `do_call` (can't pass
|
||||
// objects by value), so we do all the ownership tracking here manually
|
||||
// using a union.
|
||||
//
|
||||
// We go through a transition where:
|
||||
//
|
||||
// * First, we set the data field `f` to be the argumentless closure that we're going to call.
|
||||
// * When we make the function call, the `do_call` function below, we take
|
||||
// ownership of the function pointer. At this point the `data` union is
|
||||
// entirely uninitialized.
|
||||
// * If the closure successfully returns, we write the return value into the
|
||||
// data's return slot (field `r`).
|
||||
// * If the closure panics (`do_catch` below), we write the panic payload into field `p`.
|
||||
// * Finally, when we come back out of the `try` intrinsic we're
|
||||
// in one of two states:
|
||||
//
|
||||
// 1. The closure didn't panic, in which case the return value was
|
||||
// filled in. We move it out of `data.r` and return it.
|
||||
// 2. The closure panicked, in which case the panic payload was
|
||||
// filled in. We move it out of `data.p` and return it.
|
||||
//
|
||||
// Once we stack all that together we should have the "most efficient'
|
||||
// method of calling a catch panic whilst juggling ownership.
|
||||
let mut data = Data { f: ManuallyDrop::new(f) };
|
||||
|
||||
let data_ptr = (&raw mut data) as *mut u8;
|
||||
// SAFETY:
|
||||
//
|
||||
// Access to the union's fields: this is `std` and we know that the `catch_unwind`
|
||||
// intrinsic fills in the `r` or `p` union field based on its return value.
|
||||
//
|
||||
// The call to `intrinsics::catch_unwind` is made safe by:
|
||||
// - `do_call`, the first argument, can be called with the initial `data_ptr`.
|
||||
// - `do_catch`, the second argument, can be called with the `data_ptr` as well.
|
||||
// See their safety preconditions for more information
|
||||
unsafe {
|
||||
return if intrinsics::catch_unwind(do_call::<F, R>, data_ptr, do_catch::<F, R>) == 0 {
|
||||
Ok(ManuallyDrop::into_inner(data.r))
|
||||
} else {
|
||||
Err(ManuallyDrop::into_inner(data.p))
|
||||
};
|
||||
}
|
||||
|
||||
// We consider unwinding to be rare, so mark this function as cold. However,
|
||||
// do not mark it no-inline -- that decision is best to leave to the
|
||||
// optimizer (in most cases this function is not inlined even as a normal,
|
||||
// non-cold function, though, as of the writing of this comment).
|
||||
#[cold]
|
||||
#[optimize(size)]
|
||||
unsafe fn cleanup(payload: *mut u8) -> Box<dyn Any + Send + 'static> {
|
||||
// SAFETY: The whole unsafe block hinges on a correct implementation of
|
||||
// the panic handler `__rust_panic_cleanup`. As such we can only
|
||||
// assume it returns the correct thing for `Box::from_raw` to work
|
||||
// without undefined behavior.
|
||||
let obj = unsafe { Box::from_raw(__rust_panic_cleanup(payload)) };
|
||||
panic_count::decrease();
|
||||
obj
|
||||
}
|
||||
|
||||
// SAFETY:
|
||||
// data must be non-NUL, correctly aligned, and a pointer to a `Data<F, R>`
|
||||
// Its must contains a valid `f` (type: F) value that can be use to fill
|
||||
// `data.r`.
|
||||
//
|
||||
// This function cannot be marked as `unsafe` because `intrinsics::catch_unwind`
|
||||
// expects normal function pointers.
|
||||
#[inline]
|
||||
fn do_call<F: FnOnce() -> R, R>(data: *mut u8) {
|
||||
// SAFETY: this is the responsibility of the caller, see above.
|
||||
unsafe {
|
||||
let data = data as *mut Data<F, R>;
|
||||
let data = &mut (*data);
|
||||
let f = ManuallyDrop::take(&mut data.f);
|
||||
data.r = ManuallyDrop::new(f());
|
||||
}
|
||||
}
|
||||
|
||||
// We *do* want this part of the catch to be inlined: this allows the
|
||||
// compiler to properly track accesses to the Data union and optimize it
|
||||
// away most of the time.
|
||||
//
|
||||
// SAFETY:
|
||||
// data must be non-NUL, correctly aligned, and a pointer to a `Data<F, R>`
|
||||
// Since this uses `cleanup` it also hinges on a correct implementation of
|
||||
// `__rustc_panic_cleanup`.
|
||||
//
|
||||
// This function cannot be marked as `unsafe` because `intrinsics::catch_unwind`
|
||||
// expects normal function pointers.
|
||||
#[inline]
|
||||
#[rustc_nounwind] // `intrinsic::catch_unwind` requires catch fn to be nounwind
|
||||
fn do_catch<F: FnOnce() -> R, R>(data: *mut u8, payload: *mut u8) {
|
||||
// SAFETY: this is the responsibility of the caller, see above.
|
||||
//
|
||||
// When `__rustc_panic_cleaner` is correctly implemented we can rely
|
||||
// on `obj` being the correct thing to pass to `data.p` (after wrapping
|
||||
// in `ManuallyDrop`).
|
||||
unsafe {
|
||||
let data = data as *mut Data<F, R>;
|
||||
let data = &mut (*data);
|
||||
let obj = cleanup(payload);
|
||||
data.p = ManuallyDrop::new(obj);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Determines whether the current thread is unwinding because of panic.
|
||||
#[inline]
|
||||
pub fn panicking() -> bool {
|
||||
!panic_count::count_is_zero()
|
||||
}
|
||||
|
||||
/// Entry point of panics from the core crate (`panic_impl` lang item).
|
||||
#[cfg(not(any(test, doctest)))]
|
||||
#[panic_handler]
|
||||
pub fn panic_handler(info: &core::panic::PanicInfo<'_>) -> ! {
|
||||
struct FormatStringPayload<'a> {
|
||||
inner: &'a core::panic::PanicMessage<'a>,
|
||||
string: Option<String>,
|
||||
}
|
||||
|
||||
impl FormatStringPayload<'_> {
|
||||
fn fill(&mut self) -> &mut String {
|
||||
let inner = self.inner;
|
||||
// Lazily, the first time this gets called, run the actual string formatting.
|
||||
self.string.get_or_insert_with(|| {
|
||||
let mut s = String::new();
|
||||
let mut fmt = fmt::Formatter::new(&mut s, fmt::FormattingOptions::new());
|
||||
let _err = fmt::Display::fmt(&inner, &mut fmt);
|
||||
s
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl PanicPayload for FormatStringPayload<'_> {
|
||||
fn take_box(&mut self) -> *mut (dyn Any + Send) {
|
||||
// We do two allocations here, unfortunately. But (a) they're required with the current
|
||||
// scheme, and (b) we don't handle panic + OOM properly anyway (see comment in
|
||||
// begin_panic below).
|
||||
let contents = mem::take(self.fill());
|
||||
Box::into_raw(Box::new(contents))
|
||||
}
|
||||
|
||||
fn get(&mut self) -> &(dyn Any + Send) {
|
||||
self.fill()
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for FormatStringPayload<'_> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
if let Some(s) = &self.string {
|
||||
f.write_str(s)
|
||||
} else {
|
||||
fmt::Display::fmt(&self.inner, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct StaticStrPayload(&'static str);
|
||||
|
||||
unsafe impl PanicPayload for StaticStrPayload {
|
||||
fn take_box(&mut self) -> *mut (dyn Any + Send) {
|
||||
Box::into_raw(Box::new(self.0))
|
||||
}
|
||||
|
||||
fn get(&mut self) -> &(dyn Any + Send) {
|
||||
&self.0
|
||||
}
|
||||
|
||||
fn as_str(&mut self) -> Option<&str> {
|
||||
Some(self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for StaticStrPayload {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str(self.0)
|
||||
}
|
||||
}
|
||||
|
||||
let loc = info.location().unwrap(); // The current implementation always returns Some
|
||||
let msg = info.message();
|
||||
crate::sys::backtrace::__rust_end_short_backtrace(move || {
|
||||
if let Some(s) = msg.as_str() {
|
||||
panic_with_hook(
|
||||
&mut StaticStrPayload(s),
|
||||
loc,
|
||||
info.can_unwind(),
|
||||
info.force_no_backtrace(),
|
||||
);
|
||||
} else {
|
||||
panic_with_hook(
|
||||
&mut FormatStringPayload { inner: &msg, string: None },
|
||||
loc,
|
||||
info.can_unwind(),
|
||||
info.force_no_backtrace(),
|
||||
);
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// This is the entry point of panicking for the non-format-string variants of
|
||||
/// panic!() and assert!(). In particular, this is the only entry point that supports
|
||||
/// arbitrary payloads, not just format strings.
|
||||
#[unstable(feature = "libstd_sys_internals", reason = "used by the panic! macro", issue = "none")]
|
||||
#[cfg_attr(not(any(test, doctest)), lang = "begin_panic")]
|
||||
// lang item for CTFE panic support
|
||||
// never inline unless panic=immediate-abort to avoid code
|
||||
// bloat at the call sites as much as possible
|
||||
#[cfg_attr(not(panic = "immediate-abort"), inline(never), cold, optimize(size))]
|
||||
#[cfg_attr(panic = "immediate-abort", inline)]
|
||||
#[track_caller]
|
||||
#[rustc_do_not_const_check] // hooked by const-eval
|
||||
pub const fn begin_panic<M: Any + Send>(msg: M) -> ! {
|
||||
if cfg!(panic = "immediate-abort") {
|
||||
intrinsics::abort()
|
||||
}
|
||||
|
||||
struct Payload<A> {
|
||||
inner: Option<A>,
|
||||
}
|
||||
|
||||
unsafe impl<A: Send + 'static> PanicPayload for Payload<A> {
|
||||
fn take_box(&mut self) -> *mut (dyn Any + Send) {
|
||||
// Note that this should be the only allocation performed in this code path. Currently
|
||||
// this means that panic!() on OOM will invoke this code path, but then again we're not
|
||||
// really ready for panic on OOM anyway. If we do start doing this, then we should
|
||||
// propagate this allocation to be performed in the parent of this thread instead of the
|
||||
// thread that's panicking.
|
||||
let data = match self.inner.take() {
|
||||
Some(a) => Box::new(a) as Box<dyn Any + Send>,
|
||||
None => process::abort(),
|
||||
};
|
||||
Box::into_raw(data)
|
||||
}
|
||||
|
||||
fn get(&mut self) -> &(dyn Any + Send) {
|
||||
match self.inner {
|
||||
Some(ref a) => a,
|
||||
None => process::abort(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<A: 'static> fmt::Display for Payload<A> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match &self.inner {
|
||||
Some(a) => f.write_str(payload_as_str(a)),
|
||||
None => process::abort(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let loc = Location::caller();
|
||||
crate::sys::backtrace::__rust_end_short_backtrace(move || {
|
||||
panic_with_hook(
|
||||
&mut Payload { inner: Some(msg) },
|
||||
loc,
|
||||
/* can_unwind */ true,
|
||||
/* force_no_backtrace */ false,
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
fn payload_as_str(payload: &dyn Any) -> &str {
|
||||
if let Some(&s) = payload.downcast_ref::<&'static str>() {
|
||||
s
|
||||
} else if let Some(s) = payload.downcast_ref::<String>() {
|
||||
s.as_str()
|
||||
} else {
|
||||
"Box<dyn Any>"
|
||||
}
|
||||
}
|
||||
|
||||
/// Central point for dispatching panics.
|
||||
///
|
||||
/// Executes the primary logic for a panic, including checking for recursive
|
||||
/// panics, panic hooks, and finally dispatching to the panic runtime to either
|
||||
/// abort or unwind.
|
||||
#[optimize(size)]
|
||||
fn panic_with_hook(
|
||||
payload: &mut dyn PanicPayload,
|
||||
location: &Location<'_>,
|
||||
can_unwind: bool,
|
||||
force_no_backtrace: bool,
|
||||
) -> ! {
|
||||
let must_abort = panic_count::increase(true);
|
||||
|
||||
// Check if we need to abort immediately.
|
||||
if let Some(must_abort) = must_abort {
|
||||
match must_abort {
|
||||
panic_count::MustAbort::PanicInHook => {
|
||||
// Don't try to format the message in this case, perhaps that is causing the
|
||||
// recursive panics. However if the message is just a string, no user-defined
|
||||
// code is involved in printing it, so that is risk-free.
|
||||
let message: &str = payload.as_str().unwrap_or_default();
|
||||
rtprintpanic!(
|
||||
"panicked at {location}:\n{message}\nthread panicked while processing panic. aborting.\n"
|
||||
);
|
||||
}
|
||||
panic_count::MustAbort::AlwaysAbort => {
|
||||
// Unfortunately, this does not print a backtrace, because creating
|
||||
// a `Backtrace` will allocate, which we must avoid here.
|
||||
rtprintpanic!("aborting due to panic at {location}:\n{payload}\n");
|
||||
}
|
||||
}
|
||||
crate::process::abort();
|
||||
}
|
||||
|
||||
match *HOOK.read() {
|
||||
// Some platforms (like wasm) know that printing to stderr won't ever actually
|
||||
// print anything, and if that's the case we can skip the default
|
||||
// hook. Since string formatting happens lazily when calling `payload`
|
||||
// methods, this means we avoid formatting the string at all!
|
||||
// (The panic runtime might still call `payload.take_box()` though and trigger
|
||||
// formatting.)
|
||||
Hook::Default if panic_output().is_none() => {}
|
||||
Hook::Default => {
|
||||
default_hook(&PanicHookInfo::new(
|
||||
location,
|
||||
payload.get(),
|
||||
can_unwind,
|
||||
force_no_backtrace,
|
||||
));
|
||||
}
|
||||
Hook::Custom(ref hook) => {
|
||||
hook(&PanicHookInfo::new(location, payload.get(), can_unwind, force_no_backtrace));
|
||||
}
|
||||
}
|
||||
|
||||
// Indicate that we have finished executing the panic hook. After this point
|
||||
// it is fine if there is a panic while executing destructors, as long as it
|
||||
// it contained within a `catch_unwind`.
|
||||
panic_count::finished_panic_hook();
|
||||
|
||||
if !can_unwind {
|
||||
// If a thread panics while running destructors or tries to unwind
|
||||
// through a nounwind function (e.g. extern "C") then we cannot continue
|
||||
// unwinding and have to abort immediately.
|
||||
rtprintpanic!("thread caused non-unwinding panic. aborting.\n");
|
||||
crate::process::abort();
|
||||
}
|
||||
|
||||
rust_panic(payload)
|
||||
}
|
||||
|
||||
/// This is the entry point for `resume_unwind`.
|
||||
/// It just forwards the payload to the panic runtime.
|
||||
#[cfg_attr(panic = "immediate-abort", inline)]
|
||||
pub fn resume_unwind(payload: Box<dyn Any + Send>) -> ! {
|
||||
panic_count::increase(false);
|
||||
|
||||
struct RewrapBox(Box<dyn Any + Send>);
|
||||
|
||||
unsafe impl PanicPayload for RewrapBox {
|
||||
fn take_box(&mut self) -> *mut (dyn Any + Send) {
|
||||
Box::into_raw(mem::replace(&mut self.0, Box::new(())))
|
||||
}
|
||||
|
||||
fn get(&mut self) -> &(dyn Any + Send) {
|
||||
&*self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for RewrapBox {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str(payload_as_str(&self.0))
|
||||
}
|
||||
}
|
||||
|
||||
rust_panic(&mut RewrapBox(payload))
|
||||
}
|
||||
|
||||
/// A function with a fixed suffix (through `rustc_std_internal_symbol`)
|
||||
/// on which to slap yer breakpoints.
|
||||
#[inline(never)]
|
||||
#[cfg_attr(not(test), rustc_std_internal_symbol)]
|
||||
#[cfg(not(panic = "immediate-abort"))]
|
||||
fn rust_panic(msg: &mut dyn PanicPayload) -> ! {
|
||||
let code = unsafe { __rust_start_panic(msg) };
|
||||
rtabort!("failed to initiate panic, error {code}")
|
||||
}
|
||||
|
||||
#[cfg_attr(not(test), rustc_std_internal_symbol)]
|
||||
#[cfg(panic = "immediate-abort")]
|
||||
fn rust_panic(_: &mut dyn PanicPayload) -> ! {
|
||||
crate::intrinsics::abort();
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,181 +0,0 @@
|
||||
pub mod rust_2024 {
|
||||
pub use crate::print;
|
||||
pub use crate::println;
|
||||
pub use alloc::format;
|
||||
pub use alloc::vec;
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[doc(no_inline)]
|
||||
pub use crate::borrow::ToOwned;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[doc(no_inline)]
|
||||
pub use crate::boxed::Box;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[doc(no_inline)]
|
||||
pub use crate::string::{String, ToString};
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[doc(no_inline)]
|
||||
pub use crate::vec::Vec;
|
||||
|
||||
// Re-exported built-in macros and traits
|
||||
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
|
||||
#[doc(no_inline)]
|
||||
#[expect(deprecated)]
|
||||
pub use core::prelude::v1::{
|
||||
Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd, assert, assert_eq,
|
||||
assert_ne, cfg, column, compile_error, concat, debug_assert, debug_assert_eq,
|
||||
debug_assert_ne, env, file, format_args, include, include_bytes, include_str, line,
|
||||
matches, module_path, option_env, stringify, todo, r#try, unimplemented, unreachable,
|
||||
write, writeln,
|
||||
};
|
||||
|
||||
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
|
||||
#[doc(no_inline)]
|
||||
pub use crate::thread_local;
|
||||
|
||||
#[stable(feature = "cfg_select", since = "1.95.0")]
|
||||
#[doc(no_inline)]
|
||||
pub use core::prelude::v1::cfg_select;
|
||||
|
||||
#[unstable(
|
||||
feature = "concat_bytes",
|
||||
issue = "87555",
|
||||
reason = "`concat_bytes` is not stable enough for use and is subject to change"
|
||||
)]
|
||||
#[doc(no_inline)]
|
||||
pub use core::prelude::v1::concat_bytes;
|
||||
|
||||
#[unstable(feature = "const_format_args", issue = "none")]
|
||||
#[doc(no_inline)]
|
||||
pub use core::prelude::v1::const_format_args;
|
||||
|
||||
#[unstable(
|
||||
feature = "log_syntax",
|
||||
issue = "29598",
|
||||
reason = "`log_syntax!` is not stable enough for use and is subject to change"
|
||||
)]
|
||||
#[doc(no_inline)]
|
||||
pub use core::prelude::v1::log_syntax;
|
||||
|
||||
#[unstable(
|
||||
feature = "trace_macros",
|
||||
issue = "29598",
|
||||
reason = "`trace_macros` is not stable enough for use and is subject to change"
|
||||
)]
|
||||
#[doc(no_inline)]
|
||||
pub use core::prelude::v1::trace_macros;
|
||||
|
||||
// Do not `doc(no_inline)` so that they become doc items on their own
|
||||
// (no public module for them to be re-exported from).
|
||||
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
|
||||
pub use core::prelude::v1::{
|
||||
alloc_error_handler, bench, derive, global_allocator, test, test_case,
|
||||
};
|
||||
|
||||
#[unstable(feature = "derive_const", issue = "118304")]
|
||||
pub use core::prelude::v1::derive_const;
|
||||
|
||||
// Do not `doc(no_inline)` either.
|
||||
#[unstable(
|
||||
feature = "cfg_accessible",
|
||||
issue = "64797",
|
||||
reason = "`cfg_accessible` is not fully implemented"
|
||||
)]
|
||||
pub use core::prelude::v1::cfg_accessible;
|
||||
|
||||
// Do not `doc(no_inline)` either.
|
||||
#[unstable(
|
||||
feature = "cfg_eval",
|
||||
issue = "82679",
|
||||
reason = "`cfg_eval` is a recently implemented feature"
|
||||
)]
|
||||
pub use core::prelude::v1::cfg_eval;
|
||||
|
||||
// Do not `doc(no_inline)` either.
|
||||
#[unstable(
|
||||
feature = "type_ascription",
|
||||
issue = "23416",
|
||||
reason = "placeholder syntax for type ascription"
|
||||
)]
|
||||
pub use core::prelude::v1::type_ascribe;
|
||||
|
||||
// Do not `doc(no_inline)` either.
|
||||
#[unstable(
|
||||
feature = "deref_patterns",
|
||||
issue = "87121",
|
||||
reason = "placeholder syntax for deref patterns"
|
||||
)]
|
||||
pub use core::prelude::v1::deref;
|
||||
|
||||
// Do not `doc(no_inline)` either.
|
||||
#[unstable(
|
||||
feature = "type_alias_impl_trait",
|
||||
issue = "63063",
|
||||
reason = "`type_alias_impl_trait` has open design concerns"
|
||||
)]
|
||||
pub use core::prelude::v1::define_opaque;
|
||||
|
||||
#[unstable(feature = "extern_item_impls", issue = "125418")]
|
||||
pub use core::prelude::v1::{eii, unsafe_eii};
|
||||
|
||||
#[unstable(feature = "eii_internals", issue = "none")]
|
||||
pub use core::prelude::v1::eii_declaration;
|
||||
|
||||
#[stable(feature = "prelude_2021", since = "1.55.0")]
|
||||
#[doc(no_inline)]
|
||||
pub use core::prelude::rust_2021::*;
|
||||
|
||||
#[stable(feature = "prelude_2024", since = "1.85.0")]
|
||||
#[doc(no_inline)]
|
||||
pub use core::prelude::rust_2024::*;
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[doc(no_inline)]
|
||||
pub use crate::convert::{AsMut, AsRef, From, Into};
|
||||
|
||||
extern crate alloc;
|
||||
|
||||
struct GlobalAllocator;
|
||||
|
||||
#[core::prelude::v1::global_allocator]
|
||||
static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator;
|
||||
|
||||
unsafe impl core::alloc::GlobalAlloc for GlobalAllocator {
|
||||
unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
|
||||
crate::syscall::alloc(layout)
|
||||
}
|
||||
|
||||
unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) {
|
||||
crate::syscall::dealloc(ptr, layout)
|
||||
}
|
||||
}
|
||||
|
||||
// #[panic_handler]
|
||||
// fn panic(_panic_info: &core::panic::PanicInfo) -> ! {
|
||||
// // TODO print
|
||||
// loop {}
|
||||
// }
|
||||
|
||||
/// # Safety
|
||||
/// `argc` and `argv` are passed by the kernel
|
||||
#[unsafe(no_mangle)]
|
||||
pub unsafe extern "C" fn _start(argc: isize, argv: *const *const u8) -> isize {
|
||||
unsafe extern "Rust" {
|
||||
fn main(argc: isize, argv: *const *const u8) -> isize;
|
||||
}
|
||||
|
||||
unsafe { main(argc, argv) }
|
||||
}
|
||||
|
||||
#[lang = "start"]
|
||||
pub fn lang_start<T: crate::process::Termination + 'static>(
|
||||
main: fn() -> T,
|
||||
argc: isize,
|
||||
argv: *const *const u8,
|
||||
_sigpipe: u8,
|
||||
) -> isize {
|
||||
println!("{}", argc);
|
||||
println!("{:?}", argv);
|
||||
main().report().to_isize()
|
||||
}
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
pub struct ExitCode(isize);
|
||||
|
||||
impl ExitCode {
|
||||
pub const SUCCESS: ExitCode = ExitCode(0);
|
||||
|
||||
pub fn to_isize(self) -> isize {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
#[lang = "termination"]
|
||||
pub trait Termination {
|
||||
/// Is called to get the representation of the value as status code.
|
||||
/// This status code is returned to the operating system.
|
||||
fn report(self) -> ExitCode;
|
||||
}
|
||||
|
||||
impl Termination for () {
|
||||
#[inline]
|
||||
fn report(self) -> ExitCode {
|
||||
ExitCode::SUCCESS
|
||||
}
|
||||
}
|
||||
impl Termination for isize {
|
||||
#[inline]
|
||||
fn report(self) -> ExitCode {
|
||||
ExitCode(self)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn abort() -> ! {
|
||||
loop {}
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
macro_rules! rtabort {
|
||||
($($t:tt)*) => {{
|
||||
loop {}
|
||||
}};
|
||||
}
|
||||
macro_rules! rtprintpanic {
|
||||
($($t:tt)*) => {{}};
|
||||
}
|
||||
@@ -1,85 +0,0 @@
|
||||
pub mod barrier;
|
||||
pub mod lazy_lock;
|
||||
pub mod mpmc;
|
||||
pub mod mpsc;
|
||||
pub mod nonpoison;
|
||||
pub mod once;
|
||||
pub mod once_lock;
|
||||
pub mod poison;
|
||||
pub mod reentrant_lock;
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use core::sync::atomic;
|
||||
|
||||
pub use once::Once;
|
||||
pub use once::OnceState;
|
||||
|
||||
pub use poison::LockResult;
|
||||
pub use poison::Mutex;
|
||||
pub use poison::MutexGuard;
|
||||
pub use poison::PoisonError;
|
||||
pub use poison::TryLockError;
|
||||
pub use poison::TryLockResult;
|
||||
pub use poison::Condvar;
|
||||
pub use poison::RwLock;
|
||||
pub use once_lock::OnceLock;
|
||||
pub use reentrant_lock::ReentrantLock;
|
||||
pub use reentrant_lock::ReentrantLockGuard;
|
||||
pub use alloc_crate::sync::Arc;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
#[stable(feature = "wait_timeout", since = "1.5.0")]
|
||||
pub struct WaitTimeoutResult(bool);
|
||||
|
||||
impl WaitTimeoutResult {
|
||||
/// Returns `true` if the wait was known to have timed out.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// This example spawns a thread which will sleep 20 milliseconds before
|
||||
/// updating a boolean value and then notifying the condvar.
|
||||
///
|
||||
/// The main thread will wait with a 10 millisecond timeout on the condvar
|
||||
/// and will leave the loop upon timeout.
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{Arc, Condvar, Mutex};
|
||||
/// use std::thread;
|
||||
/// use std::time::Duration;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// # let handle =
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
///
|
||||
/// // Let's wait 20 milliseconds before notifying the condvar.
|
||||
/// thread::sleep(Duration::from_millis(20));
|
||||
///
|
||||
/// let mut started = lock.lock().unwrap();
|
||||
/// // We update the boolean value.
|
||||
/// *started = true;
|
||||
/// cvar.notify_one();
|
||||
/// });
|
||||
///
|
||||
/// // Wait for the thread to start up.
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// loop {
|
||||
/// // Let's put a timeout on the condvar's wait.
|
||||
/// let result = cvar.wait_timeout(lock.lock().unwrap(), Duration::from_millis(10)).unwrap();
|
||||
/// // 10 milliseconds have passed.
|
||||
/// if result.1.timed_out() {
|
||||
/// // timed out now and we can leave.
|
||||
/// break
|
||||
/// }
|
||||
/// }
|
||||
/// # // Prevent leaks for Miri.
|
||||
/// # let _ = handle.join();
|
||||
/// ```
|
||||
#[must_use]
|
||||
#[stable(feature = "wait_timeout", since = "1.5.0")]
|
||||
pub fn timed_out(&self) -> bool {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
@@ -1,167 +0,0 @@
|
||||
use crate::fmt;
|
||||
use crate::panic::RefUnwindSafe;
|
||||
use crate::sync::nonpoison::{Condvar, Mutex};
|
||||
|
||||
/// A barrier enables multiple threads to synchronize the beginning
|
||||
/// of some computation.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Barrier;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let n = 10;
|
||||
/// let barrier = Barrier::new(n);
|
||||
/// thread::scope(|s| {
|
||||
/// for _ in 0..n {
|
||||
/// // The same messages will be printed together.
|
||||
/// // You will NOT see any interleaving.
|
||||
/// s.spawn(|| {
|
||||
/// println!("before wait");
|
||||
/// barrier.wait();
|
||||
/// println!("after wait");
|
||||
/// });
|
||||
/// }
|
||||
/// });
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub struct Barrier {
|
||||
lock: Mutex<BarrierState>,
|
||||
cvar: Condvar,
|
||||
num_threads: usize,
|
||||
}
|
||||
|
||||
#[stable(feature = "unwind_safe_lock_refs", since = "1.12.0")]
|
||||
impl RefUnwindSafe for Barrier {}
|
||||
|
||||
// The inner state of a double barrier
|
||||
struct BarrierState {
|
||||
count: usize,
|
||||
generation_id: usize,
|
||||
}
|
||||
|
||||
/// A `BarrierWaitResult` is returned by [`Barrier::wait()`] when all threads
|
||||
/// in the [`Barrier`] have rendezvoused.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Barrier;
|
||||
///
|
||||
/// let barrier = Barrier::new(1);
|
||||
/// let barrier_wait_result = barrier.wait();
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub struct BarrierWaitResult(bool);
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
|
||||
impl fmt::Debug for Barrier {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("Barrier").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl Barrier {
|
||||
/// Creates a new barrier that can block a given number of threads.
|
||||
///
|
||||
/// A barrier will block all threads which call [`wait()`] until the `n`th thread calls [`wait()`],
|
||||
/// and then wake up all threads at once.
|
||||
///
|
||||
/// [`wait()`]: Barrier::wait
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Barrier;
|
||||
///
|
||||
/// let barrier = Barrier::new(10);
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[rustc_const_stable(feature = "const_barrier", since = "1.78.0")]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub const fn new(n: usize) -> Barrier {
|
||||
Barrier {
|
||||
lock: Mutex::new(BarrierState { count: 0, generation_id: 0 }),
|
||||
cvar: Condvar::new(),
|
||||
num_threads: n,
|
||||
}
|
||||
}
|
||||
|
||||
/// Blocks the current thread until all threads have rendezvoused here.
|
||||
///
|
||||
/// Barriers are re-usable after all threads have rendezvoused once, and can
|
||||
/// be used continuously.
|
||||
///
|
||||
/// A single (arbitrary) thread will receive a [`BarrierWaitResult`] that
|
||||
/// returns `true` from [`BarrierWaitResult::is_leader()`] when returning
|
||||
/// from this function, and all other threads will receive a result that
|
||||
/// will return `false` from [`BarrierWaitResult::is_leader()`].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Barrier;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let n = 10;
|
||||
/// let barrier = Barrier::new(n);
|
||||
/// thread::scope(|s| {
|
||||
/// for _ in 0..n {
|
||||
/// // The same messages will be printed together.
|
||||
/// // You will NOT see any interleaving.
|
||||
/// s.spawn(|| {
|
||||
/// println!("before wait");
|
||||
/// barrier.wait();
|
||||
/// println!("after wait");
|
||||
/// });
|
||||
/// }
|
||||
/// });
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub fn wait(&self) -> BarrierWaitResult {
|
||||
let mut lock = self.lock.lock();
|
||||
let local_gen = lock.generation_id;
|
||||
lock.count += 1;
|
||||
if lock.count < self.num_threads {
|
||||
self.cvar.wait_while(&mut lock, |state| local_gen == state.generation_id);
|
||||
BarrierWaitResult(false)
|
||||
} else {
|
||||
lock.count = 0;
|
||||
lock.generation_id = lock.generation_id.wrapping_add(1);
|
||||
self.cvar.notify_all();
|
||||
BarrierWaitResult(true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
|
||||
impl fmt::Debug for BarrierWaitResult {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("BarrierWaitResult").field("is_leader", &self.is_leader()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl BarrierWaitResult {
|
||||
/// Returns `true` if this thread is the "leader thread" for the call to
|
||||
/// [`Barrier::wait()`].
|
||||
///
|
||||
/// Only one thread will have `true` returned from their result, all other
|
||||
/// threads will have `false` returned.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Barrier;
|
||||
///
|
||||
/// let barrier = Barrier::new(1);
|
||||
/// let barrier_wait_result = barrier.wait();
|
||||
/// println!("{:?}", barrier_wait_result.is_leader());
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[must_use]
|
||||
pub fn is_leader(&self) -> bool {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
@@ -1,422 +0,0 @@
|
||||
use super::once::OnceExclusiveState;
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::mem::ManuallyDrop;
|
||||
use crate::ops::{Deref, DerefMut};
|
||||
use crate::panic::{RefUnwindSafe, UnwindSafe};
|
||||
use crate::sync::Once;
|
||||
use crate::{fmt, ptr};
|
||||
|
||||
// We use the state of a Once as discriminant value. Upon creation, the state is
|
||||
// "incomplete" and `f` contains the initialization closure. In the first call to
|
||||
// `call_once`, `f` is taken and run. If it succeeds, `value` is set and the state
|
||||
// is changed to "complete". If it panics, the Once is poisoned, so none of the
|
||||
// two fields is initialized.
|
||||
union Data<T, F> {
|
||||
value: ManuallyDrop<T>,
|
||||
f: ManuallyDrop<F>,
|
||||
}
|
||||
|
||||
/// A value which is initialized on the first access.
|
||||
///
|
||||
/// This type is a thread-safe [`LazyCell`], and can be used in statics.
|
||||
/// Since initialization may be called from multiple threads, any
|
||||
/// dereferencing call will block the calling thread if another
|
||||
/// initialization routine is currently running.
|
||||
///
|
||||
/// [`LazyCell`]: crate::cell::LazyCell
|
||||
///
|
||||
/// # Poisoning
|
||||
///
|
||||
/// If the initialization closure passed to [`LazyLock::new`] panics, the lock will be poisoned.
|
||||
/// Once the lock is poisoned, any threads that attempt to access this lock (via a dereference
|
||||
/// or via an explicit call to [`force()`]) will panic.
|
||||
///
|
||||
/// This concept is similar to that of poisoning in the [`std::sync::poison`] module. A key
|
||||
/// difference, however, is that poisoning in `LazyLock` is _unrecoverable_. All future accesses of
|
||||
/// the lock from other threads will panic, whereas a type in [`std::sync::poison`] like
|
||||
/// [`std::sync::poison::Mutex`] allows recovery via [`PoisonError::into_inner()`].
|
||||
///
|
||||
/// [`force()`]: LazyLock::force
|
||||
/// [`std::sync::poison`]: crate::sync::poison
|
||||
/// [`std::sync::poison::Mutex`]: crate::sync::poison::Mutex
|
||||
/// [`PoisonError::into_inner()`]: crate::sync::poison::PoisonError::into_inner
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// Initialize static variables with `LazyLock`.
|
||||
/// ```
|
||||
/// use std::sync::LazyLock;
|
||||
///
|
||||
/// // Note: static items do not call [`Drop`] on program termination, so this won't be deallocated.
|
||||
/// // this is fine, as the OS can deallocate the terminated program faster than we can free memory
|
||||
/// // but tools like valgrind might report "memory leaks" as it isn't obvious this is intentional.
|
||||
/// static DEEP_THOUGHT: LazyLock<String> = LazyLock::new(|| {
|
||||
/// # mod another_crate {
|
||||
/// # pub fn great_question() -> String { "42".to_string() }
|
||||
/// # }
|
||||
/// // M3 Ultra takes about 16 million years in --release config
|
||||
/// another_crate::great_question()
|
||||
/// });
|
||||
///
|
||||
/// // The `String` is built, stored in the `LazyLock`, and returned as `&String`.
|
||||
/// let _ = &*DEEP_THOUGHT;
|
||||
/// ```
|
||||
///
|
||||
/// Initialize fields with `LazyLock`.
|
||||
/// ```
|
||||
/// use std::sync::LazyLock;
|
||||
///
|
||||
/// #[derive(Debug)]
|
||||
/// struct UseCellLock {
|
||||
/// number: LazyLock<u32>,
|
||||
/// }
|
||||
/// fn main() {
|
||||
/// let lock: LazyLock<u32> = LazyLock::new(|| 0u32);
|
||||
///
|
||||
/// let data = UseCellLock { number: lock };
|
||||
/// println!("{}", *data.number);
|
||||
/// }
|
||||
/// ```
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
|
||||
pub struct LazyLock<T, F = fn() -> T> {
|
||||
// FIXME(nonpoison_once): if possible, switch to nonpoison version once it is available
|
||||
once: Once,
|
||||
data: UnsafeCell<Data<T, F>>,
|
||||
}
|
||||
|
||||
impl<T, F: FnOnce() -> T> LazyLock<T, F> {
|
||||
/// Creates a new lazy value with the given initializing function.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::LazyLock;
|
||||
///
|
||||
/// let hello = "Hello, World!".to_string();
|
||||
///
|
||||
/// let lazy = LazyLock::new(|| hello.to_uppercase());
|
||||
///
|
||||
/// assert_eq!(&*lazy, "HELLO, WORLD!");
|
||||
/// ```
|
||||
#[inline]
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
|
||||
#[rustc_const_stable(feature = "lazy_cell", since = "1.80.0")]
|
||||
pub const fn new(f: F) -> LazyLock<T, F> {
|
||||
LazyLock { once: Once::new(), data: UnsafeCell::new(Data { f: ManuallyDrop::new(f) }) }
|
||||
}
|
||||
|
||||
/// Creates a new lazy value that is already initialized.
|
||||
#[inline]
|
||||
#[cfg(test)]
|
||||
pub(crate) fn preinit(value: T) -> LazyLock<T, F> {
|
||||
let once = Once::new();
|
||||
once.call_once(|| {});
|
||||
LazyLock { once, data: UnsafeCell::new(Data { value: ManuallyDrop::new(value) }) }
|
||||
}
|
||||
|
||||
/// Consumes this `LazyLock` returning the stored value.
|
||||
///
|
||||
/// Returns `Ok(value)` if `Lazy` is initialized and `Err(f)` otherwise.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the lock is poisoned.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(lazy_cell_into_inner)]
|
||||
///
|
||||
/// use std::sync::LazyLock;
|
||||
///
|
||||
/// let hello = "Hello, World!".to_string();
|
||||
///
|
||||
/// let lazy = LazyLock::new(|| hello.to_uppercase());
|
||||
///
|
||||
/// assert_eq!(&*lazy, "HELLO, WORLD!");
|
||||
/// assert_eq!(LazyLock::into_inner(lazy).ok(), Some("HELLO, WORLD!".to_string()));
|
||||
/// ```
|
||||
#[unstable(feature = "lazy_cell_into_inner", issue = "125623")]
|
||||
pub fn into_inner(mut this: Self) -> Result<T, F> {
|
||||
let state = this.once.state();
|
||||
match state {
|
||||
OnceExclusiveState::Poisoned => panic_poisoned(),
|
||||
state => {
|
||||
let this = ManuallyDrop::new(this);
|
||||
let data = unsafe { ptr::read(&this.data) }.into_inner();
|
||||
match state {
|
||||
OnceExclusiveState::Incomplete => {
|
||||
Err(ManuallyDrop::into_inner(unsafe { data.f }))
|
||||
}
|
||||
OnceExclusiveState::Complete => {
|
||||
Ok(ManuallyDrop::into_inner(unsafe { data.value }))
|
||||
}
|
||||
OnceExclusiveState::Poisoned => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Forces the evaluation of this lazy value and returns a mutable reference to
|
||||
/// the result.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// If the initialization closure panics (the one that is passed to the [`new()`] method), the
|
||||
/// panic is propagated to the caller, and the lock becomes poisoned. This will cause all future
|
||||
/// accesses of the lock (via [`force()`] or a dereference) to panic.
|
||||
///
|
||||
/// [`new()`]: LazyLock::new
|
||||
/// [`force()`]: LazyLock::force
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::LazyLock;
|
||||
///
|
||||
/// let mut lazy = LazyLock::new(|| 92);
|
||||
///
|
||||
/// let p = LazyLock::force_mut(&mut lazy);
|
||||
/// assert_eq!(*p, 92);
|
||||
/// *p = 44;
|
||||
/// assert_eq!(*lazy, 44);
|
||||
/// ```
|
||||
#[inline]
|
||||
#[stable(feature = "lazy_get", since = "1.94.0")]
|
||||
pub fn force_mut(this: &mut LazyLock<T, F>) -> &mut T {
|
||||
#[cold]
|
||||
/// # Safety
|
||||
/// May only be called when the state is `Incomplete`.
|
||||
unsafe fn really_init_mut<T, F: FnOnce() -> T>(this: &mut LazyLock<T, F>) -> &mut T {
|
||||
struct PoisonOnPanic<'a, T, F>(&'a mut LazyLock<T, F>);
|
||||
impl<T, F> Drop for PoisonOnPanic<'_, T, F> {
|
||||
#[inline]
|
||||
fn drop(&mut self) {
|
||||
self.0.once.set_state(OnceExclusiveState::Poisoned);
|
||||
}
|
||||
}
|
||||
|
||||
// SAFETY: We always poison if the initializer panics (then we never check the data),
|
||||
// or set the data on success.
|
||||
let f = unsafe { ManuallyDrop::take(&mut this.data.get_mut().f) };
|
||||
// INVARIANT: Initiated from mutable reference, don't drop because we read it.
|
||||
let guard = PoisonOnPanic(this);
|
||||
let data = f();
|
||||
guard.0.data.get_mut().value = ManuallyDrop::new(data);
|
||||
guard.0.once.set_state(OnceExclusiveState::Complete);
|
||||
core::mem::forget(guard);
|
||||
// SAFETY: We put the value there above.
|
||||
unsafe { &mut this.data.get_mut().value }
|
||||
}
|
||||
|
||||
let state = this.once.state();
|
||||
match state {
|
||||
OnceExclusiveState::Poisoned => panic_poisoned(),
|
||||
// SAFETY: The `Once` states we completed the initialization.
|
||||
OnceExclusiveState::Complete => unsafe { &mut this.data.get_mut().value },
|
||||
// SAFETY: The state is `Incomplete`.
|
||||
OnceExclusiveState::Incomplete => unsafe { really_init_mut(this) },
|
||||
}
|
||||
}
|
||||
|
||||
/// Forces the evaluation of this lazy value and returns a reference to
|
||||
/// result. This is equivalent to the `Deref` impl, but is explicit.
|
||||
///
|
||||
/// This method will block the calling thread if another initialization
|
||||
/// routine is currently running.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// If the initialization closure panics (the one that is passed to the [`new()`] method), the
|
||||
/// panic is propagated to the caller, and the lock becomes poisoned. This will cause all future
|
||||
/// accesses of the lock (via [`force()`] or a dereference) to panic.
|
||||
///
|
||||
/// [`new()`]: LazyLock::new
|
||||
/// [`force()`]: LazyLock::force
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::LazyLock;
|
||||
///
|
||||
/// let lazy = LazyLock::new(|| 92);
|
||||
///
|
||||
/// assert_eq!(LazyLock::force(&lazy), &92);
|
||||
/// assert_eq!(&*lazy, &92);
|
||||
/// ```
|
||||
#[inline]
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn force(this: &LazyLock<T, F>) -> &T {
|
||||
this.once.call_once_force(|state| {
|
||||
if state.is_poisoned() {
|
||||
panic_poisoned();
|
||||
}
|
||||
|
||||
// SAFETY: `call_once` only runs this closure once, ever.
|
||||
let data = unsafe { &mut *this.data.get() };
|
||||
let f = unsafe { ManuallyDrop::take(&mut data.f) };
|
||||
let value = f();
|
||||
data.value = ManuallyDrop::new(value);
|
||||
});
|
||||
|
||||
// SAFETY:
|
||||
// There are four possible scenarios:
|
||||
// * the closure was called and initialized `value`.
|
||||
// * the closure was called and panicked, so this point is never reached.
|
||||
// * the closure was not called, but a previous call initialized `value`.
|
||||
// * the closure was not called because the Once is poisoned, which we handled above.
|
||||
// So `value` has definitely been initialized and will not be modified again.
|
||||
unsafe { &*(*this.data.get()).value }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, F> LazyLock<T, F> {
|
||||
/// Returns a mutable reference to the value if initialized. Otherwise (if uninitialized or
|
||||
/// poisoned), returns `None`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::LazyLock;
|
||||
///
|
||||
/// let mut lazy = LazyLock::new(|| 92);
|
||||
///
|
||||
/// assert_eq!(LazyLock::get_mut(&mut lazy), None);
|
||||
/// let _ = LazyLock::force(&lazy);
|
||||
/// *LazyLock::get_mut(&mut lazy).unwrap() = 44;
|
||||
/// assert_eq!(*lazy, 44);
|
||||
/// ```
|
||||
#[inline]
|
||||
#[stable(feature = "lazy_get", since = "1.94.0")]
|
||||
pub fn get_mut(this: &mut LazyLock<T, F>) -> Option<&mut T> {
|
||||
// `state()` does not perform an atomic load, so prefer it over `is_complete()`.
|
||||
let state = this.once.state();
|
||||
match state {
|
||||
// SAFETY:
|
||||
// The closure has been run successfully, so `value` has been initialized.
|
||||
OnceExclusiveState::Complete => Some(unsafe { &mut this.data.get_mut().value }),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a reference to the value if initialized. Otherwise (if uninitialized or poisoned),
|
||||
/// returns `None`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::LazyLock;
|
||||
///
|
||||
/// let lazy = LazyLock::new(|| 92);
|
||||
///
|
||||
/// assert_eq!(LazyLock::get(&lazy), None);
|
||||
/// let _ = LazyLock::force(&lazy);
|
||||
/// assert_eq!(LazyLock::get(&lazy), Some(&92));
|
||||
/// ```
|
||||
#[inline]
|
||||
#[stable(feature = "lazy_get", since = "1.94.0")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn get(this: &LazyLock<T, F>) -> Option<&T> {
|
||||
if this.once.is_completed() {
|
||||
// SAFETY:
|
||||
// The closure has been run successfully, so `value` has been initialized
|
||||
// and will not be modified again.
|
||||
Some(unsafe { &(*this.data.get()).value })
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
|
||||
impl<T, F> Drop for LazyLock<T, F> {
|
||||
fn drop(&mut self) {
|
||||
match self.once.state() {
|
||||
OnceExclusiveState::Incomplete => unsafe {
|
||||
ManuallyDrop::drop(&mut self.data.get_mut().f)
|
||||
},
|
||||
OnceExclusiveState::Complete => unsafe {
|
||||
ManuallyDrop::drop(&mut self.data.get_mut().value)
|
||||
},
|
||||
OnceExclusiveState::Poisoned => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
|
||||
impl<T, F: FnOnce() -> T> Deref for LazyLock<T, F> {
|
||||
type Target = T;
|
||||
|
||||
/// Dereferences the value.
|
||||
///
|
||||
/// This method will block the calling thread if another initialization
|
||||
/// routine is currently running.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// If the initialization closure panics (the one that is passed to the [`new()`] method), the
|
||||
/// panic is propagated to the caller, and the lock becomes poisoned. This will cause all future
|
||||
/// accesses of the lock (via [`force()`] or a dereference) to panic.
|
||||
///
|
||||
/// [`new()`]: LazyLock::new
|
||||
/// [`force()`]: LazyLock::force
|
||||
#[inline]
|
||||
fn deref(&self) -> &T {
|
||||
LazyLock::force(self)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "lazy_deref_mut", since = "1.89.0")]
|
||||
impl<T, F: FnOnce() -> T> DerefMut for LazyLock<T, F> {
|
||||
/// # Panics
|
||||
///
|
||||
/// If the initialization closure panics (the one that is passed to the [`new()`] method), the
|
||||
/// panic is propagated to the caller, and the lock becomes poisoned. This will cause all future
|
||||
/// accesses of the lock (via [`force()`] or a dereference) to panic.
|
||||
///
|
||||
/// [`new()`]: LazyLock::new
|
||||
/// [`force()`]: LazyLock::force
|
||||
#[inline]
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
LazyLock::force_mut(self)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
|
||||
impl<T: Default> Default for LazyLock<T> {
|
||||
/// Creates a new lazy value using `Default` as the initializing function.
|
||||
#[inline]
|
||||
fn default() -> LazyLock<T> {
|
||||
LazyLock::new(T::default)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
|
||||
impl<T: fmt::Debug, F> fmt::Debug for LazyLock<T, F> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let mut d = f.debug_tuple("LazyLock");
|
||||
match LazyLock::get(self) {
|
||||
Some(v) => d.field(v),
|
||||
None => d.field(&format_args!("<uninit>")),
|
||||
};
|
||||
d.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[cold]
|
||||
#[inline(never)]
|
||||
fn panic_poisoned() -> ! {
|
||||
panic!("LazyLock instance has previously been poisoned")
|
||||
}
|
||||
|
||||
// We never create a `&F` from a `&LazyLock<T, F>` so it is fine
|
||||
// to not impl `Sync` for `F`.
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
|
||||
unsafe impl<T: Sync + Send, F: Send> Sync for LazyLock<T, F> {}
|
||||
// auto-derived `Send` impl is OK.
|
||||
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
|
||||
impl<T: RefUnwindSafe + UnwindSafe, F: UnwindSafe> RefUnwindSafe for LazyLock<T, F> {}
|
||||
#[stable(feature = "lazy_cell", since = "1.80.0")]
|
||||
impl<T: UnwindSafe, F: UnwindSafe> UnwindSafe for LazyLock<T, F> {}
|
||||
@@ -1,569 +0,0 @@
|
||||
//! Bounded channel based on a preallocated array.
|
||||
//!
|
||||
//! This flavor has a fixed, positive capacity.
|
||||
//!
|
||||
//! The implementation is based on Dmitry Vyukov's bounded MPMC queue.
|
||||
//!
|
||||
//! Source:
|
||||
//! - <http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue>
|
||||
//! - <https://docs.google.com/document/d/1yIAYmbvL3JxOKOjuCyon7JhW4cSv1wy5hC0ApeGMV9s/pub>
|
||||
|
||||
use super::context::Context;
|
||||
use super::error::*;
|
||||
use super::select::{Operation, Selected, Token};
|
||||
use super::utils::{Backoff, CachePadded};
|
||||
use super::waker::SyncWaker;
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::mem::MaybeUninit;
|
||||
use crate::ptr;
|
||||
use crate::sync::atomic::{self, Atomic, AtomicUsize, Ordering};
|
||||
use crate::time::Instant;
|
||||
|
||||
/// A slot in a channel.
|
||||
struct Slot<T> {
|
||||
/// The current stamp.
|
||||
stamp: Atomic<usize>,
|
||||
|
||||
/// The message in this slot. Either read out in `read` or dropped through
|
||||
/// `discard_all_messages`.
|
||||
msg: UnsafeCell<MaybeUninit<T>>,
|
||||
}
|
||||
|
||||
/// The token type for the array flavor.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct ArrayToken {
|
||||
/// Slot to read from or write to.
|
||||
slot: *const u8,
|
||||
|
||||
/// Stamp to store into the slot after reading or writing.
|
||||
stamp: usize,
|
||||
}
|
||||
|
||||
impl Default for ArrayToken {
|
||||
#[inline]
|
||||
fn default() -> Self {
|
||||
ArrayToken { slot: ptr::null(), stamp: 0 }
|
||||
}
|
||||
}
|
||||
|
||||
/// Bounded channel based on a preallocated array.
|
||||
pub(crate) struct Channel<T> {
|
||||
/// The head of the channel.
|
||||
///
|
||||
/// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
|
||||
/// packed into a single `usize`. The lower bits represent the index, while the upper bits
|
||||
/// represent the lap. The mark bit in the head is always zero.
|
||||
///
|
||||
/// Messages are popped from the head of the channel.
|
||||
head: CachePadded<Atomic<usize>>,
|
||||
|
||||
/// The tail of the channel.
|
||||
///
|
||||
/// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
|
||||
/// packed into a single `usize`. The lower bits represent the index, while the upper bits
|
||||
/// represent the lap. The mark bit indicates that the channel is disconnected.
|
||||
///
|
||||
/// Messages are pushed into the tail of the channel.
|
||||
tail: CachePadded<Atomic<usize>>,
|
||||
|
||||
/// The buffer holding slots.
|
||||
buffer: Box<[Slot<T>]>,
|
||||
|
||||
/// The channel capacity.
|
||||
cap: usize,
|
||||
|
||||
/// A stamp with the value of `{ lap: 1, mark: 0, index: 0 }`.
|
||||
one_lap: usize,
|
||||
|
||||
/// If this bit is set in the tail, that means the channel is disconnected.
|
||||
mark_bit: usize,
|
||||
|
||||
/// Senders waiting while the channel is full.
|
||||
senders: SyncWaker,
|
||||
|
||||
/// Receivers waiting while the channel is empty and not disconnected.
|
||||
receivers: SyncWaker,
|
||||
}
|
||||
|
||||
impl<T> Channel<T> {
|
||||
/// Creates a bounded channel of capacity `cap`.
|
||||
pub(crate) fn with_capacity(cap: usize) -> Self {
|
||||
assert!(cap > 0, "capacity must be positive");
|
||||
|
||||
// Compute constants `mark_bit` and `one_lap`.
|
||||
let mark_bit = (cap + 1).next_power_of_two();
|
||||
let one_lap = mark_bit * 2;
|
||||
|
||||
// Head is initialized to `{ lap: 0, mark: 0, index: 0 }`.
|
||||
let head = 0;
|
||||
// Tail is initialized to `{ lap: 0, mark: 0, index: 0 }`.
|
||||
let tail = 0;
|
||||
|
||||
// Allocate a buffer of `cap` slots initialized
|
||||
// with stamps.
|
||||
let buffer: Box<[Slot<T>]> = (0..cap)
|
||||
.map(|i| {
|
||||
// Set the stamp to `{ lap: 0, mark: 0, index: i }`.
|
||||
Slot { stamp: AtomicUsize::new(i), msg: UnsafeCell::new(MaybeUninit::uninit()) }
|
||||
})
|
||||
.collect();
|
||||
|
||||
Channel {
|
||||
buffer,
|
||||
cap,
|
||||
one_lap,
|
||||
mark_bit,
|
||||
head: CachePadded::new(AtomicUsize::new(head)),
|
||||
tail: CachePadded::new(AtomicUsize::new(tail)),
|
||||
senders: SyncWaker::new(),
|
||||
receivers: SyncWaker::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempts to reserve a slot for sending a message.
|
||||
fn start_send(&self, token: &mut Token) -> bool {
|
||||
let backoff = Backoff::new();
|
||||
let mut tail = self.tail.load(Ordering::Relaxed);
|
||||
|
||||
loop {
|
||||
// Check if the channel is disconnected.
|
||||
if tail & self.mark_bit != 0 {
|
||||
token.array.slot = ptr::null();
|
||||
token.array.stamp = 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Deconstruct the tail.
|
||||
let index = tail & (self.mark_bit - 1);
|
||||
let lap = tail & !(self.one_lap - 1);
|
||||
|
||||
// Inspect the corresponding slot.
|
||||
debug_assert!(index < self.buffer.len());
|
||||
let slot = unsafe { self.buffer.get_unchecked(index) };
|
||||
let stamp = slot.stamp.load(Ordering::Acquire);
|
||||
|
||||
// If the tail and the stamp match, we may attempt to push.
|
||||
if tail == stamp {
|
||||
let new_tail = if index + 1 < self.cap {
|
||||
// Same lap, incremented index.
|
||||
// Set to `{ lap: lap, mark: 0, index: index + 1 }`.
|
||||
tail + 1
|
||||
} else {
|
||||
// One lap forward, index wraps around to zero.
|
||||
// Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
|
||||
lap.wrapping_add(self.one_lap)
|
||||
};
|
||||
|
||||
// Try moving the tail.
|
||||
match self.tail.compare_exchange_weak(
|
||||
tail,
|
||||
new_tail,
|
||||
Ordering::SeqCst,
|
||||
Ordering::Relaxed,
|
||||
) {
|
||||
Ok(_) => {
|
||||
// Prepare the token for the follow-up call to `write`.
|
||||
token.array.slot = slot as *const Slot<T> as *const u8;
|
||||
token.array.stamp = tail + 1;
|
||||
return true;
|
||||
}
|
||||
Err(_) => {
|
||||
backoff.spin_light();
|
||||
tail = self.tail.load(Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
} else if stamp.wrapping_add(self.one_lap) == tail + 1 {
|
||||
atomic::fence(Ordering::SeqCst);
|
||||
let head = self.head.load(Ordering::Relaxed);
|
||||
|
||||
// If the head lags one lap behind the tail as well...
|
||||
if head.wrapping_add(self.one_lap) == tail {
|
||||
// ...then the channel is full.
|
||||
return false;
|
||||
}
|
||||
|
||||
backoff.spin_light();
|
||||
tail = self.tail.load(Ordering::Relaxed);
|
||||
} else {
|
||||
// Snooze because we need to wait for the stamp to get updated.
|
||||
backoff.spin_heavy();
|
||||
tail = self.tail.load(Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Writes a message into the channel.
|
||||
pub(crate) unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
|
||||
// If there is no slot, the channel is disconnected.
|
||||
if token.array.slot.is_null() {
|
||||
return Err(msg);
|
||||
}
|
||||
|
||||
// Write the message into the slot and update the stamp.
|
||||
unsafe {
|
||||
let slot: &Slot<T> = &*(token.array.slot as *const Slot<T>);
|
||||
slot.msg.get().write(MaybeUninit::new(msg));
|
||||
slot.stamp.store(token.array.stamp, Ordering::Release);
|
||||
}
|
||||
|
||||
// Wake a sleeping receiver.
|
||||
self.receivers.notify();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Attempts to reserve a slot for receiving a message.
|
||||
fn start_recv(&self, token: &mut Token) -> bool {
|
||||
let backoff = Backoff::new();
|
||||
let mut head = self.head.load(Ordering::Relaxed);
|
||||
|
||||
loop {
|
||||
// Deconstruct the head.
|
||||
let index = head & (self.mark_bit - 1);
|
||||
let lap = head & !(self.one_lap - 1);
|
||||
|
||||
// Inspect the corresponding slot.
|
||||
debug_assert!(index < self.buffer.len());
|
||||
let slot = unsafe { self.buffer.get_unchecked(index) };
|
||||
let stamp = slot.stamp.load(Ordering::Acquire);
|
||||
|
||||
// If the stamp is ahead of the head by 1, we may attempt to pop.
|
||||
if head + 1 == stamp {
|
||||
let new = if index + 1 < self.cap {
|
||||
// Same lap, incremented index.
|
||||
// Set to `{ lap: lap, mark: 0, index: index + 1 }`.
|
||||
head + 1
|
||||
} else {
|
||||
// One lap forward, index wraps around to zero.
|
||||
// Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
|
||||
lap.wrapping_add(self.one_lap)
|
||||
};
|
||||
|
||||
// Try moving the head.
|
||||
match self.head.compare_exchange_weak(
|
||||
head,
|
||||
new,
|
||||
Ordering::SeqCst,
|
||||
Ordering::Relaxed,
|
||||
) {
|
||||
Ok(_) => {
|
||||
// Prepare the token for the follow-up call to `read`.
|
||||
token.array.slot = slot as *const Slot<T> as *const u8;
|
||||
token.array.stamp = head.wrapping_add(self.one_lap);
|
||||
return true;
|
||||
}
|
||||
Err(_) => {
|
||||
backoff.spin_light();
|
||||
head = self.head.load(Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
} else if stamp == head {
|
||||
atomic::fence(Ordering::SeqCst);
|
||||
let tail = self.tail.load(Ordering::Relaxed);
|
||||
|
||||
// If the tail equals the head, that means the channel is empty.
|
||||
if (tail & !self.mark_bit) == head {
|
||||
// If the channel is disconnected...
|
||||
if tail & self.mark_bit != 0 {
|
||||
// ...then receive an error.
|
||||
token.array.slot = ptr::null();
|
||||
token.array.stamp = 0;
|
||||
return true;
|
||||
} else {
|
||||
// Otherwise, the receive operation is not ready.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
backoff.spin_light();
|
||||
head = self.head.load(Ordering::Relaxed);
|
||||
} else {
|
||||
// Snooze because we need to wait for the stamp to get updated.
|
||||
backoff.spin_heavy();
|
||||
head = self.head.load(Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Reads a message from the channel.
|
||||
pub(crate) unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
|
||||
if token.array.slot.is_null() {
|
||||
// The channel is disconnected.
|
||||
return Err(());
|
||||
}
|
||||
|
||||
// Read the message from the slot and update the stamp.
|
||||
let msg = unsafe {
|
||||
let slot: &Slot<T> = &*(token.array.slot as *const Slot<T>);
|
||||
|
||||
let msg = slot.msg.get().read().assume_init();
|
||||
slot.stamp.store(token.array.stamp, Ordering::Release);
|
||||
msg
|
||||
};
|
||||
|
||||
// Wake a sleeping sender.
|
||||
self.senders.notify();
|
||||
Ok(msg)
|
||||
}
|
||||
|
||||
/// Attempts to send a message into the channel.
|
||||
pub(crate) fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
|
||||
let token = &mut Token::default();
|
||||
if self.start_send(token) {
|
||||
unsafe { self.write(token, msg).map_err(TrySendError::Disconnected) }
|
||||
} else {
|
||||
Err(TrySendError::Full(msg))
|
||||
}
|
||||
}
|
||||
|
||||
/// Sends a message into the channel.
|
||||
pub(crate) fn send(
|
||||
&self,
|
||||
msg: T,
|
||||
deadline: Option<Instant>,
|
||||
) -> Result<(), SendTimeoutError<T>> {
|
||||
let token = &mut Token::default();
|
||||
loop {
|
||||
// Try sending a message.
|
||||
if self.start_send(token) {
|
||||
let res = unsafe { self.write(token, msg) };
|
||||
return res.map_err(SendTimeoutError::Disconnected);
|
||||
}
|
||||
|
||||
if let Some(d) = deadline {
|
||||
if Instant::now() >= d {
|
||||
return Err(SendTimeoutError::Timeout(msg));
|
||||
}
|
||||
}
|
||||
|
||||
Context::with(|cx| {
|
||||
// Prepare for blocking until a receiver wakes us up.
|
||||
let oper = Operation::hook(token);
|
||||
self.senders.register(oper, cx);
|
||||
|
||||
// Has the channel become ready just now?
|
||||
if !self.is_full() || self.is_disconnected() {
|
||||
let _ = cx.try_select(Selected::Aborted);
|
||||
}
|
||||
|
||||
// Block the current thread.
|
||||
// SAFETY: the context belongs to the current thread.
|
||||
let sel = unsafe { cx.wait_until(deadline) };
|
||||
|
||||
match sel {
|
||||
Selected::Waiting => unreachable!(),
|
||||
Selected::Aborted | Selected::Disconnected => {
|
||||
self.senders.unregister(oper).unwrap();
|
||||
}
|
||||
Selected::Operation(_) => {}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempts to receive a message without blocking.
|
||||
pub(crate) fn try_recv(&self) -> Result<T, TryRecvError> {
|
||||
let token = &mut Token::default();
|
||||
|
||||
if self.start_recv(token) {
|
||||
unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
|
||||
} else {
|
||||
Err(TryRecvError::Empty)
|
||||
}
|
||||
}
|
||||
|
||||
/// Receives a message from the channel.
|
||||
pub(crate) fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
|
||||
let token = &mut Token::default();
|
||||
loop {
|
||||
// Try receiving a message.
|
||||
if self.start_recv(token) {
|
||||
let res = unsafe { self.read(token) };
|
||||
return res.map_err(|_| RecvTimeoutError::Disconnected);
|
||||
}
|
||||
|
||||
if let Some(d) = deadline {
|
||||
if Instant::now() >= d {
|
||||
return Err(RecvTimeoutError::Timeout);
|
||||
}
|
||||
}
|
||||
|
||||
Context::with(|cx| {
|
||||
// Prepare for blocking until a sender wakes us up.
|
||||
let oper = Operation::hook(token);
|
||||
self.receivers.register(oper, cx);
|
||||
|
||||
// Has the channel become ready just now?
|
||||
if !self.is_empty() || self.is_disconnected() {
|
||||
let _ = cx.try_select(Selected::Aborted);
|
||||
}
|
||||
|
||||
// Block the current thread.
|
||||
// SAFETY: the context belongs to the current thread.
|
||||
let sel = unsafe { cx.wait_until(deadline) };
|
||||
|
||||
match sel {
|
||||
Selected::Waiting => unreachable!(),
|
||||
Selected::Aborted | Selected::Disconnected => {
|
||||
self.receivers.unregister(oper).unwrap();
|
||||
// If the channel was disconnected, we still have to check for remaining
|
||||
// messages.
|
||||
}
|
||||
Selected::Operation(_) => {}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the current number of messages inside the channel.
|
||||
pub(crate) fn len(&self) -> usize {
|
||||
loop {
|
||||
// Load the tail, then load the head.
|
||||
let tail = self.tail.load(Ordering::SeqCst);
|
||||
let head = self.head.load(Ordering::SeqCst);
|
||||
|
||||
// If the tail didn't change, we've got consistent values to work with.
|
||||
if self.tail.load(Ordering::SeqCst) == tail {
|
||||
let hix = head & (self.mark_bit - 1);
|
||||
let tix = tail & (self.mark_bit - 1);
|
||||
|
||||
return if hix < tix {
|
||||
tix - hix
|
||||
} else if hix > tix {
|
||||
self.cap - hix + tix
|
||||
} else if (tail & !self.mark_bit) == head {
|
||||
0
|
||||
} else {
|
||||
self.cap
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the capacity of the channel.
|
||||
#[allow(clippy::unnecessary_wraps)] // This is intentional.
|
||||
pub(crate) fn capacity(&self) -> Option<usize> {
|
||||
Some(self.cap)
|
||||
}
|
||||
|
||||
/// Disconnects senders and wakes up all blocked receivers.
|
||||
///
|
||||
/// Returns `true` if this call disconnected the channel.
|
||||
pub(crate) fn disconnect_senders(&self) -> bool {
|
||||
let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst);
|
||||
|
||||
if tail & self.mark_bit == 0 {
|
||||
self.receivers.disconnect();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Disconnects receivers and wakes up all blocked senders.
|
||||
///
|
||||
/// Returns `true` if this call disconnected the channel.
|
||||
///
|
||||
/// # Safety
|
||||
/// May only be called once upon dropping the last receiver. The
|
||||
/// destruction of all other receivers must have been observed with acquire
|
||||
/// ordering or stronger.
|
||||
pub(crate) unsafe fn disconnect_receivers(&self) -> bool {
|
||||
let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst);
|
||||
let disconnected = if tail & self.mark_bit == 0 {
|
||||
self.senders.disconnect();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
unsafe { self.discard_all_messages(tail) };
|
||||
disconnected
|
||||
}
|
||||
|
||||
/// Discards all messages.
|
||||
///
|
||||
/// `tail` should be the current (and therefore last) value of `tail`.
|
||||
///
|
||||
/// # Panicking
|
||||
/// If a destructor panics, the remaining messages are leaked, matching the
|
||||
/// behavior of the unbounded channel.
|
||||
///
|
||||
/// # Safety
|
||||
/// This method must only be called when dropping the last receiver. The
|
||||
/// destruction of all other receivers must have been observed with acquire
|
||||
/// ordering or stronger.
|
||||
unsafe fn discard_all_messages(&self, tail: usize) {
|
||||
debug_assert!(self.is_disconnected());
|
||||
|
||||
// Only receivers modify `head`, so since we are the last one,
|
||||
// this value will not change and will not be observed (since
|
||||
// no new messages can be sent after disconnection).
|
||||
let mut head = self.head.load(Ordering::Relaxed);
|
||||
let tail = tail & !self.mark_bit;
|
||||
|
||||
let backoff = Backoff::new();
|
||||
loop {
|
||||
// Deconstruct the head.
|
||||
let index = head & (self.mark_bit - 1);
|
||||
let lap = head & !(self.one_lap - 1);
|
||||
|
||||
// Inspect the corresponding slot.
|
||||
debug_assert!(index < self.buffer.len());
|
||||
let slot = unsafe { self.buffer.get_unchecked(index) };
|
||||
let stamp = slot.stamp.load(Ordering::Acquire);
|
||||
|
||||
// If the stamp is ahead of the head by 1, we may drop the message.
|
||||
if head + 1 == stamp {
|
||||
head = if index + 1 < self.cap {
|
||||
// Same lap, incremented index.
|
||||
// Set to `{ lap: lap, mark: 0, index: index + 1 }`.
|
||||
head + 1
|
||||
} else {
|
||||
// One lap forward, index wraps around to zero.
|
||||
// Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
|
||||
lap.wrapping_add(self.one_lap)
|
||||
};
|
||||
|
||||
unsafe {
|
||||
(*slot.msg.get()).assume_init_drop();
|
||||
}
|
||||
// If the tail equals the head, that means the channel is empty.
|
||||
} else if tail == head {
|
||||
return;
|
||||
// Otherwise, a sender is about to write into the slot, so we need
|
||||
// to wait for it to update the stamp.
|
||||
} else {
|
||||
backoff.spin_heavy();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if the channel is disconnected.
|
||||
pub(crate) fn is_disconnected(&self) -> bool {
|
||||
self.tail.load(Ordering::SeqCst) & self.mark_bit != 0
|
||||
}
|
||||
|
||||
/// Returns `true` if the channel is empty.
|
||||
pub(crate) fn is_empty(&self) -> bool {
|
||||
let head = self.head.load(Ordering::SeqCst);
|
||||
let tail = self.tail.load(Ordering::SeqCst);
|
||||
|
||||
// Is the tail equal to the head?
|
||||
//
|
||||
// Note: If the head changes just before we load the tail, that means there was a moment
|
||||
// when the channel was not empty, so it is safe to just return `false`.
|
||||
(tail & !self.mark_bit) == head
|
||||
}
|
||||
|
||||
/// Returns `true` if the channel is full.
|
||||
pub(crate) fn is_full(&self) -> bool {
|
||||
let tail = self.tail.load(Ordering::SeqCst);
|
||||
let head = self.head.load(Ordering::SeqCst);
|
||||
|
||||
// Is the head lagging one lap behind tail?
|
||||
//
|
||||
// Note: If the tail changes just before we load the head, that means there was a moment
|
||||
// when the channel was not full, so it is safe to just return `false`.
|
||||
head.wrapping_add(self.one_lap) == tail & !self.mark_bit
|
||||
}
|
||||
}
|
||||
@@ -1,159 +0,0 @@
|
||||
//! Thread-local channel context.
|
||||
|
||||
use super::select::Selected;
|
||||
use super::waker::current_thread_id;
|
||||
use crate::cell::Cell;
|
||||
use crate::ptr;
|
||||
use alloc_crate::sync::Arc;
|
||||
use crate::sync::atomic::{Atomic, AtomicPtr, AtomicUsize, Ordering};
|
||||
use crate::thread::{self, Thread};
|
||||
use crate::time::Instant;
|
||||
|
||||
/// Thread-local context.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Context {
|
||||
inner: Arc<Inner>,
|
||||
}
|
||||
|
||||
/// Inner representation of `Context`.
|
||||
#[derive(Debug)]
|
||||
struct Inner {
|
||||
/// Selected operation.
|
||||
select: Atomic<usize>,
|
||||
|
||||
/// A slot into which another thread may store a pointer to its `Packet`.
|
||||
packet: Atomic<*mut ()>,
|
||||
|
||||
/// Thread handle.
|
||||
thread: Thread,
|
||||
|
||||
/// Thread id.
|
||||
thread_id: usize,
|
||||
}
|
||||
|
||||
impl Context {
|
||||
/// Creates a new context for the duration of the closure.
|
||||
#[inline]
|
||||
pub fn with<F, R>(f: F) -> R
|
||||
where
|
||||
F: FnOnce(&Context) -> R,
|
||||
{
|
||||
thread_local! {
|
||||
/// Cached thread-local context.
|
||||
static CONTEXT: Cell<Option<Context>> = Cell::new(Some(Context::new()));
|
||||
}
|
||||
|
||||
let mut f = Some(f);
|
||||
let mut f = |cx: &Context| -> R {
|
||||
let f = f.take().unwrap();
|
||||
f(cx)
|
||||
};
|
||||
|
||||
CONTEXT
|
||||
.try_with(|cell| match cell.take() {
|
||||
None => f(&Context::new()),
|
||||
Some(cx) => {
|
||||
cx.reset();
|
||||
let res = f(&cx);
|
||||
cell.set(Some(cx));
|
||||
res
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|_| f(&Context::new()))
|
||||
}
|
||||
|
||||
/// Creates a new `Context`.
|
||||
#[cold]
|
||||
fn new() -> Context {
|
||||
Context {
|
||||
inner: Arc::new(Inner {
|
||||
select: AtomicUsize::new(Selected::Waiting.into()),
|
||||
packet: AtomicPtr::new(ptr::null_mut()),
|
||||
thread: thread::current_or_unnamed(),
|
||||
thread_id: current_thread_id(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Resets `select` and `packet`.
|
||||
#[inline]
|
||||
fn reset(&self) {
|
||||
self.inner.select.store(Selected::Waiting.into(), Ordering::Release);
|
||||
self.inner.packet.store(ptr::null_mut(), Ordering::Release);
|
||||
}
|
||||
|
||||
/// Attempts to select an operation.
|
||||
///
|
||||
/// On failure, the previously selected operation is returned.
|
||||
#[inline]
|
||||
pub fn try_select(&self, select: Selected) -> Result<(), Selected> {
|
||||
self.inner
|
||||
.select
|
||||
.compare_exchange(
|
||||
Selected::Waiting.into(),
|
||||
select.into(),
|
||||
Ordering::AcqRel,
|
||||
Ordering::Acquire,
|
||||
)
|
||||
.map(|_| ())
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
|
||||
/// Stores a packet.
|
||||
///
|
||||
/// This method must be called after `try_select` succeeds and there is a packet to provide.
|
||||
#[inline]
|
||||
pub fn store_packet(&self, packet: *mut ()) {
|
||||
if !packet.is_null() {
|
||||
self.inner.packet.store(packet, Ordering::Release);
|
||||
}
|
||||
}
|
||||
|
||||
/// Waits until an operation is selected and returns it.
|
||||
///
|
||||
/// If the deadline is reached, `Selected::Aborted` will be selected.
|
||||
///
|
||||
/// # Safety
|
||||
/// This may only be called from the thread this `Context` belongs to.
|
||||
#[inline]
|
||||
pub unsafe fn wait_until(&self, deadline: Option<Instant>) -> Selected {
|
||||
loop {
|
||||
// Check whether an operation has been selected.
|
||||
let sel = Selected::from(self.inner.select.load(Ordering::Acquire));
|
||||
if sel != Selected::Waiting {
|
||||
return sel;
|
||||
}
|
||||
|
||||
// If there's a deadline, park the current thread until the deadline is reached.
|
||||
if let Some(end) = deadline {
|
||||
let now = Instant::now();
|
||||
|
||||
if now < end {
|
||||
// SAFETY: guaranteed by caller.
|
||||
unsafe { self.inner.thread.park_timeout(end - now) };
|
||||
} else {
|
||||
// The deadline has been reached. Try aborting select.
|
||||
return match self.try_select(Selected::Aborted) {
|
||||
Ok(()) => Selected::Aborted,
|
||||
Err(s) => s,
|
||||
};
|
||||
}
|
||||
} else {
|
||||
// SAFETY: guaranteed by caller.
|
||||
unsafe { self.inner.thread.park() };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Unparks the thread this context belongs to.
|
||||
#[inline]
|
||||
pub fn unpark(&self) {
|
||||
self.inner.thread.unpark();
|
||||
}
|
||||
|
||||
/// Returns the id of the thread this context belongs to.
|
||||
#[inline]
|
||||
pub fn thread_id(&self) -> usize {
|
||||
self.inner.thread_id
|
||||
}
|
||||
}
|
||||
@@ -1,136 +0,0 @@
|
||||
use crate::sync::atomic::{Atomic, AtomicBool, AtomicUsize, Ordering};
|
||||
use crate::{ops, process};
|
||||
|
||||
/// Reference counter internals.
|
||||
struct Counter<C> {
|
||||
/// The number of senders associated with the channel.
|
||||
senders: Atomic<usize>,
|
||||
|
||||
/// The number of receivers associated with the channel.
|
||||
receivers: Atomic<usize>,
|
||||
|
||||
/// Set to `true` if the last sender or the last receiver reference deallocates the channel.
|
||||
destroy: Atomic<bool>,
|
||||
|
||||
/// The internal channel.
|
||||
chan: C,
|
||||
}
|
||||
|
||||
/// Wraps a channel into the reference counter.
|
||||
pub(crate) fn new<C>(chan: C) -> (Sender<C>, Receiver<C>) {
|
||||
let counter = Box::into_raw(Box::new(Counter {
|
||||
senders: AtomicUsize::new(1),
|
||||
receivers: AtomicUsize::new(1),
|
||||
destroy: AtomicBool::new(false),
|
||||
chan,
|
||||
}));
|
||||
let s = Sender { counter };
|
||||
let r = Receiver { counter };
|
||||
(s, r)
|
||||
}
|
||||
|
||||
/// The sending side.
|
||||
pub(crate) struct Sender<C> {
|
||||
counter: *mut Counter<C>,
|
||||
}
|
||||
|
||||
impl<C> Sender<C> {
|
||||
/// Returns the internal `Counter`.
|
||||
fn counter(&self) -> &Counter<C> {
|
||||
unsafe { &*self.counter }
|
||||
}
|
||||
|
||||
/// Acquires another sender reference.
|
||||
pub(crate) fn acquire(&self) -> Sender<C> {
|
||||
let count = self.counter().senders.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
// Cloning senders and calling `mem::forget` on the clones could potentially overflow the
|
||||
// counter. It's very difficult to recover sensibly from such degenerate scenarios so we
|
||||
// just abort when the count becomes very large.
|
||||
if count > isize::MAX as usize {
|
||||
process::abort();
|
||||
}
|
||||
|
||||
Sender { counter: self.counter }
|
||||
}
|
||||
|
||||
/// Releases the sender reference.
|
||||
///
|
||||
/// Function `disconnect` will be called if this is the last sender reference.
|
||||
pub(crate) unsafe fn release<F: FnOnce(&C) -> bool>(&self, disconnect: F) {
|
||||
if self.counter().senders.fetch_sub(1, Ordering::AcqRel) == 1 {
|
||||
disconnect(&self.counter().chan);
|
||||
|
||||
if self.counter().destroy.swap(true, Ordering::AcqRel) {
|
||||
drop(unsafe { Box::from_raw(self.counter) });
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> ops::Deref for Sender<C> {
|
||||
type Target = C;
|
||||
|
||||
fn deref(&self) -> &C {
|
||||
&self.counter().chan
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> PartialEq for Sender<C> {
|
||||
fn eq(&self, other: &Sender<C>) -> bool {
|
||||
self.counter == other.counter
|
||||
}
|
||||
}
|
||||
|
||||
/// The receiving side.
|
||||
pub(crate) struct Receiver<C> {
|
||||
counter: *mut Counter<C>,
|
||||
}
|
||||
|
||||
impl<C> Receiver<C> {
|
||||
/// Returns the internal `Counter`.
|
||||
fn counter(&self) -> &Counter<C> {
|
||||
unsafe { &*self.counter }
|
||||
}
|
||||
|
||||
/// Acquires another receiver reference.
|
||||
pub(crate) fn acquire(&self) -> Receiver<C> {
|
||||
let count = self.counter().receivers.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
// Cloning receivers and calling `mem::forget` on the clones could potentially overflow the
|
||||
// counter. It's very difficult to recover sensibly from such degenerate scenarios so we
|
||||
// just abort when the count becomes very large.
|
||||
if count > isize::MAX as usize {
|
||||
process::abort();
|
||||
}
|
||||
|
||||
Receiver { counter: self.counter }
|
||||
}
|
||||
|
||||
/// Releases the receiver reference.
|
||||
///
|
||||
/// Function `disconnect` will be called if this is the last receiver reference.
|
||||
pub(crate) unsafe fn release<F: FnOnce(&C) -> bool>(&self, disconnect: F) {
|
||||
if self.counter().receivers.fetch_sub(1, Ordering::AcqRel) == 1 {
|
||||
disconnect(&self.counter().chan);
|
||||
|
||||
if self.counter().destroy.swap(true, Ordering::AcqRel) {
|
||||
drop(unsafe { Box::from_raw(self.counter) });
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> ops::Deref for Receiver<C> {
|
||||
type Target = C;
|
||||
|
||||
fn deref(&self) -> &C {
|
||||
&self.counter().chan
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> PartialEq for Receiver<C> {
|
||||
fn eq(&self, other: &Receiver<C>) -> bool {
|
||||
self.counter == other.counter
|
||||
}
|
||||
}
|
||||
@@ -1,49 +0,0 @@
|
||||
pub use crate::sync::mpsc::{RecvError, RecvTimeoutError, SendError, TryRecvError, TrySendError};
|
||||
use crate::{error, fmt};
|
||||
|
||||
/// An error returned from the [`send_timeout`] method.
|
||||
///
|
||||
/// The error contains the message being sent so it can be recovered.
|
||||
///
|
||||
/// [`send_timeout`]: super::Sender::send_timeout
|
||||
#[derive(PartialEq, Eq, Clone, Copy)]
|
||||
#[unstable(feature = "mpmc_channel", issue = "126840")]
|
||||
pub enum SendTimeoutError<T> {
|
||||
/// The message could not be sent because the channel is full and the operation timed out.
|
||||
///
|
||||
/// If this is a zero-capacity channel, then the error indicates that there was no receiver
|
||||
/// available to receive the message and the operation timed out.
|
||||
Timeout(T),
|
||||
|
||||
/// The message could not be sent because the channel is disconnected.
|
||||
Disconnected(T),
|
||||
}
|
||||
|
||||
#[unstable(feature = "mpmc_channel", issue = "126840")]
|
||||
impl<T> fmt::Debug for SendTimeoutError<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
"SendTimeoutError(..)".fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "mpmc_channel", issue = "126840")]
|
||||
impl<T> fmt::Display for SendTimeoutError<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
SendTimeoutError::Timeout(..) => "timed out waiting on send operation".fmt(f),
|
||||
SendTimeoutError::Disconnected(..) => "sending on a disconnected channel".fmt(f),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "mpmc_channel", issue = "126840")]
|
||||
impl<T> error::Error for SendTimeoutError<T> {}
|
||||
|
||||
#[unstable(feature = "mpmc_channel", issue = "126840")]
|
||||
impl<T> From<SendError<T>> for SendTimeoutError<T> {
|
||||
fn from(err: SendError<T>) -> SendTimeoutError<T> {
|
||||
match err {
|
||||
SendError(e) => SendTimeoutError::Disconnected(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,668 +0,0 @@
|
||||
//! Unbounded channel implemented as a linked list.
|
||||
|
||||
use super::context::Context;
|
||||
use super::error::*;
|
||||
use super::select::{Operation, Selected, Token};
|
||||
use super::utils::{Backoff, CachePadded};
|
||||
use super::waker::SyncWaker;
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::marker::PhantomData;
|
||||
use crate::mem::MaybeUninit;
|
||||
use crate::ptr;
|
||||
use crate::sync::atomic::{self, Atomic, AtomicPtr, AtomicUsize, Ordering};
|
||||
use crate::time::Instant;
|
||||
|
||||
// Bits indicating the state of a slot:
|
||||
// * If a message has been written into the slot, `WRITE` is set.
|
||||
// * If a message has been read from the slot, `READ` is set.
|
||||
// * If the block is being destroyed, `DESTROY` is set.
|
||||
const WRITE: usize = 1;
|
||||
const READ: usize = 2;
|
||||
const DESTROY: usize = 4;
|
||||
|
||||
// Each block covers one "lap" of indices.
|
||||
const LAP: usize = 32;
|
||||
// The maximum number of messages a block can hold.
|
||||
const BLOCK_CAP: usize = LAP - 1;
|
||||
// How many lower bits are reserved for metadata.
|
||||
const SHIFT: usize = 1;
|
||||
// Has two different purposes:
|
||||
// * If set in head, indicates that the block is not the last one.
|
||||
// * If set in tail, indicates that the channel is disconnected.
|
||||
const MARK_BIT: usize = 1;
|
||||
|
||||
/// A slot in a block.
|
||||
struct Slot<T> {
|
||||
/// The message.
|
||||
msg: UnsafeCell<MaybeUninit<T>>,
|
||||
|
||||
/// The state of the slot.
|
||||
state: Atomic<usize>,
|
||||
}
|
||||
|
||||
impl<T> Slot<T> {
|
||||
/// Waits until a message is written into the slot.
|
||||
fn wait_write(&self) {
|
||||
let backoff = Backoff::new();
|
||||
while self.state.load(Ordering::Acquire) & WRITE == 0 {
|
||||
backoff.spin_heavy();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A block in a linked list.
|
||||
///
|
||||
/// Each block in the list can hold up to `BLOCK_CAP` messages.
|
||||
struct Block<T> {
|
||||
/// The next block in the linked list.
|
||||
next: Atomic<*mut Block<T>>,
|
||||
|
||||
/// Slots for messages.
|
||||
slots: [Slot<T>; BLOCK_CAP],
|
||||
}
|
||||
|
||||
impl<T> Block<T> {
|
||||
/// Creates an empty block.
|
||||
fn new() -> Box<Block<T>> {
|
||||
// SAFETY: This is safe because:
|
||||
// [1] `Block::next` (Atomic<*mut _>) may be safely zero initialized.
|
||||
// [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4].
|
||||
// [3] `Slot::msg` (UnsafeCell) may be safely zero initialized because it
|
||||
// holds a MaybeUninit.
|
||||
// [4] `Slot::state` (Atomic<usize>) may be safely zero initialized.
|
||||
unsafe { Box::new_zeroed().assume_init() }
|
||||
}
|
||||
|
||||
/// Waits until the next pointer is set.
|
||||
fn wait_next(&self) -> *mut Block<T> {
|
||||
let backoff = Backoff::new();
|
||||
loop {
|
||||
let next = self.next.load(Ordering::Acquire);
|
||||
if !next.is_null() {
|
||||
return next;
|
||||
}
|
||||
backoff.spin_heavy();
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the `DESTROY` bit in slots starting from `start` and destroys the block.
|
||||
unsafe fn destroy(this: *mut Block<T>, start: usize) {
|
||||
// It is not necessary to set the `DESTROY` bit in the last slot because that slot has
|
||||
// begun destruction of the block.
|
||||
for i in start..BLOCK_CAP - 1 {
|
||||
let slot = unsafe { (*this).slots.get_unchecked(i) };
|
||||
|
||||
// Mark the `DESTROY` bit if a thread is still using the slot.
|
||||
if slot.state.load(Ordering::Acquire) & READ == 0
|
||||
&& slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0
|
||||
{
|
||||
// If a thread is still using the slot, it will continue destruction of the block.
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// No thread is using the block, now it is safe to destroy it.
|
||||
drop(unsafe { Box::from_raw(this) });
|
||||
}
|
||||
}
|
||||
|
||||
/// A position in a channel.
|
||||
#[derive(Debug)]
|
||||
struct Position<T> {
|
||||
/// The index in the channel.
|
||||
index: Atomic<usize>,
|
||||
|
||||
/// The block in the linked list.
|
||||
block: Atomic<*mut Block<T>>,
|
||||
}
|
||||
|
||||
/// The token type for the list flavor.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct ListToken {
|
||||
/// The block of slots.
|
||||
block: *const u8,
|
||||
|
||||
/// The offset into the block.
|
||||
offset: usize,
|
||||
}
|
||||
|
||||
impl Default for ListToken {
|
||||
#[inline]
|
||||
fn default() -> Self {
|
||||
ListToken { block: ptr::null(), offset: 0 }
|
||||
}
|
||||
}
|
||||
|
||||
/// Unbounded channel implemented as a linked list.
|
||||
///
|
||||
/// Each message sent into the channel is assigned a sequence number, i.e. an index. Indices are
|
||||
/// represented as numbers of type `usize` and wrap on overflow.
|
||||
///
|
||||
/// Consecutive messages are grouped into blocks in order to put less pressure on the allocator and
|
||||
/// improve cache efficiency.
|
||||
pub(crate) struct Channel<T> {
|
||||
/// The head of the channel.
|
||||
head: CachePadded<Position<T>>,
|
||||
|
||||
/// The tail of the channel.
|
||||
tail: CachePadded<Position<T>>,
|
||||
|
||||
/// Receivers waiting while the channel is empty and not disconnected.
|
||||
receivers: SyncWaker,
|
||||
|
||||
/// Indicates that dropping a `Channel<T>` may drop messages of type `T`.
|
||||
_marker: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T> Channel<T> {
|
||||
/// Creates a new unbounded channel.
|
||||
pub(crate) fn new() -> Self {
|
||||
Channel {
|
||||
head: CachePadded::new(Position {
|
||||
block: AtomicPtr::new(ptr::null_mut()),
|
||||
index: AtomicUsize::new(0),
|
||||
}),
|
||||
tail: CachePadded::new(Position {
|
||||
block: AtomicPtr::new(ptr::null_mut()),
|
||||
index: AtomicUsize::new(0),
|
||||
}),
|
||||
receivers: SyncWaker::new(),
|
||||
_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempts to reserve a slot for sending a message.
|
||||
fn start_send(&self, token: &mut Token) -> bool {
|
||||
let backoff = Backoff::new();
|
||||
let mut tail = self.tail.index.load(Ordering::Acquire);
|
||||
let mut block = self.tail.block.load(Ordering::Acquire);
|
||||
let mut next_block = None;
|
||||
|
||||
loop {
|
||||
// Check if the channel is disconnected.
|
||||
if tail & MARK_BIT != 0 {
|
||||
token.list.block = ptr::null();
|
||||
return true;
|
||||
}
|
||||
|
||||
// Calculate the offset of the index into the block.
|
||||
let offset = (tail >> SHIFT) % LAP;
|
||||
|
||||
// If we reached the end of the block, wait until the next one is installed.
|
||||
if offset == BLOCK_CAP {
|
||||
backoff.spin_heavy();
|
||||
tail = self.tail.index.load(Ordering::Acquire);
|
||||
block = self.tail.block.load(Ordering::Acquire);
|
||||
continue;
|
||||
}
|
||||
|
||||
// If we're going to have to install the next block, allocate it in advance in order to
|
||||
// make the wait for other threads as short as possible.
|
||||
if offset + 1 == BLOCK_CAP && next_block.is_none() {
|
||||
next_block = Some(Block::<T>::new());
|
||||
}
|
||||
|
||||
// If this is the first message to be sent into the channel, we need to allocate the
|
||||
// first block and install it.
|
||||
if block.is_null() {
|
||||
let new = Box::into_raw(Block::<T>::new());
|
||||
|
||||
if self
|
||||
.tail
|
||||
.block
|
||||
.compare_exchange(block, new, Ordering::Release, Ordering::Relaxed)
|
||||
.is_ok()
|
||||
{
|
||||
// This yield point leaves the channel in a half-initialized state where the
|
||||
// tail.block pointer is set but the head.block is not. This is used to
|
||||
// facilitate the test in src/tools/miri/tests/pass/issues/issue-139553.rs
|
||||
#[cfg(miri)]
|
||||
crate::thread::yield_now();
|
||||
self.head.block.store(new, Ordering::Release);
|
||||
block = new;
|
||||
} else {
|
||||
next_block = unsafe { Some(Box::from_raw(new)) };
|
||||
tail = self.tail.index.load(Ordering::Acquire);
|
||||
block = self.tail.block.load(Ordering::Acquire);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
let new_tail = tail + (1 << SHIFT);
|
||||
|
||||
// Try advancing the tail forward.
|
||||
match self.tail.index.compare_exchange_weak(
|
||||
tail,
|
||||
new_tail,
|
||||
Ordering::SeqCst,
|
||||
Ordering::Acquire,
|
||||
) {
|
||||
Ok(_) => unsafe {
|
||||
// If we've reached the end of the block, install the next one.
|
||||
if offset + 1 == BLOCK_CAP {
|
||||
let next_block = Box::into_raw(next_block.unwrap());
|
||||
self.tail.block.store(next_block, Ordering::Release);
|
||||
self.tail.index.fetch_add(1 << SHIFT, Ordering::Release);
|
||||
(*block).next.store(next_block, Ordering::Release);
|
||||
}
|
||||
|
||||
token.list.block = block as *const u8;
|
||||
token.list.offset = offset;
|
||||
return true;
|
||||
},
|
||||
Err(_) => {
|
||||
backoff.spin_light();
|
||||
tail = self.tail.index.load(Ordering::Acquire);
|
||||
block = self.tail.block.load(Ordering::Acquire);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Writes a message into the channel.
|
||||
pub(crate) unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
|
||||
// If there is no slot, the channel is disconnected.
|
||||
if token.list.block.is_null() {
|
||||
return Err(msg);
|
||||
}
|
||||
|
||||
// Write the message into the slot.
|
||||
let block = token.list.block as *mut Block<T>;
|
||||
let offset = token.list.offset;
|
||||
unsafe {
|
||||
let slot = (*block).slots.get_unchecked(offset);
|
||||
slot.msg.get().write(MaybeUninit::new(msg));
|
||||
slot.state.fetch_or(WRITE, Ordering::Release);
|
||||
}
|
||||
|
||||
// Wake a sleeping receiver.
|
||||
self.receivers.notify();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Attempts to reserve a slot for receiving a message.
///
/// Returns `true` when `token.list` has been filled in — either with the
/// reserved slot, or with a null block meaning the channel is disconnected
/// and drained. Returns `false` when the channel is currently empty but
/// still connected.
fn start_recv(&self, token: &mut Token) -> bool {
    let backoff = Backoff::new();
    let mut head = self.head.index.load(Ordering::Acquire);
    let mut block = self.head.block.load(Ordering::Acquire);

    loop {
        // Calculate the offset of the index into the block.
        let offset = (head >> SHIFT) % LAP;

        // If we reached the end of the block, wait until the next one is installed.
        if offset == BLOCK_CAP {
            backoff.spin_heavy();
            head = self.head.index.load(Ordering::Acquire);
            block = self.head.block.load(Ordering::Acquire);
            continue;
        }

        let mut new_head = head + (1 << SHIFT);

        if new_head & MARK_BIT == 0 {
            atomic::fence(Ordering::SeqCst);
            let tail = self.tail.index.load(Ordering::Relaxed);

            // If the tail equals the head, that means the channel is empty.
            if head >> SHIFT == tail >> SHIFT {
                // If the channel is disconnected...
                if tail & MARK_BIT != 0 {
                    // ...then receive an error.
                    token.list.block = ptr::null();
                    return true;
                } else {
                    // Otherwise, the receive operation is not ready.
                    return false;
                }
            }

            // If head and tail are not in the same block, set `MARK_BIT` in head.
            if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP {
                new_head |= MARK_BIT;
            }
        }

        // The block can be null here only if the first message is being sent into the channel.
        // In that case, just wait until it gets initialized.
        if block.is_null() {
            backoff.spin_heavy();
            head = self.head.index.load(Ordering::Acquire);
            block = self.head.block.load(Ordering::Acquire);
            continue;
        }

        // Try moving the head index forward.
        match self.head.index.compare_exchange_weak(
            head,
            new_head,
            Ordering::SeqCst,
            Ordering::Acquire,
        ) {
            Ok(_) => unsafe {
                // If we've reached the end of the block, move to the next one.
                if offset + 1 == BLOCK_CAP {
                    let next = (*block).wait_next();
                    // Position the head at the start of the next block,
                    // carrying the mark bit over if yet another block is
                    // already linked in.
                    let mut next_index = (new_head & !MARK_BIT).wrapping_add(1 << SHIFT);
                    if !(*next).next.load(Ordering::Relaxed).is_null() {
                        next_index |= MARK_BIT;
                    }

                    self.head.block.store(next, Ordering::Release);
                    self.head.index.store(next_index, Ordering::Release);
                }

                token.list.block = block as *const u8;
                token.list.offset = offset;
                return true;
            },
            Err(_) => {
                // Lost the race against another receiver; reload and retry.
                backoff.spin_light();
                head = self.head.index.load(Ordering::Acquire);
                block = self.head.block.load(Ordering::Acquire);
            }
        }
    }
}
|
||||
|
||||
/// Reads a message from the channel.
///
/// # Safety
///
/// `token` must have been initialized by a successful `start_recv`, so that
/// `token.list` refers to a slot whose message is being (or has been)
/// written by a sender.
pub(crate) unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
    if token.list.block.is_null() {
        // The channel is disconnected.
        return Err(());
    }

    // Read the message.
    let block = token.list.block as *mut Block<T>;
    let offset = token.list.offset;
    unsafe {
        let slot = (*block).slots.get_unchecked(offset);
        // The sender may still be writing the message; spin until the
        // WRITE flag is set on the slot.
        slot.wait_write();
        let msg = slot.msg.get().read().assume_init();

        // Destroy the block if we've reached the end, or if another thread wanted to destroy but
        // couldn't because we were busy reading from the slot.
        if offset + 1 == BLOCK_CAP {
            Block::destroy(block, 0);
        } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 {
            Block::destroy(block, offset + 1);
        }

        Ok(msg)
    }
}
|
||||
|
||||
/// Attempts to send a message into the channel.
|
||||
pub(crate) fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
|
||||
self.send(msg, None).map_err(|err| match err {
|
||||
SendTimeoutError::Disconnected(msg) => TrySendError::Disconnected(msg),
|
||||
SendTimeoutError::Timeout(_) => unreachable!(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Sends a message into the channel.
|
||||
pub(crate) fn send(
|
||||
&self,
|
||||
msg: T,
|
||||
_deadline: Option<Instant>,
|
||||
) -> Result<(), SendTimeoutError<T>> {
|
||||
let token = &mut Token::default();
|
||||
assert!(self.start_send(token));
|
||||
unsafe { self.write(token, msg).map_err(SendTimeoutError::Disconnected) }
|
||||
}
|
||||
|
||||
/// Attempts to receive a message without blocking.
|
||||
pub(crate) fn try_recv(&self) -> Result<T, TryRecvError> {
|
||||
let token = &mut Token::default();
|
||||
|
||||
if self.start_recv(token) {
|
||||
unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
|
||||
} else {
|
||||
Err(TryRecvError::Empty)
|
||||
}
|
||||
}
|
||||
|
||||
/// Receives a message from the channel.
///
/// Blocks until a message arrives, the channel is disconnected, or the
/// optional `deadline` passes (in which case `RecvTimeoutError::Timeout`
/// is returned).
pub(crate) fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
    let token = &mut Token::default();
    loop {
        // Fast path: a message (or a disconnection notice) is already available.
        if self.start_recv(token) {
            unsafe {
                return self.read(token).map_err(|_| RecvTimeoutError::Disconnected);
            }
        }

        if let Some(d) = deadline {
            if Instant::now() >= d {
                return Err(RecvTimeoutError::Timeout);
            }
        }

        // Prepare for blocking until a sender wakes us up.
        Context::with(|cx| {
            let oper = Operation::hook(token);
            self.receivers.register(oper, cx);

            // Has the channel become ready just now?
            if !self.is_empty() || self.is_disconnected() {
                let _ = cx.try_select(Selected::Aborted);
            }

            // Block the current thread.
            // SAFETY: the context belongs to the current thread.
            let sel = unsafe { cx.wait_until(deadline) };

            match sel {
                Selected::Waiting => unreachable!(),
                Selected::Aborted | Selected::Disconnected => {
                    self.receivers.unregister(oper).unwrap();
                    // If the channel was disconnected, we still have to check for remaining
                    // messages.
                }
                Selected::Operation(_) => {}
            }
        });
    }
}
|
||||
|
||||
/// Returns the current number of messages inside the channel.
///
/// Loops until a consistent snapshot of the head and tail indices is
/// observed, then derives the count from their distance (excluding the
/// per-block boundary slots).
pub(crate) fn len(&self) -> usize {
    loop {
        // Load the tail index, then load the head index.
        let mut tail = self.tail.index.load(Ordering::SeqCst);
        let mut head = self.head.index.load(Ordering::SeqCst);

        // If the tail index didn't change, we've got consistent indices to work with.
        if self.tail.index.load(Ordering::SeqCst) == tail {
            // Erase the lower bits.
            tail &= !((1 << SHIFT) - 1);
            head &= !((1 << SHIFT) - 1);

            // Fix up indices if they fall onto block ends.
            if (tail >> SHIFT) & (LAP - 1) == LAP - 1 {
                tail = tail.wrapping_add(1 << SHIFT);
            }
            if (head >> SHIFT) & (LAP - 1) == LAP - 1 {
                head = head.wrapping_add(1 << SHIFT);
            }

            // Rotate indices so that head falls into the first block.
            let lap = (head >> SHIFT) / LAP;
            tail = tail.wrapping_sub((lap * LAP) << SHIFT);
            head = head.wrapping_sub((lap * LAP) << SHIFT);

            // Remove the lower bits.
            tail >>= SHIFT;
            head >>= SHIFT;

            // Return the difference minus the number of blocks between tail and head.
            return tail - head - tail / LAP;
        }
    }
}
|
||||
|
||||
/// Returns the capacity of the channel.
|
||||
pub(crate) fn capacity(&self) -> Option<usize> {
|
||||
None
|
||||
}
|
||||
|
||||
/// Disconnects senders and wakes up all blocked receivers.
|
||||
///
|
||||
/// Returns `true` if this call disconnected the channel.
|
||||
pub(crate) fn disconnect_senders(&self) -> bool {
|
||||
let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst);
|
||||
|
||||
if tail & MARK_BIT == 0 {
|
||||
self.receivers.disconnect();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Disconnects receivers.
|
||||
///
|
||||
/// Returns `true` if this call disconnected the channel.
|
||||
pub(crate) fn disconnect_receivers(&self) -> bool {
|
||||
let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst);
|
||||
|
||||
if tail & MARK_BIT == 0 {
|
||||
// If receivers are dropped first, discard all messages to free
|
||||
// memory eagerly.
|
||||
self.discard_all_messages();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Discards all messages.
///
/// This method should only be called when all receivers are dropped.
fn discard_all_messages(&self) {
    let backoff = Backoff::new();
    let mut tail = self.tail.index.load(Ordering::Acquire);
    // Wait for any sender sitting exactly at a block boundary to finish
    // installing the next block, so the tail offset becomes stable.
    loop {
        let offset = (tail >> SHIFT) % LAP;
        if offset != BLOCK_CAP {
            break;
        }

        // New updates to tail will be rejected by MARK_BIT and aborted unless it's
        // at boundary. We need to wait for the updates take affect otherwise there
        // can be memory leaks.
        backoff.spin_heavy();
        tail = self.tail.index.load(Ordering::Acquire);
    }

    let mut head = self.head.index.load(Ordering::Acquire);
    // The channel may be uninitialized, so we have to swap to avoid overwriting any sender's attempts
    // to initialize the first block before noticing that the receivers disconnected. Late allocations
    // will be deallocated by the sender in Drop.
    let mut block = self.head.block.swap(ptr::null_mut(), Ordering::AcqRel);

    // If we're going to be dropping messages we need to synchronize with initialization
    if head >> SHIFT != tail >> SHIFT {
        // The block can be null here only if a sender is in the process of initializing the
        // channel while another sender managed to send a message by inserting it into the
        // semi-initialized channel and advanced the tail.
        // In that case, just wait until it gets initialized.
        while block.is_null() {
            backoff.spin_heavy();
            block = self.head.block.swap(ptr::null_mut(), Ordering::AcqRel);
        }
    }
    // After this point `head.block` is not modified again and it will be deallocated if it's
    // non-null. The `Drop` code of the channel, which runs after this function, also attempts
    // to deallocate `head.block` if it's non-null. Therefore this function must maintain the
    // invariant that if a deallocation of head.block is attempted then it must also be set to
    // NULL. Failing to do so will lead to the Drop code attempting a double free. For this
    // reason both reads above do an atomic swap instead of a simple atomic load.

    // SAFETY: all receivers are gone, so no other thread reads these slots,
    // and `wait_write` ensures each message was fully written before it is
    // dropped here.
    unsafe {
        // Drop all messages between head and tail and deallocate the heap-allocated blocks.
        while head >> SHIFT != tail >> SHIFT {
            let offset = (head >> SHIFT) % LAP;

            if offset < BLOCK_CAP {
                // Drop the message in the slot.
                let slot = (*block).slots.get_unchecked(offset);
                slot.wait_write();
                let p = &mut *slot.msg.get();
                p.as_mut_ptr().drop_in_place();
            } else {
                (*block).wait_next();
                // Deallocate the block and move to the next one.
                let next = (*block).next.load(Ordering::Acquire);
                drop(Box::from_raw(block));
                block = next;
            }

            head = head.wrapping_add(1 << SHIFT);
        }

        // Deallocate the last remaining block.
        if !block.is_null() {
            drop(Box::from_raw(block));
        }
    }

    head &= !MARK_BIT;
    self.head.index.store(head, Ordering::Release);
}
|
||||
|
||||
/// Returns `true` if the channel is disconnected.
|
||||
pub(crate) fn is_disconnected(&self) -> bool {
|
||||
self.tail.index.load(Ordering::SeqCst) & MARK_BIT != 0
|
||||
}
|
||||
|
||||
/// Returns `true` if the channel is empty.
|
||||
pub(crate) fn is_empty(&self) -> bool {
|
||||
let head = self.head.index.load(Ordering::SeqCst);
|
||||
let tail = self.tail.index.load(Ordering::SeqCst);
|
||||
head >> SHIFT == tail >> SHIFT
|
||||
}
|
||||
|
||||
/// Returns `true` if the channel is full.
|
||||
pub(crate) fn is_full(&self) -> bool {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Channel<T> {
    /// Drops every undelivered message and frees all heap-allocated blocks.
    ///
    /// By the time `Drop` runs there are no other references to the channel,
    /// so plain `Relaxed` loads of the indices/block pointer are sufficient.
    fn drop(&mut self) {
        let mut head = self.head.index.load(Ordering::Relaxed);
        let mut tail = self.tail.index.load(Ordering::Relaxed);
        let mut block = self.head.block.load(Ordering::Relaxed);

        // Erase the lower bits.
        head &= !((1 << SHIFT) - 1);
        tail &= !((1 << SHIFT) - 1);

        // SAFETY: we have exclusive access; every slot between head and tail
        // holds an initialized message.
        unsafe {
            // Drop all messages between head and tail and deallocate the heap-allocated blocks.
            while head != tail {
                let offset = (head >> SHIFT) % LAP;

                if offset < BLOCK_CAP {
                    // Drop the message in the slot.
                    let slot = (*block).slots.get_unchecked(offset);
                    let p = &mut *slot.msg.get();
                    p.as_mut_ptr().drop_in_place();
                } else {
                    // Deallocate the block and move to the next one.
                    let next = (*block).next.load(Ordering::Relaxed);
                    drop(Box::from_raw(block));
                    block = next;
                }

                head = head.wrapping_add(1 << SHIFT);
            }

            // Deallocate the last remaining block.
            if !block.is_null() {
                drop(Box::from_raw(block));
            }
        }
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,71 +0,0 @@
|
||||
/// Temporary data that gets initialized during a blocking operation, and is consumed by
/// `read` or `write`.
///
/// Each field contains data associated with a specific channel flavor.
#[derive(Debug, Default)]
pub struct Token {
    // Token for the bounded, array-based flavor.
    pub(crate) array: super::array::ArrayToken,
    // Token for the unbounded, linked-list flavor.
    pub(crate) list: super::list::ListToken,
    // Token for the zero-capacity (rendezvous) flavor.
    #[allow(dead_code)]
    pub(crate) zero: super::zero::ZeroToken,
}
|
||||
|
||||
/// Identifier associated with an operation by a specific thread on a specific channel.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Operation(usize);

impl Operation {
    /// Creates an operation identifier from a mutable reference.
    ///
    /// This function essentially just turns the address of the reference into a number. The
    /// reference should point to a variable that is specific to the thread and the operation,
    /// and is alive for the entire duration of a blocking operation.
    #[inline]
    pub fn hook<T>(r: &mut T) -> Operation {
        let val = r as *mut T as usize;
        // Make sure that the pointer address doesn't equal the numerical representation of
        // `Selected::{Waiting, Aborted, Disconnected}`.
        assert!(val > 2);
        Operation(val)
    }
}

/// Current state of a blocking operation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Selected {
    /// Still waiting for an operation.
    Waiting,

    /// The attempt to block the current thread has been aborted.
    Aborted,

    /// An operation became ready because a channel is disconnected.
    Disconnected,

    /// An operation became ready because a message can be sent or received.
    Operation(Operation),
}

impl From<usize> for Selected {
    /// Decodes a `Selected` from its integer representation; values above 2
    /// are operation addresses (see `Operation::hook`).
    #[inline]
    fn from(val: usize) -> Selected {
        match val {
            0 => Selected::Waiting,
            1 => Selected::Aborted,
            2 => Selected::Disconnected,
            oper => Selected::Operation(Operation(oper)),
        }
    }
}

// Implementing `From` (rather than a manual `Into`, which clippy's
// `from_over_into` lint flags) still provides `Into<usize> for Selected`
// automatically through the standard blanket impl, so existing `.into()`
// call sites keep working.
impl From<Selected> for usize {
    /// Encodes a `Selected` into the integer representation decoded by
    /// `From<usize> for Selected`.
    #[inline]
    fn from(sel: Selected) -> usize {
        match sel {
            Selected::Waiting => 0,
            Selected::Aborted => 1,
            Selected::Disconnected => 2,
            Selected::Operation(Operation(val)) => val,
        }
    }
}
|
||||
@@ -1,14 +0,0 @@
|
||||
// Ensure that thread_local init with `const { 0 }` still has unique address at run-time
#[test]
fn waker_current_thread_id() {
    // Id captured on the spawning thread.
    let first = super::waker::current_thread_id();
    let t = crate::thread::spawn(move || {
        // A different thread must observe a different thread-local address...
        let second = super::waker::current_thread_id();
        assert_ne!(first, second);
        // ...and that id must be stable within the thread.
        assert_eq!(second, super::waker::current_thread_id());
    });

    // The spawning thread's id is stable before, during, and after the
    // child thread runs.
    assert_eq!(first, super::waker::current_thread_id());
    t.join().unwrap();
    assert_eq!(first, super::waker::current_thread_id());
}
|
||||
@@ -1,137 +0,0 @@
|
||||
use crate::cell::Cell;
|
||||
use crate::ops::{Deref, DerefMut};
|
||||
|
||||
/// Pads and aligns a value to the length of a cache line.
///
/// Giving each hot value its own cache line prevents false sharing between
/// values updated by different threads.
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache
// lines at a time, so we have to align to 128 bytes rather than 64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
//
// ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size.
//
// Sources:
// - https://www.mono-project.com/news/2016/09/12/arm64-icache/
//
// powerpc64 has 128-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9
#[cfg_attr(
    any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64",),
    repr(align(128))
)]
// arm, mips and mips64 have 32-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9
#[cfg_attr(
    any(
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips32r6",
        target_arch = "mips64",
        target_arch = "mips64r6",
    ),
    repr(align(32))
)]
// s390x has 256-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7
#[cfg_attr(target_arch = "s390x", repr(align(256)))]
// x86, wasm and riscv have 64-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7
// - https://github.com/golang/go/blob/5e31f78c8a4ed1b872ddc194f0cd1ae931b37d7e/src/internal/cpu/cpu_riscv64.go#L7
//
// All others are assumed to have 64-byte cache line size.
#[cfg_attr(
    not(any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips32r6",
        target_arch = "mips64",
        target_arch = "mips64r6",
        target_arch = "s390x",
    )),
    repr(align(64))
)]
pub struct CachePadded<T> {
    value: T,
}

impl<T> CachePadded<T> {
    /// Wraps `value` in a padded, cache-line-aligned cell.
    pub fn new(value: T) -> CachePadded<T> {
        Self { value }
    }
}

impl<T> Deref for CachePadded<T> {
    type Target = T;

    /// Borrows the wrapped value.
    fn deref(&self) -> &T {
        &self.value
    }
}

impl<T> DerefMut for CachePadded<T> {
    /// Mutably borrows the wrapped value.
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}
|
||||
|
||||
// Cap on the spin exponent: `spin_light` spins at most `SPIN_LIMIT²`
// iterations, and `spin_heavy` switches to yielding past this step count.
const SPIN_LIMIT: u32 = 6;

/// Performs quadratic backoff in spin loops.
pub struct Backoff {
    // Number of backoff rounds performed so far; the spin count grows as
    // `step²` (see `spin_light`/`spin_heavy`).
    step: Cell<u32>,
}
|
||||
|
||||
impl Backoff {
|
||||
/// Creates a new `Backoff`.
|
||||
pub fn new() -> Self {
|
||||
Backoff { step: Cell::new(0) }
|
||||
}
|
||||
|
||||
/// Backs off using lightweight spinning.
|
||||
///
|
||||
/// This method should be used for retrying an operation because another thread made
|
||||
/// progress. i.e. on CAS failure.
|
||||
#[inline]
|
||||
pub fn spin_light(&self) {
|
||||
let step = self.step.get().min(SPIN_LIMIT);
|
||||
for _ in 0..step.pow(2) {
|
||||
crate::hint::spin_loop();
|
||||
}
|
||||
|
||||
self.step.set(self.step.get() + 1);
|
||||
}
|
||||
|
||||
/// Backs off using heavyweight spinning.
|
||||
///
|
||||
/// This method should be used in blocking loops where parking the thread is not an option.
|
||||
#[inline]
|
||||
pub fn spin_heavy(&self) {
|
||||
if self.step.get() <= SPIN_LIMIT {
|
||||
for _ in 0..self.step.get().pow(2) {
|
||||
crate::hint::spin_loop()
|
||||
}
|
||||
} else {
|
||||
crate::thread::yield_now();
|
||||
}
|
||||
|
||||
self.step.set(self.step.get() + 1);
|
||||
}
|
||||
}
|
||||
@@ -1,209 +0,0 @@
|
||||
//! Waking mechanism for threads blocked on channel operations.
|
||||
|
||||
use super::context::Context;
|
||||
use super::select::{Operation, Selected};
|
||||
use crate::ptr;
|
||||
use crate::sync::Mutex;
|
||||
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
|
||||
|
||||
/// Represents a thread blocked on a specific channel operation.
pub(crate) struct Entry {
    /// The operation.
    pub(crate) oper: Operation,

    /// Optional packet.
    // Null when the flavor does not exchange a packet (see `Waker::register`).
    pub(crate) packet: *mut (),

    /// Context associated with the thread owning this operation.
    pub(crate) cx: Context,
}
|
||||
|
||||
/// A queue of threads blocked on channel operations.
///
/// This data structure is used by threads to register blocking operations and get woken up once
/// an operation becomes ready.
pub(crate) struct Waker {
    /// A list of select operations.
    selectors: Vec<Entry>,

    /// A list of operations waiting to be ready.
    // Drained wholesale on every `notify` rather than selected individually.
    observers: Vec<Entry>,
}
|
||||
|
||||
impl Waker {
    /// Creates a new `Waker`.
    #[inline]
    pub(crate) fn new() -> Self {
        Waker { selectors: Vec::new(), observers: Vec::new() }
    }

    /// Registers a select operation.
    #[inline]
    pub(crate) fn register(&mut self, oper: Operation, cx: &Context) {
        // No packet is exchanged for this flavor, hence the null pointer.
        self.register_with_packet(oper, ptr::null_mut(), cx);
    }

    /// Registers a select operation and a packet.
    #[inline]
    pub(crate) fn register_with_packet(&mut self, oper: Operation, packet: *mut (), cx: &Context) {
        self.selectors.push(Entry { oper, packet, cx: cx.clone() });
    }

    /// Unregisters a select operation.
    ///
    /// Returns the removed entry, or `None` if `oper` was not registered.
    #[inline]
    pub(crate) fn unregister(&mut self, oper: Operation) -> Option<Entry> {
        if let Some((i, _)) =
            self.selectors.iter().enumerate().find(|&(_, entry)| entry.oper == oper)
        {
            let entry = self.selectors.remove(i);
            Some(entry)
        } else {
            None
        }
    }

    /// Attempts to find another thread's entry, select the operation, and wake it up.
    #[inline]
    pub(crate) fn try_select(&mut self) -> Option<Entry> {
        if self.selectors.is_empty() {
            None
        } else {
            let thread_id = current_thread_id();

            self.selectors
                .iter()
                .position(|selector| {
                    // Does the entry belong to a different thread?
                    selector.cx.thread_id() != thread_id
                        && selector // Try selecting this operation.
                            .cx
                            .try_select(Selected::Operation(selector.oper))
                            .is_ok()
                        && {
                            // Provide the packet.
                            selector.cx.store_packet(selector.packet);
                            // Wake the thread up.
                            selector.cx.unpark();
                            true
                        }
                })
                // Remove the entry from the queue to keep it clean and improve
                // performance.
                .map(|pos| self.selectors.remove(pos))
        }
    }

    /// Notifies all operations waiting to be ready.
    #[inline]
    pub(crate) fn notify(&mut self) {
        for entry in self.observers.drain(..) {
            if entry.cx.try_select(Selected::Operation(entry.oper)).is_ok() {
                entry.cx.unpark();
            }
        }
    }

    /// Notifies all registered operations that the channel is disconnected.
    #[inline]
    pub(crate) fn disconnect(&mut self) {
        for entry in self.selectors.iter() {
            if entry.cx.try_select(Selected::Disconnected).is_ok() {
                // Wake the thread up.
                //
                // Here we don't remove the entry from the queue. Registered threads must
                // unregister from the waker by themselves. They might also want to recover the
                // packet value and destroy it, if necessary.
                entry.cx.unpark();
            }
        }

        // Observers, by contrast, are drained here.
        self.notify();
    }
}
|
||||
|
||||
impl Drop for Waker {
    #[inline]
    fn drop(&mut self) {
        // Every registered operation must have been unregistered (or drained
        // by `notify`) before the waker is destroyed; a leftover entry would
        // reference a thread that can never be woken.
        debug_assert_eq!(self.selectors.len(), 0);
        debug_assert_eq!(self.observers.len(), 0);
    }
}
|
||||
|
||||
/// A waker that can be shared among threads without locking.
///
/// This is a simple wrapper around `Waker` that internally uses a mutex for synchronization.
pub(crate) struct SyncWaker {
    /// The inner `Waker`.
    inner: Mutex<Waker>,

    /// `true` if the waker is empty.
    // Lock-free hint that lets `notify` skip taking the mutex on the
    // common no-waiters path; kept in sync under the lock.
    is_empty: Atomic<bool>,
}
|
||||
|
||||
impl SyncWaker {
    /// Creates a new `SyncWaker`.
    #[inline]
    pub(crate) fn new() -> Self {
        SyncWaker { inner: Mutex::new(Waker::new()), is_empty: AtomicBool::new(true) }
    }

    /// Registers the current thread with an operation.
    #[inline]
    pub(crate) fn register(&self, oper: Operation, cx: &Context) {
        let mut inner = self.inner.lock().unwrap();
        inner.register(oper, cx);
        // Keep the lock-free emptiness hint in sync with the guarded lists.
        self.is_empty
            .store(inner.selectors.is_empty() && inner.observers.is_empty(), Ordering::SeqCst);
    }

    /// Unregisters an operation previously registered by the current thread.
    #[inline]
    pub(crate) fn unregister(&self, oper: Operation) -> Option<Entry> {
        let mut inner = self.inner.lock().unwrap();
        let entry = inner.unregister(oper);
        self.is_empty
            .store(inner.selectors.is_empty() && inner.observers.is_empty(), Ordering::SeqCst);
        entry
    }

    /// Attempts to find one thread (not the current one), select its operation, and wake it up.
    #[inline]
    pub(crate) fn notify(&self) {
        // Fast path: skip taking the lock when nobody is registered. The
        // flag is re-checked under the lock before performing any wakeups.
        if !self.is_empty.load(Ordering::SeqCst) {
            let mut inner = self.inner.lock().unwrap();
            if !self.is_empty.load(Ordering::SeqCst) {
                inner.try_select();
                inner.notify();
                self.is_empty.store(
                    inner.selectors.is_empty() && inner.observers.is_empty(),
                    Ordering::SeqCst,
                );
            }
        }
    }

    /// Notifies all threads that the channel is disconnected.
    #[inline]
    pub(crate) fn disconnect(&self) {
        let mut inner = self.inner.lock().unwrap();
        inner.disconnect();
        self.is_empty
            .store(inner.selectors.is_empty() && inner.observers.is_empty(), Ordering::SeqCst);
    }
}
|
||||
|
||||
impl Drop for SyncWaker {
    #[inline]
    fn drop(&mut self) {
        // By now every registered thread must have unregistered itself.
        debug_assert!(self.is_empty.load(Ordering::SeqCst));
    }
}
}
|
||||
|
||||
/// Returns a unique id for the current thread.
///
/// The id is the address of a thread-local variable: unique among live
/// threads and stable for the lifetime of the calling thread.
#[inline]
pub fn current_thread_id() -> usize {
    // `u8` is not drop so this variable will be available during thread destruction,
    // whereas `thread::current()` would not be
    thread_local! { static DUMMY: u8 = const { 0 } }
    DUMMY.with(|x| (x as *const u8).addr())
}
|
||||
@@ -1,319 +0,0 @@
|
||||
//! Zero-capacity channel.
|
||||
//!
|
||||
//! This kind of channel is also known as *rendezvous* channel.
|
||||
|
||||
use super::context::Context;
|
||||
use super::error::*;
|
||||
use super::select::{Operation, Selected, Token};
|
||||
use super::utils::Backoff;
|
||||
use super::waker::Waker;
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::marker::PhantomData;
|
||||
use crate::sync::Mutex;
|
||||
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
|
||||
use crate::time::Instant;
|
||||
use crate::{fmt, ptr};
|
||||
|
||||
/// A pointer to a packet.
pub(crate) struct ZeroToken(*mut ());

impl Default for ZeroToken {
    /// A token that does not point at any packet yet.
    fn default() -> Self {
        Self(ptr::null_mut())
    }
}

impl fmt::Debug for ZeroToken {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Print the raw address as an integer rather than following the pointer.
        write!(f, "{:?}", self.0 as usize)
    }
}
|
||||
|
||||
/// A slot for passing one message from a sender to a receiver.
struct Packet<T> {
    /// Equals `true` if the packet is allocated on the stack.
    // Stack packets live in the blocked thread's frame; heap packets are
    // freed by whichever side finishes the exchange (see `read`).
    on_stack: bool,

    /// Equals `true` once the packet is ready for reading or writing.
    // Written with `Release` and read with `Acquire` to publish `msg`.
    ready: Atomic<bool>,

    /// The message.
    msg: UnsafeCell<Option<T>>,
}
|
||||
|
||||
impl<T> Packet<T> {
    /// Creates an empty packet on the stack.
    fn empty_on_stack() -> Packet<T> {
        Packet { on_stack: true, ready: AtomicBool::new(false), msg: UnsafeCell::new(None) }
    }

    /// Creates a packet on the stack, containing a message.
    fn message_on_stack(msg: T) -> Packet<T> {
        Packet { on_stack: true, ready: AtomicBool::new(false), msg: UnsafeCell::new(Some(msg)) }
    }

    /// Waits until the packet becomes ready for reading or writing.
    fn wait_ready(&self) {
        let backoff = Backoff::new();
        // `Acquire` pairs with the `Release` store of `ready`, so the message
        // written before the flag was set is visible once the loop exits.
        while !self.ready.load(Ordering::Acquire) {
            backoff.spin_heavy();
        }
    }
}
|
||||
|
||||
/// Inner representation of a zero-capacity channel.
// All fields are protected by the `Mutex` in `Channel::inner`.
struct Inner {
    /// Senders waiting to pair up with a receive operation.
    senders: Waker,

    /// Receivers waiting to pair up with a send operation.
    receivers: Waker,

    /// Equals `true` when the channel is disconnected.
    is_disconnected: bool,
}
|
||||
|
||||
/// Zero-capacity channel.
// Also known as a *rendezvous* channel: every send must pair up with a
// receive before either side can proceed.
pub(crate) struct Channel<T> {
    /// Inner representation of the channel.
    inner: Mutex<Inner>,

    /// Indicates that dropping a `Channel<T>` may drop values of type `T`.
    _marker: PhantomData<T>,
}
|
||||
|
||||
impl<T> Channel<T> {
|
||||
/// Constructs a new zero-capacity channel.
|
||||
pub(crate) fn new() -> Self {
|
||||
Channel {
|
||||
inner: Mutex::new(Inner {
|
||||
senders: Waker::new(),
|
||||
receivers: Waker::new(),
|
||||
is_disconnected: false,
|
||||
}),
|
||||
_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Writes a message into the packet.
///
/// # Safety
///
/// `token.zero` must hold a packet pointer obtained from a paired receive
/// operation (or null if the channel is disconnected).
pub(crate) unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
    // If there is no packet, the channel is disconnected.
    if token.zero.0.is_null() {
        return Err(msg);
    }

    unsafe {
        let packet = &*(token.zero.0 as *const Packet<T>);
        packet.msg.get().write(Some(msg));
        // `Release` publishes the message before `ready` becomes observable
        // to the receiver spinning in `wait_ready`.
        packet.ready.store(true, Ordering::Release);
    }
    Ok(())
}
|
||||
|
||||
/// Reads a message from the packet.
///
/// # Safety
///
/// `token.zero` must hold a packet pointer obtained from a paired send
/// operation (or null if the channel is disconnected).
pub(crate) unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
    // If there is no packet, the channel is disconnected.
    if token.zero.0.is_null() {
        return Err(());
    }

    let packet = unsafe { &*(token.zero.0 as *const Packet<T>) };

    if packet.on_stack {
        // The message has been in the packet from the beginning, so there is no need to wait
        // for it. However, after reading the message, we need to set `ready` to `true` in
        // order to signal that the packet can be destroyed.
        let msg = unsafe { packet.msg.get().replace(None) }.unwrap();
        packet.ready.store(true, Ordering::Release);
        Ok(msg)
    } else {
        // Wait until the message becomes available, then read it and destroy the
        // heap-allocated packet.
        packet.wait_ready();
        unsafe {
            let msg = packet.msg.get().replace(None).unwrap();
            drop(Box::from_raw(token.zero.0 as *mut Packet<T>));
            Ok(msg)
        }
    }
}
|
||||
|
||||
/// Attempts to send a message into the channel.
|
||||
pub(crate) fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
|
||||
let token = &mut Token::default();
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
// If there's a waiting receiver, pair up with it.
|
||||
if let Some(operation) = inner.receivers.try_select() {
|
||||
token.zero.0 = operation.packet;
|
||||
drop(inner);
|
||||
unsafe {
|
||||
self.write(token, msg).ok().unwrap();
|
||||
}
|
||||
Ok(())
|
||||
} else if inner.is_disconnected {
|
||||
Err(TrySendError::Disconnected(msg))
|
||||
} else {
|
||||
Err(TrySendError::Full(msg))
|
||||
}
|
||||
}
|
||||
|
||||
/// Sends a message into the channel.
|
||||
pub(crate) fn send(
|
||||
&self,
|
||||
msg: T,
|
||||
deadline: Option<Instant>,
|
||||
) -> Result<(), SendTimeoutError<T>> {
|
||||
let token = &mut Token::default();
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
// If there's a waiting receiver, pair up with it.
|
||||
if let Some(operation) = inner.receivers.try_select() {
|
||||
token.zero.0 = operation.packet;
|
||||
drop(inner);
|
||||
unsafe {
|
||||
self.write(token, msg).ok().unwrap();
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if inner.is_disconnected {
|
||||
return Err(SendTimeoutError::Disconnected(msg));
|
||||
}
|
||||
|
||||
Context::with(|cx| {
|
||||
// Prepare for blocking until a receiver wakes us up.
|
||||
let oper = Operation::hook(token);
|
||||
let mut packet = Packet::<T>::message_on_stack(msg);
|
||||
inner.senders.register_with_packet(oper, (&raw mut packet) as *mut (), cx);
|
||||
inner.receivers.notify();
|
||||
drop(inner);
|
||||
|
||||
// Block the current thread.
|
||||
// SAFETY: the context belongs to the current thread.
|
||||
let sel = unsafe { cx.wait_until(deadline) };
|
||||
|
||||
match sel {
|
||||
Selected::Waiting => unreachable!(),
|
||||
Selected::Aborted => {
|
||||
self.inner.lock().unwrap().senders.unregister(oper).unwrap();
|
||||
let msg = unsafe { packet.msg.get().replace(None).unwrap() };
|
||||
Err(SendTimeoutError::Timeout(msg))
|
||||
}
|
||||
Selected::Disconnected => {
|
||||
self.inner.lock().unwrap().senders.unregister(oper).unwrap();
|
||||
let msg = unsafe { packet.msg.get().replace(None).unwrap() };
|
||||
Err(SendTimeoutError::Disconnected(msg))
|
||||
}
|
||||
Selected::Operation(_) => {
|
||||
// Wait until the message is read, then drop the packet.
|
||||
packet.wait_ready();
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Attempts to receive a message without blocking.
|
||||
pub(crate) fn try_recv(&self) -> Result<T, TryRecvError> {
|
||||
let token = &mut Token::default();
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
// If there's a waiting sender, pair up with it.
|
||||
if let Some(operation) = inner.senders.try_select() {
|
||||
token.zero.0 = operation.packet;
|
||||
drop(inner);
|
||||
unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
|
||||
} else if inner.is_disconnected {
|
||||
Err(TryRecvError::Disconnected)
|
||||
} else {
|
||||
Err(TryRecvError::Empty)
|
||||
}
|
||||
}
|
||||
|
||||
/// Receives a message from the channel.
|
||||
pub(crate) fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
|
||||
let token = &mut Token::default();
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
// If there's a waiting sender, pair up with it.
|
||||
if let Some(operation) = inner.senders.try_select() {
|
||||
token.zero.0 = operation.packet;
|
||||
drop(inner);
|
||||
unsafe {
|
||||
return self.read(token).map_err(|_| RecvTimeoutError::Disconnected);
|
||||
}
|
||||
}
|
||||
|
||||
if inner.is_disconnected {
|
||||
return Err(RecvTimeoutError::Disconnected);
|
||||
}
|
||||
|
||||
Context::with(|cx| {
|
||||
// Prepare for blocking until a sender wakes us up.
|
||||
let oper = Operation::hook(token);
|
||||
let mut packet = Packet::<T>::empty_on_stack();
|
||||
inner.receivers.register_with_packet(oper, (&raw mut packet) as *mut (), cx);
|
||||
inner.senders.notify();
|
||||
drop(inner);
|
||||
|
||||
// Block the current thread.
|
||||
// SAFETY: the context belongs to the current thread.
|
||||
let sel = unsafe { cx.wait_until(deadline) };
|
||||
|
||||
match sel {
|
||||
Selected::Waiting => unreachable!(),
|
||||
Selected::Aborted => {
|
||||
self.inner.lock().unwrap().receivers.unregister(oper).unwrap();
|
||||
Err(RecvTimeoutError::Timeout)
|
||||
}
|
||||
Selected::Disconnected => {
|
||||
self.inner.lock().unwrap().receivers.unregister(oper).unwrap();
|
||||
Err(RecvTimeoutError::Disconnected)
|
||||
}
|
||||
Selected::Operation(_) => {
|
||||
// Wait until the message is provided, then read it.
|
||||
packet.wait_ready();
|
||||
unsafe { Ok(packet.msg.get().replace(None).unwrap()) }
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Disconnects the channel and wakes up all blocked senders and receivers.
|
||||
///
|
||||
/// Returns `true` if this call disconnected the channel.
|
||||
pub(crate) fn disconnect(&self) -> bool {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
if !inner.is_disconnected {
|
||||
inner.is_disconnected = true;
|
||||
inner.senders.disconnect();
|
||||
inner.receivers.disconnect();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the current number of messages inside the channel.
|
||||
pub(crate) fn len(&self) -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
/// Returns the capacity of the channel.
|
||||
#[allow(clippy::unnecessary_wraps)] // This is intentional.
|
||||
pub(crate) fn capacity(&self) -> Option<usize> {
|
||||
Some(0)
|
||||
}
|
||||
|
||||
/// Returns `true` if the channel is empty.
|
||||
pub(crate) fn is_empty(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
/// Returns `true` if the channel is full.
|
||||
pub(crate) fn is_full(&self) -> bool {
|
||||
true
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,45 +0,0 @@
|
||||
//! Non-poisoning synchronous locks.
|
||||
//!
|
||||
//! The difference from the locks in the [`poison`] module is that the locks in this module will not
|
||||
//! become poisoned when a thread panics while holding a guard.
|
||||
//!
|
||||
//! [`poison`]: super::poison
|
||||
|
||||
use crate::fmt;
|
||||
|
||||
/// A type alias for the result of a nonblocking locking method.
|
||||
#[unstable(feature = "sync_nonpoison", issue = "134645")]
|
||||
pub type TryLockResult<Guard> = Result<Guard, WouldBlock>;
|
||||
|
||||
/// A lock could not be acquired at this time because the operation would otherwise block.
|
||||
#[unstable(feature = "sync_nonpoison", issue = "134645")]
|
||||
pub struct WouldBlock;
|
||||
|
||||
#[unstable(feature = "sync_nonpoison", issue = "134645")]
|
||||
impl fmt::Debug for WouldBlock {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
"WouldBlock".fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "sync_nonpoison", issue = "134645")]
|
||||
impl fmt::Display for WouldBlock {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
"try_lock failed because the operation would block".fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub use self::condvar::Condvar;
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
pub use self::mutex::MappedMutexGuard;
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub use self::mutex::{Mutex, MutexGuard};
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
pub use self::rwlock::{MappedRwLockReadGuard, MappedRwLockWriteGuard};
|
||||
#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
|
||||
pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
|
||||
|
||||
mod condvar;
|
||||
mod mutex;
|
||||
mod rwlock;
|
||||
@@ -1,444 +0,0 @@
|
||||
use crate::fmt;
|
||||
use crate::ops::DerefMut;
|
||||
use crate::sync::WaitTimeoutResult;
|
||||
use crate::sync::nonpoison::{MutexGuard, mutex};
|
||||
use crate::sys::sync as sys;
|
||||
use crate::time::{Duration, Instant};
|
||||
|
||||
/// A Condition Variable
|
||||
///
|
||||
/// For more information about condition variables, check out the documentation for the poisoning
|
||||
/// variant of this type at [`poison::Condvar`].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// Note that this `Condvar` does **not** propagate information about threads that panic while
|
||||
/// holding a lock. If you need this functionality, see [`poison::Mutex`] and [`poison::Condvar`].
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// // Inside of our lock, spawn a new thread, and then wait for it to start.
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut started = lock.lock();
|
||||
/// *started = true;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_one();
|
||||
/// });
|
||||
///
|
||||
/// // Wait for the thread to start up.
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// let mut started = lock.lock();
|
||||
/// while !*started {
|
||||
/// cvar.wait(&mut started);
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// [`poison::Mutex`]: crate::sync::poison::Mutex
|
||||
/// [`poison::Condvar`]: crate::sync::poison::Condvar
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub struct Condvar {
|
||||
inner: sys::Condvar,
|
||||
}
|
||||
|
||||
impl Condvar {
|
||||
/// Creates a new condition variable which is ready to be waited on and
|
||||
/// notified.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Condvar;
|
||||
///
|
||||
/// let condvar = Condvar::new();
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub const fn new() -> Condvar {
|
||||
Condvar { inner: sys::Condvar::new() }
|
||||
}
|
||||
|
||||
/// Blocks the current thread until this condition variable receives a
|
||||
/// notification.
|
||||
///
|
||||
/// This function will atomically unlock the mutex specified (represented by
|
||||
/// `guard`) and block the current thread. This means that any calls
|
||||
/// to [`notify_one`] or [`notify_all`] which happen logically after the
|
||||
/// mutex is unlocked are candidates to wake this thread up. When this
|
||||
/// function call returns, the lock specified will have been re-acquired.
|
||||
///
|
||||
/// Note that this function is susceptible to spurious wakeups. Condition
|
||||
/// variables normally have a boolean predicate associated with them, and
|
||||
/// the predicate must always be checked each time this function returns to
|
||||
/// protect against spurious wakeups.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function may [`panic!`] if it is used with more than one mutex
|
||||
/// over time.
|
||||
///
|
||||
/// [`notify_one`]: Self::notify_one
|
||||
/// [`notify_all`]: Self::notify_all
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut started = lock.lock();
|
||||
/// *started = true;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_one();
|
||||
/// });
|
||||
///
|
||||
/// // Wait for the thread to start up.
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// let mut started = lock.lock();
|
||||
/// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
|
||||
/// while !*started {
|
||||
/// cvar.wait(&mut started);
|
||||
/// }
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub fn wait<T>(&self, guard: &mut MutexGuard<'_, T>) {
|
||||
unsafe {
|
||||
let lock = mutex::guard_lock(guard);
|
||||
self.inner.wait(lock);
|
||||
}
|
||||
}
|
||||
|
||||
/// Blocks the current thread until the provided condition becomes false.
|
||||
///
|
||||
/// `condition` is checked immediately; if not met (returns `true`), this
|
||||
/// will [`wait`] for the next notification then check again. This repeats
|
||||
/// until `condition` returns `false`, in which case this function returns.
|
||||
///
|
||||
/// This function will atomically unlock the mutex specified (represented by
|
||||
/// `guard`) and block the current thread. This means that any calls
|
||||
/// to [`notify_one`] or [`notify_all`] which happen logically after the
|
||||
/// mutex is unlocked are candidates to wake this thread up. When this
|
||||
/// function call returns, the lock specified will have been re-acquired.
|
||||
///
|
||||
/// [`wait`]: Self::wait
|
||||
/// [`notify_one`]: Self::notify_one
|
||||
/// [`notify_all`]: Self::notify_all
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(true), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut pending = lock.lock();
|
||||
/// *pending = false;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_one();
|
||||
/// });
|
||||
///
|
||||
/// // Wait for the thread to start up.
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// // As long as the value inside the `Mutex<bool>` is `true`, we wait.
|
||||
/// let mut guard = lock.lock();
|
||||
/// cvar.wait_while(&mut guard, |pending| { *pending });
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub fn wait_while<T, F>(&self, guard: &mut MutexGuard<'_, T>, mut condition: F)
|
||||
where
|
||||
F: FnMut(&mut T) -> bool,
|
||||
{
|
||||
while condition(guard.deref_mut()) {
|
||||
self.wait(guard);
|
||||
}
|
||||
}
|
||||
|
||||
/// Waits on this condition variable for a notification, timing out after a
|
||||
/// specified duration.
|
||||
///
|
||||
/// The semantics of this function are equivalent to [`wait`] except that
|
||||
/// the thread will be blocked for roughly no longer than `dur`. This
|
||||
/// method should not be used for precise timing due to anomalies such as
|
||||
/// preemption or platform differences that might not cause the maximum
|
||||
/// amount of time waited to be precisely `dur`.
|
||||
///
|
||||
/// Note that the best effort is made to ensure that the time waited is
|
||||
/// measured with a monotonic clock, and not affected by the changes made to
|
||||
/// the system time. This function is susceptible to spurious wakeups.
|
||||
/// Condition variables normally have a boolean predicate associated with
|
||||
/// them, and the predicate must always be checked each time this function
|
||||
/// returns to protect against spurious wakeups. Furthermore, since the timeout
|
||||
/// is given relative to the moment this function is called, it needs to be adjusted
|
||||
/// when this function is called in a loop. The [`wait_timeout_while`] method
|
||||
/// lets you wait with a timeout while a predicate is true, taking care of all these concerns.
|
||||
///
|
||||
/// The returned [`WaitTimeoutResult`] value indicates if the timeout is
|
||||
/// known to have elapsed.
|
||||
///
|
||||
/// Like [`wait`], the lock specified will have been re-acquired when this function
|
||||
/// returns, regardless of whether the timeout elapsed or not.
|
||||
///
|
||||
/// [`wait`]: Self::wait
|
||||
/// [`wait_timeout_while`]: Self::wait_timeout_while
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
/// use std::time::Duration;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut started = lock.lock();
|
||||
/// *started = true;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_one();
|
||||
/// });
|
||||
///
|
||||
/// // wait for the thread to start up
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// let mut started = lock.lock();
|
||||
/// // as long as the value inside the `Mutex<bool>` is `false`, we wait
|
||||
/// loop {
|
||||
/// let result = cvar.wait_timeout(&mut started, Duration::from_millis(10));
|
||||
/// // 10 milliseconds have passed, or maybe the value changed!
|
||||
/// if *started == true {
|
||||
/// // We received the notification and the value has been updated, we can leave.
|
||||
/// break
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub fn wait_timeout<T>(
|
||||
&self,
|
||||
guard: &mut MutexGuard<'_, T>,
|
||||
dur: Duration,
|
||||
) -> WaitTimeoutResult {
|
||||
let success = unsafe {
|
||||
let lock = mutex::guard_lock(guard);
|
||||
self.inner.wait_timeout(lock, dur)
|
||||
};
|
||||
WaitTimeoutResult(!success)
|
||||
}
|
||||
|
||||
/// Waits on this condition variable for a notification, timing out after a
|
||||
/// specified duration.
|
||||
///
|
||||
/// The semantics of this function are equivalent to [`wait_while`] except
|
||||
/// that the thread will be blocked for roughly no longer than `dur`. This
|
||||
/// method should not be used for precise timing due to anomalies such as
|
||||
/// preemption or platform differences that might not cause the maximum
|
||||
/// amount of time waited to be precisely `dur`.
|
||||
///
|
||||
/// Note that the best effort is made to ensure that the time waited is
|
||||
/// measured with a monotonic clock, and not affected by the changes made to
|
||||
/// the system time.
|
||||
///
|
||||
/// The returned [`WaitTimeoutResult`] value indicates if the timeout is
|
||||
/// known to have elapsed without the condition being met.
|
||||
///
|
||||
/// Like [`wait_while`], the lock specified will have been re-acquired when this
|
||||
/// function returns, regardless of whether the timeout elapsed or not.
|
||||
///
|
||||
/// [`wait_while`]: Self::wait_while
|
||||
/// [`wait_timeout`]: Self::wait_timeout
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
/// use std::time::Duration;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(true), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut pending = lock.lock();
|
||||
/// *pending = false;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_one();
|
||||
/// });
|
||||
///
|
||||
/// // wait for the thread to start up
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// let mut guard = lock.lock();
|
||||
/// let result = cvar.wait_timeout_while(
|
||||
/// &mut guard,
|
||||
/// Duration::from_millis(100),
|
||||
/// |&mut pending| pending,
|
||||
/// );
|
||||
/// if result.timed_out() {
|
||||
/// // timed-out without the condition ever evaluating to false.
|
||||
/// }
|
||||
/// // access the locked mutex via guard
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub fn wait_timeout_while<T, F>(
|
||||
&self,
|
||||
guard: &mut MutexGuard<'_, T>,
|
||||
dur: Duration,
|
||||
mut condition: F,
|
||||
) -> WaitTimeoutResult
|
||||
where
|
||||
F: FnMut(&mut T) -> bool,
|
||||
{
|
||||
let start = Instant::now();
|
||||
|
||||
while condition(guard.deref_mut()) {
|
||||
let timeout = match dur.checked_sub(start.elapsed()) {
|
||||
Some(timeout) => timeout,
|
||||
None => return WaitTimeoutResult(true),
|
||||
};
|
||||
|
||||
self.wait_timeout(guard, timeout);
|
||||
}
|
||||
|
||||
WaitTimeoutResult(false)
|
||||
}
|
||||
|
||||
/// Wakes up one blocked thread on this condvar.
|
||||
///
|
||||
/// If there is a blocked thread on this condition variable, then it will
|
||||
/// be woken up from its call to [`wait`] or [`wait_timeout`]. Calls to
|
||||
/// `notify_one` are not buffered in any way.
|
||||
///
|
||||
/// To wake up all threads, see [`notify_all`].
|
||||
///
|
||||
/// [`wait`]: Self::wait
|
||||
/// [`wait_timeout`]: Self::wait_timeout
|
||||
/// [`notify_all`]: Self::notify_all
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut started = lock.lock();
|
||||
/// *started = true;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_one();
|
||||
/// });
|
||||
///
|
||||
/// // Wait for the thread to start up.
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// let mut started = lock.lock();
|
||||
/// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
|
||||
/// while !*started {
|
||||
/// cvar.wait(&mut started);
|
||||
/// }
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub fn notify_one(&self) {
|
||||
self.inner.notify_one()
|
||||
}
|
||||
|
||||
/// Wakes up all blocked threads on this condvar.
|
||||
///
|
||||
/// This method will ensure that any current waiters on the condition
|
||||
/// variable are awoken. Calls to `notify_all()` are not buffered in any
|
||||
/// way.
|
||||
///
|
||||
/// To wake up only one thread, see [`notify_one`].
|
||||
///
|
||||
/// [`notify_one`]: Self::notify_one
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(nonpoison_condvar)]
|
||||
///
|
||||
/// use std::sync::nonpoison::{Mutex, Condvar};
|
||||
/// use std::sync::Arc;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
|
||||
/// let pair2 = Arc::clone(&pair);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let (lock, cvar) = &*pair2;
|
||||
/// let mut started = lock.lock();
|
||||
/// *started = true;
|
||||
/// // We notify the condvar that the value has changed.
|
||||
/// cvar.notify_all();
|
||||
/// });
|
||||
///
|
||||
/// // Wait for the thread to start up.
|
||||
/// let (lock, cvar) = &*pair;
|
||||
/// let mut started = lock.lock();
|
||||
/// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
|
||||
/// while !*started {
|
||||
/// cvar.wait(&mut started);
|
||||
/// }
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
pub fn notify_all(&self) {
|
||||
self.inner.notify_all()
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
impl fmt::Debug for Condvar {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("Condvar").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_condvar", issue = "134645")]
|
||||
impl Default for Condvar {
|
||||
/// Creates a `Condvar` which is ready to be waited on and notified.
|
||||
fn default() -> Condvar {
|
||||
Condvar::new()
|
||||
}
|
||||
}
|
||||
@@ -1,649 +0,0 @@
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::fmt;
|
||||
use crate::marker::PhantomData;
|
||||
use crate::mem::{self, ManuallyDrop};
|
||||
use crate::ops::{Deref, DerefMut};
|
||||
use crate::ptr::NonNull;
|
||||
use crate::sync::nonpoison::{TryLockResult, WouldBlock};
|
||||
use crate::sys::sync as sys;
|
||||
|
||||
/// A mutual exclusion primitive useful for protecting shared data that does not keep track of
|
||||
/// lock poisoning.
|
||||
///
|
||||
/// For more information about mutexes, check out the documentation for the poisoning variant of
|
||||
/// this lock at [`poison::Mutex`].
|
||||
///
|
||||
/// [`poison::Mutex`]: crate::sync::poison::Mutex
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// Note that this `Mutex` does **not** propagate threads that panic while holding the lock via
|
||||
/// poisoning. If you need this functionality, see [`poison::Mutex`].
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
///
|
||||
/// use std::thread;
|
||||
/// use std::sync::{Arc, nonpoison::Mutex};
|
||||
///
|
||||
/// let mutex = Arc::new(Mutex::new(0u32));
|
||||
/// let mut handles = Vec::new();
|
||||
///
|
||||
/// for n in 0..10 {
|
||||
/// let m = Arc::clone(&mutex);
|
||||
/// let handle = thread::spawn(move || {
|
||||
/// let mut guard = m.lock();
|
||||
/// *guard += 1;
|
||||
/// panic!("panic from thread {n} {guard}")
|
||||
/// });
|
||||
/// handles.push(handle);
|
||||
/// }
|
||||
///
|
||||
/// for h in handles {
|
||||
/// let _ = h.join();
|
||||
/// }
|
||||
///
|
||||
/// println!("Finished, locked {} times", mutex.lock());
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonMutex")]
|
||||
pub struct Mutex<T: ?Sized> {
|
||||
inner: sys::Mutex,
|
||||
data: UnsafeCell<T>,
|
||||
}
|
||||
|
||||
/// `T` must be `Send` for a [`Mutex`] to be `Send` because it is possible to acquire
|
||||
/// the owned `T` from the `Mutex` via [`into_inner`].
|
||||
///
|
||||
/// [`into_inner`]: Mutex::into_inner
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
|
||||
|
||||
/// `T` must be `Send` for [`Mutex`] to be `Sync`.
|
||||
/// This ensures that the protected data can be accessed safely from multiple threads
|
||||
/// without causing data races or other unsafe behavior.
|
||||
///
|
||||
/// [`Mutex<T>`] provides mutable access to `T` to one thread at a time. However, it's essential
|
||||
/// for `T` to be `Send` because it's not safe for non-`Send` structures to be accessed in
|
||||
/// this manner. For instance, consider [`Rc`], a non-atomic reference counted smart pointer,
|
||||
/// which is not `Send`. With `Rc`, we can have multiple copies pointing to the same heap
|
||||
/// allocation with a non-atomic reference count. If we were to use `Mutex<Rc<_>>`, it would
|
||||
/// only protect one instance of `Rc` from shared access, leaving other copies vulnerable
|
||||
/// to potential data races.
|
||||
///
|
||||
/// Also note that it is not necessary for `T` to be `Sync` as `&T` is only made available
|
||||
/// to one thread at a time if `T` is not `Sync`.
|
||||
///
|
||||
/// [`Rc`]: crate::rc::Rc
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
|
||||
|
||||
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
|
||||
/// dropped (falls out of scope), the lock will be unlocked.
|
||||
///
|
||||
/// The data protected by the mutex can be accessed through this guard via its
|
||||
/// [`Deref`] and [`DerefMut`] implementations.
|
||||
///
|
||||
/// This structure is created by the [`lock`] and [`try_lock`] methods on
|
||||
/// [`Mutex`].
|
||||
///
|
||||
/// [`lock`]: Mutex::lock
|
||||
/// [`try_lock`]: Mutex::try_lock
|
||||
#[must_use = "if unused the Mutex will immediately unlock"]
|
||||
#[must_not_suspend = "holding a MutexGuard across suspend \
|
||||
points can cause deadlocks, delays, \
|
||||
and cause Futures to not implement `Send`"]
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
#[clippy::has_significant_drop]
|
||||
#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonMutexGuard")]
|
||||
pub struct MutexGuard<'a, T: ?Sized + 'a> {
|
||||
lock: &'a Mutex<T>,
|
||||
}
|
||||
|
||||
/// A [`MutexGuard`] is not `Send` to maximize platform portability.
|
||||
///
|
||||
/// On platforms that use POSIX threads (commonly referred to as pthreads) there is a requirement to
|
||||
/// release mutex locks on the same thread they were acquired.
|
||||
/// For this reason, [`MutexGuard`] must not implement `Send` to prevent it being dropped from
|
||||
/// another thread.
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
impl<T: ?Sized> !Send for MutexGuard<'_, T> {}
|
||||
|
||||
/// `T` must be `Sync` for a [`MutexGuard<T>`] to be `Sync`
|
||||
/// because it is possible to get a `&T` from `&MutexGuard` (via `Deref`).
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
unsafe impl<T: ?Sized + Sync> Sync for MutexGuard<'_, T> {}
|
||||
|
||||
/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
|
||||
/// subfield of the protected data. When this structure is dropped (falls out
|
||||
/// of scope), the lock will be unlocked.
|
||||
///
|
||||
/// The main difference between `MappedMutexGuard` and [`MutexGuard`] is that the
|
||||
/// former cannot be used with [`Condvar`], since that could introduce soundness issues if the
|
||||
/// locked object is modified by another thread while the `Mutex` is unlocked.
|
||||
///
|
||||
/// The data protected by the mutex can be accessed through this guard via its
|
||||
/// [`Deref`] and [`DerefMut`] implementations.
|
||||
///
|
||||
/// This structure is created by the [`map`] and [`filter_map`] methods on
|
||||
/// [`MutexGuard`].
|
||||
///
|
||||
/// [`map`]: MutexGuard::map
|
||||
/// [`filter_map`]: MutexGuard::filter_map
|
||||
/// [`Condvar`]: crate::sync::nonpoison::Condvar
|
||||
#[must_use = "if unused the Mutex will immediately unlock"]
|
||||
#[must_not_suspend = "holding a MappedMutexGuard across suspend \
|
||||
points can cause deadlocks, delays, \
|
||||
and cause Futures to not implement `Send`"]
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
#[clippy::has_significant_drop]
|
||||
pub struct MappedMutexGuard<'a, T: ?Sized + 'a> {
|
||||
// NB: we use a pointer instead of `&'a mut T` to avoid `noalias` violations, because a
|
||||
// `MappedMutexGuard` argument doesn't hold uniqueness for its whole scope, only until it drops.
|
||||
// `NonNull` is covariant over `T`, so we add a `PhantomData<&'a mut T>` field
|
||||
// below for the correct variance over `T` (invariance).
|
||||
data: NonNull<T>,
|
||||
inner: &'a sys::Mutex,
|
||||
_variance: PhantomData<&'a mut T>,
|
||||
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
impl<T: ?Sized> !Send for MappedMutexGuard<'_, T> {}
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
unsafe impl<T: ?Sized + Sync> Sync for MappedMutexGuard<'_, T> {}
|
||||
|
||||
impl<T> Mutex<T> {
|
||||
/// Creates a new mutex in an unlocked state ready for use.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
///
|
||||
/// use std::sync::nonpoison::Mutex;
|
||||
///
|
||||
/// let mutex = Mutex::new(0);
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
#[inline]
|
||||
pub const fn new(t: T) -> Mutex<T> {
|
||||
Mutex { inner: sys::Mutex::new(), data: UnsafeCell::new(t) }
|
||||
}
|
||||
|
||||
/// Returns the contained value by cloning it.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(lock_value_accessors)]
|
||||
///
|
||||
/// use std::sync::nonpoison::Mutex;
|
||||
///
|
||||
/// let mut mutex = Mutex::new(7);
|
||||
///
|
||||
/// assert_eq!(mutex.get_cloned(), 7);
|
||||
/// ```
|
||||
#[unstable(feature = "lock_value_accessors", issue = "133407")]
|
||||
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub fn get_cloned(&self) -> T
|
||||
where
|
||||
T: Clone,
|
||||
{
|
||||
self.lock().clone()
|
||||
}
|
||||
|
||||
/// Sets the contained value.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(lock_value_accessors)]
|
||||
///
|
||||
/// use std::sync::nonpoison::Mutex;
|
||||
///
|
||||
/// let mut mutex = Mutex::new(7);
|
||||
///
|
||||
/// assert_eq!(mutex.get_cloned(), 7);
|
||||
/// mutex.set(11);
|
||||
/// assert_eq!(mutex.get_cloned(), 11);
|
||||
/// ```
|
||||
#[unstable(feature = "lock_value_accessors", issue = "133407")]
|
||||
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub fn set(&self, value: T) {
|
||||
if mem::needs_drop::<T>() {
|
||||
// If the contained value has a non-trivial destructor, we
|
||||
// call that destructor after the lock has been released.
|
||||
drop(self.replace(value))
|
||||
} else {
|
||||
*self.lock() = value;
|
||||
}
|
||||
}
|
||||
|
||||
/// Replaces the contained value with `value`, and returns the old contained value.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
/// #![feature(lock_value_accessors)]
|
||||
///
|
||||
/// use std::sync::nonpoison::Mutex;
|
||||
///
|
||||
/// let mut mutex = Mutex::new(7);
|
||||
///
|
||||
/// assert_eq!(mutex.replace(11), 7);
|
||||
/// assert_eq!(mutex.get_cloned(), 11);
|
||||
/// ```
|
||||
#[unstable(feature = "lock_value_accessors", issue = "133407")]
|
||||
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub fn replace(&self, value: T) -> T {
|
||||
let mut guard = self.lock();
|
||||
mem::replace(&mut *guard, value)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ?Sized> Mutex<T> {
|
||||
/// Acquires a mutex, blocking the current thread until it is able to do so.
|
||||
///
|
||||
/// This function will block the local thread until it is available to acquire
|
||||
/// the mutex. Upon returning, the thread is the only thread with the lock
|
||||
/// held. An RAII guard is returned to allow scoped unlock of the lock. When
|
||||
/// the guard goes out of scope, the mutex will be unlocked.
|
||||
///
|
||||
/// The exact behavior on locking a mutex in the thread which already holds
|
||||
/// the lock is left unspecified. However, this function will not return on
|
||||
/// the second call (it might panic or deadlock, for example).
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function might panic when called if the lock is already held by
|
||||
/// the current thread.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
///
|
||||
/// use std::sync::{Arc, nonpoison::Mutex};
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let mutex = Arc::new(Mutex::new(0));
|
||||
/// let c_mutex = Arc::clone(&mutex);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// *c_mutex.lock() = 10;
|
||||
/// }).join().expect("thread::spawn failed");
|
||||
/// assert_eq!(*mutex.lock(), 10);
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub fn lock(&self) -> MutexGuard<'_, T> {
|
||||
unsafe {
|
||||
self.inner.lock();
|
||||
MutexGuard::new(self)
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempts to acquire this lock.
|
||||
///
|
||||
/// This function does not block. If the lock could not be acquired at this time, then
|
||||
/// [`WouldBlock`] is returned. Otherwise, an RAII guard is returned.
|
||||
///
|
||||
/// The lock will be unlocked when the guard is dropped.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// If the mutex could not be acquired because it is already locked, then this call will return
|
||||
/// the [`WouldBlock`] error.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{Arc, Mutex};
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let mutex = Arc::new(Mutex::new(0));
|
||||
/// let c_mutex = Arc::clone(&mutex);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// let mut lock = c_mutex.try_lock();
|
||||
/// if let Ok(ref mut mutex) = lock {
|
||||
/// **mutex = 10;
|
||||
/// } else {
|
||||
/// println!("try_lock failed");
|
||||
/// }
|
||||
/// }).join().expect("thread::spawn failed");
|
||||
/// assert_eq!(*mutex.lock().unwrap(), 10);
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub fn try_lock(&self) -> TryLockResult<MutexGuard<'_, T>> {
|
||||
unsafe { if self.inner.try_lock() { Ok(MutexGuard::new(self)) } else { Err(WouldBlock) } }
|
||||
}
|
||||
|
||||
/// Consumes this mutex, returning the underlying data.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
///
|
||||
/// use std::sync::nonpoison::Mutex;
|
||||
///
|
||||
/// let mutex = Mutex::new(0);
|
||||
/// assert_eq!(mutex.into_inner(), 0);
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub fn into_inner(self) -> T
|
||||
where
|
||||
T: Sized,
|
||||
{
|
||||
self.data.into_inner()
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying data.
|
||||
///
|
||||
/// Since this call borrows the `Mutex` mutably, no actual locking needs to
|
||||
/// take place -- the mutable borrow statically guarantees no locks exist.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(nonpoison_mutex)]
|
||||
///
|
||||
/// use std::sync::nonpoison::Mutex;
|
||||
///
|
||||
/// let mut mutex = Mutex::new(0);
|
||||
/// *mutex.get_mut() = 10;
|
||||
/// assert_eq!(*mutex.lock(), 10);
|
||||
/// ```
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub fn get_mut(&mut self) -> &mut T {
|
||||
self.data.get_mut()
|
||||
}
|
||||
|
||||
/// Returns a raw pointer to the underlying data.
|
||||
///
|
||||
/// The returned pointer is always non-null and properly aligned, but it is
|
||||
/// the user's responsibility to ensure that any reads and writes through it
|
||||
/// are properly synchronized to avoid data races, and that it is not read
|
||||
/// or written through after the mutex is dropped.
|
||||
#[unstable(feature = "mutex_data_ptr", issue = "140368")]
|
||||
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub const fn data_ptr(&self) -> *mut T {
|
||||
self.data.get()
|
||||
}
|
||||
|
||||
/// Acquires the mutex and provides mutable access to the underlying data by passing
|
||||
/// a mutable reference to the given closure.
|
||||
///
|
||||
/// This method acquires the lock, calls the provided closure with a mutable reference
|
||||
/// to the data, and returns the result of the closure. The lock is released after
|
||||
/// the closure completes, even if it panics.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(lock_value_accessors, nonpoison_mutex)]
|
||||
///
|
||||
/// use std::sync::nonpoison::Mutex;
|
||||
///
|
||||
/// let mutex = Mutex::new(2);
|
||||
///
|
||||
/// let result = mutex.with_mut(|data| {
|
||||
/// *data += 3;
|
||||
///
|
||||
/// *data + 5
|
||||
/// });
|
||||
///
|
||||
/// assert_eq!(*mutex.lock(), 5);
|
||||
/// assert_eq!(result, 10);
|
||||
/// ```
|
||||
#[unstable(feature = "lock_value_accessors", issue = "133407")]
|
||||
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub fn with_mut<F, R>(&self, f: F) -> R
|
||||
where
|
||||
F: FnOnce(&mut T) -> R,
|
||||
{
|
||||
f(&mut self.lock())
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
impl<T> From<T> for Mutex<T> {
|
||||
/// Creates a new mutex in an unlocked state ready for use.
|
||||
/// This is equivalent to [`Mutex::new`].
|
||||
fn from(t: T) -> Self {
|
||||
Mutex::new(t)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
impl<T: Default> Default for Mutex<T> {
|
||||
/// Creates a `Mutex<T>`, with the `Default` value for T.
|
||||
fn default() -> Mutex<T> {
|
||||
Mutex::new(Default::default())
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let mut d = f.debug_struct("Mutex");
|
||||
match self.try_lock() {
|
||||
Ok(guard) => {
|
||||
d.field("data", &&*guard);
|
||||
}
|
||||
Err(WouldBlock) => {
|
||||
d.field("data", &"<locked>");
|
||||
}
|
||||
}
|
||||
d.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'mutex, T: ?Sized> MutexGuard<'mutex, T> {
|
||||
unsafe fn new(lock: &'mutex Mutex<T>) -> MutexGuard<'mutex, T> {
|
||||
return MutexGuard { lock };
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
impl<T: ?Sized> Deref for MutexGuard<'_, T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
unsafe { &*self.lock.data.get() }
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
unsafe { &mut *self.lock.data.get() }
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
impl<T: ?Sized> Drop for MutexGuard<'_, T> {
|
||||
#[inline]
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
self.lock.inner.unlock();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
impl<T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt::Debug::fmt(&**self, f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
impl<T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
(**self).fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
/// For use in [`nonpoison::condvar`](super::condvar).
|
||||
pub(super) fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
|
||||
&guard.lock.inner
|
||||
}
|
||||
|
||||
impl<'a, T: ?Sized> MutexGuard<'a, T> {
|
||||
/// Makes a [`MappedMutexGuard`] for a component of the borrowed data, e.g.
|
||||
/// an enum variant.
|
||||
///
|
||||
/// The `Mutex` is already locked, so this cannot fail.
|
||||
///
|
||||
/// This is an associated function that needs to be used as
|
||||
/// `MutexGuard::map(...)`. A method would interfere with methods of the
|
||||
/// same name on the contents of the `MutexGuard` used through `Deref`.
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub fn map<U, F>(orig: Self, f: F) -> MappedMutexGuard<'a, U>
|
||||
where
|
||||
F: FnOnce(&mut T) -> &mut U,
|
||||
U: ?Sized,
|
||||
{
|
||||
// SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
|
||||
// was created, and have been upheld throughout `map` and/or `filter_map`.
|
||||
// The signature of the closure guarantees that it will not "leak" the lifetime of the reference
|
||||
// passed to it. If the closure panics, the guard will be dropped.
|
||||
let data = NonNull::from(f(unsafe { &mut *orig.lock.data.get() }));
|
||||
let orig = ManuallyDrop::new(orig);
|
||||
MappedMutexGuard { data, inner: &orig.lock.inner, _variance: PhantomData }
|
||||
}
|
||||
|
||||
/// Makes a [`MappedMutexGuard`] for a component of the borrowed data. The
|
||||
/// original guard is returned as an `Err(...)` if the closure returns
|
||||
/// `None`.
|
||||
///
|
||||
/// The `Mutex` is already locked, so this cannot fail.
|
||||
///
|
||||
/// This is an associated function that needs to be used as
|
||||
/// `MutexGuard::filter_map(...)`. A method would interfere with methods of the
|
||||
/// same name on the contents of the `MutexGuard` used through `Deref`.
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedMutexGuard<'a, U>, Self>
|
||||
where
|
||||
F: FnOnce(&mut T) -> Option<&mut U>,
|
||||
U: ?Sized,
|
||||
{
|
||||
// SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
|
||||
// was created, and have been upheld throughout `map` and/or `filter_map`.
|
||||
// The signature of the closure guarantees that it will not "leak" the lifetime of the reference
|
||||
// passed to it. If the closure panics, the guard will be dropped.
|
||||
match f(unsafe { &mut *orig.lock.data.get() }) {
|
||||
Some(data) => {
|
||||
let data = NonNull::from(data);
|
||||
let orig = ManuallyDrop::new(orig);
|
||||
Ok(MappedMutexGuard { data, inner: &orig.lock.inner, _variance: PhantomData })
|
||||
}
|
||||
None => Err(orig),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
impl<T: ?Sized> Deref for MappedMutexGuard<'_, T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
unsafe { self.data.as_ref() }
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
impl<T: ?Sized> DerefMut for MappedMutexGuard<'_, T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
unsafe { self.data.as_mut() }
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
impl<T: ?Sized> Drop for MappedMutexGuard<'_, T> {
|
||||
#[inline]
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
self.inner.unlock();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedMutexGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt::Debug::fmt(&**self, f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
impl<T: ?Sized + fmt::Display> fmt::Display for MappedMutexGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
(**self).fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: ?Sized> MappedMutexGuard<'a, T> {
|
||||
/// Makes a [`MappedMutexGuard`] for a component of the borrowed data, e.g.
|
||||
/// an enum variant.
|
||||
///
|
||||
/// The `Mutex` is already locked, so this cannot fail.
|
||||
///
|
||||
/// This is an associated function that needs to be used as
|
||||
/// `MappedMutexGuard::map(...)`. A method would interfere with methods of the
|
||||
/// same name on the contents of the `MutexGuard` used through `Deref`.
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub fn map<U, F>(mut orig: Self, f: F) -> MappedMutexGuard<'a, U>
|
||||
where
|
||||
F: FnOnce(&mut T) -> &mut U,
|
||||
U: ?Sized,
|
||||
{
|
||||
// SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
|
||||
// was created, and have been upheld throughout `map` and/or `filter_map`.
|
||||
// The signature of the closure guarantees that it will not "leak" the lifetime of the reference
|
||||
// passed to it. If the closure panics, the guard will be dropped.
|
||||
let data = NonNull::from(f(unsafe { orig.data.as_mut() }));
|
||||
let orig = ManuallyDrop::new(orig);
|
||||
MappedMutexGuard { data, inner: orig.inner, _variance: PhantomData }
|
||||
}
|
||||
|
||||
/// Makes a [`MappedMutexGuard`] for a component of the borrowed data. The
|
||||
/// original guard is returned as an `Err(...)` if the closure returns
|
||||
/// `None`.
|
||||
///
|
||||
/// The `Mutex` is already locked, so this cannot fail.
|
||||
///
|
||||
/// This is an associated function that needs to be used as
|
||||
/// `MappedMutexGuard::filter_map(...)`. A method would interfere with methods of the
|
||||
/// same name on the contents of the `MutexGuard` used through `Deref`.
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
// #[unstable(feature = "nonpoison_mutex", issue = "134645")]
|
||||
pub fn filter_map<U, F>(mut orig: Self, f: F) -> Result<MappedMutexGuard<'a, U>, Self>
|
||||
where
|
||||
F: FnOnce(&mut T) -> Option<&mut U>,
|
||||
U: ?Sized,
|
||||
{
|
||||
// SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
|
||||
// was created, and have been upheld throughout `map` and/or `filter_map`.
|
||||
// The signature of the closure guarantees that it will not "leak" the lifetime of the reference
|
||||
// passed to it. If the closure panics, the guard will be dropped.
|
||||
match f(unsafe { orig.data.as_mut() }) {
|
||||
Some(data) => {
|
||||
let data = NonNull::from(data);
|
||||
let orig = ManuallyDrop::new(orig);
|
||||
Ok(MappedMutexGuard { data, inner: orig.inner, _variance: PhantomData })
|
||||
}
|
||||
None => Err(orig),
|
||||
}
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,395 +0,0 @@
|
||||
//! A "once initialization" primitive
|
||||
//!
|
||||
//! This primitive is meant to be used to run one-time initialization. An
|
||||
//! example use case would be for initializing an FFI library.
|
||||
|
||||
use crate::fmt;
|
||||
use crate::panic::{RefUnwindSafe, UnwindSafe};
|
||||
use crate::sys::sync as sys;
|
||||
|
||||
/// A low-level synchronization primitive for one-time global execution.
|
||||
///
|
||||
/// Previously this was the only "execute once" synchronization in `std`.
|
||||
/// Other libraries implemented novel synchronizing types with `Once`, like
|
||||
/// [`OnceLock<T>`] or [`LazyLock<T, F>`], before those were added to `std`.
|
||||
/// `OnceLock<T>` in particular supersedes `Once` in functionality and should
|
||||
/// be preferred for the common case where the `Once` is associated with data.
|
||||
///
|
||||
/// This type can only be constructed with [`Once::new()`].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Once;
|
||||
///
|
||||
/// static START: Once = Once::new();
|
||||
///
|
||||
/// START.call_once(|| {
|
||||
/// // run initialization here
|
||||
/// });
|
||||
/// ```
|
||||
///
|
||||
/// [`OnceLock<T>`]: crate::sync::OnceLock
|
||||
/// [`LazyLock<T, F>`]: crate::sync::LazyLock
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub struct Once {
|
||||
inner: sys::Once,
|
||||
}
|
||||
|
||||
#[stable(feature = "sync_once_unwind_safe", since = "1.59.0")]
|
||||
impl UnwindSafe for Once {}
|
||||
|
||||
#[stable(feature = "sync_once_unwind_safe", since = "1.59.0")]
|
||||
impl RefUnwindSafe for Once {}
|
||||
|
||||
/// State yielded to [`Once::call_once_force()`]’s closure parameter. The state
|
||||
/// can be used to query the poison status of the [`Once`].
|
||||
#[stable(feature = "once_poison", since = "1.51.0")]
|
||||
pub struct OnceState {
|
||||
pub(crate) inner: sys::OnceState,
|
||||
}
|
||||
|
||||
/// Used for the internal implementation of `sys::sync::once` on different platforms and the
|
||||
/// [`LazyLock`](crate::sync::LazyLock) implementation.
|
||||
pub(crate) enum OnceExclusiveState {
|
||||
Incomplete,
|
||||
Poisoned,
|
||||
Complete,
|
||||
}
|
||||
|
||||
/// Initialization value for static [`Once`] values.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{Once, ONCE_INIT};
|
||||
///
|
||||
/// static START: Once = ONCE_INIT;
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[deprecated(
|
||||
since = "1.38.0",
|
||||
note = "the `Once::new()` function is now preferred",
|
||||
suggestion = "Once::new()"
|
||||
)]
|
||||
pub const ONCE_INIT: Once = Once::new();
|
||||
|
||||
impl Once {
|
||||
/// Creates a new `Once` value.
|
||||
#[inline]
|
||||
#[stable(feature = "once_new", since = "1.2.0")]
|
||||
#[rustc_const_stable(feature = "const_once_new", since = "1.32.0")]
|
||||
#[must_use]
|
||||
pub const fn new() -> Once {
|
||||
Once { inner: sys::Once::new() }
|
||||
}
|
||||
|
||||
/// Performs an initialization routine once and only once. The given closure
|
||||
/// will be executed if this is the first time `call_once` has been called,
|
||||
/// and otherwise the routine will *not* be invoked.
|
||||
///
|
||||
/// This method will block the calling thread if another initialization
|
||||
/// routine is currently running.
|
||||
///
|
||||
/// When this function returns, it is guaranteed that some initialization
|
||||
/// has run and completed (it might not be the closure specified). It is also
|
||||
/// guaranteed that any memory writes performed by the executed closure can
|
||||
/// be reliably observed by other threads at this point (there is a
|
||||
/// happens-before relation between the closure and code executing after the
|
||||
/// return).
|
||||
///
|
||||
/// If the given closure recursively invokes `call_once` on the same [`Once`]
|
||||
/// instance, the exact behavior is not specified: allowed outcomes are
|
||||
/// a panic or a deadlock.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Once;
|
||||
///
|
||||
/// static mut VAL: usize = 0;
|
||||
/// static INIT: Once = Once::new();
|
||||
///
|
||||
/// // Accessing a `static mut` is unsafe much of the time, but if we do so
|
||||
/// // in a synchronized fashion (e.g., write once or read all) then we're
|
||||
/// // good to go!
|
||||
/// //
|
||||
/// // This function will only call `expensive_computation` once, and will
|
||||
/// // otherwise always return the value returned from the first invocation.
|
||||
/// fn get_cached_val() -> usize {
|
||||
/// unsafe {
|
||||
/// INIT.call_once(|| {
|
||||
/// VAL = expensive_computation();
|
||||
/// });
|
||||
/// VAL
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// fn expensive_computation() -> usize {
|
||||
/// // ...
|
||||
/// # 2
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// The closure `f` will only be executed once even if this is called
|
||||
/// concurrently amongst many threads. If that closure panics, however, then
|
||||
/// it will *poison* this [`Once`] instance, causing all future invocations of
|
||||
/// `call_once` to also panic.
|
||||
///
|
||||
/// This is similar to [poisoning with mutexes][poison], but this mechanism
|
||||
/// is guaranteed to never skip panics within `f`.
|
||||
///
|
||||
/// [poison]: struct.Mutex.html#poisoning
|
||||
#[inline]
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[track_caller]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn call_once<F>(&self, f: F)
|
||||
where
|
||||
F: FnOnce(),
|
||||
{
|
||||
// Fast path check
|
||||
if self.inner.is_completed() {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut f = Some(f);
|
||||
self.inner.call(false, &mut |_| f.take().unwrap()());
|
||||
}
|
||||
|
||||
/// Performs the same function as [`call_once()`] except ignores poisoning.
|
||||
///
|
||||
/// Unlike [`call_once()`], if this [`Once`] has been poisoned (i.e., a previous
|
||||
/// call to [`call_once()`] or [`call_once_force()`] caused a panic), calling
|
||||
/// [`call_once_force()`] will still invoke the closure `f` and will _not_
|
||||
/// result in an immediate panic. If `f` panics, the [`Once`] will remain
|
||||
/// in a poison state. If `f` does _not_ panic, the [`Once`] will no
|
||||
/// longer be in a poison state and all future calls to [`call_once()`] or
|
||||
/// [`call_once_force()`] will be no-ops.
|
||||
///
|
||||
/// The closure `f` is yielded a [`OnceState`] structure which can be used
|
||||
/// to query the poison status of the [`Once`].
|
||||
///
|
||||
/// [`call_once()`]: Once::call_once
|
||||
/// [`call_once_force()`]: Once::call_once_force
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Once;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// static INIT: Once = Once::new();
|
||||
///
|
||||
/// // poison the once
|
||||
/// let handle = thread::spawn(|| {
|
||||
/// INIT.call_once(|| panic!());
|
||||
/// });
|
||||
/// assert!(handle.join().is_err());
|
||||
///
|
||||
/// // poisoning propagates
|
||||
/// let handle = thread::spawn(|| {
|
||||
/// INIT.call_once(|| {});
|
||||
/// });
|
||||
/// assert!(handle.join().is_err());
|
||||
///
|
||||
/// // call_once_force will still run and reset the poisoned state
|
||||
/// INIT.call_once_force(|state| {
|
||||
/// assert!(state.is_poisoned());
|
||||
/// });
|
||||
///
|
||||
/// // once any success happens, we stop propagating the poison
|
||||
/// INIT.call_once(|| {});
|
||||
/// ```
|
||||
#[inline]
|
||||
#[stable(feature = "once_poison", since = "1.51.0")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn call_once_force<F>(&self, f: F)
|
||||
where
|
||||
F: FnOnce(&OnceState),
|
||||
{
|
||||
// Fast path check
|
||||
if self.inner.is_completed() {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut f = Some(f);
|
||||
self.inner.call(true, &mut |p| f.take().unwrap()(p));
|
||||
}
|
||||
|
||||
/// Returns `true` if some [`call_once()`] call has completed
|
||||
/// successfully. Specifically, `is_completed` will return false in
|
||||
/// the following situations:
|
||||
/// * [`call_once()`] was not called at all,
|
||||
/// * [`call_once()`] was called, but has not yet completed,
|
||||
/// * the [`Once`] instance is poisoned
|
||||
///
|
||||
/// This function returning `false` does not mean that [`Once`] has not been
|
||||
/// executed. For example, it may have been executed in the time between
|
||||
/// when `is_completed` starts executing and when it returns, in which case
|
||||
/// the `false` return value would be stale (but still permissible).
|
||||
///
|
||||
/// [`call_once()`]: Once::call_once
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Once;
|
||||
///
|
||||
/// static INIT: Once = Once::new();
|
||||
///
|
||||
/// assert_eq!(INIT.is_completed(), false);
|
||||
/// INIT.call_once(|| {
|
||||
/// assert_eq!(INIT.is_completed(), false);
|
||||
/// });
|
||||
/// assert_eq!(INIT.is_completed(), true);
|
||||
/// ```
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Once;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// static INIT: Once = Once::new();
|
||||
///
|
||||
/// assert_eq!(INIT.is_completed(), false);
|
||||
/// let handle = thread::spawn(|| {
|
||||
/// INIT.call_once(|| panic!());
|
||||
/// });
|
||||
/// assert!(handle.join().is_err());
|
||||
/// assert_eq!(INIT.is_completed(), false);
|
||||
/// ```
|
||||
#[stable(feature = "once_is_completed", since = "1.43.0")]
|
||||
#[inline]
|
||||
pub fn is_completed(&self) -> bool {
|
||||
self.inner.is_completed()
|
||||
}
|
||||
|
||||
/// Blocks the current thread until initialization has completed.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::sync::Once;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// static READY: Once = Once::new();
|
||||
///
|
||||
/// let thread = thread::spawn(|| {
|
||||
/// READY.wait();
|
||||
/// println!("everything is ready");
|
||||
/// });
|
||||
///
|
||||
/// READY.call_once(|| println!("performing setup"));
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// If this [`Once`] has been poisoned because an initialization closure has
|
||||
/// panicked, this method will also panic. Use [`wait_force`](Self::wait_force)
|
||||
/// if this behavior is not desired.
|
||||
#[stable(feature = "once_wait", since = "1.86.0")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn wait(&self) {
|
||||
if !self.inner.is_completed() {
|
||||
self.inner.wait(false);
|
||||
}
|
||||
}
|
||||
|
||||
/// Blocks the current thread until initialization has completed, ignoring
|
||||
/// poisoning.
|
||||
///
|
||||
/// If this [`Once`] has been poisoned, this function blocks until it
|
||||
/// becomes completed, unlike [`Once::wait()`], which panics in this case.
|
||||
#[stable(feature = "once_wait", since = "1.86.0")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn wait_force(&self) {
|
||||
if !self.inner.is_completed() {
|
||||
self.inner.wait(true);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the current state of the `Once` instance.
|
||||
///
|
||||
/// Since this takes a mutable reference, no initialization can currently
|
||||
/// be running, so the state must be either "incomplete", "poisoned" or
|
||||
/// "complete".
|
||||
#[inline]
|
||||
pub(crate) fn state(&mut self) -> OnceExclusiveState {
|
||||
self.inner.state()
|
||||
}
|
||||
|
||||
/// Sets current state of the `Once` instance.
|
||||
///
|
||||
/// Since this takes a mutable reference, no initialization can currently
|
||||
/// be running, so the state must be either "incomplete", "poisoned" or
|
||||
/// "complete".
|
||||
#[inline]
|
||||
pub(crate) fn set_state(&mut self, new_state: OnceExclusiveState) {
|
||||
self.inner.set_state(new_state);
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
|
||||
impl fmt::Debug for Once {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("Once").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl OnceState {
|
||||
/// Returns `true` if the associated [`Once`] was poisoned prior to the
|
||||
/// invocation of the closure passed to [`Once::call_once_force()`].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// A poisoned [`Once`]:
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Once;
|
||||
/// use std::thread;
|
||||
///
|
||||
/// static INIT: Once = Once::new();
|
||||
///
|
||||
/// // poison the once
|
||||
/// let handle = thread::spawn(|| {
|
||||
/// INIT.call_once(|| panic!());
|
||||
/// });
|
||||
/// assert!(handle.join().is_err());
|
||||
///
|
||||
/// INIT.call_once_force(|state| {
|
||||
/// assert!(state.is_poisoned());
|
||||
/// });
|
||||
/// ```
|
||||
///
|
||||
/// An unpoisoned [`Once`]:
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::Once;
|
||||
///
|
||||
/// static INIT: Once = Once::new();
|
||||
///
|
||||
/// INIT.call_once_force(|state| {
|
||||
/// assert!(!state.is_poisoned());
|
||||
/// });
|
||||
#[stable(feature = "once_poison", since = "1.51.0")]
|
||||
#[inline]
|
||||
pub fn is_poisoned(&self) -> bool {
|
||||
self.inner.is_poisoned()
|
||||
}
|
||||
|
||||
/// Poison the associated [`Once`] without explicitly panicking.
|
||||
// NOTE: This is currently only exposed for `OnceLock`.
|
||||
#[inline]
|
||||
pub(crate) fn poison(&self) {
|
||||
self.inner.poison();
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for OnceState {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Only the poison flag is part of the observable contract; the rest
        // of the state is an implementation detail and is not printed.
        f.debug_struct("OnceState").field("poisoned", &self.is_poisoned()).finish()
    }
}
|
||||
@@ -1,709 +0,0 @@
|
||||
use super::once::OnceExclusiveState;
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::fmt;
|
||||
use crate::marker::PhantomData;
|
||||
use crate::mem::MaybeUninit;
|
||||
use crate::panic::{RefUnwindSafe, UnwindSafe};
|
||||
use crate::sync::Once;
|
||||
|
||||
/// A synchronization primitive which can nominally be written to only once.
|
||||
///
|
||||
/// This type is a thread-safe [`OnceCell`], and can be used in statics.
|
||||
/// In many simple cases, you can use [`LazyLock<T, F>`] instead to get the benefits of this type
|
||||
/// with less effort: `LazyLock<T, F>` "looks like" `&T` because it initializes with `F` on deref!
|
||||
/// Where OnceLock shines is when LazyLock is too simple to support a given case, as LazyLock
|
||||
/// doesn't allow additional inputs to its function after you call [`LazyLock::new(|| ...)`].
|
||||
///
|
||||
/// A `OnceLock` can be thought of as a safe abstraction over uninitialized data that becomes
|
||||
/// initialized once written.
|
||||
///
|
||||
/// Unlike [`Mutex`](crate::sync::Mutex), `OnceLock` is never poisoned on panic.
|
||||
///
|
||||
/// [`OnceCell`]: crate::cell::OnceCell
|
||||
/// [`LazyLock<T, F>`]: crate::sync::LazyLock
|
||||
/// [`LazyLock::new(|| ...)`]: crate::sync::LazyLock::new
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// Writing to a `OnceLock` from a separate thread:
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// static CELL: OnceLock<usize> = OnceLock::new();
|
||||
///
|
||||
/// // `OnceLock` has not been written to yet.
|
||||
/// assert!(CELL.get().is_none());
|
||||
///
|
||||
/// // Spawn a thread and write to `OnceLock`.
|
||||
/// std::thread::spawn(|| {
|
||||
/// let value = CELL.get_or_init(|| 12345);
|
||||
/// assert_eq!(value, &12345);
|
||||
/// })
|
||||
/// .join()
|
||||
/// .unwrap();
|
||||
///
|
||||
/// // `OnceLock` now contains the value.
|
||||
/// assert_eq!(
|
||||
/// CELL.get(),
|
||||
/// Some(&12345),
|
||||
/// );
|
||||
/// ```
|
||||
///
|
||||
/// You can use `OnceLock` to implement a type that requires "append-only" logic:
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{OnceLock, atomic::{AtomicU32, Ordering}};
|
||||
/// use std::thread;
|
||||
///
|
||||
/// struct OnceList<T> {
|
||||
/// data: OnceLock<T>,
|
||||
/// next: OnceLock<Box<OnceList<T>>>,
|
||||
/// }
|
||||
/// impl<T> OnceList<T> {
|
||||
/// const fn new() -> OnceList<T> {
|
||||
/// OnceList { data: OnceLock::new(), next: OnceLock::new() }
|
||||
/// }
|
||||
/// fn push(&self, value: T) {
|
||||
/// // FIXME: this impl is concise, but is also slow for long lists or many threads.
|
||||
/// // as an exercise, consider how you might improve on it while preserving the behavior
|
||||
/// if let Err(value) = self.data.set(value) {
|
||||
/// let next = self.next.get_or_init(|| Box::new(OnceList::new()));
|
||||
/// next.push(value)
|
||||
/// };
|
||||
/// }
|
||||
/// fn contains(&self, example: &T) -> bool
|
||||
/// where
|
||||
/// T: PartialEq,
|
||||
/// {
|
||||
/// self.data.get().map(|item| item == example).filter(|v| *v).unwrap_or_else(|| {
|
||||
/// self.next.get().map(|next| next.contains(example)).unwrap_or(false)
|
||||
/// })
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// // Let's exercise this new Sync append-only list by doing a little counting
|
||||
/// static LIST: OnceList<u32> = OnceList::new();
|
||||
/// static COUNTER: AtomicU32 = AtomicU32::new(0);
|
||||
///
|
||||
/// # const LEN: u32 = if cfg!(miri) { 50 } else { 1000 };
|
||||
/// # /*
|
||||
/// const LEN: u32 = 1000;
|
||||
/// # */
|
||||
/// thread::scope(|s| {
|
||||
/// for _ in 0..thread::available_parallelism().unwrap().get() {
|
||||
/// s.spawn(|| {
|
||||
/// while let i @ 0..LEN = COUNTER.fetch_add(1, Ordering::Relaxed) {
|
||||
/// LIST.push(i);
|
||||
/// }
|
||||
/// });
|
||||
/// }
|
||||
/// });
|
||||
///
|
||||
/// for i in 0..LEN {
|
||||
/// assert!(LIST.contains(&i));
|
||||
/// }
|
||||
///
|
||||
/// ```
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
pub struct OnceLock<T> {
|
||||
// FIXME(nonpoison_once): switch to nonpoison version once it is available
|
||||
once: Once,
|
||||
// Whether or not the value is initialized is tracked by `once.is_completed()`.
|
||||
value: UnsafeCell<MaybeUninit<T>>,
|
||||
/// `PhantomData` to make sure dropck understands we're dropping T in our Drop impl.
|
||||
///
|
||||
/// ```compile_fail,E0597
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// struct A<'a>(&'a str);
|
||||
///
|
||||
/// impl<'a> Drop for A<'a> {
|
||||
/// fn drop(&mut self) {}
|
||||
/// }
|
||||
///
|
||||
/// let cell = OnceLock::new();
|
||||
/// {
|
||||
/// let s = String::new();
|
||||
/// let _ = cell.set(A(&s));
|
||||
/// }
|
||||
/// ```
|
||||
_marker: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T> OnceLock<T> {
|
||||
/// Creates a new uninitialized cell.
|
||||
#[inline]
|
||||
#[must_use]
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
#[rustc_const_stable(feature = "once_cell", since = "1.70.0")]
|
||||
pub const fn new() -> OnceLock<T> {
|
||||
OnceLock {
|
||||
once: Once::new(),
|
||||
value: UnsafeCell::new(MaybeUninit::uninit()),
|
||||
_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the reference to the underlying value.
|
||||
///
|
||||
/// Returns `None` if the cell is uninitialized, or being initialized.
|
||||
/// This method never blocks.
|
||||
#[inline]
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn get(&self) -> Option<&T> {
|
||||
if self.initialized() {
|
||||
// Safe b/c checked initialized
|
||||
Some(unsafe { self.get_unchecked() })
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the mutable reference to the underlying value.
|
||||
///
|
||||
/// Returns `None` if the cell is uninitialized.
|
||||
///
|
||||
/// This method never blocks. Since it borrows the `OnceLock` mutably,
|
||||
/// it is statically guaranteed that no active borrows to the `OnceLock`
|
||||
/// exist, including from other threads.
|
||||
#[inline]
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
pub fn get_mut(&mut self) -> Option<&mut T> {
|
||||
if self.initialized_mut() {
|
||||
// Safe b/c checked initialized and we have a unique access
|
||||
Some(unsafe { self.get_unchecked_mut() })
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Blocks the current thread until the cell is initialized.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// Waiting for a computation on another thread to finish:
|
||||
/// ```rust
|
||||
/// use std::thread;
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// let value = OnceLock::new();
|
||||
///
|
||||
/// thread::scope(|s| {
|
||||
/// s.spawn(|| value.set(1 + 1));
|
||||
///
|
||||
/// let result = value.wait();
|
||||
/// assert_eq!(result, &2);
|
||||
/// })
|
||||
/// ```
|
||||
#[inline]
|
||||
#[stable(feature = "once_wait", since = "1.86.0")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn wait(&self) -> &T {
|
||||
self.once.wait_force();
|
||||
|
||||
unsafe { self.get_unchecked() }
|
||||
}
|
||||
|
||||
/// Initializes the contents of the cell to `value`.
|
||||
///
|
||||
/// May block if another thread is currently attempting to initialize the cell. The cell is
|
||||
/// guaranteed to contain a value when `set` returns, though not necessarily the one provided.
|
||||
///
|
||||
/// Returns `Ok(())` if the cell was uninitialized and
|
||||
/// `Err(value)` if the cell was already initialized.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// static CELL: OnceLock<i32> = OnceLock::new();
|
||||
///
|
||||
/// fn main() {
|
||||
/// assert!(CELL.get().is_none());
|
||||
///
|
||||
/// std::thread::spawn(|| {
|
||||
/// assert_eq!(CELL.set(92), Ok(()));
|
||||
/// }).join().unwrap();
|
||||
///
|
||||
/// assert_eq!(CELL.set(62), Err(62));
|
||||
/// assert_eq!(CELL.get(), Some(&92));
|
||||
/// }
|
||||
/// ```
|
||||
#[inline]
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn set(&self, value: T) -> Result<(), T> {
|
||||
match self.try_insert(value) {
|
||||
Ok(_) => Ok(()),
|
||||
Err((_, value)) => Err(value),
|
||||
}
|
||||
}
|
||||
|
||||
/// Initializes the contents of the cell to `value` if the cell was uninitialized,
|
||||
/// then returns a reference to it.
|
||||
///
|
||||
/// May block if another thread is currently attempting to initialize the cell. The cell is
|
||||
/// guaranteed to contain a value when `try_insert` returns, though not necessarily the
|
||||
/// one provided.
|
||||
///
|
||||
/// Returns `Ok(&value)` if the cell was uninitialized and
|
||||
/// `Err((¤t_value, value))` if it was already initialized.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(once_cell_try_insert)]
|
||||
///
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// static CELL: OnceLock<i32> = OnceLock::new();
|
||||
///
|
||||
/// fn main() {
|
||||
/// assert!(CELL.get().is_none());
|
||||
///
|
||||
/// std::thread::spawn(|| {
|
||||
/// assert_eq!(CELL.try_insert(92), Ok(&92));
|
||||
/// }).join().unwrap();
|
||||
///
|
||||
/// assert_eq!(CELL.try_insert(62), Err((&92, 62)));
|
||||
/// assert_eq!(CELL.get(), Some(&92));
|
||||
/// }
|
||||
/// ```
|
||||
#[inline]
|
||||
#[unstable(feature = "once_cell_try_insert", issue = "116693")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn try_insert(&self, value: T) -> Result<&T, (&T, T)> {
|
||||
let mut value = Some(value);
|
||||
let res = self.get_or_init(|| value.take().unwrap());
|
||||
match value {
|
||||
None => Ok(res),
|
||||
Some(value) => Err((res, value)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the contents of the cell, initializing it to `f()` if the cell
|
||||
/// was uninitialized.
|
||||
///
|
||||
/// Many threads may call `get_or_init` concurrently with different
|
||||
/// initializing functions, but it is guaranteed that only one function
|
||||
/// will be executed if the function doesn't panic.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// If `f()` panics, the panic is propagated to the caller, and the cell
|
||||
/// remains uninitialized.
|
||||
///
|
||||
/// It is an error to reentrantly initialize the cell from `f`. The
|
||||
/// exact outcome is unspecified. Current implementation deadlocks, but
|
||||
/// this may be changed to a panic in the future.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// let cell = OnceLock::new();
|
||||
/// let value = cell.get_or_init(|| 92);
|
||||
/// assert_eq!(value, &92);
|
||||
/// let value = cell.get_or_init(|| unreachable!());
|
||||
/// assert_eq!(value, &92);
|
||||
/// ```
|
||||
#[inline]
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn get_or_init<F>(&self, f: F) -> &T
|
||||
where
|
||||
F: FnOnce() -> T,
|
||||
{
|
||||
match self.get_or_try_init(|| Ok::<T, !>(f())) {
|
||||
Ok(val) => val,
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the mutable reference of the contents of the cell, initializing
|
||||
/// it to `f()` if the cell was uninitialized.
|
||||
///
|
||||
/// This method never blocks. Since it borrows the `OnceLock` mutably,
|
||||
/// it is statically guaranteed that no active borrows to the `OnceLock`
|
||||
/// exist, including from other threads.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// If `f()` panics, the panic is propagated to the caller, and the cell
|
||||
/// remains uninitialized.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(once_cell_get_mut)]
|
||||
///
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// let mut cell = OnceLock::new();
|
||||
/// let value = cell.get_mut_or_init(|| 92);
|
||||
/// assert_eq!(*value, 92);
|
||||
///
|
||||
/// *value += 2;
|
||||
/// assert_eq!(*value, 94);
|
||||
///
|
||||
/// let value = cell.get_mut_or_init(|| unreachable!());
|
||||
/// assert_eq!(*value, 94);
|
||||
/// ```
|
||||
#[inline]
|
||||
#[unstable(feature = "once_cell_get_mut", issue = "121641")]
|
||||
pub fn get_mut_or_init<F>(&mut self, f: F) -> &mut T
|
||||
where
|
||||
F: FnOnce() -> T,
|
||||
{
|
||||
match self.get_mut_or_try_init(|| Ok::<T, !>(f())) {
|
||||
Ok(val) => val,
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the contents of the cell, initializing it to `f()` if
|
||||
/// the cell was uninitialized. If the cell was uninitialized
|
||||
/// and `f()` failed, an error is returned.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// If `f()` panics, the panic is propagated to the caller, and
|
||||
/// the cell remains uninitialized.
|
||||
///
|
||||
/// It is an error to reentrantly initialize the cell from `f`.
|
||||
/// The exact outcome is unspecified. Current implementation
|
||||
/// deadlocks, but this may be changed to a panic in the future.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(once_cell_try)]
|
||||
///
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// let cell = OnceLock::new();
|
||||
/// assert_eq!(cell.get_or_try_init(|| Err(())), Err(()));
|
||||
/// assert!(cell.get().is_none());
|
||||
/// let value = cell.get_or_try_init(|| -> Result<i32, ()> {
|
||||
/// Ok(92)
|
||||
/// });
|
||||
/// assert_eq!(value, Ok(&92));
|
||||
/// assert_eq!(cell.get(), Some(&92))
|
||||
/// ```
|
||||
#[inline]
|
||||
#[unstable(feature = "once_cell_try", issue = "109737")]
|
||||
#[rustc_should_not_be_called_on_const_items]
|
||||
pub fn get_or_try_init<F, E>(&self, f: F) -> Result<&T, E>
|
||||
where
|
||||
F: FnOnce() -> Result<T, E>,
|
||||
{
|
||||
// Fast path check
|
||||
// NOTE: We need to perform an acquire on the state in this method
|
||||
// in order to correctly synchronize `LazyLock::force`. This is
|
||||
// currently done by calling `self.get()`, which in turn calls
|
||||
// `self.initialized()`, which in turn performs the acquire.
|
||||
if let Some(value) = self.get() {
|
||||
return Ok(value);
|
||||
}
|
||||
self.initialize(f)?;
|
||||
|
||||
// SAFETY: The inner value has been initialized
|
||||
Ok(unsafe { self.get_unchecked() })
|
||||
}
|
||||
|
||||
/// Gets the mutable reference of the contents of the cell, initializing
|
||||
/// it to `f()` if the cell was uninitialized. If the cell was uninitialized
|
||||
/// and `f()` failed, an error is returned.
|
||||
///
|
||||
/// This method never blocks. Since it borrows the `OnceLock` mutably,
|
||||
/// it is statically guaranteed that no active borrows to the `OnceLock`
|
||||
/// exist, including from other threads.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// If `f()` panics, the panic is propagated to the caller, and
|
||||
/// the cell remains uninitialized.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(once_cell_get_mut)]
|
||||
///
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// let mut cell: OnceLock<u32> = OnceLock::new();
|
||||
///
|
||||
/// // Failed attempts to initialize the cell do not change its contents
|
||||
/// assert!(cell.get_mut_or_try_init(|| "not a number!".parse()).is_err());
|
||||
/// assert!(cell.get().is_none());
|
||||
///
|
||||
/// let value = cell.get_mut_or_try_init(|| "1234".parse());
|
||||
/// assert_eq!(value, Ok(&mut 1234));
|
||||
/// *value.unwrap() += 2;
|
||||
/// assert_eq!(cell.get(), Some(&1236))
|
||||
/// ```
|
||||
#[inline]
|
||||
#[unstable(feature = "once_cell_get_mut", issue = "121641")]
|
||||
pub fn get_mut_or_try_init<F, E>(&mut self, f: F) -> Result<&mut T, E>
|
||||
where
|
||||
F: FnOnce() -> Result<T, E>,
|
||||
{
|
||||
if self.get_mut().is_none() {
|
||||
self.initialize(f)?;
|
||||
}
|
||||
|
||||
// SAFETY: The inner value has been initialized
|
||||
Ok(unsafe { self.get_unchecked_mut() })
|
||||
}
|
||||
|
||||
/// Consumes the `OnceLock`, returning the wrapped value. Returns
|
||||
/// `None` if the cell was uninitialized.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// let cell: OnceLock<String> = OnceLock::new();
|
||||
/// assert_eq!(cell.into_inner(), None);
|
||||
///
|
||||
/// let cell = OnceLock::new();
|
||||
/// cell.set("hello".to_string()).unwrap();
|
||||
/// assert_eq!(cell.into_inner(), Some("hello".to_string()));
|
||||
/// ```
|
||||
#[inline]
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
pub fn into_inner(mut self) -> Option<T> {
|
||||
self.take()
|
||||
}
|
||||
|
||||
/// Takes the value out of this `OnceLock`, moving it back to an uninitialized state.
|
||||
///
|
||||
/// Has no effect and returns `None` if the `OnceLock` was uninitialized.
|
||||
///
|
||||
/// Since this method borrows the `OnceLock` mutably, it is statically guaranteed that
|
||||
/// no active borrows to the `OnceLock` exist, including from other threads.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// let mut cell: OnceLock<String> = OnceLock::new();
|
||||
/// assert_eq!(cell.take(), None);
|
||||
///
|
||||
/// let mut cell = OnceLock::new();
|
||||
/// cell.set("hello".to_string()).unwrap();
|
||||
/// assert_eq!(cell.take(), Some("hello".to_string()));
|
||||
/// assert_eq!(cell.get(), None);
|
||||
/// ```
|
||||
#[inline]
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
pub fn take(&mut self) -> Option<T> {
|
||||
if self.initialized_mut() {
|
||||
self.once = Once::new();
|
||||
// SAFETY: `self.value` is initialized and contains a valid `T`.
|
||||
// `self.once` is reset, so `initialized()` will be false again
|
||||
// which prevents the value from being read twice.
|
||||
unsafe { Some(self.value.get_mut().assume_init_read()) }
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
    /// Returns whether the cell has been fully initialized.
    ///
    /// Performs an atomic acquire so that a `true` result synchronizes with
    /// the initializing write (relied upon by `get_or_try_init`).
    #[inline]
    fn initialized(&self) -> bool {
        self.once.is_completed()
    }
|
||||
|
||||
#[inline]
|
||||
fn initialized_mut(&mut self) -> bool {
|
||||
// `state()` does not perform an atomic load, so prefer it over `is_complete()`.
|
||||
let state = self.once.state();
|
||||
match state {
|
||||
OnceExclusiveState::Complete => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
    /// Runs `f` under the `Once`, writing its `Ok` result into the slot.
    ///
    /// On `Err`, the error is returned to the caller and the `Once` is
    /// poisoned so the cell remains uninitialized.
    #[cold]
    #[optimize(size)]
    fn initialize<F, E>(&self, f: F) -> Result<(), E>
    where
        F: FnOnce() -> Result<T, E>,
    {
        let mut res: Result<(), E> = Ok(());
        let slot = &self.value;

        // Ignore poisoning from other threads
        // If another thread panics, then we'll be able to run our closure
        self.once.call_once_force(|p| {
            match f() {
                Ok(value) => {
                    // SAFETY: the `Once` runs this closure at most once at a
                    // time and the cell is not yet marked complete, so nothing
                    // else can be reading or writing the slot concurrently.
                    unsafe { (&mut *slot.get()).write(value) };
                }
                Err(e) => {
                    res = Err(e);

                    // Treat the underlying `Once` as poisoned since we
                    // failed to initialize our value.
                    p.poison();
                }
            }
        });
        res
    }
|
||||
|
||||
    /// # Safety
    ///
    /// The cell must be initialized
    #[inline]
    unsafe fn get_unchecked(&self) -> &T {
        debug_assert!(self.initialized());
        // SAFETY: the caller guarantees the cell is initialized, so the
        // `MaybeUninit` contains a valid `T`; mutation after initialization
        // requires `&mut self`, so this shared borrow cannot race.
        unsafe { (&*self.value.get()).assume_init_ref() }
    }
|
||||
|
||||
    /// # Safety
    ///
    /// The cell must be initialized
    #[inline]
    unsafe fn get_unchecked_mut(&mut self) -> &mut T {
        debug_assert!(self.initialized_mut());
        // SAFETY: the caller guarantees the cell is initialized, and `&mut
        // self` gives us exclusive access to the slot.
        unsafe { self.value.get_mut().assume_init_mut() }
    }
|
||||
}
|
||||
|
||||
// Why do we need `T: Send`?
// Thread A creates a `OnceLock` and shares it with
// scoped thread B, which fills the cell, which is
// then destroyed by A. That is, destructor observes
// a sent value.
#[stable(feature = "once_cell", since = "1.70.0")]
unsafe impl<T: Sync + Send> Sync for OnceLock<T> {}
#[stable(feature = "once_cell", since = "1.70.0")]
unsafe impl<T: Send> Send for OnceLock<T> {}

// A panicking initializer leaves the cell uninitialized rather than torn
// (see `initialize`), so observing a `OnceLock` after a panic cannot expose
// broken invariants in the cell itself.
#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: RefUnwindSafe + UnwindSafe> RefUnwindSafe for OnceLock<T> {}
#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: UnwindSafe> UnwindSafe for OnceLock<T> {}
|
||||
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
#[rustc_const_unstable(feature = "const_default", issue = "143894")]
impl<T> const Default for OnceLock<T> {
    /// Creates a new uninitialized cell.
    ///
    /// # Example
    ///
    /// ```
    /// use std::sync::OnceLock;
    ///
    /// fn main() {
    ///     assert_eq!(OnceLock::<()>::new(), OnceLock::default());
    /// }
    /// ```
    #[inline]
    fn default() -> OnceLock<T> {
        OnceLock::new()
    }
}
|
||||
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: fmt::Debug> fmt::Debug for OnceLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut d = f.debug_tuple("OnceLock");
        match self.get() {
            Some(v) => d.field(v),
            // `get` returns `None` both for an empty cell and while an
            // initialization is still in flight; both print as `<uninit>`.
            None => d.field(&format_args!("<uninit>")),
        };
        d.finish()
    }
}
|
||||
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
impl<T: Clone> Clone for OnceLock<T> {
|
||||
#[inline]
|
||||
fn clone(&self) -> OnceLock<T> {
|
||||
let cell = Self::new();
|
||||
if let Some(value) = self.get() {
|
||||
match cell.set(value.clone()) {
|
||||
Ok(()) => (),
|
||||
Err(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
cell
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
|
||||
impl<T> From<T> for OnceLock<T> {
|
||||
/// Creates a new cell with its contents set to `value`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::OnceLock;
|
||||
///
|
||||
/// # fn main() -> Result<(), i32> {
|
||||
/// let a = OnceLock::from(3);
|
||||
/// let b = OnceLock::new();
|
||||
/// b.set(3)?;
|
||||
/// assert_eq!(a, b);
|
||||
/// Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
#[inline]
|
||||
fn from(value: T) -> Self {
|
||||
let cell = Self::new();
|
||||
match cell.set(value) {
|
||||
Ok(()) => cell,
|
||||
Err(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: PartialEq> PartialEq for OnceLock<T> {
    /// Equality for two `OnceLock`s.
    ///
    /// Two `OnceLock`s are equal if they either both contain values and their
    /// values are equal, or if neither contains a value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::OnceLock;
    ///
    /// let five = OnceLock::new();
    /// five.set(5).unwrap();
    ///
    /// let also_five = OnceLock::new();
    /// also_five.set(5).unwrap();
    ///
    /// assert!(five == also_five);
    ///
    /// assert!(OnceLock::<u32>::new() == OnceLock::<u32>::new());
    /// ```
    #[inline]
    fn eq(&self, other: &OnceLock<T>) -> bool {
        // `Option::eq` handles the empty/full combinations for us.
        self.get() == other.get()
    }
}
|
||||
|
||||
// `eq` above defers to `Option<&T>::eq`, which is a total equivalence
// relation whenever `T: Eq`.
#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: Eq> Eq for OnceLock<T> {}
|
||||
|
||||
#[stable(feature = "once_cell", since = "1.70.0")]
unsafe impl<#[may_dangle] T> Drop for OnceLock<T> {
    #[inline]
    fn drop(&mut self) {
        // `MaybeUninit` never drops its contents on its own, so the value
        // must be dropped manually — but only if it was ever written.
        if self.initialized_mut() {
            // SAFETY: The cell is initialized and being dropped, so it can't
            // be accessed again. We also don't touch the `T` other than
            // dropping it, which validates our usage of #[may_dangle].
            unsafe { self.value.get_mut().assume_init_drop() };
        }
    }
}
|
||||
@@ -1,389 +0,0 @@
|
||||
//! Synchronization objects that employ poisoning.
|
||||
//!
|
||||
//! # Poisoning
|
||||
//!
|
||||
//! All synchronization objects in this module implement a strategy called
|
||||
//! "poisoning" where a primitive becomes poisoned if it recognizes that some
|
||||
//! thread has panicked while holding the exclusive access granted by the
|
||||
//! primitive. This information is then propagated to all other threads
|
||||
//! to signify that the data protected by this primitive is likely tainted
|
||||
//! (some invariant is not being upheld).
|
||||
//!
|
||||
//! The specifics of how this "poisoned" state affects other threads and whether
|
||||
//! the panics are recognized reliably or on a best-effort basis depend on the
|
||||
//! primitive. See [Overview](#overview) below.
|
||||
//!
|
||||
//! The synchronization objects in this module have alternative implementations that do not employ
|
||||
//! poisoning in the [`std::sync::nonpoison`] module.
|
||||
//!
|
||||
//! [`std::sync::nonpoison`]: crate::sync::nonpoison
|
||||
//!
|
||||
//! # Overview
|
||||
//!
|
||||
//! Below is a list of synchronization objects provided by this module
|
||||
//! with a high-level overview for each object and a description
|
||||
//! of how it employs "poisoning".
|
||||
//!
|
||||
//! - [`Condvar`]: Condition Variable, providing the ability to block
|
||||
//! a thread while waiting for an event to occur.
|
||||
//!
|
||||
//! Condition variables are typically associated with
|
||||
//! a boolean predicate (a condition) and a mutex.
|
||||
//! This implementation is associated with [`poison::Mutex`](Mutex),
|
||||
//! which employs poisoning.
|
||||
//! For this reason, [`Condvar::wait()`] will return a [`LockResult`],
|
||||
//! just like [`poison::Mutex::lock()`](Mutex::lock) does.
|
||||
//!
|
||||
//! - [`Mutex`]: Mutual Exclusion mechanism, which ensures that at
|
||||
//! most one thread at a time is able to access some data.
|
||||
//!
|
||||
//! Panicking while holding the lock typically poisons the mutex, but it is
|
||||
//! not guaranteed to detect this condition in all circumstances.
|
||||
//! [`Mutex::lock()`] returns a [`LockResult`], providing a way to deal with
|
||||
//! the poisoned state. See [`Mutex`'s documentation](Mutex#poisoning) for more.
|
||||
//!
|
||||
//! - [`RwLock`]: Provides a mutual exclusion mechanism which allows
|
||||
//! multiple readers at the same time, while allowing only one
|
||||
//! writer at a time. In some cases, this can be more efficient than
|
||||
//! a mutex.
|
||||
//!
|
||||
//! This implementation, like [`Mutex`], usually becomes poisoned on a panic.
|
||||
//! Note, however, that an `RwLock` may only be poisoned if a panic occurs
|
||||
//! while it is locked exclusively (write mode). If a panic occurs in any reader,
|
||||
//! then the lock will not be poisoned.
|
||||
//!
|
||||
//! Note that the [`Once`] type also employs poisoning, but since it has non-poisoning `force`
|
||||
//! methods available on it, there is no separate `nonpoison` and `poison` version.
|
||||
//!
|
||||
//! [`Once`]: crate::sync::Once
|
||||
|
||||
// If we are not unwinding, `PoisonError` is uninhabited.
|
||||
#![cfg_attr(not(panic = "unwind"), expect(unreachable_code))]
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use self::condvar::Condvar;
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
pub use self::mutex::MappedMutexGuard;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use self::mutex::{Mutex, MutexGuard};
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
pub use self::rwlock::{MappedRwLockReadGuard, MappedRwLockWriteGuard};
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
|
||||
use crate::error::Error;
|
||||
use crate::fmt;
|
||||
#[cfg(panic = "unwind")]
|
||||
use crate::sync::atomic::{Atomic, AtomicBool, Ordering};
|
||||
#[cfg(panic = "unwind")]
|
||||
use crate::thread;
|
||||
|
||||
mod condvar;
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
mod mutex;
|
||||
mod rwlock;
|
||||
|
||||
/// Records whether a panic occurred while the associated lock was held;
/// this is the state backing lock "poisoning".
pub(crate) struct Flag {
    // With `panic = "abort"` a panic can never be observed by another
    // thread, so the flag is compiled away entirely.
    #[cfg(panic = "unwind")]
    failed: Atomic<bool>,
}
|
||||
|
||||
// Note that the Ordering uses to access the `failed` field of `Flag` below is
|
||||
// always `Relaxed`, and that's because this isn't actually protecting any data,
|
||||
// it's just a flag whether we've panicked or not.
|
||||
//
|
||||
// The actual location that this matters is when a mutex is **locked** which is
|
||||
// where we have external synchronization ensuring that we see memory
|
||||
// reads/writes to this flag.
|
||||
//
|
||||
// As a result, if it matters, we should see the correct value for `failed` in
|
||||
// all cases.
|
||||
|
||||
impl Flag {
    /// Creates a new, unpoisoned flag.
    #[inline]
    pub const fn new() -> Flag {
        Flag {
            #[cfg(panic = "unwind")]
            failed: AtomicBool::new(false),
        }
    }

    /// Checks the flag for an unguarded borrow, where we only care about existing poison.
    #[inline]
    pub fn borrow(&self) -> LockResult<()> {
        if self.get() { Err(PoisonError::new(())) } else { Ok(()) }
    }

    /// Checks the flag for a guarded borrow, where we may also set poison when `done`.
    #[inline]
    pub fn guard(&self) -> LockResult<Guard> {
        let ret = Guard {
            // Record whether the thread was *already* panicking when the
            // guard was created, so `done` only poisons on new panics.
            #[cfg(panic = "unwind")]
            panicking: thread::panicking(),
        };
        if self.get() { Err(PoisonError::new(ret)) } else { Ok(ret) }
    }

    /// Marks the end of the guarded region: sets the poison flag if a panic
    /// started while `guard` was live.
    #[inline]
    #[cfg(panic = "unwind")]
    pub fn done(&self, guard: &Guard) {
        if !guard.panicking && thread::panicking() {
            self.failed.store(true, Ordering::Relaxed);
        }
    }

    // Without unwinding, a panic aborts the process; poisoning is moot.
    #[inline]
    #[cfg(not(panic = "unwind"))]
    pub fn done(&self, _guard: &Guard) {}

    #[inline]
    #[cfg(panic = "unwind")]
    pub fn get(&self) -> bool {
        self.failed.load(Ordering::Relaxed)
    }

    #[inline(always)]
    #[cfg(not(panic = "unwind"))]
    pub fn get(&self) -> bool {
        false
    }

    /// Clears any recorded poison.
    #[inline]
    pub fn clear(&self) {
        #[cfg(panic = "unwind")]
        self.failed.store(false, Ordering::Relaxed)
    }
}
|
||||
|
||||
/// Token returned by [`Flag::guard`] and consumed by [`Flag::done`].
#[derive(Clone)]
pub(crate) struct Guard {
    // Whether the current thread was already panicking when this guard was
    // created; used by `Flag::done` to distinguish new panics.
    #[cfg(panic = "unwind")]
    panicking: bool,
}
|
||||
|
||||
/// A type of error which can be returned whenever a lock is acquired.
///
/// Both [`Mutex`]es and [`RwLock`]s are poisoned whenever a thread fails while the lock
/// is held. The precise semantics for when a lock is poisoned is documented on
/// each lock. For a lock in the poisoned state, unless the state is cleared manually,
/// all future acquisitions will return this error.
///
/// # Examples
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let mutex = Arc::new(Mutex::new(1));
///
/// // poison the mutex
/// let c_mutex = Arc::clone(&mutex);
/// let _ = thread::spawn(move || {
///     let mut data = c_mutex.lock().unwrap();
///     *data = 2;
///     panic!();
/// }).join();
///
/// match mutex.lock() {
///     Ok(_) => unreachable!(),
///     Err(p_err) => {
///         let data = p_err.get_ref();
///         println!("recovered: {data}");
///     }
/// };
/// ```
/// [`Mutex`]: crate::sync::Mutex
/// [`RwLock`]: crate::sync::RwLock
#[stable(feature = "rust1", since = "1.0.0")]
pub struct PoisonError<T> {
    // The value handed back to the caller despite the poison (e.g. a guard).
    data: T,
    // If we are not unwinding, a panic can never poison a lock, so this
    // field makes `PoisonError` uninhabited (see the module-level
    // `cfg_attr(not(panic = "unwind"), ...)` above).
    #[cfg(not(panic = "unwind"))]
    _never: !,
}
|
||||
|
||||
/// An enumeration of possible errors associated with a [`TryLockResult`] which
/// can occur while trying to acquire a lock, from the [`try_lock`] method on a
/// [`Mutex`] or the [`try_read`] and [`try_write`] methods on an [`RwLock`].
///
/// [`try_lock`]: crate::sync::Mutex::try_lock
/// [`try_read`]: crate::sync::RwLock::try_read
/// [`try_write`]: crate::sync::RwLock::try_write
/// [`Mutex`]: crate::sync::Mutex
/// [`RwLock`]: crate::sync::RwLock
#[stable(feature = "rust1", since = "1.0.0")]
pub enum TryLockError<T> {
    /// The lock could not be acquired because another thread failed while holding
    /// the lock.
    // With panic="abort" the `PoisonError` payload is uninhabited, so this
    // variant can never be constructed in that configuration.
    #[stable(feature = "rust1", since = "1.0.0")]
    Poisoned(#[stable(feature = "rust1", since = "1.0.0")] PoisonError<T>),
    /// The lock could not be acquired at this time because the operation would
    /// otherwise block.
    #[stable(feature = "rust1", since = "1.0.0")]
    WouldBlock,
}
|
||||
|
||||
/// A type alias for the result of a lock method which can be poisoned.
///
/// The [`Ok`] variant of this result indicates that the primitive was not
/// poisoned, and the operation result is contained within. The [`Err`] variant indicates
/// that the primitive was poisoned. Note that the [`Err`] variant *also* carries
/// an associated value assigned by the lock method, and it can be acquired through the
/// [`into_inner`] method. The semantics of the associated value depends on the corresponding
/// lock method.
///
/// [`into_inner`]: PoisonError::into_inner
#[stable(feature = "rust1", since = "1.0.0")]
pub type LockResult<T> = Result<T, PoisonError<T>>;
|
||||
|
||||
/// A type alias for the result of a nonblocking locking method.
///
/// For more information, see [`LockResult`]. A `TryLockResult` doesn't
/// necessarily hold the associated guard in the [`Err`] type as the lock might not
/// have been acquired for other reasons.
#[stable(feature = "rust1", since = "1.0.0")]
pub type TryLockResult<Guard> = Result<Guard, TryLockError<Guard>>;
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Debug for PoisonError<T> {
    // Prints `PoisonError { .. }` without the payload: there is no
    // `T: Debug` bound, so `data` cannot be formatted here.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PoisonError").finish_non_exhaustive()
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Display for PoisonError<T> {
    // Fixed message; delegating to `str`'s `Display` keeps the formatter's
    // width/fill flags honored.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        "poisoned lock: another task failed inside".fmt(f)
    }
}
|
||||
|
||||
// Marker impl only: all `Error` methods keep their defaults, so the
// description comes from the `Display` impl above.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Error for PoisonError<T> {}
|
||||
|
||||
impl<T> PoisonError<T> {
    /// Creates a `PoisonError`.
    ///
    /// This is generally created by methods like [`Mutex::lock`](crate::sync::Mutex::lock)
    /// or [`RwLock::read`](crate::sync::RwLock::read).
    ///
    /// This method may panic if std was built with `panic="abort"`.
    #[cfg(panic = "unwind")]
    #[stable(feature = "sync_poison", since = "1.2.0")]
    pub fn new(data: T) -> PoisonError<T> {
        PoisonError { data }
    }

    /// Creates a `PoisonError`.
    ///
    /// This is generally created by methods like [`Mutex::lock`](crate::sync::Mutex::lock)
    /// or [`RwLock::read`](crate::sync::RwLock::read).
    ///
    /// This method may panic if std was built with `panic="abort"`.
    #[cfg(not(panic = "unwind"))]
    #[stable(feature = "sync_poison", since = "1.2.0")]
    #[track_caller]
    pub fn new(_data: T) -> PoisonError<T> {
        // Under panic="abort" `PoisonError` is uninhabited (see the
        // `_never` field), so constructing one is impossible; the only
        // valid implementation is to panic at the caller's location.
        panic!("PoisonError created in a libstd built with panic=\"abort\"")
    }

    /// Consumes this error indicating that a lock is poisoned, returning the
    /// associated data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::HashSet;
    /// use std::sync::{Arc, Mutex};
    /// use std::thread;
    ///
    /// let mutex = Arc::new(Mutex::new(HashSet::new()));
    ///
    /// // poison the mutex
    /// let c_mutex = Arc::clone(&mutex);
    /// let _ = thread::spawn(move || {
    ///     let mut data = c_mutex.lock().unwrap();
    ///     data.insert(10);
    ///     panic!();
    /// }).join();
    ///
    /// let p_err = mutex.lock().unwrap_err();
    /// let data = p_err.into_inner();
    /// println!("recovered {} items", data.len());
    /// ```
    #[stable(feature = "sync_poison", since = "1.2.0")]
    pub fn into_inner(self) -> T {
        self.data
    }

    /// Reaches into this error indicating that a lock is poisoned, returning a
    /// reference to the associated data.
    #[stable(feature = "sync_poison", since = "1.2.0")]
    pub fn get_ref(&self) -> &T {
        &self.data
    }

    /// Reaches into this error indicating that a lock is poisoned, returning a
    /// mutable reference to the associated data.
    #[stable(feature = "sync_poison", since = "1.2.0")]
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.data
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
impl<T> From<PoisonError<T>> for TryLockError<T> {
|
||||
fn from(err: PoisonError<T>) -> TryLockError<T> {
|
||||
TryLockError::Poisoned(err)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Debug for TryLockError<T> {
    // Like `PoisonError`'s impl, never formats the payload: there is no
    // `T: Debug` bound.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            #[cfg(panic = "unwind")]
            TryLockError::Poisoned(..) => "Poisoned(..)".fmt(f),
            // With panic="abort" the payload is uninhabited; matching on
            // its never-typed field proves this arm unreachable to the
            // compiler without `unreachable!()`.
            #[cfg(not(panic = "unwind"))]
            TryLockError::Poisoned(ref p) => match p._never {},
            TryLockError::WouldBlock => "WouldBlock".fmt(f),
        }
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Display for TryLockError<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The match selects a `&str`; formatting it once at the end keeps
        // the formatter's flags honored for both messages.
        match *self {
            #[cfg(panic = "unwind")]
            TryLockError::Poisoned(..) => "poisoned lock: another task failed inside",
            // Statically unreachable under panic="abort": the payload type
            // is uninhabited (see `PoisonError::_never`).
            #[cfg(not(panic = "unwind"))]
            TryLockError::Poisoned(ref p) => match p._never {},
            TryLockError::WouldBlock => "try_lock failed because the operation would block",
        }
        .fmt(f)
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Error for TryLockError<T> {
    // `cause` is deprecated in favor of `source`, but is kept here for
    // backwards compatibility; the allow silences the deprecation lint.
    #[allow(deprecated)]
    fn cause(&self) -> Option<&dyn Error> {
        match *self {
            // A poisoned lock reports the underlying `PoisonError` as its cause.
            #[cfg(panic = "unwind")]
            TryLockError::Poisoned(ref p) => Some(p),
            // Statically unreachable under panic="abort": the payload type
            // is uninhabited.
            #[cfg(not(panic = "unwind"))]
            TryLockError::Poisoned(ref p) => match p._never {},
            // `WouldBlock` has no underlying cause.
            _ => None,
        }
    }
}
|
||||
|
||||
/// Applies `f` to the payload of a [`LockResult`], preserving whether the
/// result was poisoned.
///
/// Used internally to convert e.g. a `LockResult` over one guard type into a
/// `LockResult` over another without dropping the poison information.
pub(crate) fn map_result<T, U, F>(result: LockResult<T>, f: F) -> LockResult<U>
where
    F: FnOnce(T) -> U,
{
    match result {
        Ok(t) => Ok(f(t)),
        // With panic="abort", `PoisonError` is uninhabited, so the `Err`
        // arm can never be taken and is compiled out entirely.
        #[cfg(panic = "unwind")]
        Err(PoisonError { data }) => Err(PoisonError::new(f(data))),
    }
}
|
||||
@@ -1,510 +0,0 @@
|
||||
use crate::fmt;
|
||||
use crate::sync::WaitTimeoutResult;
|
||||
use crate::sync::poison::{self, LockResult, MutexGuard, PoisonError, mutex};
|
||||
use crate::sys::sync as sys;
|
||||
use crate::time::{Duration, Instant};
|
||||
|
||||
/// A Condition Variable
///
/// Condition variables represent the ability to block a thread such that it
/// consumes no CPU time while waiting for an event to occur. Condition
/// variables are typically associated with a boolean predicate (a condition)
/// and a mutex. The predicate is always verified inside of the mutex before
/// determining that a thread must block.
///
/// Functions in this module will block the current **thread** of execution.
/// Note that any attempt to use multiple mutexes on the same condition
/// variable may result in a runtime panic.
///
/// # Examples
///
/// ```
/// use std::sync::{Arc, Mutex, Condvar};
/// use std::thread;
///
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
/// let pair2 = Arc::clone(&pair);
///
/// // Inside of our lock, spawn a new thread, and then wait for it to start.
/// thread::spawn(move || {
///     let (lock, cvar) = &*pair2;
///     let mut started = lock.lock().unwrap();
///     *started = true;
///     // We notify the condvar that the value has changed.
///     cvar.notify_one();
/// });
///
/// // Wait for the thread to start up.
/// let (lock, cvar) = &*pair;
/// let mut started = lock.lock().unwrap();
/// while !*started {
///     started = cvar.wait(started).unwrap();
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Condvar {
    // Backend condition variable supplied by std's platform (`sys`) layer.
    inner: sys::Condvar,
}
|
||||
|
||||
impl Condvar {
    /// Creates a new condition variable which is ready to be waited on and
    /// notified.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Condvar;
    ///
    /// let condvar = Condvar::new();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
    #[must_use]
    #[inline]
    pub const fn new() -> Condvar {
        Condvar { inner: sys::Condvar::new() }
    }

    /// Blocks the current thread until this condition variable receives a
    /// notification.
    ///
    /// This function will atomically unlock the mutex specified (represented by
    /// `guard`) and block the current thread. This means that any calls
    /// to [`notify_one`] or [`notify_all`] which happen logically after the
    /// mutex is unlocked are candidates to wake this thread up. When this
    /// function call returns, the lock specified will have been re-acquired.
    ///
    /// Note that this function is susceptible to spurious wakeups. Condition
    /// variables normally have a boolean predicate associated with them, and
    /// the predicate must always be checked each time this function returns to
    /// protect against spurious wakeups.
    ///
    /// # Errors
    ///
    /// This function will return an error if the mutex being waited on is
    /// poisoned when this thread re-acquires the lock. For more information,
    /// see information about [poisoning] on the [`Mutex`] type.
    ///
    /// # Panics
    ///
    /// This function may [`panic!`] if it is used with more than one mutex
    /// over time.
    ///
    /// [`notify_one`]: Self::notify_one
    /// [`notify_all`]: Self::notify_all
    /// [poisoning]: super::Mutex#poisoning
    /// [`Mutex`]: super::Mutex
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    ///
    /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut started = lock.lock().unwrap();
    ///     *started = true;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_one();
    /// });
    ///
    /// // Wait for the thread to start up.
    /// let (lock, cvar) = &*pair;
    /// let mut started = lock.lock().unwrap();
    /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
    /// while !*started {
    ///     started = cvar.wait(started).unwrap();
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
        let poisoned = unsafe {
            // SAFETY: the existence of `guard` proves that the mutex is
            // currently locked by this thread for the duration of the wait.
            let lock = mutex::guard_lock(&guard);
            self.inner.wait(lock);
            // Re-check the poison flag only after the lock has been
            // re-acquired, so a panic in another thread while we slept is
            // reported to the caller.
            mutex::guard_poison(&guard).get()
        };
        if poisoned { Err(PoisonError::new(guard)) } else { Ok(guard) }
    }

    /// Blocks the current thread until the provided condition becomes false.
    ///
    /// `condition` is checked immediately; if not met (returns `true`), this
    /// will [`wait`] for the next notification then check again. This repeats
    /// until `condition` returns `false`, in which case this function returns.
    ///
    /// This function will atomically unlock the mutex specified (represented by
    /// `guard`) and block the current thread. This means that any calls
    /// to [`notify_one`] or [`notify_all`] which happen logically after the
    /// mutex is unlocked are candidates to wake this thread up. When this
    /// function call returns, the lock specified will have been re-acquired.
    ///
    /// # Errors
    ///
    /// This function will return an error if the mutex being waited on is
    /// poisoned when this thread re-acquires the lock. For more information,
    /// see information about [poisoning] on the [`Mutex`] type.
    ///
    /// [`wait`]: Self::wait
    /// [`notify_one`]: Self::notify_one
    /// [`notify_all`]: Self::notify_all
    /// [poisoning]: super::Mutex#poisoning
    /// [`Mutex`]: super::Mutex
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    ///
    /// let pair = Arc::new((Mutex::new(true), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut pending = lock.lock().unwrap();
    ///     *pending = false;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_one();
    /// });
    ///
    /// // Wait for the thread to start up.
    /// let (lock, cvar) = &*pair;
    /// // As long as the value inside the `Mutex<bool>` is `true`, we wait.
    /// let _guard = cvar.wait_while(lock.lock().unwrap(), |pending| { *pending }).unwrap();
    /// ```
    #[stable(feature = "wait_until", since = "1.42.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn wait_while<'a, T, F>(
        &self,
        mut guard: MutexGuard<'a, T>,
        mut condition: F,
    ) -> LockResult<MutexGuard<'a, T>>
    where
        F: FnMut(&mut T) -> bool,
    {
        // The loop re-checks the predicate on every wakeup, which also
        // absorbs spurious wakeups from `wait`.
        while condition(&mut *guard) {
            guard = self.wait(guard)?;
        }
        Ok(guard)
    }

    /// Waits on this condition variable for a notification, timing out after a
    /// specified duration.
    ///
    /// The semantics of this function are equivalent to [`wait`]
    /// except that the thread will be blocked for roughly no longer
    /// than `ms` milliseconds. This method should not be used for
    /// precise timing due to anomalies such as preemption or platform
    /// differences that might not cause the maximum amount of time
    /// waited to be precisely `ms`.
    ///
    /// Note that the best effort is made to ensure that the time waited is
    /// measured with a monotonic clock, and not affected by the changes made to
    /// the system time.
    ///
    /// The returned boolean is `false` only if the timeout is known
    /// to have elapsed.
    ///
    /// Like [`wait`], the lock specified will be re-acquired when this function
    /// returns, regardless of whether the timeout elapsed or not.
    ///
    /// [`wait`]: Self::wait
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    ///
    /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut started = lock.lock().unwrap();
    ///     *started = true;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_one();
    /// });
    ///
    /// // Wait for the thread to start up.
    /// let (lock, cvar) = &*pair;
    /// let mut started = lock.lock().unwrap();
    /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
    /// loop {
    ///     let result = cvar.wait_timeout_ms(started, 10).unwrap();
    ///     // 10 milliseconds have passed, or maybe the value changed!
    ///     started = result.0;
    ///     if *started == true {
    ///         // We received the notification and the value has been updated, we can leave.
    ///         break
    ///     }
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_should_not_be_called_on_const_items]
    #[deprecated(since = "1.6.0", note = "replaced by `std::sync::Condvar::wait_timeout`")]
    pub fn wait_timeout_ms<'a, T>(
        &self,
        guard: MutexGuard<'a, T>,
        ms: u32,
    ) -> LockResult<(MutexGuard<'a, T>, bool)> {
        let res = self.wait_timeout(guard, Duration::from_millis(ms as u64));
        // Legacy API: the bool is `true` when NOT timed out, hence the
        // negation of `timed_out()`.
        poison::map_result(res, |(a, b)| (a, !b.timed_out()))
    }

    /// Waits on this condition variable for a notification, timing out after a
    /// specified duration.
    ///
    /// The semantics of this function are equivalent to [`wait`] except that
    /// the thread will be blocked for roughly no longer than `dur`. This
    /// method should not be used for precise timing due to anomalies such as
    /// preemption or platform differences that might not cause the maximum
    /// amount of time waited to be precisely `dur`.
    ///
    /// Note that the best effort is made to ensure that the time waited is
    /// measured with a monotonic clock, and not affected by the changes made to
    /// the system time. This function is susceptible to spurious wakeups.
    /// Condition variables normally have a boolean predicate associated with
    /// them, and the predicate must always be checked each time this function
    /// returns to protect against spurious wakeups. Furthermore, since the timeout
    /// is given relative to the moment this function is called, it needs to be adjusted
    /// when this function is called in a loop. The [`wait_timeout_while`] method
    /// lets you wait with a timeout while a predicate is true, taking care of all these concerns.
    ///
    /// The returned [`WaitTimeoutResult`] value indicates if the timeout is
    /// known to have elapsed.
    ///
    /// Like [`wait`], the lock specified will be re-acquired when this function
    /// returns, regardless of whether the timeout elapsed or not.
    ///
    /// [`wait`]: Self::wait
    /// [`wait_timeout_while`]: Self::wait_timeout_while
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut started = lock.lock().unwrap();
    ///     *started = true;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_one();
    /// });
    ///
    /// // wait for the thread to start up
    /// let (lock, cvar) = &*pair;
    /// let mut started = lock.lock().unwrap();
    /// // as long as the value inside the `Mutex<bool>` is `false`, we wait
    /// loop {
    ///     let result = cvar.wait_timeout(started, Duration::from_millis(10)).unwrap();
    ///     // 10 milliseconds have passed, or maybe the value changed!
    ///     started = result.0;
    ///     if *started == true {
    ///         // We received the notification and the value has been updated, we can leave.
    ///         break
    ///     }
    /// }
    /// ```
    #[stable(feature = "wait_timeout", since = "1.5.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn wait_timeout<'a, T>(
        &self,
        guard: MutexGuard<'a, T>,
        dur: Duration,
    ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
        let (poisoned, result) = unsafe {
            // SAFETY: the existence of `guard` proves that the mutex is
            // currently locked by this thread for the duration of the wait.
            let lock = mutex::guard_lock(&guard);
            // `sys` reports success=true on notification; `WaitTimeoutResult`
            // encodes the opposite (timed out), hence the negation.
            let success = self.inner.wait_timeout(lock, dur);
            (mutex::guard_poison(&guard).get(), WaitTimeoutResult(!success))
        };
        if poisoned { Err(PoisonError::new((guard, result))) } else { Ok((guard, result)) }
    }

    /// Waits on this condition variable for a notification, timing out after a
    /// specified duration.
    ///
    /// The semantics of this function are equivalent to [`wait_while`] except
    /// that the thread will be blocked for roughly no longer than `dur`. This
    /// method should not be used for precise timing due to anomalies such as
    /// preemption or platform differences that might not cause the maximum
    /// amount of time waited to be precisely `dur`.
    ///
    /// Note that the best effort is made to ensure that the time waited is
    /// measured with a monotonic clock, and not affected by the changes made to
    /// the system time.
    ///
    /// The returned [`WaitTimeoutResult`] value indicates if the timeout is
    /// known to have elapsed without the condition being met.
    ///
    /// Like [`wait_while`], the lock specified will be re-acquired when this
    /// function returns, regardless of whether the timeout elapsed or not.
    ///
    /// [`wait_while`]: Self::wait_while
    /// [`wait_timeout`]: Self::wait_timeout
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let pair = Arc::new((Mutex::new(true), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut pending = lock.lock().unwrap();
    ///     *pending = false;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_one();
    /// });
    ///
    /// // wait for the thread to start up
    /// let (lock, cvar) = &*pair;
    /// let result = cvar.wait_timeout_while(
    ///     lock.lock().unwrap(),
    ///     Duration::from_millis(100),
    ///     |&mut pending| pending,
    /// ).unwrap();
    /// if result.1.timed_out() {
    ///     // timed-out without the condition ever evaluating to false.
    /// }
    /// // access the locked mutex via result.0
    /// ```
    #[stable(feature = "wait_timeout_until", since = "1.42.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn wait_timeout_while<'a, T, F>(
        &self,
        mut guard: MutexGuard<'a, T>,
        dur: Duration,
        mut condition: F,
    ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)>
    where
        F: FnMut(&mut T) -> bool,
    {
        // Track the total elapsed time so the per-iteration timeout shrinks
        // across spurious wakeups instead of restarting from `dur`.
        let start = Instant::now();
        loop {
            if !condition(&mut *guard) {
                return Ok((guard, WaitTimeoutResult(false)));
            }
            // `checked_sub` returns None once the deadline has passed.
            let timeout = match dur.checked_sub(start.elapsed()) {
                Some(timeout) => timeout,
                None => return Ok((guard, WaitTimeoutResult(true))),
            };
            guard = self.wait_timeout(guard, timeout)?.0;
        }
    }

    /// Wakes up one blocked thread on this condvar.
    ///
    /// If there is a blocked thread on this condition variable, then it will
    /// be woken up from its call to [`wait`] or [`wait_timeout`]. Calls to
    /// `notify_one` are not buffered in any way.
    ///
    /// To wake up all threads, see [`notify_all`].
    ///
    /// [`wait`]: Self::wait
    /// [`wait_timeout`]: Self::wait_timeout
    /// [`notify_all`]: Self::notify_all
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    ///
    /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut started = lock.lock().unwrap();
    ///     *started = true;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_one();
    /// });
    ///
    /// // Wait for the thread to start up.
    /// let (lock, cvar) = &*pair;
    /// let mut started = lock.lock().unwrap();
    /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
    /// while !*started {
    ///     started = cvar.wait(started).unwrap();
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn notify_one(&self) {
        self.inner.notify_one()
    }

    /// Wakes up all blocked threads on this condvar.
    ///
    /// This method will ensure that any current waiters on the condition
    /// variable are awoken. Calls to `notify_all()` are not buffered in any
    /// way.
    ///
    /// To wake up only one thread, see [`notify_one`].
    ///
    /// [`notify_one`]: Self::notify_one
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex, Condvar};
    /// use std::thread;
    ///
    /// let pair = Arc::new((Mutex::new(false), Condvar::new()));
    /// let pair2 = Arc::clone(&pair);
    ///
    /// thread::spawn(move || {
    ///     let (lock, cvar) = &*pair2;
    ///     let mut started = lock.lock().unwrap();
    ///     *started = true;
    ///     // We notify the condvar that the value has changed.
    ///     cvar.notify_all();
    /// });
    ///
    /// // Wait for the thread to start up.
    /// let (lock, cvar) = &*pair;
    /// let mut started = lock.lock().unwrap();
    /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
    /// while !*started {
    ///     started = cvar.wait(started).unwrap();
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn notify_all(&self) {
        self.inner.notify_all()
    }
}
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Condvar {
    // Prints `Condvar { .. }`; the platform-specific `inner` field carries
    // no useful user-facing state to show.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Condvar").finish_non_exhaustive()
    }
}
|
||||
|
||||
#[stable(feature = "condvar_default", since = "1.10.0")]
impl Default for Condvar {
    /// Creates a `Condvar` which is ready to be waited on and notified.
    ///
    /// Equivalent to [`Condvar::new`].
    fn default() -> Condvar {
        Condvar::new()
    }
}
|
||||
@@ -1,946 +0,0 @@
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::fmt;
|
||||
use crate::marker::PhantomData;
|
||||
use crate::mem::{self, ManuallyDrop};
|
||||
use crate::ops::{Deref, DerefMut};
|
||||
use crate::ptr::NonNull;
|
||||
use crate::sync::{LockResult, PoisonError, TryLockError, TryLockResult, poison};
|
||||
use crate::sys::sync as sys;
|
||||
|
||||
/// A mutual exclusion primitive useful for protecting shared data
|
||||
///
|
||||
/// This mutex will block threads waiting for the lock to become available. The
|
||||
/// mutex can be created via a [`new`] constructor. Each mutex has a type parameter
|
||||
/// which represents the data that it is protecting. The data can only be accessed
|
||||
/// through the RAII guards returned from [`lock`] and [`try_lock`], which
|
||||
/// guarantees that the data is only ever accessed when the mutex is locked.
|
||||
///
|
||||
/// # Poisoning
|
||||
///
|
||||
/// The mutexes in this module implement a strategy called "poisoning" where a
|
||||
/// mutex becomes poisoned if it recognizes that the thread holding it has
|
||||
/// panicked.
|
||||
///
|
||||
/// Once a mutex is poisoned, all other threads are unable to access the data by
|
||||
/// default as it is likely tainted (some invariant is not being upheld). For a
|
||||
/// mutex, this means that the [`lock`] and [`try_lock`] methods return a
|
||||
/// [`Result`] which indicates whether a mutex has been poisoned or not. Most
|
||||
/// usage of a mutex will simply [`unwrap()`] these results, propagating panics
|
||||
/// among threads to ensure that a possibly invalid invariant is not witnessed.
|
||||
///
|
||||
/// Poisoning is only advisory: the [`PoisonError`] type has an [`into_inner`]
|
||||
/// method which will return the guard that would have otherwise been returned
|
||||
/// on a successful lock. This allows access to the data, despite the lock being
|
||||
/// poisoned.
|
||||
///
|
||||
/// In addition, the panic detection is not ideal, so even unpoisoned mutexes
|
||||
/// need to be handled with care, since certain panics may have been skipped.
|
||||
/// Here is a non-exhaustive list of situations where this might occur:
|
||||
///
|
||||
/// - If a mutex is locked while a panic is underway, e.g. within a [`Drop`]
|
||||
/// implementation or a [panic hook], panicking for the second time while the
|
||||
/// lock is held will leave the mutex unpoisoned. Note that while double panic
|
||||
/// usually aborts the program, [`catch_unwind`] can prevent this.
|
||||
///
|
||||
/// - Locking and unlocking the mutex across different panic contexts, e.g. by
|
||||
/// storing the guard to a [`Cell`] within [`Drop::drop`] and accessing it
|
||||
/// outside, or vice versa, can affect poisoning status in an unexpected way.
|
||||
///
|
||||
/// - Foreign exceptions do not currently trigger poisoning even in absence of
|
||||
/// other panics.
|
||||
///
|
||||
/// While this rarely happens in realistic code, `unsafe` code cannot rely on
|
||||
/// poisoning for soundness, since the behavior of poisoning can depend on
|
||||
/// outside context. Here's an example of **incorrect** use of poisoning:
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::sync::Mutex;
|
||||
///
|
||||
/// struct MutexBox<T> {
|
||||
/// data: Mutex<*mut T>,
|
||||
/// }
|
||||
///
|
||||
/// impl<T> MutexBox<T> {
|
||||
/// pub fn new(value: T) -> Self {
|
||||
/// Self {
|
||||
/// data: Mutex::new(Box::into_raw(Box::new(value))),
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// pub fn replace_with(&self, f: impl FnOnce(T) -> T) {
|
||||
/// let ptr = self.data.lock().expect("poisoned");
|
||||
/// // While `f` is running, the data is moved out of `*ptr`. If `f`
|
||||
/// // panics, `*ptr` keeps pointing at a dropped value. The intention
|
||||
/// // is that this will poison the mutex, so the following calls to
|
||||
/// // `replace_with` will panic without reading `*ptr`. But since
|
||||
/// // poisoning is not guaranteed to occur if this is run from a panic
|
||||
/// // hook, this can lead to use-after-free.
|
||||
/// unsafe {
|
||||
/// (*ptr).write(f((*ptr).read()));
|
||||
/// }
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// [`new`]: Self::new
|
||||
/// [`lock`]: Self::lock
|
||||
/// [`try_lock`]: Self::try_lock
|
||||
/// [`unwrap()`]: Result::unwrap
|
||||
/// [`PoisonError`]: super::PoisonError
|
||||
/// [`into_inner`]: super::PoisonError::into_inner
|
||||
/// [panic hook]: crate::panic::set_hook
|
||||
/// [`catch_unwind`]: crate::panic::catch_unwind
|
||||
/// [`Cell`]: crate::cell::Cell
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{Arc, Mutex};
|
||||
/// use std::thread;
|
||||
/// use std::sync::mpsc::channel;
|
||||
///
|
||||
/// const N: usize = 10;
|
||||
///
|
||||
/// // Spawn a few threads to increment a shared variable (non-atomically), and
|
||||
/// // let the main thread know once all increments are done.
|
||||
/// //
|
||||
/// // Here we're using an Arc to share memory among threads, and the data inside
|
||||
/// // the Arc is protected with a mutex.
|
||||
/// let data = Arc::new(Mutex::new(0));
|
||||
///
|
||||
/// let (tx, rx) = channel();
|
||||
/// for _ in 0..N {
|
||||
/// let (data, tx) = (Arc::clone(&data), tx.clone());
|
||||
/// thread::spawn(move || {
|
||||
/// // The shared state can only be accessed once the lock is held.
|
||||
/// // Our non-atomic increment is safe because we're the only thread
|
||||
/// // which can access the shared state when the lock is held.
|
||||
/// //
|
||||
/// // We unwrap() the return value to assert that we are not expecting
|
||||
/// // threads to ever fail while holding the lock.
|
||||
/// let mut data = data.lock().unwrap();
|
||||
/// *data += 1;
|
||||
/// if *data == N {
|
||||
/// tx.send(()).unwrap();
|
||||
/// }
|
||||
/// // the lock is unlocked here when `data` goes out of scope.
|
||||
/// });
|
||||
/// }
|
||||
///
|
||||
/// rx.recv().unwrap();
|
||||
/// ```
|
||||
///
|
||||
/// To recover from a poisoned mutex:
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{Arc, Mutex};
|
||||
/// use std::thread;
|
||||
///
|
||||
/// let lock = Arc::new(Mutex::new(0_u32));
|
||||
/// let lock2 = Arc::clone(&lock);
|
||||
///
|
||||
/// let _ = thread::spawn(move || -> () {
|
||||
/// // This thread will acquire the mutex first, unwrapping the result of
|
||||
/// // `lock` because the lock has not been poisoned.
|
||||
/// let _guard = lock2.lock().unwrap();
|
||||
///
|
||||
/// // This panic while holding the lock (`_guard` is in scope) will poison
|
||||
/// // the mutex.
|
||||
/// panic!();
|
||||
/// }).join();
|
||||
///
|
||||
/// // The lock is poisoned by this point, but the returned result can be
|
||||
/// // pattern matched on to return the underlying guard on both branches.
|
||||
/// let mut guard = match lock.lock() {
|
||||
/// Ok(guard) => guard,
|
||||
/// Err(poisoned) => poisoned.into_inner(),
|
||||
/// };
|
||||
///
|
||||
/// *guard += 1;
|
||||
/// ```
|
||||
///
|
||||
/// To unlock a mutex guard sooner than the end of the enclosing scope,
|
||||
/// either create an inner scope or drop the guard manually.
|
||||
///
|
||||
/// ```
|
||||
/// use std::sync::{Arc, Mutex};
|
||||
/// use std::thread;
|
||||
///
|
||||
/// const N: usize = 3;
|
||||
///
|
||||
/// let data_mutex = Arc::new(Mutex::new(vec![1, 2, 3, 4]));
|
||||
/// let res_mutex = Arc::new(Mutex::new(0));
|
||||
///
|
||||
/// let mut threads = Vec::with_capacity(N);
|
||||
/// (0..N).for_each(|_| {
|
||||
/// let data_mutex_clone = Arc::clone(&data_mutex);
|
||||
/// let res_mutex_clone = Arc::clone(&res_mutex);
|
||||
///
|
||||
/// threads.push(thread::spawn(move || {
|
||||
/// // Here we use a block to limit the lifetime of the lock guard.
|
||||
/// let result = {
|
||||
/// let mut data = data_mutex_clone.lock().unwrap();
|
||||
/// // This is the result of some important and long-ish work.
|
||||
/// let result = data.iter().fold(0, |acc, x| acc + x * 2);
|
||||
/// data.push(result);
|
||||
/// result
|
||||
/// // The mutex guard gets dropped here, together with any other values
|
||||
/// // created in the critical section.
|
||||
/// };
|
||||
/// // The guard created here is a temporary dropped at the end of the statement, i.e.
|
||||
/// // the lock would not remain being held even if the thread did some additional work.
|
||||
/// *res_mutex_clone.lock().unwrap() += result;
|
||||
/// }));
|
||||
/// });
|
||||
///
|
||||
/// let mut data = data_mutex.lock().unwrap();
|
||||
/// // This is the result of some important and long-ish work.
|
||||
/// let result = data.iter().fold(0, |acc, x| acc + x * 2);
|
||||
/// data.push(result);
|
||||
/// // We drop the `data` explicitly because it's not necessary anymore and the
|
||||
/// // thread still has work to do. This allows other threads to start working on
|
||||
/// // the data immediately, without waiting for the rest of the unrelated work
|
||||
/// // to be done here.
|
||||
/// //
|
||||
/// // It's even more important here than in the threads because we `.join` the
|
||||
/// // threads after that. If we had not dropped the mutex guard, a thread could
|
||||
/// // be waiting forever for it, causing a deadlock.
|
||||
/// // As in the threads, a block could have been used instead of calling the
|
||||
/// // `drop` function.
|
||||
/// drop(data);
|
||||
/// // Here the mutex guard is not assigned to a variable and so, even if the
|
||||
/// // scope does not end after this line, the mutex is still released: there is
|
||||
/// // no deadlock.
|
||||
/// *res_mutex.lock().unwrap() += result;
|
||||
///
|
||||
/// threads.into_iter().for_each(|thread| {
|
||||
/// thread
|
||||
/// .join()
|
||||
/// .expect("The thread creating or execution failed !")
|
||||
/// });
|
||||
///
|
||||
/// assert_eq!(*res_mutex.lock().unwrap(), 800);
|
||||
/// ```
|
||||
///
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "Mutex")]
pub struct Mutex<T: ?Sized> {
    // The platform-specific lock primitive doing the actual blocking.
    inner: sys::Mutex,
    // Set when a thread panics while holding the lock (see the doc
    // comment above on poisoning).
    poison: poison::Flag,
    // The protected value; `UnsafeCell` permits mutation through `&self`,
    // which is sound only while `inner` is held.
    data: UnsafeCell<T>,
}
|
||||
|
||||
/// `T` must be `Send` for a [`Mutex`] to be `Send` because it is possible to acquire
/// the owned `T` from the `Mutex` via [`into_inner`].
///
/// [`into_inner`]: Mutex::into_inner
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: sending the mutex moves the contained `T` to another thread,
// which is exactly what `T: Send` permits.
unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
|
||||
|
||||
/// `T` must be `Send` for [`Mutex`] to be `Sync`.
/// This ensures that the protected data can be accessed safely from multiple threads
/// without causing data races or other unsafe behavior.
///
/// [`Mutex<T>`] provides mutable access to `T` to one thread at a time. However, it's essential
/// for `T` to be `Send` because it's not safe for non-`Send` structures to be accessed in
/// this manner. For instance, consider [`Rc`], a non-atomic reference counted smart pointer,
/// which is not `Send`. With `Rc`, we can have multiple copies pointing to the same heap
/// allocation with a non-atomic reference count. If we were to use `Mutex<Rc<_>>`, it would
/// only protect one instance of `Rc` from shared access, leaving other copies vulnerable
/// to potential data races.
///
/// Also note that it is not necessary for `T` to be `Sync` as `&T` is only made available
/// to one thread at a time if `T` is not `Sync`.
///
/// [`Rc`]: crate::rc::Rc
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: the lock serializes all access to the data, so only `Send` is
// required of `T` (see the doc comment above).
unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
|
||||
|
||||
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// [`Deref`] and [`DerefMut`] implementations.
///
/// This structure is created by the [`lock`] and [`try_lock`] methods on
/// [`Mutex`].
///
/// [`lock`]: Mutex::lock
/// [`try_lock`]: Mutex::try_lock
#[must_use = "if unused the Mutex will immediately unlock"]
#[must_not_suspend = "holding a MutexGuard across suspend \
                      points can cause deadlocks, delays, \
                      and cause Futures to not implement `Send`"]
#[stable(feature = "rust1", since = "1.0.0")]
#[clippy::has_significant_drop]
#[cfg_attr(not(test), rustc_diagnostic_item = "MutexGuard")]
pub struct MutexGuard<'a, T: ?Sized + 'a> {
    // The mutex this guard keeps locked; also provides access to the data.
    lock: &'a Mutex<T>,
    // Panic-tracking state consulted on drop to decide whether to poison.
    poison: poison::Guard,
}
|
||||
|
||||
/// A [`MutexGuard`] is not `Send` to maximize platform portability.
///
/// On platforms that use POSIX threads (commonly referred to as pthreads) there is a requirement to
/// release mutex locks on the same thread they were acquired.
/// For this reason, [`MutexGuard`] must not implement `Send` to prevent it being dropped from
/// another thread.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> !Send for MutexGuard<'_, T> {}
|
||||
|
||||
/// `T` must be `Sync` for a [`MutexGuard<T>`] to be `Sync`
/// because it is possible to get a `&T` from `&MutexGuard` (via `Deref`).
#[stable(feature = "mutexguard", since = "1.19.0")]
// SAFETY: sharing the guard only exposes `&T`, which `T: Sync` allows.
unsafe impl<T: ?Sized + Sync> Sync for MutexGuard<'_, T> {}
|
||||
|
||||
/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
/// subfield of the protected data. When this structure is dropped (falls out
/// of scope), the lock will be unlocked.
///
/// The main difference between `MappedMutexGuard` and [`MutexGuard`] is that the
/// former cannot be used with [`Condvar`], since that
/// could introduce soundness issues if the locked object is modified by another
/// thread while the `Mutex` is unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// [`Deref`] and [`DerefMut`] implementations.
///
/// This structure is created by the [`map`] and [`filter_map`] methods on
/// [`MutexGuard`].
///
/// [`map`]: MutexGuard::map
/// [`filter_map`]: MutexGuard::filter_map
/// [`Condvar`]: crate::sync::Condvar
#[must_use = "if unused the Mutex will immediately unlock"]
#[must_not_suspend = "holding a MappedMutexGuard across suspend \
                      points can cause deadlocks, delays, \
                      and cause Futures to not implement `Send`"]
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
#[clippy::has_significant_drop]
pub struct MappedMutexGuard<'a, T: ?Sized + 'a> {
    // NB: we use a pointer instead of `&'a mut T` to avoid `noalias` violations, because a
    // `MappedMutexGuard` argument doesn't hold uniqueness for its whole scope, only until it drops.
    // `NonNull` is covariant over `T`, so we add a `PhantomData<&'a mut T>` field
    // below for the correct variance over `T` (invariance).
    data: NonNull<T>,
    // The underlying lock to release on drop (the `Mutex<T>` itself is no
    // longer reachable once the guard has been mapped to a subfield).
    inner: &'a sys::Mutex,
    // Poison flag of the originating mutex, updated on drop.
    poison_flag: &'a poison::Flag,
    poison: poison::Guard,
    _variance: PhantomData<&'a mut T>,
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
// Like `MutexGuard`, the mapped guard must be dropped on the locking thread
// (pthreads requires unlocking on the acquiring thread), so it is `!Send`.
impl<T: ?Sized> !Send for MappedMutexGuard<'_, T> {}
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
// SAFETY: sharing the guard only exposes `&T`, which `T: Sync` allows.
unsafe impl<T: ?Sized + Sync> Sync for MappedMutexGuard<'_, T> {}
|
||||
|
||||
impl<T> Mutex<T> {
    /// Creates a new mutex in an unlocked state ready for use.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Mutex;
    ///
    /// let mutex = Mutex::new(0);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
    #[inline]
    pub const fn new(t: T) -> Mutex<T> {
        Mutex { inner: sys::Mutex::new(), poison: poison::Flag::new(), data: UnsafeCell::new(t) }
    }

    /// Returns the contained value by cloning it.
    ///
    /// # Errors
    ///
    /// If another user of this mutex panicked while holding the mutex, then
    /// this call will return an error instead.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(lock_value_accessors)]
    ///
    /// use std::sync::Mutex;
    ///
    /// let mut mutex = Mutex::new(7);
    ///
    /// assert_eq!(mutex.get_cloned().unwrap(), 7);
    /// ```
    #[unstable(feature = "lock_value_accessors", issue = "133407")]
    pub fn get_cloned(&self) -> Result<T, PoisonError<()>>
    where
        T: Clone,
    {
        // Clone while the lock is held; a poisoned lock is reported with a
        // unit payload because no data can be handed out here.
        match self.lock() {
            Ok(guard) => Ok((*guard).clone()),
            Err(_) => Err(PoisonError::new(())),
        }
    }

    /// Sets the contained value.
    ///
    /// # Errors
    ///
    /// If another user of this mutex panicked while holding the mutex, then
    /// this call will return an error containing the provided `value` instead.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(lock_value_accessors)]
    ///
    /// use std::sync::Mutex;
    ///
    /// let mut mutex = Mutex::new(7);
    ///
    /// assert_eq!(mutex.get_cloned().unwrap(), 7);
    /// mutex.set(11).unwrap();
    /// assert_eq!(mutex.get_cloned().unwrap(), 11);
    /// ```
    #[unstable(feature = "lock_value_accessors", issue = "133407")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn set(&self, value: T) -> Result<(), PoisonError<T>> {
        if mem::needs_drop::<T>() {
            // If the contained value has non-trivial destructor, we
            // call that destructor after the lock being released.
            self.replace(value).map(drop)
        } else {
            // Trivially-droppable value: overwrite in place under the lock.
            match self.lock() {
                Ok(mut guard) => {
                    *guard = value;

                    Ok(())
                }
                Err(_) => Err(PoisonError::new(value)),
            }
        }
    }

    /// Replaces the contained value with `value`, and returns the old contained value.
    ///
    /// # Errors
    ///
    /// If another user of this mutex panicked while holding the mutex, then
    /// this call will return an error containing the provided `value` instead.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(lock_value_accessors)]
    ///
    /// use std::sync::Mutex;
    ///
    /// let mut mutex = Mutex::new(7);
    ///
    /// assert_eq!(mutex.replace(11).unwrap(), 7);
    /// assert_eq!(mutex.get_cloned().unwrap(), 11);
    /// ```
    #[unstable(feature = "lock_value_accessors", issue = "133407")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn replace(&self, value: T) -> LockResult<T> {
        // On poison, hand the caller's `value` back inside the error so it
        // is not lost.
        match self.lock() {
            Ok(mut guard) => Ok(mem::replace(&mut *guard, value)),
            Err(_) => Err(PoisonError::new(value)),
        }
    }
}
|
||||
|
||||
impl<T: ?Sized> Mutex<T> {
    /// Acquires a mutex, blocking the current thread until it is able to do so.
    ///
    /// This function will block the local thread until it is available to acquire
    /// the mutex. Upon returning, the thread is the only thread with the lock
    /// held. An RAII guard is returned to allow scoped unlock of the lock. When
    /// the guard goes out of scope, the mutex will be unlocked.
    ///
    /// The exact behavior on locking a mutex in the thread which already holds
    /// the lock is left unspecified. However, this function will not return on
    /// the second call (it might panic or deadlock, for example).
    ///
    /// # Errors
    ///
    /// If another user of this mutex panicked while holding the mutex, then
    /// this call will return an error once the mutex is acquired. The acquired
    /// mutex guard will be contained in the returned error.
    ///
    /// # Panics
    ///
    /// This function might panic when called if the lock is already held by
    /// the current thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex};
    /// use std::thread;
    ///
    /// let mutex = Arc::new(Mutex::new(0));
    /// let c_mutex = Arc::clone(&mutex);
    ///
    /// thread::spawn(move || {
    ///     *c_mutex.lock().unwrap() = 10;
    /// }).join().expect("thread::spawn failed");
    /// assert_eq!(*mutex.lock().unwrap(), 10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn lock(&self) -> LockResult<MutexGuard<'_, T>> {
        unsafe {
            // SAFETY: the blocking `lock` call returns only once this thread
            // holds the lock, satisfying `MutexGuard::new`'s requirement.
            self.inner.lock();
            MutexGuard::new(self)
        }
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then [`Err`] is returned.
    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
    /// guard is dropped.
    ///
    /// This function does not block.
    ///
    /// # Errors
    ///
    /// If another user of this mutex panicked while holding the mutex, then
    /// this call will return the [`Poisoned`] error if the mutex would
    /// otherwise be acquired. An acquired lock guard will be contained
    /// in the returned error.
    ///
    /// If the mutex could not be acquired because it is already locked, then
    /// this call will return the [`WouldBlock`] error.
    ///
    /// [`Poisoned`]: TryLockError::Poisoned
    /// [`WouldBlock`]: TryLockError::WouldBlock
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex};
    /// use std::thread;
    ///
    /// let mutex = Arc::new(Mutex::new(0));
    /// let c_mutex = Arc::clone(&mutex);
    ///
    /// thread::spawn(move || {
    ///     let mut lock = c_mutex.try_lock();
    ///     if let Ok(ref mut mutex) = lock {
    ///         **mutex = 10;
    ///     } else {
    ///         println!("try_lock failed");
    ///     }
    /// }).join().expect("thread::spawn failed");
    /// assert_eq!(*mutex.lock().unwrap(), 10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn try_lock(&self) -> TryLockResult<MutexGuard<'_, T>> {
        unsafe {
            // SAFETY: a guard is only constructed when `try_lock` reports
            // that this thread acquired the lock.
            if self.inner.try_lock() {
                Ok(MutexGuard::new(self)?)
            } else {
                Err(TryLockError::WouldBlock)
            }
        }
    }

    /// Determines whether the mutex is poisoned.
    ///
    /// If another thread is active, the mutex can still become poisoned at any
    /// time. You should not trust a `false` value for program correctness
    /// without additional synchronization.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex};
    /// use std::thread;
    ///
    /// let mutex = Arc::new(Mutex::new(0));
    /// let c_mutex = Arc::clone(&mutex);
    ///
    /// let _ = thread::spawn(move || {
    ///     let _lock = c_mutex.lock().unwrap();
    ///     panic!(); // the mutex gets poisoned
    /// }).join();
    /// assert_eq!(mutex.is_poisoned(), true);
    /// ```
    #[inline]
    #[stable(feature = "sync_poison", since = "1.2.0")]
    pub fn is_poisoned(&self) -> bool {
        self.poison.get()
    }

    /// Clear the poisoned state from a mutex.
    ///
    /// If the mutex is poisoned, it will remain poisoned until this function is called. This
    /// allows recovering from a poisoned state and marking that it has recovered. For example, if
    /// the value is overwritten by a known-good value, then the mutex can be marked as
    /// un-poisoned. Or possibly, the value could be inspected to determine if it is in a
    /// consistent state, and if so the poison is removed.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Mutex};
    /// use std::thread;
    ///
    /// let mutex = Arc::new(Mutex::new(0));
    /// let c_mutex = Arc::clone(&mutex);
    ///
    /// let _ = thread::spawn(move || {
    ///     let _lock = c_mutex.lock().unwrap();
    ///     panic!(); // the mutex gets poisoned
    /// }).join();
    ///
    /// assert_eq!(mutex.is_poisoned(), true);
    /// let x = mutex.lock().unwrap_or_else(|mut e| {
    ///     **e.get_mut() = 1;
    ///     mutex.clear_poison();
    ///     e.into_inner()
    /// });
    /// assert_eq!(mutex.is_poisoned(), false);
    /// assert_eq!(*x, 1);
    /// ```
    #[inline]
    #[stable(feature = "mutex_unpoison", since = "1.77.0")]
    #[rustc_should_not_be_called_on_const_items]
    pub fn clear_poison(&self) {
        self.poison.clear();
    }

    /// Consumes this mutex, returning the underlying data.
    ///
    /// # Errors
    ///
    /// If another user of this mutex panicked while holding the mutex, then
    /// this call will return an error containing the underlying data
    /// instead.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Mutex;
    ///
    /// let mutex = Mutex::new(0);
    /// assert_eq!(mutex.into_inner().unwrap(), 0);
    /// ```
    #[stable(feature = "mutex_into_inner", since = "1.6.0")]
    pub fn into_inner(self) -> LockResult<T>
    where
        T: Sized,
    {
        // Owning `self` proves no guard exists, so no locking is needed;
        // only the poison flag is still consulted.
        let data = self.data.into_inner();
        poison::map_result(self.poison.borrow(), |()| data)
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `Mutex` mutably, no actual locking needs to
    /// take place -- the mutable borrow statically guarantees no new locks can be acquired
    /// while this reference exists. Note that this method does not clear any previous abandoned locks
    /// (e.g., via [`forget()`] on a [`MutexGuard`]).
    ///
    /// # Errors
    ///
    /// If another user of this mutex panicked while holding the mutex, then
    /// this call will return an error containing a mutable reference to the
    /// underlying data instead.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Mutex;
    ///
    /// let mut mutex = Mutex::new(0);
    /// *mutex.get_mut().unwrap() = 10;
    /// assert_eq!(*mutex.lock().unwrap(), 10);
    /// ```
    ///
    /// [`forget()`]: mem::forget
    #[stable(feature = "mutex_get_mut", since = "1.6.0")]
    pub fn get_mut(&mut self) -> LockResult<&mut T> {
        // `&mut self` statically excludes concurrent guards, so the data is
        // accessible without locking.
        let data = self.data.get_mut();
        poison::map_result(self.poison.borrow(), |()| data)
    }

    /// Returns a raw pointer to the underlying data.
    ///
    /// The returned pointer is always non-null and properly aligned, but it is
    /// the user's responsibility to ensure that any reads and writes through it
    /// are properly synchronized to avoid data races, and that it is not read
    /// or written through after the mutex is dropped.
    #[unstable(feature = "mutex_data_ptr", issue = "140368")]
    pub const fn data_ptr(&self) -> *mut T {
        self.data.get()
    }
}
|
||||
|
||||
#[stable(feature = "mutex_from", since = "1.24.0")]
|
||||
impl<T> From<T> for Mutex<T> {
|
||||
/// Creates a new mutex in an unlocked state ready for use.
|
||||
/// This is equivalent to [`Mutex::new`].
|
||||
fn from(t: T) -> Self {
|
||||
Mutex::new(t)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "mutex_default", since = "1.10.0")]
|
||||
impl<T: Default> Default for Mutex<T> {
|
||||
/// Creates a `Mutex<T>`, with the `Default` value for T.
|
||||
fn default() -> Mutex<T> {
|
||||
Mutex::new(Default::default())
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut d = f.debug_struct("Mutex");
        // Use `try_lock` so that formatting never blocks.
        match self.try_lock() {
            Ok(guard) => {
                d.field("data", &&*guard);
            }
            Err(TryLockError::Poisoned(err)) => {
                // The data is still reachable through the poison error.
                d.field("data", &&**err.get_ref());
            }
            Err(TryLockError::WouldBlock) => {
                // Held by another thread: report a placeholder instead of
                // waiting for the lock.
                d.field("data", &"<locked>");
            }
        }
        d.field("poisoned", &self.poison.get());
        d.finish_non_exhaustive()
    }
}
|
||||
|
||||
impl<'mutex, T: ?Sized> MutexGuard<'mutex, T> {
    /// Creates a guard for a lock that was just acquired, recording the
    /// poison state. Returns `Err` (still containing a guard) if the mutex
    /// was poisoned.
    ///
    /// # Safety
    ///
    /// The caller must hold `lock.inner` on the current thread, since the
    /// returned guard unlocks it when dropped.
    unsafe fn new(lock: &'mutex Mutex<T>) -> LockResult<MutexGuard<'mutex, T>> {
        poison::map_result(lock.poison.guard(), |guard| MutexGuard { lock, poison: guard })
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for MutexGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: the guard's existence proves the lock is held by this
        // thread, so access to the data is exclusive for its lifetime.
        unsafe { &*self.lock.data.get() }
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: the guard's existence proves the lock is held by this
        // thread, so mutable access to the data is exclusive.
        unsafe { &mut *self.lock.data.get() }
    }
}
|
||||
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for MutexGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: the guard's existence proves this thread holds the lock,
        // so unlocking here is sound. `poison.done` first records whether
        // we are unwinding from a panic (which poisons the mutex).
        unsafe {
            self.lock.poison.done(&self.poison);
            self.lock.inner.unlock();
        }
    }
}
|
||||
|
||||
#[stable(feature = "std_debug", since = "1.16.0")]
|
||||
impl<T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt::Debug::fmt(&**self, f)
|
||||
}
|
||||
}
|
||||
|
||||
#[stable(feature = "std_guard_impls", since = "1.20.0")]
|
||||
impl<T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
(**self).fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
/// Exposes the raw lock of a guard's mutex, for use in
/// [`condvar`](super::condvar).
pub(super) fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
    &guard.lock.inner
}
|
||||
|
||||
/// Exposes the poison flag of a guard's mutex, for use in
/// [`condvar`](super::condvar).
pub(super) fn guard_poison<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a poison::Flag {
    &guard.lock.poison
}
|
||||
|
||||
impl<'a, T: ?Sized> MutexGuard<'a, T> {
    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data, e.g.
    /// an enum variant.
    ///
    /// The `Mutex` is already locked, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MutexGuard::map(...)`. A method would interfere with methods of the
    /// same name on the contents of the `MutexGuard` used through `Deref`.
    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
    pub fn map<U, F>(orig: Self, f: F) -> MappedMutexGuard<'a, U>
    where
        F: FnOnce(&mut T) -> &mut U,
        U: ?Sized,
    {
        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `filter_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        let data = NonNull::from(f(unsafe { &mut *orig.lock.data.get() }));
        // Suppress `orig`'s destructor: responsibility for unlocking is
        // transferred to the mapped guard below.
        let orig = ManuallyDrop::new(orig);
        MappedMutexGuard {
            data,
            inner: &orig.lock.inner,
            poison_flag: &orig.lock.poison,
            poison: orig.poison.clone(),
            _variance: PhantomData,
        }
    }

    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data. The
    /// original guard is returned as an `Err(...)` if the closure returns
    /// `None`.
    ///
    /// The `Mutex` is already locked, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MutexGuard::filter_map(...)`. A method would interfere with methods of the
    /// same name on the contents of the `MutexGuard` used through `Deref`.
    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
    pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedMutexGuard<'a, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
        U: ?Sized,
    {
        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `filter_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        match f(unsafe { &mut *orig.lock.data.get() }) {
            Some(data) => {
                let data = NonNull::from(data);
                // Suppress `orig`'s destructor: the mapped guard now owns
                // the unlock.
                let orig = ManuallyDrop::new(orig);
                Ok(MappedMutexGuard {
                    data,
                    inner: &orig.lock.inner,
                    poison_flag: &orig.lock.poison,
                    poison: orig.poison.clone(),
                    _variance: PhantomData,
                })
            }
            // Closure declined: give the (still locked) original guard back.
            None => Err(orig),
        }
    }
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
impl<T: ?Sized> Deref for MappedMutexGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: `data` was derived from the locked data while the guard
        // chain held the lock, and the lock is still held.
        unsafe { self.data.as_ref() }
    }
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
impl<T: ?Sized> DerefMut for MappedMutexGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: `data` was derived from the locked data while the guard
        // chain held the lock, and the lock is still held.
        unsafe { self.data.as_mut() }
    }
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
impl<T: ?Sized> Drop for MappedMutexGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: this guard (via the guard it was mapped from) holds the
        // lock, so unlocking is sound. `done` first records whether we are
        // unwinding from a panic (which poisons the mutex).
        unsafe {
            self.poison_flag.done(&self.poison);
            self.inner.unlock();
        }
    }
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedMutexGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt::Debug::fmt(&**self, f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
|
||||
impl<T: ?Sized + fmt::Display> fmt::Display for MappedMutexGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
(**self).fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: ?Sized> MappedMutexGuard<'a, T> {
    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data, e.g.
    /// an enum variant.
    ///
    /// The `Mutex` is already locked, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MappedMutexGuard::map(...)`. A method would interfere with methods of the
    /// same name on the contents of the `MutexGuard` used through `Deref`.
    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
    pub fn map<U, F>(mut orig: Self, f: F) -> MappedMutexGuard<'a, U>
    where
        F: FnOnce(&mut T) -> &mut U,
        U: ?Sized,
    {
        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `filter_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        let data = NonNull::from(f(unsafe { orig.data.as_mut() }));
        // Suppress `orig`'s destructor: responsibility for unlocking is
        // transferred to the new mapped guard below.
        let orig = ManuallyDrop::new(orig);
        MappedMutexGuard {
            data,
            inner: orig.inner,
            poison_flag: orig.poison_flag,
            poison: orig.poison.clone(),
            _variance: PhantomData,
        }
    }

    /// Makes a [`MappedMutexGuard`] for a component of the borrowed data. The
    /// original guard is returned as an `Err(...)` if the closure returns
    /// `None`.
    ///
    /// The `Mutex` is already locked, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MappedMutexGuard::filter_map(...)`. A method would interfere with methods of the
    /// same name on the contents of the `MutexGuard` used through `Deref`.
    #[unstable(feature = "mapped_lock_guards", issue = "117108")]
    pub fn filter_map<U, F>(mut orig: Self, f: F) -> Result<MappedMutexGuard<'a, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
        U: ?Sized,
    {
        // SAFETY: the conditions of `MutexGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `filter_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        match f(unsafe { orig.data.as_mut() }) {
            Some(data) => {
                let data = NonNull::from(data);
                // Suppress `orig`'s destructor: the new mapped guard now
                // owns the unlock.
                let orig = ManuallyDrop::new(orig);
                Ok(MappedMutexGuard {
                    data,
                    inner: orig.inner,
                    poison_flag: orig.poison_flag,
                    poison: orig.poison.clone(),
                    _variance: PhantomData,
                })
            }
            // Closure declined: give the (still locked) original guard back.
            None => Err(orig),
        }
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,432 +0,0 @@
|
||||
use crate::cell::UnsafeCell;
|
||||
use crate::fmt;
|
||||
use crate::ops::Deref;
|
||||
use crate::panic::{RefUnwindSafe, UnwindSafe};
|
||||
use crate::sys::sync as sys;
|
||||
use crate::thread::{ThreadId, current_id};
|
||||
|
||||
/// A re-entrant mutual exclusion lock
|
||||
///
|
||||
/// This lock will block *other* threads waiting for the lock to become
|
||||
/// available. The thread which has already locked the mutex can lock it
|
||||
/// multiple times without blocking, preventing a common source of deadlocks.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// Allow recursively calling a function needing synchronization from within
|
||||
/// a callback (this is how [`StdoutLock`](crate::io::StdoutLock) is currently
|
||||
/// implemented):
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(reentrant_lock)]
|
||||
///
|
||||
/// use std::cell::RefCell;
|
||||
/// use std::sync::ReentrantLock;
|
||||
///
|
||||
/// pub struct Log {
|
||||
/// data: RefCell<String>,
|
||||
/// }
|
||||
///
|
||||
/// impl Log {
|
||||
/// pub fn append(&self, msg: &str) {
|
||||
/// self.data.borrow_mut().push_str(msg);
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// static LOG: ReentrantLock<Log> = ReentrantLock::new(Log { data: RefCell::new(String::new()) });
|
||||
///
|
||||
/// pub fn with_log<R>(f: impl FnOnce(&Log) -> R) -> R {
|
||||
/// let log = LOG.lock();
|
||||
/// f(&*log)
|
||||
/// }
|
||||
///
|
||||
/// with_log(|log| {
|
||||
/// log.append("Hello");
|
||||
/// with_log(|log| log.append(" there!"));
|
||||
/// });
|
||||
/// ```
|
||||
///
|
||||
// # Implementation details
|
||||
//
|
||||
// The 'owner' field tracks which thread has locked the mutex.
|
||||
//
|
||||
// We use thread::current_id() as the thread identifier, which is just the
|
||||
// current thread's ThreadId, so it's unique across the process lifetime.
|
||||
//
|
||||
// If `owner` is set to the identifier of the current thread,
|
||||
// we assume the mutex is already locked and instead of locking it again,
|
||||
// we increment `lock_count`.
|
||||
//
|
||||
// When unlocking, we decrement `lock_count`, and only unlock the mutex when
|
||||
// it reaches zero.
|
||||
//
|
||||
// `lock_count` is protected by the mutex and only accessed by the thread that has
|
||||
// locked the mutex, so needs no synchronization.
|
||||
//
|
||||
// `owner` can be checked by other threads that want to see if they already
|
||||
// hold the lock, so needs to be atomic. If it compares equal, we're on the
|
||||
// same thread that holds the mutex and memory access can use relaxed ordering
|
||||
// since we're not dealing with multiple threads. If it's not equal,
|
||||
// synchronization is left to the mutex, making relaxed memory ordering for
|
||||
// the `owner` field fine in all cases.
|
||||
//
|
||||
// On systems without 64 bit atomics we also store the address of a TLS variable
|
||||
// along the 64-bit TID. We then first check that address against the address
|
||||
// of that variable on the current thread, and only if they compare equal do we
|
||||
// compare the actual TIDs. Because we only ever read the TID on the same thread
|
||||
// that it was written on (or a thread sharing the TLS block with that writer thread),
|
||||
// we don't need to further synchronize the TID accesses, so they can be regular 64-bit
|
||||
// non-atomic accesses.
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
pub struct ReentrantLock<T: ?Sized> {
    // Underlying non-reentrant OS mutex; held while `lock_count > 0`.
    mutex: sys::Mutex,
    // Identifier of the thread currently holding `mutex`; see the
    // "Implementation details" comment above for the synchronization rules.
    owner: Tid,
    // Number of outstanding guards on the owning thread. Only accessed
    // while `mutex` is held, so it needs no synchronization of its own.
    lock_count: UnsafeCell<u32>,
    // The protected data. Must be the last field so `T: ?Sized` is allowed.
    data: T,
}
|
||||
|
||||
cfg_select!(
    target_has_atomic = "64" => {
        use crate::sync::atomic::{Atomic, AtomicU64, Ordering::Relaxed};

        // Fast path: with native 64-bit atomics the owner's `ThreadId` is
        // stored directly (0 means "unowned", which no live thread uses).
        struct Tid(Atomic<u64>);

        impl Tid {
            const fn new() -> Self {
                Self(AtomicU64::new(0))
            }

            #[inline]
            fn contains(&self, owner: ThreadId) -> bool {
                owner.as_u64().get() == self.0.load(Relaxed)
            }

            #[inline]
            // This is just unsafe to match the API of the Tid type below.
            unsafe fn set(&self, tid: Option<ThreadId>) {
                let value = tid.map_or(0, |tid| tid.as_u64().get());
                self.0.store(value, Relaxed);
            }
        }
    }
    _ => {
        /// Returns the address of a TLS variable. This is guaranteed to
        /// be unique across all currently alive threads.
        fn tls_addr() -> usize {
            thread_local! { static X: u8 = const { 0u8 } };

            X.with(|p| <*const u8>::addr(p))
        }

        use crate::sync::atomic::{
            Atomic,
            AtomicUsize,
            Ordering,
        };

        struct Tid {
            // When a thread calls `set()`, this value gets updated to
            // the address of a thread local on that thread. This is
            // used as a first check in `contains()`; if the `tls_addr`
            // doesn't match the TLS address of the current thread, then
            // the ThreadId also can't match. Only if the TLS addresses do
            // match do we read out the actual TID.
            // Note also that we can use relaxed atomic operations here, because
            // we only ever read from the tid if `tls_addr` matches the current
            // TLS address. In that case, either the tid has been set by
            // the current thread, or by a thread that has terminated before
            // the current thread's `tls_addr` was allocated. In either case, no further
            // synchronization is needed (as per <https://github.com/rust-lang/miri/issues/3450>)
            tls_addr: Atomic<usize>,
            tid: UnsafeCell<u64>,
        }

        // SAFETY: access to `tid` is guarded by the `tls_addr` check as
        // described in the struct-level comment above.
        unsafe impl Send for Tid {}
        unsafe impl Sync for Tid {}

        impl Tid {
            const fn new() -> Self {
                Self { tls_addr: AtomicUsize::new(0), tid: UnsafeCell::new(0) }
            }

            #[inline]
            // NOTE: This assumes that `owner` is the ID of the current
            // thread, and may spuriously return `false` if that's not the case.
            fn contains(&self, owner: ThreadId) -> bool {
                // We must call `tls_addr()` *before* doing the load to ensure that if we reuse an
                // earlier thread's address, the `tls_addr.load()` below happens-after everything
                // that thread did.
                let tls_addr = tls_addr();
                // SAFETY: See the comments in the struct definition.
                self.tls_addr.load(Ordering::Relaxed) == tls_addr
                    && unsafe { *self.tid.get() } == owner.as_u64().get()
            }

            #[inline]
            // This may only be called by one thread at a time, and can lead to
            // race conditions otherwise.
            unsafe fn set(&self, tid: Option<ThreadId>) {
                // It's important that we set `self.tls_addr` to 0 if the tid is
                // cleared. Otherwise, there might be race conditions between
                // `set()` and `get()`.
                let tls_addr = if tid.is_some() { tls_addr() } else { 0 };
                let value = tid.map_or(0, |tid| tid.as_u64().get());
                self.tls_addr.store(tls_addr, Ordering::Relaxed);
                unsafe { *self.tid.get() = value };
            }
        }
    }
);
|
||||
|
||||
// NOTE(review): `Sync` here requires only `T: Send`, mirroring the upstream
// `ReentrantLock`; the guard's `!Send` impl below is what keeps this sound.
#[unstable(feature = "reentrant_lock", issue = "121440")]
unsafe impl<T: Send + ?Sized> Send for ReentrantLock<T> {}
#[unstable(feature = "reentrant_lock", issue = "121440")]
unsafe impl<T: Send + ?Sized> Sync for ReentrantLock<T> {}

// Because of the `UnsafeCell`, these traits are not implemented automatically
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: UnwindSafe + ?Sized> UnwindSafe for ReentrantLock<T> {}
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: RefUnwindSafe + ?Sized> RefUnwindSafe for ReentrantLock<T> {}
|
||||
|
||||
/// An RAII implementation of a "scoped lock" of a re-entrant lock. When this
|
||||
/// structure is dropped (falls out of scope), the lock will be unlocked.
|
||||
///
|
||||
/// The data protected by the mutex can be accessed through this guard via its
|
||||
/// [`Deref`] implementation.
|
||||
///
|
||||
/// This structure is created by the [`lock`](ReentrantLock::lock) method on
|
||||
/// [`ReentrantLock`].
|
||||
///
|
||||
/// # Mutability
|
||||
///
|
||||
/// Unlike [`MutexGuard`](super::MutexGuard), `ReentrantLockGuard` does not
|
||||
/// implement [`DerefMut`](crate::ops::DerefMut), because implementation of
|
||||
/// the trait would violate Rust’s reference aliasing rules. Use interior
|
||||
/// mutability (usually [`RefCell`](crate::cell::RefCell)) in order to mutate
|
||||
/// the guarded data.
|
||||
#[must_use = "if unused the ReentrantLock will immediately unlock"]
#[unstable(feature = "reentrant_lock", issue = "121440")]
pub struct ReentrantLockGuard<'a, T: ?Sized + 'a> {
    // Borrow of the lock this guard keeps held; dropping the guard
    // decrements the lock's `lock_count` (see the `Drop` impl in this file).
    lock: &'a ReentrantLock<T>,
}
|
||||
|
||||
// The guard must never leave the thread that acquired the lock: the owner
// bookkeeping and the unlock in `Drop` run on the locking thread.
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> !Send for ReentrantLockGuard<'_, T> {}

// Sharing a guard across threads only hands out `&T` (via `Deref`), so it
// is sound whenever `T: Sync`.
#[unstable(feature = "reentrant_lock", issue = "121440")]
unsafe impl<T: ?Sized + Sync> Sync for ReentrantLockGuard<'_, T> {}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T> ReentrantLock<T> {
|
||||
/// Creates a new re-entrant lock in an unlocked state ready for use.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(reentrant_lock)]
|
||||
/// use std::sync::ReentrantLock;
|
||||
///
|
||||
/// let lock = ReentrantLock::new(0);
|
||||
/// ```
|
||||
pub const fn new(t: T) -> ReentrantLock<T> {
|
||||
ReentrantLock {
|
||||
mutex: sys::Mutex::new(),
|
||||
owner: Tid::new(),
|
||||
lock_count: UnsafeCell::new(0),
|
||||
data: t,
|
||||
}
|
||||
}
|
||||
|
||||
/// Consumes this lock, returning the underlying data.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// #![feature(reentrant_lock)]
|
||||
///
|
||||
/// use std::sync::ReentrantLock;
|
||||
///
|
||||
/// let lock = ReentrantLock::new(0);
|
||||
/// assert_eq!(lock.into_inner(), 0);
|
||||
/// ```
|
||||
pub fn into_inner(self) -> T {
|
||||
self.data
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> ReentrantLock<T> {
    /// Acquires the lock, blocking the current thread until it is able to do
    /// so.
    ///
    /// This function will block the caller until it is available to acquire
    /// the lock. Upon returning, the thread is the only thread with the lock
    /// held. When the thread calling this method already holds the lock, the
    /// call succeeds without blocking.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    /// use std::cell::Cell;
    /// use std::sync::{Arc, ReentrantLock};
    /// use std::thread;
    ///
    /// let lock = Arc::new(ReentrantLock::new(Cell::new(0)));
    /// let c_lock = Arc::clone(&lock);
    ///
    /// thread::spawn(move || {
    ///     c_lock.lock().set(10);
    /// }).join().expect("thread::spawn failed");
    /// assert_eq!(lock.lock().get(), 10);
    /// ```
    pub fn lock(&self) -> ReentrantLockGuard<'_, T> {
        let this_thread = current_id();
        // Safety: We only touch lock_count when we own the inner mutex.
        // Additionally, we only call `self.owner.set()` while holding
        // the inner mutex, so no two threads can call it concurrently.
        unsafe {
            if self.owner.contains(this_thread) {
                // Re-entrant acquisition on the owning thread: just bump the count.
                self.increment_lock_count().expect("lock count overflow in reentrant mutex");
            } else {
                // First acquisition on this thread: take the OS mutex, then
                // record ownership and initialize the count.
                self.mutex.lock();
                self.owner.set(Some(this_thread));
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
            }
        }
        ReentrantLockGuard { lock: self }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `ReentrantLock` mutably, no actual locking
    /// needs to take place -- the mutable borrow statically guarantees no locks
    /// exist.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    /// use std::sync::ReentrantLock;
    ///
    /// let mut lock = ReentrantLock::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.lock(), 10);
    /// ```
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.data
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned.
    ///
    /// This function does not block.
    // FIXME maybe make it a public part of the API?
    #[unstable(issue = "none", feature = "std_internals")]
    #[doc(hidden)]
    pub fn try_lock(&self) -> Option<ReentrantLockGuard<'_, T>> {
        let this_thread = current_id();
        // Safety: We only touch lock_count when we own the inner mutex.
        // Additionally, we only call `self.owner.set()` while holding
        // the inner mutex, so no two threads can call it concurrently.
        unsafe {
            if self.owner.contains(this_thread) {
                // Already held by this thread: the only failure mode left is
                // counter overflow, which `?` maps to `None`.
                self.increment_lock_count()?;
                Some(ReentrantLockGuard { lock: self })
            } else if self.mutex.try_lock() {
                self.owner.set(Some(this_thread));
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
                Some(ReentrantLockGuard { lock: self })
            } else {
                None
            }
        }
    }

    /// Returns a raw pointer to the underlying data.
    ///
    /// The returned pointer is always non-null and properly aligned, but it is
    /// the user's responsibility to ensure that any reads through it are
    /// properly synchronized to avoid data races, and that it is not read
    /// through after the lock is dropped.
    #[unstable(feature = "reentrant_lock_data_ptr", issue = "140368")]
    pub const fn data_ptr(&self) -> *const T {
        &raw const self.data
    }

    /// Increments `lock_count`, returning `None` on `u32` overflow.
    ///
    /// # Safety
    ///
    /// May only be called while the inner mutex is held by the current thread.
    unsafe fn increment_lock_count(&self) -> Option<()> {
        unsafe {
            *self.lock_count.get() = (*self.lock_count.get()).checked_add(1)?;
        }
        Some(())
    }
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T: fmt::Debug + ?Sized> fmt::Debug for ReentrantLock<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let mut d = f.debug_struct("ReentrantLock");
|
||||
match self.try_lock() {
|
||||
Some(v) => d.field("data", &&*v),
|
||||
None => d.field("data", &format_args!("<locked>")),
|
||||
};
|
||||
d.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T: Default> Default for ReentrantLock<T> {
|
||||
fn default() -> Self {
|
||||
Self::new(T::default())
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T> From<T> for ReentrantLock<T> {
|
||||
fn from(t: T) -> Self {
|
||||
Self::new(t)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T: ?Sized> Deref for ReentrantLockGuard<'_, T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
&self.lock.data
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T: fmt::Debug + ?Sized> fmt::Debug for ReentrantLockGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
(**self).fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
|
||||
impl<T: fmt::Display + ?Sized> fmt::Display for ReentrantLockGuard<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
(**self).fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> Drop for ReentrantLockGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: We own the lock.
        unsafe {
            // Each guard accounts for one successful `lock()`/`try_lock()`;
            // only the last guard on the owning thread releases the mutex.
            *self.lock.lock_count.get() -= 1;
            if *self.lock.lock_count.get() == 0 {
                // Ownership is cleared before the mutex is released, so no
                // other thread can acquire it and observe a stale owner id.
                self.lock.owner.set(None);
                self.lock.mutex.unlock();
            }
        }
    }
}
|
||||
@@ -1,113 +0,0 @@
|
||||
#![forbid(unsafe_op_in_unsafe_fn)]
|
||||
|
||||
use crate::alloc::{GlobalAlloc, Layout, System};
|
||||
use crate::ptr;
|
||||
|
||||
// The minimum alignment guaranteed by the architecture. This value is used to
|
||||
// add fast paths for low alignment values.
|
||||
// Evaluated at compile time; the final `panic!` arm turns an unlisted
// architecture into a build error rather than a silently wrong alignment.
#[allow(dead_code)]
const MIN_ALIGN: usize = if cfg!(any(
    all(target_arch = "riscv32", any(target_os = "espidf", target_os = "zkvm")),
    all(target_arch = "xtensa", target_os = "espidf"),
)) {
    // The allocator on the esp-idf and zkvm platforms guarantees 4 byte alignment.
    4
} else if cfg!(any(
    // 32-bit (and similar) targets: malloc guarantees 8-byte alignment.
    target_arch = "x86",
    target_arch = "arm",
    target_arch = "m68k",
    target_arch = "csky",
    target_arch = "loongarch32",
    target_arch = "mips",
    target_arch = "mips32r6",
    target_arch = "powerpc",
    target_arch = "powerpc64",
    target_arch = "sparc",
    target_arch = "wasm32",
    target_arch = "hexagon",
    target_arch = "riscv32",
    target_arch = "xtensa",
)) {
    8
} else if cfg!(any(
    // 64-bit targets: malloc guarantees 16-byte alignment.
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "arm64ec",
    target_arch = "loongarch64",
    target_arch = "mips64",
    target_arch = "mips64r6",
    target_arch = "s390x",
    target_arch = "sparc64",
    target_arch = "riscv64",
    target_arch = "wasm64",
)) {
    16
} else {
    panic!("add a value for MIN_ALIGN")
};
|
||||
|
||||
#[allow(dead_code)]
|
||||
unsafe fn realloc_fallback(
|
||||
alloc: &System,
|
||||
ptr: *mut u8,
|
||||
old_layout: Layout,
|
||||
new_size: usize,
|
||||
) -> *mut u8 {
|
||||
// SAFETY: Docs for GlobalAlloc::realloc require this to be valid
|
||||
unsafe {
|
||||
let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
|
||||
|
||||
let new_ptr = GlobalAlloc::alloc(alloc, new_layout);
|
||||
if !new_ptr.is_null() {
|
||||
let size = usize::min(old_layout.size(), new_size);
|
||||
ptr::copy_nonoverlapping(ptr, new_ptr, size);
|
||||
GlobalAlloc::dealloc(alloc, ptr, old_layout);
|
||||
}
|
||||
|
||||
new_ptr
|
||||
}
|
||||
}
|
||||
|
||||
cfg_select! {
    // Route to exactly one platform-specific `System` allocator backend.
    any(
        target_family = "unix",
        target_os = "wasi",
        target_os = "teeos",
        target_os = "trusty",
    ) => {
        mod unix;
    }
    target_os = "windows" => {
        mod windows;
    }
    target_os = "hermit" => {
        mod hermit;
    }
    target_os = "motor" => {
        mod motor;
    }
    all(target_vendor = "fortanix", target_env = "sgx") => {
        mod sgx;
    }
    target_os = "solid_asp3" => {
        mod solid;
    }
    target_os = "uefi" => {
        mod uefi;
    }
    target_os = "vexos" => {
        mod vexos;
    }
    target_family = "wasm" => {
        mod wasm;
    }
    target_os = "xous" => {
        mod xous;
    }
    target_os = "zkvm" => {
        mod zkvm;
    }
    // NOTE(review): non-upstream target — presumably this project's own OS.
    target_os = "survos" => {
        mod survos;
    }
}
|
||||
@@ -1,14 +0,0 @@
|
||||
use crate::alloc::{GlobalAlloc, Layout, System};
|
||||
|
||||
// System allocator that delegates directly to kernel syscalls.
#[stable(feature = "alloc_system_type", since = "1.28.0")]
unsafe impl GlobalAlloc for System {
    #[inline]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // SAFETY: the caller upholds `GlobalAlloc::alloc`'s contract
        // (non-zero-sized `layout`); the request is forwarded to the kernel.
        unsafe { crate::syscall::alloc(layout) }
    }

    #[inline]
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // SAFETY: the caller guarantees `ptr` came from this allocator with
        // the same `layout`, as `GlobalAlloc::dealloc` requires.
        unsafe { crate::syscall::dealloc(ptr, layout) }
    }

    // `realloc` and `alloc_zeroed` use the default trait implementations
    // (alloc + copy + dealloc, and alloc + zero-fill respectively).
}
|
||||
@@ -1,242 +0,0 @@
|
||||
//! Common code for printing backtraces.
|
||||
#![forbid(unsafe_op_in_unsafe_fn)]
|
||||
|
||||
// use crate::backtrace_rs::{self, BacktraceFmt, BytesOrWideString, PrintFmt};
|
||||
use crate::borrow::Cow;
|
||||
use crate::io::prelude::*;
|
||||
use crate::path::{self, Path, PathBuf};
|
||||
use crate::sync::{Mutex, MutexGuard, PoisonError};
|
||||
use crate::{env, fmt, io};
|
||||
|
||||
/// Max number of frames to print.
// NOTE(review): only referenced by the frame-printing code, which is
// currently commented out below — confirm before removing.
const MAX_NB_FRAMES: usize = 100;

// Default value of `RUST_BACKTRACE`'s "full" behavior when the variable
// is not set.
pub(crate) const FULL_BACKTRACE_DEFAULT: bool = cfg_select! {
    // Fuchsia components default to full backtrace.
    target_os = "fuchsia" => true,
    _ => false,
};
|
||||
|
||||
// RAII proof that the process-wide backtrace lock is held; the guard is
// never read, only held for its `Drop`.
pub(crate) struct BacktraceLock<'a>(#[allow(dead_code)] MutexGuard<'a, ()>);
|
||||
|
||||
pub(crate) fn lock<'a>() -> BacktraceLock<'a> {
|
||||
static LOCK: Mutex<()> = Mutex::new(());
|
||||
BacktraceLock(LOCK.lock().unwrap_or_else(PoisonError::into_inner))
|
||||
}
|
||||
|
||||
// impl BacktraceLock<'_> {
|
||||
// /// Prints the current backtrace.
|
||||
// pub(crate) fn print(&mut self, w: &mut dyn Write, format: PrintFmt) -> io::Result<()> {
|
||||
// // There are issues currently linking libbacktrace into tests, and in
|
||||
// // general during std's own unit tests we're not testing this path. In
|
||||
// // test mode immediately return here to optimize away any references to the
|
||||
// // libbacktrace symbols
|
||||
// if cfg!(test) {
|
||||
// return Ok(());
|
||||
// }
|
||||
|
||||
// struct DisplayBacktrace {
|
||||
// format: PrintFmt,
|
||||
// }
|
||||
// impl fmt::Display for DisplayBacktrace {
|
||||
// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
// // SAFETY: the backtrace lock is held
|
||||
// unsafe { _print_fmt(fmt, self.format) }
|
||||
// }
|
||||
// }
|
||||
// write!(w, "{}", DisplayBacktrace { format })
|
||||
// }
|
||||
// }
|
||||
|
||||
// /// # Safety
|
||||
// ///
|
||||
// /// This function is not Sync. The caller must hold a mutex lock, or there must be only one thread in the program.
|
||||
// unsafe fn _print_fmt(fmt: &mut fmt::Formatter<'_>, print_fmt: PrintFmt) -> fmt::Result {
|
||||
// // Always 'fail' to get the cwd when running under Miri -
|
||||
// // this allows Miri to display backtraces in isolation mode
|
||||
// let cwd = if !cfg!(miri) {
|
||||
// env::current_dir().ok()
|
||||
// } else {
|
||||
// None
|
||||
// };
|
||||
|
||||
// let mut print_path = move |fmt: &mut fmt::Formatter<'_>, bows: BytesOrWideString<'_>| {
|
||||
// output_filename(fmt, bows, print_fmt, cwd.as_ref())
|
||||
// };
|
||||
// writeln!(fmt, "stack backtrace:")?;
|
||||
// let mut bt_fmt = BacktraceFmt::new(fmt, print_fmt, &mut print_path);
|
||||
// bt_fmt.add_context()?;
|
||||
// let mut idx = 0;
|
||||
// let mut res = Ok(());
|
||||
// let mut omitted_count: usize = 0;
|
||||
// let mut first_omit = true;
|
||||
// // If we're using a short backtrace, ignore all frames until we're told to start printing.
|
||||
// let mut print = print_fmt != PrintFmt::Short;
|
||||
// set_image_base();
|
||||
// // SAFETY: we roll our own locking in this town
|
||||
// unsafe {
|
||||
// backtrace_rs::trace_unsynchronized(|frame| {
|
||||
// if print_fmt == PrintFmt::Short && idx > MAX_NB_FRAMES {
|
||||
// return false;
|
||||
// }
|
||||
|
||||
// if cfg!(feature = "backtrace-trace-only") {
|
||||
// const HEX_WIDTH: usize = 2 + 2 * size_of::<usize>();
|
||||
// let frame_ip = frame.ip();
|
||||
// res = writeln!(bt_fmt.formatter(), "{idx:4}: {frame_ip:HEX_WIDTH$?}");
|
||||
// } else {
|
||||
// let mut hit = false;
|
||||
// backtrace_rs::resolve_frame_unsynchronized(frame, |symbol| {
|
||||
// hit = true;
|
||||
|
||||
// // `__rust_end_short_backtrace` means we are done hiding symbols
|
||||
// // for now. Print until we see `__rust_begin_short_backtrace`.
|
||||
// if print_fmt == PrintFmt::Short {
|
||||
// if let Some(sym) = symbol.name().and_then(|s| s.as_str()) {
|
||||
// if sym.contains("__rust_end_short_backtrace") {
|
||||
// print = true;
|
||||
// return;
|
||||
// }
|
||||
// if print && sym.contains("__rust_begin_short_backtrace") {
|
||||
// print = false;
|
||||
// return;
|
||||
// }
|
||||
// if !print {
|
||||
// omitted_count += 1;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
// if print {
|
||||
// if omitted_count > 0 {
|
||||
// debug_assert!(print_fmt == PrintFmt::Short);
|
||||
// // only print the message between the middle of frames
|
||||
// if !first_omit {
|
||||
// let _ = writeln!(
|
||||
// bt_fmt.formatter(),
|
||||
// " [... omitted {} frame{} ...]",
|
||||
// omitted_count,
|
||||
// if omitted_count > 1 { "s" } else { "" }
|
||||
// );
|
||||
// }
|
||||
// first_omit = false;
|
||||
// omitted_count = 0;
|
||||
// }
|
||||
// res = bt_fmt.frame().symbol(frame, symbol);
|
||||
// }
|
||||
// });
|
||||
// #[cfg(all(target_os = "nto", any(target_env = "nto70", target_env = "nto71")))]
|
||||
// if libc::__my_thread_exit as *mut libc::c_void == frame.ip() {
|
||||
// if !hit && print {
|
||||
// use crate::backtrace_rs::SymbolName;
|
||||
// res = bt_fmt.frame().print_raw(
|
||||
// frame.ip(),
|
||||
// Some(SymbolName::new("__my_thread_exit".as_bytes())),
|
||||
// None,
|
||||
// None,
|
||||
// );
|
||||
// }
|
||||
// return false;
|
||||
// }
|
||||
// if !hit && print {
|
||||
// res = bt_fmt.frame().print_raw(frame.ip(), None, None, None);
|
||||
// }
|
||||
// }
|
||||
|
||||
// idx += 1;
|
||||
// res.is_ok()
|
||||
// })
|
||||
// };
|
||||
// res?;
|
||||
// bt_fmt.finish()?;
|
||||
// if print_fmt == PrintFmt::Short {
|
||||
// writeln!(
|
||||
// fmt,
|
||||
// "note: Some details are omitted, \
|
||||
// run with `RUST_BACKTRACE=full` for a verbose backtrace."
|
||||
// )?;
|
||||
// }
|
||||
// Ok(())
|
||||
// }
|
||||
|
||||
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. Note that
|
||||
/// this is only inline(never) when backtraces in std are enabled, otherwise
|
||||
/// it's fine to optimize away.
|
||||
#[cfg_attr(feature = "backtrace", inline(never))]
|
||||
pub fn __rust_begin_short_backtrace<F, T>(f: F) -> T
|
||||
where
|
||||
F: FnOnce() -> T,
|
||||
{
|
||||
let result = f();
|
||||
|
||||
// prevent this frame from being tail-call optimised away
|
||||
crate::hint::black_box(());
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. Note that
|
||||
/// this is only inline(never) when backtraces in std are enabled, otherwise
|
||||
/// it's fine to optimize away.
|
||||
#[cfg_attr(feature = "backtrace", inline(never))]
|
||||
pub fn __rust_end_short_backtrace<F, T>(f: F) -> T
|
||||
where
|
||||
F: FnOnce() -> T,
|
||||
{
|
||||
let result = f();
|
||||
|
||||
// prevent this frame from being tail-call optimised away
|
||||
crate::hint::black_box(());
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
// /// Prints the filename of the backtrace frame.
|
||||
// ///
|
||||
// /// See also `output`.
|
||||
// pub fn output_filename(
|
||||
// fmt: &mut fmt::Formatter<'_>,
|
||||
// bows: BytesOrWideString<'_>,
|
||||
// print_fmt: PrintFmt,
|
||||
// cwd: Option<&PathBuf>,
|
||||
// ) -> fmt::Result {
|
||||
// let file: Cow<'_, Path> = match bows {
|
||||
// #[cfg(unix)]
|
||||
// BytesOrWideString::Bytes(bytes) => {
|
||||
// use crate::os::unix::prelude::*;
|
||||
// Path::new(crate::ffi::OsStr::from_bytes(bytes)).into()
|
||||
// }
|
||||
// #[cfg(not(unix))]
|
||||
// BytesOrWideString::Bytes(bytes) => {
|
||||
// Path::new(crate::str::from_utf8(bytes).unwrap_or("<unknown>")).into()
|
||||
// }
|
||||
// #[cfg(windows)]
|
||||
// BytesOrWideString::Wide(wide) => {
|
||||
// use crate::os::windows::prelude::*;
|
||||
// Cow::Owned(crate::ffi::OsString::from_wide(wide).into())
|
||||
// }
|
||||
// #[cfg(not(windows))]
|
||||
// BytesOrWideString::Wide(_wide) => Path::new("<unknown>").into(),
|
||||
// };
|
||||
// if print_fmt == PrintFmt::Short && file.is_absolute() {
|
||||
// if let Some(cwd) = cwd {
|
||||
// if let Ok(stripped) = file.strip_prefix(&cwd) {
|
||||
// if let Some(s) = stripped.to_str() {
|
||||
// return write!(fmt, ".{}{s}", path::MAIN_SEPARATOR);
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// fmt::Display::fmt(&file.display(), fmt)
|
||||
// }
|
||||
|
||||
// On SGX, tell the backtrace machinery where the enclave image is loaded so
// addresses can be resolved.
// NOTE(review): the `backtrace_rs` import is commented out at the top of this
// file — confirm this cfg arm still compiles for the SGX target.
#[cfg(all(target_vendor = "fortanix", target_env = "sgx"))]
pub fn set_image_base() {
    let image_base = crate::os::fortanix_sgx::mem::image_base();
    backtrace_rs::set_image_base(crate::ptr::without_provenance_mut(image_base as _));
}

#[cfg(not(all(target_vendor = "fortanix", target_env = "sgx")))]
pub fn set_image_base() {
    // nothing to do for platforms other than SGX
}
|
||||
@@ -1,114 +0,0 @@
|
||||
#![cfg(not(test))]
|
||||
|
||||
// These symbols are all defined by `libm`,
// or by `compiler-builtins` on unsupported platforms.
unsafe extern "C" {
    // f64 (and a few f32) special functions.
    pub safe fn acos(n: f64) -> f64;
    pub safe fn asin(n: f64) -> f64;
    pub safe fn atan(n: f64) -> f64;
    pub safe fn atan2(a: f64, b: f64) -> f64;
    pub safe fn cosh(n: f64) -> f64;
    pub safe fn expm1(n: f64) -> f64;
    pub safe fn expm1f(n: f32) -> f32;
    // MSVC's CRT exports these under an underscore-prefixed name.
    #[cfg_attr(target_env = "msvc", link_name = "_hypot")]
    pub safe fn hypot(x: f64, y: f64) -> f64;
    #[cfg_attr(target_env = "msvc", link_name = "_hypotf")]
    pub safe fn hypotf(x: f32, y: f32) -> f32;
    pub safe fn log1p(n: f64) -> f64;
    pub safe fn log1pf(n: f32) -> f32;
    pub safe fn sinh(n: f64) -> f64;
    pub safe fn tan(n: f64) -> f64;
    pub safe fn tanh(n: f64) -> f64;
    pub safe fn tgamma(n: f64) -> f64;
    pub safe fn tgammaf(n: f32) -> f32;
    pub safe fn lgamma_r(n: f64, s: &mut i32) -> f64;
    // AIX lacks `lgammaf_r`; a shim at the bottom of this file fills in.
    #[cfg(not(target_os = "aix"))]
    pub safe fn lgammaf_r(n: f32, s: &mut i32) -> f32;
    pub safe fn erf(n: f64) -> f64;
    pub safe fn erff(n: f32) -> f32;
    pub safe fn erfc(n: f64) -> f64;
    pub safe fn erfcf(n: f32) -> f32;

    // Binary128 (`f128`) variants.
    pub safe fn acosf128(n: f128) -> f128;
    pub safe fn asinf128(n: f128) -> f128;
    pub safe fn atanf128(n: f128) -> f128;
    pub safe fn atan2f128(a: f128, b: f128) -> f128;
    pub safe fn cbrtf128(n: f128) -> f128;
    pub safe fn coshf128(n: f128) -> f128;
    pub safe fn expm1f128(n: f128) -> f128;
    pub safe fn hypotf128(x: f128, y: f128) -> f128;
    pub safe fn log1pf128(n: f128) -> f128;
    pub safe fn sinhf128(n: f128) -> f128;
    pub safe fn tanf128(n: f128) -> f128;
    pub safe fn tanhf128(n: f128) -> f128;
    pub safe fn tgammaf128(n: f128) -> f128;
    pub safe fn lgammaf128_r(n: f128, s: &mut i32) -> f128;
    pub safe fn erff128(n: f128) -> f128;
    pub safe fn erfcf128(n: f128) -> f128;
}
|
||||
|
||||
cfg_select! {
    all(target_os = "windows", target_env = "msvc", target_arch = "x86") => {
        // On 32-bit x86 MSVC these functions aren't defined, so we just define shims
        // which promote everything to f64, perform the calculation, and then demote
        // back to f32. While not precisely correct should be "correct enough" for now.
        #[inline]
        pub fn acosf(n: f32) -> f32 {
            f64::acos(n as f64) as f32
        }

        #[inline]
        pub fn asinf(n: f32) -> f32 {
            f64::asin(n as f64) as f32
        }

        #[inline]
        pub fn atan2f(n: f32, b: f32) -> f32 {
            f64::atan2(n as f64, b as f64) as f32
        }

        #[inline]
        pub fn atanf(n: f32) -> f32 {
            f64::atan(n as f64) as f32
        }

        #[inline]
        pub fn coshf(n: f32) -> f32 {
            f64::cosh(n as f64) as f32
        }

        #[inline]
        pub fn sinhf(n: f32) -> f32 {
            f64::sinh(n as f64) as f32
        }

        #[inline]
        pub fn tanf(n: f32) -> f32 {
            f64::tan(n as f64) as f32
        }

        #[inline]
        pub fn tanhf(n: f32) -> f32 {
            f64::tanh(n as f64) as f32
        }
    }
    _ => {
        // Everywhere else the single-precision functions link directly.
        unsafe extern "C" {
            pub safe fn acosf(n: f32) -> f32;
            pub safe fn asinf(n: f32) -> f32;
            pub safe fn atan2f(a: f32, b: f32) -> f32;
            pub safe fn atanf(n: f32) -> f32;
            pub safe fn coshf(n: f32) -> f32;
            pub safe fn sinhf(n: f32) -> f32;
            pub safe fn tanf(n: f32) -> f32;
            pub safe fn tanhf(n: f32) -> f32;
        }
    }
}
|
||||
|
||||
// On AIX, we don't have lgammaf_r only the f64 version, so we can
|
||||
// use the f64 version lgamma_r
|
||||
#[cfg(target_os = "aix")]
|
||||
pub fn lgammaf_r(n: f32, s: &mut i32) -> f32 {
|
||||
lgamma_r(n.into(), s) as f32
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
//! The configure builtins provides runtime support compiler-builtin features
|
||||
//! which require dynamic initialization to work as expected, e.g. aarch64
|
||||
//! outline-atomics.
|
||||
|
||||
/// Enable LSE atomic operations at startup, if supported.
|
||||
///
|
||||
/// Linker sections are based on what [`ctor`] does, with priorities to run slightly before user
|
||||
/// code:
|
||||
///
|
||||
/// - Apple uses the section `__mod_init_func`, `mod_init_funcs` is needed to set
|
||||
/// `S_MOD_INIT_FUNC_POINTERS`. There doesn't seem to be a way to indicate priorities.
|
||||
/// - Windows uses `.CRT$XCT`, which is run before user constructors (these should use `.CRT$XCU`).
|
||||
/// - ELF uses `.init_array` with a priority of 90, which runs before our `ARGV_INIT_ARRAY`
|
||||
/// initializer (priority 99). Both are within the 0-100 implementation-reserved range, per docs
|
||||
/// for the [`prio-ctor-dtor`] warning, and this matches compiler-rt's `CONSTRUCTOR_PRIORITY`.
|
||||
///
|
||||
/// To save startup time, the initializer is only run if outline atomic routines from
|
||||
/// compiler-builtins may be used. If LSE is known to be available then the calls are never
|
||||
/// emitted, and if we build the C intrinsics then it has its own initializer using the symbol
|
||||
/// `__aarch64_have_lse_atomics`.
|
||||
///
|
||||
/// Initialization is done in a global constructor to so we get the same behavior regardless of
|
||||
/// whether Rust's `init` is used, or if we are in a `dylib` or `no_main` situation (as opposed
|
||||
/// to doing it as part of pre-main startup). This also matches C implementations.
|
||||
///
|
||||
/// Ideally `core` would have something similar, but detecting the CPU features requires the
|
||||
/// auxiliary vector from the OS. We do the initialization in `std` rather than as part of
|
||||
/// `compiler-builtins` because a builtins->std dependency isn't possible, and inlining parts of
|
||||
/// `std-detect` would be much messier.
|
||||
///
|
||||
/// [`ctor`]: https://github.com/mmastrac/rust-ctor/blob/63382b833ddcbfb8b064f4e86bfa1ed4026ff356/shared/src/macros/mod.rs#L522-L534
|
||||
/// [`prio-ctor-dtor`]: https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html
|
||||
#[cfg(all(
|
||||
target_arch = "aarch64",
|
||||
target_feature = "outline-atomics",
|
||||
not(target_feature = "lse"),
|
||||
not(feature = "compiler-builtins-c"),
|
||||
))]
|
||||
#[used]
|
||||
#[cfg_attr(target_vendor = "apple", unsafe(link_section = "__DATA,__mod_init_func,mod_init_funcs"))]
|
||||
#[cfg_attr(target_os = "windows", unsafe(link_section = ".CRT$XCT"))]
|
||||
#[cfg_attr(
|
||||
not(any(target_vendor = "apple", target_os = "windows")),
|
||||
unsafe(link_section = ".init_array.90")
|
||||
)]
|
||||
static RUST_LSE_INIT: extern "C" fn() = {
|
||||
extern "C" fn init_lse() {
|
||||
use crate::arch;
|
||||
|
||||
// This is provided by compiler-builtins::aarch64_outline_atomics.
|
||||
unsafe extern "C" {
|
||||
fn __rust_enable_lse();
|
||||
}
|
||||
|
||||
if arch::is_aarch64_feature_detected!("lse") {
|
||||
unsafe {
|
||||
__rust_enable_lse();
|
||||
}
|
||||
}
|
||||
}
|
||||
init_lse
|
||||
};
|
||||
31
crates/std/src/sys/env/common.rs
vendored
31
crates/std/src/sys/env/common.rs
vendored
@@ -1,31 +0,0 @@
|
||||
use crate::ffi::OsString;
|
||||
use crate::{fmt, vec};
|
||||
|
||||
pub struct Env {
|
||||
iter: vec::IntoIter<(OsString, OsString)>,
|
||||
}
|
||||
|
||||
impl Env {
|
||||
pub(super) fn new(env: Vec<(OsString, OsString)>) -> Self {
|
||||
Env { iter: env.into_iter() }
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Env {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list().entries(self.iter.as_slice()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl !Send for Env {}
|
||||
impl !Sync for Env {}
|
||||
|
||||
impl Iterator for Env {
|
||||
type Item = (OsString, OsString);
|
||||
fn next(&mut self) -> Option<(OsString, OsString)> {
|
||||
self.iter.next()
|
||||
}
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
self.iter.size_hint()
|
||||
}
|
||||
}
|
||||
33
crates/std/src/sys/env/unsupported.rs
vendored
33
crates/std/src/sys/env/unsupported.rs
vendored
@@ -1,33 +0,0 @@
|
||||
use crate::ffi::{OsStr, OsString};
|
||||
use crate::{fmt, io};
|
||||
|
||||
pub struct Env(!);
|
||||
|
||||
impl fmt::Debug for Env {
|
||||
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator for Env {
|
||||
type Item = (OsString, OsString);
|
||||
fn next(&mut self) -> Option<(OsString, OsString)> {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
pub fn env() -> Env {
|
||||
panic!("not supported on this platform")
|
||||
}
|
||||
|
||||
pub fn getenv(_: &OsStr) -> Option<OsString> {
|
||||
None
|
||||
}
|
||||
|
||||
pub unsafe fn setenv(_: &OsStr, _: &OsStr) -> io::Result<()> {
|
||||
Err(io::const_error!(io::ErrorKind::Unsupported, "cannot set env vars on this platform"))
|
||||
}
|
||||
|
||||
pub unsafe fn unsetenv(_: &OsStr) -> io::Result<()> {
|
||||
Err(io::const_error!(io::ErrorKind::Unsupported, "cannot unset env vars on this platform"))
|
||||
}
|
||||
@@ -1,426 +0,0 @@
|
||||
//! Constants associated with each target.
|
||||
|
||||
// Replaces the #[else] gate with #[cfg(not(any(…)))] of all the other gates.
|
||||
// This ensures that they must be mutually exclusive and do not have precedence
|
||||
// like cfg_select!.
|
||||
macro cfg_unordered(
|
||||
$(#[cfg($cfg:meta)] $os:item)*
|
||||
#[else] $fallback:item
|
||||
) {
|
||||
$(#[cfg($cfg)] $os)*
|
||||
#[cfg(not(any($($cfg),*)))] $fallback
|
||||
}
|
||||
|
||||
// Keep entries sorted alphabetically and mutually exclusive.
|
||||
|
||||
cfg_unordered! {
|
||||
|
||||
#[cfg(target_os = "aix")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "aix";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".a";
|
||||
pub const DLL_EXTENSION: &str = "a";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "android")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "android";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "cygwin")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "cygwin";
|
||||
pub const DLL_PREFIX: &str = "";
|
||||
pub const DLL_SUFFIX: &str = ".dll";
|
||||
pub const DLL_EXTENSION: &str = "dll";
|
||||
pub const EXE_SUFFIX: &str = ".exe";
|
||||
pub const EXE_EXTENSION: &str = "exe";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "dragonfly")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "dragonfly";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "emscripten")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "emscripten";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = ".js";
|
||||
pub const EXE_EXTENSION: &str = "js";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "espidf")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "espidf";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "freebsd")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "freebsd";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "fuchsia")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "fuchsia";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "haiku")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "haiku";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "hermit")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "";
|
||||
pub const OS: &str = "hermit";
|
||||
pub const DLL_PREFIX: &str = "";
|
||||
pub const DLL_SUFFIX: &str = "";
|
||||
pub const DLL_EXTENSION: &str = "";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "horizon")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "horizon";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = ".elf";
|
||||
pub const EXE_EXTENSION: &str = "elf";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "hurd")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "hurd";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "illumos")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "illumos";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "ios")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "ios";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".dylib";
|
||||
pub const DLL_EXTENSION: &str = "dylib";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "l4re")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "l4re";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "linux";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "macos")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "macos";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".dylib";
|
||||
pub const DLL_EXTENSION: &str = "dylib";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "netbsd")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "netbsd";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "nto")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "nto";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "nuttx")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "nuttx";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "openbsd")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "openbsd";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "redox")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "redox";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "rtems")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "rtems";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(all(target_vendor = "fortanix", target_env = "sgx"))]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "";
|
||||
pub const OS: &str = "";
|
||||
pub const DLL_PREFIX: &str = "";
|
||||
pub const DLL_SUFFIX: &str = ".sgxs";
|
||||
pub const DLL_EXTENSION: &str = "sgxs";
|
||||
pub const EXE_SUFFIX: &str = ".sgxs";
|
||||
pub const EXE_EXTENSION: &str = "sgxs";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "solaris")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "solaris";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "solid_asp3")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "itron";
|
||||
pub const OS: &str = "solid";
|
||||
pub const DLL_PREFIX: &str = "";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "tvos")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "tvos";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".dylib";
|
||||
pub const DLL_EXTENSION: &str = "dylib";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "uefi")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "";
|
||||
pub const OS: &str = "uefi";
|
||||
pub const DLL_PREFIX: &str = "";
|
||||
pub const DLL_SUFFIX: &str = "";
|
||||
pub const DLL_EXTENSION: &str = "";
|
||||
pub const EXE_SUFFIX: &str = ".efi";
|
||||
pub const EXE_EXTENSION: &str = "efi";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "vexos")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "";
|
||||
pub const OS: &str = "vexos";
|
||||
pub const DLL_PREFIX: &str = "";
|
||||
pub const DLL_SUFFIX: &str = "";
|
||||
pub const DLL_EXTENSION: &str = "";
|
||||
pub const EXE_SUFFIX: &str = ".bin";
|
||||
pub const EXE_EXTENSION: &str = "bin";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "visionos")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "visionos";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".dylib";
|
||||
pub const DLL_EXTENSION: &str = "dylib";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "vita")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "vita";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = ".elf";
|
||||
pub const EXE_EXTENSION: &str = "elf";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "vxworks")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "vxworks";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".so";
|
||||
pub const DLL_EXTENSION: &str = "so";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(all(target_family = "wasm", not(any(target_os = "emscripten", target_os = "linux"))))]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "";
|
||||
pub const OS: &str = "";
|
||||
pub const DLL_PREFIX: &str = "";
|
||||
pub const DLL_SUFFIX: &str = ".wasm";
|
||||
pub const DLL_EXTENSION: &str = "wasm";
|
||||
pub const EXE_SUFFIX: &str = ".wasm";
|
||||
pub const EXE_EXTENSION: &str = "wasm";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "watchos")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "unix";
|
||||
pub const OS: &str = "watchos";
|
||||
pub const DLL_PREFIX: &str = "lib";
|
||||
pub const DLL_SUFFIX: &str = ".dylib";
|
||||
pub const DLL_EXTENSION: &str = "dylib";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "windows")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "windows";
|
||||
pub const OS: &str = "windows";
|
||||
pub const DLL_PREFIX: &str = "";
|
||||
pub const DLL_SUFFIX: &str = ".dll";
|
||||
pub const DLL_EXTENSION: &str = "dll";
|
||||
pub const EXE_SUFFIX: &str = ".exe";
|
||||
pub const EXE_EXTENSION: &str = "exe";
|
||||
}
|
||||
|
||||
#[cfg(target_os = "zkvm")]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "";
|
||||
pub const OS: &str = "";
|
||||
pub const DLL_PREFIX: &str = "";
|
||||
pub const DLL_SUFFIX: &str = ".elf";
|
||||
pub const DLL_EXTENSION: &str = "elf";
|
||||
pub const EXE_SUFFIX: &str = ".elf";
|
||||
pub const EXE_EXTENSION: &str = "elf";
|
||||
}
|
||||
|
||||
// The fallback when none of the other gates match.
|
||||
#[else]
|
||||
pub mod os {
|
||||
pub const FAMILY: &str = "";
|
||||
pub const OS: &str = "";
|
||||
pub const DLL_PREFIX: &str = "";
|
||||
pub const DLL_SUFFIX: &str = "";
|
||||
pub const DLL_EXTENSION: &str = "";
|
||||
pub const EXE_SUFFIX: &str = "";
|
||||
pub const EXE_EXTENSION: &str = "";
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,143 +0,0 @@
|
||||
cfg_select! {
|
||||
target_os = "linux" => {
|
||||
/// Mitigation for <https://github.com/rust-lang/rust/issues/126600>
|
||||
///
|
||||
/// On glibc, `libc::exit` has been observed to not always be thread-safe.
|
||||
/// It is currently unclear whether that is a glibc bug or allowed by the standard.
|
||||
/// To mitigate this problem, we ensure that only one
|
||||
/// Rust thread calls `libc::exit` (or returns from `main`) by calling this function before
|
||||
/// calling `libc::exit` (or returning from `main`).
|
||||
///
|
||||
/// Technically, this is not enough to ensure soundness, since other code directly calling
|
||||
/// `libc::exit` will still race with this.
|
||||
///
|
||||
/// *This function does not itself call `libc::exit`.* This is so it can also be used
|
||||
/// to guard returning from `main`.
|
||||
///
|
||||
/// This function will return only the first time it is called in a process.
|
||||
///
|
||||
/// * If it is called again on the same thread as the first call, it will abort.
|
||||
/// * If it is called again on a different thread, it will wait in a loop
|
||||
/// (waiting for the process to exit).
|
||||
pub fn unique_thread_exit() {
|
||||
use crate::ffi::c_int;
|
||||
use crate::ptr;
|
||||
use crate::sync::atomic::AtomicPtr;
|
||||
use crate::sync::atomic::Ordering::{Acquire, Relaxed};
|
||||
|
||||
static EXITING_THREAD_ID: AtomicPtr<c_int> = AtomicPtr::new(ptr::null_mut());
|
||||
|
||||
// We use the address of `errno` as a cheap and safe way to identify
|
||||
// threads. As the C standard mandates that `errno` must have thread
|
||||
// storage duration, we can rely on its address not changing over the
|
||||
// lifetime of the thread. Additionally, accesses to `errno` are
|
||||
// async-signal-safe, so this function is available in all imaginable
|
||||
// circumstances.
|
||||
let this_thread_id = crate::sys::io::errno_location();
|
||||
match EXITING_THREAD_ID.compare_exchange(ptr::null_mut(), this_thread_id, Acquire, Relaxed) {
|
||||
Ok(_) => {
|
||||
// This is the first thread to call `unique_thread_exit`,
|
||||
// and this is the first time it is called. Continue exiting.
|
||||
}
|
||||
Err(exiting_thread_id) if exiting_thread_id == this_thread_id => {
|
||||
// This is the first thread to call `unique_thread_exit`,
|
||||
// but this is the second time it is called.
|
||||
// Abort the process.
|
||||
core::panicking::panic_nounwind("std::process::exit called re-entrantly")
|
||||
}
|
||||
Err(_) => {
|
||||
// This is not the first thread to call `unique_thread_exit`.
|
||||
// Pause until the process exits.
|
||||
loop {
|
||||
// Safety: libc::pause is safe to call.
|
||||
unsafe { libc::pause(); }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
/// Mitigation for <https://github.com/rust-lang/rust/issues/126600>
|
||||
///
|
||||
/// Mitigation is ***NOT*** implemented on this platform, either because this platform
|
||||
/// is not affected, or because mitigation is not yet implemented for this platform.
|
||||
#[cfg_attr(any(test, doctest), expect(dead_code))]
|
||||
pub fn unique_thread_exit() {
|
||||
// Mitigation not required on platforms where `exit` is thread-safe.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn exit(code: i32) -> ! {
|
||||
cfg_select! {
|
||||
target_os = "hermit" => {
|
||||
unsafe { hermit_abi::exit(code) }
|
||||
}
|
||||
target_os = "linux" => {
|
||||
unsafe {
|
||||
unique_thread_exit();
|
||||
libc::exit(code)
|
||||
}
|
||||
}
|
||||
target_os = "motor" => {
|
||||
moto_rt::process::exit(code)
|
||||
}
|
||||
all(target_vendor = "fortanix", target_env = "sgx") => {
|
||||
crate::sys::pal::abi::exit_with_code(code as _)
|
||||
}
|
||||
target_os = "solid_asp3" => {
|
||||
rtabort!("exit({}) called", code)
|
||||
}
|
||||
target_os = "teeos" => {
|
||||
let _ = code;
|
||||
panic!("TA should not call `exit`")
|
||||
}
|
||||
target_os = "uefi" => {
|
||||
use r_efi::base::Status;
|
||||
|
||||
use crate::os::uefi::env;
|
||||
|
||||
if let (Some(boot_services), Some(handle)) =
|
||||
(env::boot_services(), env::try_image_handle())
|
||||
{
|
||||
let boot_services = boot_services.cast::<r_efi::efi::BootServices>();
|
||||
let _ = unsafe {
|
||||
((*boot_services.as_ptr()).exit)(
|
||||
handle.as_ptr(),
|
||||
Status::from_usize(code as usize),
|
||||
0,
|
||||
crate::ptr::null_mut(),
|
||||
)
|
||||
};
|
||||
}
|
||||
crate::intrinsics::abort()
|
||||
}
|
||||
any(
|
||||
target_family = "unix",
|
||||
target_os = "wasi",
|
||||
) => {
|
||||
unsafe { libc::exit(code as crate::ffi::c_int) }
|
||||
}
|
||||
target_os = "vexos" => {
|
||||
let _ = code;
|
||||
|
||||
unsafe {
|
||||
vex_sdk::vexSystemExitRequest();
|
||||
|
||||
loop {
|
||||
vex_sdk::vexTasksRun();
|
||||
}
|
||||
}
|
||||
}
|
||||
target_os = "windows" => {
|
||||
unsafe { crate::sys::pal::c::ExitProcess(code as u32) }
|
||||
}
|
||||
target_os = "xous" => {
|
||||
crate::os::xous::ffi::exit(code as u32)
|
||||
}
|
||||
_ => {
|
||||
let _ = code;
|
||||
crate::intrinsics::abort()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,81 +0,0 @@
|
||||
#![allow(dead_code)] // not used on all platforms
|
||||
|
||||
use crate::io::{self, Error, ErrorKind};
|
||||
use crate::path::{Path, PathBuf};
|
||||
use crate::sys::fs::{File, OpenOptions};
|
||||
use crate::sys::helpers::ignore_notfound;
|
||||
use crate::{fmt, fs};
|
||||
|
||||
pub(crate) const NOT_FILE_ERROR: Error = io::const_error!(
|
||||
ErrorKind::InvalidInput,
|
||||
"the source path is neither a regular file nor a symlink to a regular file",
|
||||
);
|
||||
|
||||
pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
|
||||
let mut reader = fs::File::open(from)?;
|
||||
let metadata = reader.metadata()?;
|
||||
|
||||
if !metadata.is_file() {
|
||||
return Err(NOT_FILE_ERROR);
|
||||
}
|
||||
|
||||
let mut writer = fs::File::create(to)?;
|
||||
let perm = metadata.permissions();
|
||||
|
||||
let ret = io::copy(&mut reader, &mut writer)?;
|
||||
writer.set_permissions(perm)?;
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
pub fn remove_dir_all(path: &Path) -> io::Result<()> {
|
||||
let filetype = fs::symlink_metadata(path)?.file_type();
|
||||
if filetype.is_symlink() { fs::remove_file(path) } else { remove_dir_all_recursive(path) }
|
||||
}
|
||||
|
||||
fn remove_dir_all_recursive(path: &Path) -> io::Result<()> {
|
||||
for child in fs::read_dir(path)? {
|
||||
let result: io::Result<()> = try {
|
||||
let child = child?;
|
||||
if child.file_type()?.is_dir() {
|
||||
remove_dir_all_recursive(&child.path())?;
|
||||
} else {
|
||||
fs::remove_file(&child.path())?;
|
||||
}
|
||||
};
|
||||
// ignore internal NotFound errors to prevent race conditions
|
||||
if let Err(err) = &result
|
||||
&& err.kind() != io::ErrorKind::NotFound
|
||||
{
|
||||
return result;
|
||||
}
|
||||
}
|
||||
ignore_notfound(fs::remove_dir(path))
|
||||
}
|
||||
|
||||
pub fn exists(path: &Path) -> io::Result<bool> {
|
||||
match fs::metadata(path) {
|
||||
Ok(_) => Ok(true),
|
||||
Err(error) if error.kind() == io::ErrorKind::NotFound => Ok(false),
|
||||
Err(error) => Err(error),
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Dir {
|
||||
path: PathBuf,
|
||||
}
|
||||
|
||||
impl Dir {
|
||||
pub fn open(path: &Path, _opts: &OpenOptions) -> io::Result<Self> {
|
||||
path.canonicalize().map(|path| Self { path })
|
||||
}
|
||||
|
||||
pub fn open_file(&self, path: &Path, opts: &OpenOptions) -> io::Result<File> {
|
||||
File::open(&self.path.join(path), &opts)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Dir {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("Dir").field("path", &self.path).finish()
|
||||
}
|
||||
}
|
||||
@@ -1,173 +0,0 @@
|
||||
#![deny(unsafe_op_in_unsafe_fn)]
|
||||
|
||||
use crate::io;
|
||||
use crate::path::{Path, PathBuf};
|
||||
|
||||
pub mod common;
|
||||
|
||||
cfg_select! {
|
||||
any(target_family = "unix", target_os = "wasi") => {
|
||||
mod unix;
|
||||
use unix as imp;
|
||||
#[cfg(not(target_os = "wasi"))]
|
||||
pub use unix::{chown, fchown, lchown, mkfifo};
|
||||
#[cfg(not(any(target_os = "fuchsia", target_os = "wasi")))]
|
||||
pub use unix::chroot;
|
||||
#[cfg(not(target_os = "wasi"))]
|
||||
pub(crate) use unix::debug_assert_fd_is_open;
|
||||
#[cfg(any(target_os = "linux", target_os = "android"))]
|
||||
pub(super) use unix::CachedFileMetadata;
|
||||
use crate::sys::helpers::run_path_with_cstr as with_native_path;
|
||||
}
|
||||
target_os = "windows" => {
|
||||
mod windows;
|
||||
use windows as imp;
|
||||
pub use windows::{symlink_inner, junction_point};
|
||||
use crate::sys::path::with_native_path;
|
||||
}
|
||||
target_os = "hermit" => {
|
||||
mod hermit;
|
||||
use hermit as imp;
|
||||
}
|
||||
target_os = "motor" => {
|
||||
mod motor;
|
||||
use motor as imp;
|
||||
}
|
||||
target_os = "solid_asp3" => {
|
||||
mod solid;
|
||||
use solid as imp;
|
||||
}
|
||||
target_os = "uefi" => {
|
||||
mod uefi;
|
||||
use uefi as imp;
|
||||
}
|
||||
target_os = "vexos" => {
|
||||
mod vexos;
|
||||
use vexos as imp;
|
||||
}
|
||||
_ => {
|
||||
mod unsupported;
|
||||
use unsupported as imp;
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME: Replace this with platform-specific path conversion functions.
|
||||
#[cfg(not(any(target_family = "unix", target_os = "windows", target_os = "wasi")))]
|
||||
#[inline]
|
||||
pub fn with_native_path<T>(path: &Path, f: &dyn Fn(&Path) -> io::Result<T>) -> io::Result<T> {
|
||||
f(path)
|
||||
}
|
||||
|
||||
pub use imp::{
|
||||
Dir, DirBuilder, DirEntry, File, FileAttr, FilePermissions, FileTimes, FileType, OpenOptions,
|
||||
ReadDir,
|
||||
};
|
||||
|
||||
pub fn read_dir(path: &Path) -> io::Result<ReadDir> {
|
||||
// FIXME: use with_native_path on all platforms
|
||||
imp::readdir(path)
|
||||
}
|
||||
|
||||
pub fn remove_file(path: &Path) -> io::Result<()> {
|
||||
with_native_path(path, &imp::unlink)
|
||||
}
|
||||
|
||||
pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
|
||||
with_native_path(old, &|old| with_native_path(new, &|new| imp::rename(old, new)))
|
||||
}
|
||||
|
||||
pub fn remove_dir(path: &Path) -> io::Result<()> {
|
||||
with_native_path(path, &imp::rmdir)
|
||||
}
|
||||
|
||||
pub fn remove_dir_all(path: &Path) -> io::Result<()> {
|
||||
// FIXME: use with_native_path on all platforms
|
||||
#[cfg(not(windows))]
|
||||
return imp::remove_dir_all(path);
|
||||
#[cfg(windows)]
|
||||
with_native_path(path, &imp::remove_dir_all)
|
||||
}
|
||||
|
||||
pub fn read_link(path: &Path) -> io::Result<PathBuf> {
|
||||
with_native_path(path, &imp::readlink)
|
||||
}
|
||||
|
||||
/// Creates a symbolic link at `link` pointing to `original`.
///
/// Windows takes the raw paths; all other backends convert both paths
/// through `with_native_path` first.
pub fn symlink(original: &Path, link: &Path) -> io::Result<()> {
    // FIXME: use with_native_path on all platforms
    #[cfg(windows)]
    return imp::symlink(original, link);
    #[cfg(not(windows))]
    with_native_path(original, &|original| {
        with_native_path(link, &|link| imp::symlink(original, link))
    })
}
|
||||
|
||||
pub fn hard_link(original: &Path, link: &Path) -> io::Result<()> {
|
||||
with_native_path(original, &|original| {
|
||||
with_native_path(link, &|link| imp::link(original, link))
|
||||
})
|
||||
}
|
||||
|
||||
pub fn metadata(path: &Path) -> io::Result<FileAttr> {
|
||||
with_native_path(path, &imp::stat)
|
||||
}
|
||||
|
||||
pub fn symlink_metadata(path: &Path) -> io::Result<FileAttr> {
|
||||
with_native_path(path, &imp::lstat)
|
||||
}
|
||||
|
||||
pub fn set_permissions(path: &Path, perm: FilePermissions) -> io::Result<()> {
|
||||
with_native_path(path, &|path| imp::set_perm(path, perm.clone()))
|
||||
}
|
||||
|
||||
/// Sets permissions on `path` itself, refusing to follow a final symlink:
/// the file is opened with `O_NOFOLLOW` and the permissions are applied
/// through the resulting handle.
#[cfg(all(unix, not(target_os = "vxworks")))]
pub fn set_permissions_nofollow(path: &Path, perm: crate::fs::Permissions) -> io::Result<()> {
    use crate::fs::OpenOptions;

    let mut options = OpenOptions::new();

    // ESP-IDF and Horizon do not support O_NOFOLLOW, so we skip setting it.
    // Their filesystems do not have symbolic links, so no special handling is required.
    #[cfg(not(any(target_os = "espidf", target_os = "horizon")))]
    {
        use crate::os::unix::fs::OpenOptionsExt;
        options.custom_flags(libc::O_NOFOLLOW);
    }

    options.open(path)?.set_permissions(perm)
}
|
||||
|
||||
/// Fallback for non-Unix targets (and VxWorks): always panics via
/// `unimplemented!` rather than returning an error.
#[cfg(any(not(unix), target_os = "vxworks"))]
pub fn set_permissions_nofollow(_path: &Path, _perm: crate::fs::Permissions) -> io::Result<()> {
    crate::unimplemented!(
        "`set_permissions_nofollow` is currently only implemented on Unix platforms"
    )
}
|
||||
|
||||
pub fn canonicalize(path: &Path) -> io::Result<PathBuf> {
|
||||
with_native_path(path, &imp::canonicalize)
|
||||
}
|
||||
|
||||
/// Copies the file at `from` to `to`, returning the number of bytes copied.
///
/// Only the Windows backend goes through `with_native_path` so far.
pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
    // FIXME: use with_native_path on all platforms
    #[cfg(not(windows))]
    return imp::copy(from, to);
    #[cfg(windows)]
    with_native_path(from, &|from| with_native_path(to, &|to| imp::copy(from, to)))
}
|
||||
|
||||
/// Reports whether `path` exists.
///
/// Only the Windows backend goes through `with_native_path` so far.
pub fn exists(path: &Path) -> io::Result<bool> {
    // FIXME: use with_native_path on all platforms
    #[cfg(not(windows))]
    return imp::exists(path);
    #[cfg(windows)]
    with_native_path(path, &imp::exists)
}
|
||||
|
||||
pub fn set_times(path: &Path, times: FileTimes) -> io::Result<()> {
|
||||
with_native_path(path, &|path| imp::set_times(path, times.clone()))
|
||||
}
|
||||
|
||||
pub fn set_times_nofollow(path: &Path, times: FileTimes) -> io::Result<()> {
|
||||
with_native_path(path, &|path| imp::set_times_nofollow(path, times.clone()))
|
||||
}
|
||||
@@ -1,362 +0,0 @@
|
||||
use crate::ffi::OsString;
|
||||
use crate::fmt;
|
||||
use crate::fs::TryLockError;
|
||||
use crate::hash::{Hash, Hasher};
|
||||
use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut, SeekFrom};
|
||||
use crate::path::{Path, PathBuf};
|
||||
pub use crate::sys::fs::common::Dir;
|
||||
use crate::sys::time::SystemTime;
|
||||
use crate::sys::unsupported;
|
||||
|
||||
/// A file handle for a platform with no filesystem. The single field has
/// the never type `!`, so the type is uninhabited: no `File` can ever be
/// constructed, and method bodies may "return" `self.0` for any type.
pub struct File(!);

/// Uninhabited: no file metadata can ever be produced on this platform.
pub struct FileAttr(!);

/// Uninhabited: no directory can ever be opened for reading here.
pub struct ReadDir(!);

/// Uninhabited: no directory entry can ever be yielded here.
pub struct DirEntry(!);

/// Open options carry no state because `File::open` always fails.
#[derive(Clone, Debug)]
pub struct OpenOptions {}

/// Timestamp options carry no state; there is no file to apply them to.
#[derive(Copy, Clone, Debug, Default)]
pub struct FileTimes {}

/// Uninhabited: permissions only come from a (never-produced) `FileAttr`.
pub struct FilePermissions(!);

/// Uninhabited: file types only come from a (never-produced) `FileAttr`.
pub struct FileType(!);

/// Directory builder carries no state because `mkdir` is unsupported.
#[derive(Debug)]
pub struct DirBuilder {}
|
||||
|
||||
impl FileAttr {
    // `self.0` has the never type `!`, so every body type-checks as
    // diverging code: none of these methods can actually be called,
    // because `FileAttr` is uninhabited.
    pub fn size(&self) -> u64 {
        self.0
    }

    pub fn perm(&self) -> FilePermissions {
        self.0
    }

    pub fn file_type(&self) -> FileType {
        self.0
    }

    pub fn modified(&self) -> io::Result<SystemTime> {
        self.0
    }

    pub fn accessed(&self) -> io::Result<SystemTime> {
        self.0
    }

    pub fn created(&self) -> io::Result<SystemTime> {
        self.0
    }
}

// Manual impl (instead of `derive`) because deriving would require the
// field type to be `Clone`; eliminating `self.0: !` works for any type.
impl Clone for FileAttr {
    fn clone(&self) -> FileAttr {
        self.0
    }
}
|
||||
|
||||
impl FilePermissions {
    // `FilePermissions` is uninhabited; `self.0: !` coerces to any
    // return type, so these bodies are unreachable by construction.
    pub fn readonly(&self) -> bool {
        self.0
    }

    pub fn set_readonly(&mut self, _readonly: bool) {
        self.0
    }
}

// The trait impls below are written by hand rather than derived so they
// place no bounds on the (never-typed) field.
impl Clone for FilePermissions {
    fn clone(&self) -> FilePermissions {
        self.0
    }
}

impl PartialEq for FilePermissions {
    fn eq(&self, _other: &FilePermissions) -> bool {
        self.0
    }
}

impl Eq for FilePermissions {}

impl fmt::Debug for FilePermissions {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0
    }
}
|
||||
|
||||
impl FileTimes {
    // No-ops: the times are discarded because no file operation on this
    // platform could ever consume them.
    pub fn set_accessed(&mut self, _t: SystemTime) {}
    pub fn set_modified(&mut self, _t: SystemTime) {}
}
|
||||
|
||||
impl FileType {
    // `FileType` is uninhabited, so these queries can never run;
    // `self.0: !` coerces to `bool`.
    pub fn is_dir(&self) -> bool {
        self.0
    }

    pub fn is_file(&self) -> bool {
        self.0
    }

    pub fn is_symlink(&self) -> bool {
        self.0
    }
}

// Hand-written trait impls avoid putting bounds on the never-typed field;
// each body just eliminates `self.0: !`.
impl Clone for FileType {
    fn clone(&self) -> FileType {
        self.0
    }
}

impl Copy for FileType {}

impl PartialEq for FileType {
    fn eq(&self, _other: &FileType) -> bool {
        self.0
    }
}

impl Eq for FileType {}

impl Hash for FileType {
    fn hash<H: Hasher>(&self, _h: &mut H) {
        self.0
    }
}

impl fmt::Debug for FileType {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0
    }
}
|
||||
|
||||
// `ReadDir` is uninhabited, so neither impl can ever be invoked;
// `self.0: !` satisfies any return type (including `Option<_>`).
impl fmt::Debug for ReadDir {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0
    }
}

impl Iterator for ReadDir {
    type Item = io::Result<DirEntry>;

    fn next(&mut self) -> Option<io::Result<DirEntry>> {
        self.0
    }
}
|
||||
|
||||
impl DirEntry {
    // `DirEntry` is uninhabited (its field is `!`), so none of these
    // accessors can ever be reached.
    pub fn path(&self) -> PathBuf {
        self.0
    }

    pub fn file_name(&self) -> OsString {
        self.0
    }

    pub fn metadata(&self) -> io::Result<FileAttr> {
        self.0
    }

    pub fn file_type(&self) -> io::Result<FileType> {
        self.0
    }
}
|
||||
|
||||
impl OpenOptions {
    /// Creates a blank set of options. All setters below are no-ops:
    /// the flags are irrelevant because `File::open` always fails.
    pub fn new() -> OpenOptions {
        OpenOptions {}
    }

    pub fn read(&mut self, _read: bool) {}
    pub fn write(&mut self, _write: bool) {}
    pub fn append(&mut self, _append: bool) {}
    pub fn truncate(&mut self, _truncate: bool) {}
    pub fn create(&mut self, _create: bool) {}
    pub fn create_new(&mut self, _create_new: bool) {}
}
|
||||
|
||||
impl File {
    /// Always fails with the platform's "unsupported" error: there is no
    /// filesystem, so no `File` value is ever created.
    pub fn open(_path: &Path, _opts: &OpenOptions) -> io::Result<File> {
        unsupported()
    }

    // Every remaining method takes `&self`. Since `File` is uninhabited
    // they can never be called; `self.0: !` coerces to each return type.
    pub fn file_attr(&self) -> io::Result<FileAttr> {
        self.0
    }

    pub fn fsync(&self) -> io::Result<()> {
        self.0
    }

    pub fn datasync(&self) -> io::Result<()> {
        self.0
    }

    pub fn lock(&self) -> io::Result<()> {
        self.0
    }

    pub fn lock_shared(&self) -> io::Result<()> {
        self.0
    }

    pub fn try_lock(&self) -> Result<(), TryLockError> {
        self.0
    }

    pub fn try_lock_shared(&self) -> Result<(), TryLockError> {
        self.0
    }

    pub fn unlock(&self) -> io::Result<()> {
        self.0
    }

    pub fn truncate(&self, _size: u64) -> io::Result<()> {
        self.0
    }

    pub fn read(&self, _buf: &mut [u8]) -> io::Result<usize> {
        self.0
    }

    pub fn read_vectored(&self, _bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        self.0
    }

    pub fn is_read_vectored(&self) -> bool {
        self.0
    }

    pub fn read_buf(&self, _cursor: BorrowedCursor<'_>) -> io::Result<()> {
        self.0
    }

    pub fn write(&self, _buf: &[u8]) -> io::Result<usize> {
        self.0
    }

    pub fn write_vectored(&self, _bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        self.0
    }

    pub fn is_write_vectored(&self) -> bool {
        self.0
    }

    pub fn flush(&self) -> io::Result<()> {
        self.0
    }

    pub fn seek(&self, _pos: SeekFrom) -> io::Result<u64> {
        self.0
    }

    pub fn size(&self) -> Option<io::Result<u64>> {
        self.0
    }

    pub fn tell(&self) -> io::Result<u64> {
        self.0
    }

    pub fn duplicate(&self) -> io::Result<File> {
        self.0
    }

    pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> {
        self.0
    }

    pub fn set_times(&self, _times: FileTimes) -> io::Result<()> {
        self.0
    }
}
|
||||
|
||||
impl DirBuilder {
    /// Creates a stateless builder; the only operation, `mkdir`, fails.
    pub fn new() -> DirBuilder {
        DirBuilder {}
    }

    /// Always fails with the platform's "unsupported" error.
    pub fn mkdir(&self, _p: &Path) -> io::Result<()> {
        unsupported()
    }
}
|
||||
|
||||
// Unreachable: `File` is uninhabited, so no value can be formatted.
impl fmt::Debug for File {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0
    }
}
|
||||
|
||||
// Free-function filesystem API expected by `sys::fs`: on this platform
// every operation fails with the generic "unsupported" error.
pub fn readdir(_p: &Path) -> io::Result<ReadDir> {
    unsupported()
}

pub fn unlink(_p: &Path) -> io::Result<()> {
    unsupported()
}

pub fn rename(_old: &Path, _new: &Path) -> io::Result<()> {
    unsupported()
}

pub fn set_perm(_p: &Path, perm: FilePermissions) -> io::Result<()> {
    // `FilePermissions` is uninhabited; the empty match eliminates
    // `perm.0: !` and proves this function can never be reached.
    match perm.0 {}
}

pub fn set_times(_p: &Path, _times: FileTimes) -> io::Result<()> {
    unsupported()
}

pub fn set_times_nofollow(_p: &Path, _times: FileTimes) -> io::Result<()> {
    unsupported()
}

pub fn rmdir(_p: &Path) -> io::Result<()> {
    unsupported()
}

pub fn remove_dir_all(_path: &Path) -> io::Result<()> {
    unsupported()
}

pub fn exists(_path: &Path) -> io::Result<bool> {
    unsupported()
}

pub fn readlink(_p: &Path) -> io::Result<PathBuf> {
    unsupported()
}

pub fn symlink(_original: &Path, _link: &Path) -> io::Result<()> {
    unsupported()
}

pub fn link(_src: &Path, _dst: &Path) -> io::Result<()> {
    unsupported()
}

pub fn stat(_p: &Path) -> io::Result<FileAttr> {
    unsupported()
}

pub fn lstat(_p: &Path) -> io::Result<FileAttr> {
    unsupported()
}

pub fn canonicalize(_p: &Path) -> io::Result<PathBuf> {
    unsupported()
}

pub fn copy(_from: &Path, _to: &Path) -> io::Result<u64> {
    unsupported()
}
|
||||
@@ -1,15 +0,0 @@
|
||||
/// Last OS error code. This target has no `errno`, so always report 0.
pub fn errno() -> i32 {
    0
}

/// No error code ever means "interrupted, retry" on this target.
pub fn is_interrupted(_code: i32) -> bool {
    false
}

/// Without real OS error codes, every code maps to `Uncategorized`.
pub fn decode_error_kind(_code: i32) -> crate::io::ErrorKind {
    crate::io::ErrorKind::Uncategorized
}
|
||||
|
||||
/// Human-readable message for an OS error code. The only code this
/// target ever produces is 0 (see `errno`), i.e. "no error".
pub fn error_string(_errno: i32) -> String {
    String::from("operation successful")
}
|
||||
@@ -1,52 +0,0 @@
|
||||
use crate::mem;
|
||||
|
||||
/// Plain-slice stand-in for a vectored-write buffer on platforms without
/// a native `iovec`-style type.
#[derive(Copy, Clone)]
pub struct IoSlice<'a>(&'a [u8]);

impl<'a> IoSlice<'a> {
    /// Wraps `buf` without copying it.
    #[inline]
    pub fn new(buf: &'a [u8]) -> IoSlice<'a> {
        IoSlice(buf)
    }

    /// Discards the first `n` bytes. Panics if `n` exceeds the length.
    #[inline]
    pub fn advance(&mut self, n: usize) {
        let (_, rest) = self.0.split_at(n);
        self.0 = rest;
    }

    /// Returns the bytes not yet advanced past.
    #[inline]
    pub const fn as_slice(&self) -> &'a [u8] {
        self.0
    }
}
|
||||
|
||||
/// Plain-slice stand-in for a vectored-read buffer on platforms without
/// a native `iovec`-style type.
pub struct IoSliceMut<'a>(&'a mut [u8]);

impl<'a> IoSliceMut<'a> {
    /// Wraps `buf` without copying it.
    #[inline]
    pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> {
        IoSliceMut(buf)
    }

    /// Discards the first `n` bytes. Panics if `n` exceeds the length.
    #[inline]
    pub fn advance(&mut self, n: usize) {
        // `mem::take` moves the slice out (leaving an empty one) so the
        // re-split keeps the full lifetime `'a`.
        self.0 = mem::take(&mut self.0).split_at_mut(n).1;
    }

    /// Shared view of the remaining bytes.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        self.0
    }

    /// Consumes the wrapper, yielding the remaining bytes mutably.
    #[inline]
    pub const fn into_slice(self) -> &'a mut [u8] {
        self.0
    }

    /// Mutable view of the remaining bytes.
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        self.0
    }
}
|
||||
@@ -1,3 +0,0 @@
|
||||
/// Fallback for targets with no terminal support: no handle of any type
/// is ever considered a terminal.
pub fn is_terminal<T>(_stream: &T) -> bool {
    false
}
|
||||
@@ -1,23 +0,0 @@
|
||||
/// Result of attempting an in-kernel (zero-copy) file copy.
pub enum CopyState {
    /// The whole copy was completed; holds the number of bytes copied.
    /// Only the Linux/Android backend constructs this variant, hence the
    /// `expect(dead_code)` everywhere else.
    #[cfg_attr(not(any(target_os = "linux", target_os = "android")), expect(dead_code))]
    Ended(u64),
    /// Bytes copied so far; the caller must finish the copy by other
    /// means (the generic fallback always reports `Fallback(0)`).
    Fallback(u64),
}
|
||||
|
||||
cfg_select! {
    // Linux/Android provide an accelerated implementation (see the
    // `linux` submodule).
    any(target_os = "linux", target_os = "android") => {
        mod linux;
        pub use linux::kernel_copy;
    }
    // All other targets: report that zero bytes were copied in-kernel so
    // the caller falls back to a generic userspace copy loop.
    _ => {
        use crate::io::{Result, Read, Write};

        pub fn kernel_copy<R: ?Sized, W: ?Sized>(_reader: &mut R, _writer: &mut W) -> Result<CopyState>
        where
            R: Read,
            W: Write,
        {
            Ok(CopyState::Fallback(0))
        }
    }
}
|
||||
@@ -1,72 +0,0 @@
|
||||
#![forbid(unsafe_op_in_unsafe_fn)]
|
||||
|
||||
mod error;
|
||||
|
||||
/// Selects the vectored-I/O buffer types for the current target.
mod io_slice {
    cfg_select! {
        // Targets whose vectored I/O is expressed with `iovec`.
        any(target_family = "unix", target_os = "hermit", target_os = "solid_asp3", target_os = "trusty", target_os = "wasi") => {
            mod iovec;
            pub use iovec::*;
        }
        target_os = "windows" => {
            mod windows;
            pub use windows::*;
        }
        target_os = "uefi" => {
            mod uefi;
            pub use uefi::*;
        }
        // Everything else gets the plain-slice wrappers.
        _ => {
            mod unsupported;
            pub use unsupported::*;
        }
    }
}
|
||||
|
||||
/// Selects the `is_terminal` implementation for the current target.
mod is_terminal {
    cfg_select! {
        // Targets with an `isatty`-style check.
        any(target_family = "unix", target_os = "wasi") => {
            mod isatty;
            pub use isatty::*;
        }
        target_os = "windows" => {
            mod windows;
            pub use windows::*;
        }
        target_os = "hermit" => {
            mod hermit;
            pub use hermit::*;
        }
        target_os = "motor" => {
            mod motor;
            pub use motor::*;
        }
        // Everything else: nothing is ever a terminal.
        _ => {
            mod unsupported;
            pub use unsupported::*;
        }
    }
}
|
||||
|
||||
mod kernel_copy;
|
||||
|
||||
#[cfg_attr(not(target_os = "linux"), allow(unused_imports))]
|
||||
#[cfg(all(
|
||||
target_family = "unix",
|
||||
not(any(target_os = "dragonfly", target_os = "vxworks", target_os = "rtems"))
|
||||
))]
|
||||
pub use error::errno_location;
|
||||
#[cfg_attr(not(target_os = "linux"), allow(unused_imports))]
|
||||
#[cfg(any(
|
||||
all(target_family = "unix", not(any(target_os = "vxworks", target_os = "rtems"))),
|
||||
target_os = "wasi",
|
||||
))]
|
||||
pub use error::set_errno;
|
||||
pub use error::{RawOsError, decode_error_kind, errno, error_string, is_interrupted};
|
||||
pub use io_slice::{IoSlice, IoSliceMut};
|
||||
pub use is_terminal::is_terminal;
|
||||
pub use kernel_copy::{CopyState, kernel_copy};
|
||||
|
||||
/// Default capacity for buffered readers/writers.
///
/// Bare metal platforms usually have very small amounts of RAM
/// (in the order of hundreds of KB), so ESP-IDF gets a much smaller buffer.
pub const DEFAULT_BUF_SIZE: usize = if cfg!(target_os = "espidf") { 512 } else { 8 * 1024 };
|
||||
@@ -1,40 +0,0 @@
|
||||
pub mod cmath;
|
||||
pub mod configure_builtins;
|
||||
pub mod env;
|
||||
pub mod env_consts;
|
||||
pub mod exit;
|
||||
pub mod os_str;
|
||||
pub mod path;
|
||||
pub mod sync;
|
||||
pub mod thread;
|
||||
pub mod time;
|
||||
pub mod random;
|
||||
pub mod thread_local;
|
||||
pub mod alloc;
|
||||
pub mod io;
|
||||
pub mod pipe;
|
||||
pub mod stdio;
|
||||
pub mod backtrace;
|
||||
// pub mod fs;
|
||||
|
||||
/// A trait for viewing representations from std types.
#[cfg_attr(not(target_os = "linux"), allow(unused))]
pub(crate) trait AsInner<Inner: ?Sized> {
    fn as_inner(&self) -> &Inner;
}

/// A trait for mutably viewing representations from std types.
#[cfg_attr(not(target_os = "linux"), allow(unused))]
pub(crate) trait AsInnerMut<Inner: ?Sized> {
    fn as_inner_mut(&mut self) -> &mut Inner;
}

/// A trait for extracting representations from std types.
pub(crate) trait IntoInner<Inner> {
    fn into_inner(self) -> Inner;
}

/// A trait for creating std types from internal representations.
pub(crate) trait FromInner<Inner> {
    fn from_inner(inner: Inner) -> Self;
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user