Add more items from the standard library (std)

This commit is contained in:
2026-03-17 22:51:36 +01:00
parent 9958b23c89
commit 72989d86a8
29 changed files with 2822 additions and 33 deletions

490
crates/std/src/alloc.rs Normal file
View File

@@ -0,0 +1,490 @@
//! Memory allocation APIs.
//!
//! In a given program, the standard library has one “global” memory allocator
//! that is used for example by `Box<T>` and `Vec<T>`.
//!
//! Currently the default global allocator is unspecified. Libraries, however,
//! like `cdylib`s and `staticlib`s are guaranteed to use the [`System`] by
//! default.
//!
//! # The `#[global_allocator]` attribute
//!
//! This attribute allows configuring the choice of global allocator.
//! You can use this to implement a completely custom global allocator
//! to route all[^system-alloc] default allocation requests to a custom object.
//!
//! ```rust
//! use std::alloc::{GlobalAlloc, System, Layout};
//!
//! struct MyAllocator;
//!
//! unsafe impl GlobalAlloc for MyAllocator {
//! unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
//! unsafe { System.alloc(layout) }
//! }
//!
//! unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
//! unsafe { System.dealloc(ptr, layout) }
//! }
//! }
//!
//! #[global_allocator]
//! static GLOBAL: MyAllocator = MyAllocator;
//!
//! fn main() {
//! // This `Vec` will allocate memory through `GLOBAL` above
//! let mut v = Vec::new();
//! v.push(1);
//! }
//! ```
//!
//! The attribute is used on a `static` item whose type implements the
//! [`GlobalAlloc`] trait. This type can be provided by an external library:
//!
//! ```rust,ignore (demonstrates crates.io usage)
//! use jemallocator::Jemalloc;
//!
//! #[global_allocator]
//! static GLOBAL: Jemalloc = Jemalloc;
//!
//! fn main() {}
//! ```
//!
//! The `#[global_allocator]` can only be used once in a crate
//! or its recursive dependencies.
//!
//! [^system-alloc]: Note that the Rust standard library internals may still
//! directly call [`System`] when necessary (for example for the runtime
//! support typically required to implement a global allocator, see [re-entrance] on [`GlobalAlloc`]
//! for more details).
//!
//! [re-entrance]: trait.GlobalAlloc.html#re-entrance
#![deny(unsafe_op_in_unsafe_fn)]
#![stable(feature = "alloc_module", since = "1.28.0")]
use core::ptr::NonNull;
use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
use core::{hint, mem, ptr};
#[stable(feature = "alloc_module", since = "1.28.0")]
#[doc(inline)]
pub use alloc_crate::alloc::*;
/// The default memory allocator provided by the operating system.
///
/// This is based on `malloc` on Unix platforms and `HeapAlloc` on Windows,
/// plus related functions. However, it is not valid to mix use of the backing
/// system allocator with `System`, as this implementation may include extra
/// work, such as to serve alignment requests greater than the alignment
/// provided directly by the backing system allocator.
///
/// This type implements the [`GlobalAlloc`] trait. Currently the default
/// global allocator is unspecified. Libraries, however, like `cdylib`s and
/// `staticlib`s are guaranteed to use the [`System`] by default and as such
/// work as if they had this definition:
///
/// ```rust
/// use std::alloc::System;
///
/// #[global_allocator]
/// static A: System = System;
///
/// fn main() {
/// let a = Box::new(4); // Allocates from the system allocator.
/// println!("{a}");
/// }
/// ```
///
/// You can also define your own wrapper around `System` if you'd like, such as
/// keeping track of the number of all bytes allocated:
///
/// ```rust
/// use std::alloc::{System, GlobalAlloc, Layout};
/// use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
///
/// struct Counter;
///
/// static ALLOCATED: AtomicUsize = AtomicUsize::new(0);
///
/// unsafe impl GlobalAlloc for Counter {
/// unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
/// let ret = unsafe { System.alloc(layout) };
/// if !ret.is_null() {
/// ALLOCATED.fetch_add(layout.size(), Relaxed);
/// }
/// ret
/// }
///
/// unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
/// unsafe { System.dealloc(ptr, layout); }
/// ALLOCATED.fetch_sub(layout.size(), Relaxed);
/// }
/// }
///
/// #[global_allocator]
/// static A: Counter = Counter;
///
/// fn main() {
/// println!("allocated bytes before main: {}", ALLOCATED.load(Relaxed));
/// }
/// ```
///
/// It can also be used directly to allocate memory independently of whatever
/// global allocator has been selected for a Rust program. For example if a Rust
/// program opts in to using jemalloc as the global allocator, `System` will
/// still allocate memory using `malloc` and `HeapAlloc`.
#[stable(feature = "alloc_system_type", since = "1.28.0")]
#[derive(Debug, Default, Copy, Clone)]
pub struct System;
impl System {
    /// Shared implementation of `Allocator::allocate` and `Allocator::allocate_zeroed`.
    ///
    /// Zero-sized requests never reach the OS allocator: they get a dangling
    /// (but suitably aligned) pointer, as the `Allocator` contract permits.
    #[inline]
    fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
        match layout.size() {
            0 => Ok(NonNull::slice_from_raw_parts(layout.dangling_ptr(), 0)),
            // SAFETY: `layout` is non-zero in size,
            size => unsafe {
                let raw_ptr = if zeroed {
                    GlobalAlloc::alloc_zeroed(self, layout)
                } else {
                    GlobalAlloc::alloc(self, layout)
                };
                // A null return from the global allocator maps to `AllocError`.
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                Ok(NonNull::slice_from_raw_parts(ptr, size))
            },
        }
    }
    // Shared implementation of `Allocator::grow` and `Allocator::grow_zeroed`.
    // SAFETY: Same as `Allocator::grow`
    #[inline]
    unsafe fn grow_impl(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
        zeroed: bool,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() >= old_layout.size(),
            "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
        );
        match old_layout.size() {
            // Growing a zero-sized allocation is just a fresh allocation.
            0 => self.alloc_impl(new_layout, zeroed),
            // SAFETY: `new_size` is non-zero as `new_size` is greater than or equal to `old_size`
            // as required by safety conditions and the `old_size == 0` case was handled in the
            // previous match arm. Other conditions must be upheld by the caller
            old_size if old_layout.align() == new_layout.align() => unsafe {
                let new_size = new_layout.size();
                // Tell the optimizer what `debug_assert!` checked above, so the
                // backing `realloc`'s own size comparisons can be elided.
                hint::assert_unchecked(new_size >= old_layout.size());
                let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size);
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                // `realloc` leaves the newly grown tail uninitialized; zero it
                // when the caller asked for zeroed growth.
                if zeroed {
                    raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
                }
                Ok(NonNull::slice_from_raw_parts(ptr, new_size))
            },
            // Alignment changed: `realloc` cannot be used, so allocate a new
            // block, copy the old contents over, then free the old block.
            // SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
            // both the old and new memory allocation are valid for reads and writes for `old_size`
            // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
            // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
            // for `dealloc` must be upheld by the caller.
            old_size => unsafe {
                let new_ptr = self.alloc_impl(new_layout, zeroed)?;
                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
                Allocator::deallocate(self, ptr, old_layout);
                Ok(new_ptr)
            },
        }
    }
}
// The Allocator impl checks the layout size to be non-zero and forwards to the GlobalAlloc impl,
// which is in `std::sys::*::alloc`.
#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl Allocator for System {
    #[inline]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.alloc_impl(layout, false)
    }
    #[inline]
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.alloc_impl(layout, true)
    }
    #[inline]
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // Zero-sized allocations never touched the OS allocator (see
        // `alloc_impl`), so there is nothing to free for them.
        if layout.size() != 0 {
            // SAFETY: `layout` is non-zero in size,
            // other conditions must be upheld by the caller
            unsafe { GlobalAlloc::dealloc(self, ptr.as_ptr(), layout) }
        }
    }
    #[inline]
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: all conditions must be upheld by the caller
        unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
    }
    #[inline]
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: all conditions must be upheld by the caller
        unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
    }
    #[inline]
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() <= old_layout.size(),
            "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
        );
        match new_layout.size() {
            // Shrinking to zero frees the block and hands back a dangling
            // (well-aligned) pointer, mirroring `alloc_impl`'s zero-size path.
            // SAFETY: conditions must be upheld by the caller
            0 => unsafe {
                Allocator::deallocate(self, ptr, old_layout);
                Ok(NonNull::slice_from_raw_parts(new_layout.dangling_ptr(), 0))
            },
            // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
            new_size if old_layout.align() == new_layout.align() => unsafe {
                // Tell the optimizer what `debug_assert!` checked above, so the
                // backing `realloc`'s own size comparisons can be elided.
                hint::assert_unchecked(new_size <= old_layout.size());
                let raw_ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), old_layout, new_size);
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                Ok(NonNull::slice_from_raw_parts(ptr, new_size))
            },
            // Alignment changed: cannot use `realloc`; allocate-copy-free instead.
            // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
            // both the old and new memory allocation are valid for reads and writes for `new_size`
            // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
            // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
            // for `dealloc` must be upheld by the caller.
            new_size => unsafe {
                let new_ptr = Allocator::allocate(self, new_layout)?;
                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
                Allocator::deallocate(self, ptr, old_layout);
                Ok(new_ptr)
            },
        }
    }
}
// The registered allocation error hook, or null when no custom hook is set.
// The stored value always originates from a `fn(Layout)` cast to `*mut ()`,
// so it can live in an `AtomicPtr` and be transmuted back before the call.
static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
/// Registers a custom allocation error hook, replacing any that was previously registered.
///
/// The allocation error hook is invoked when an infallible memory allocation fails — that is,
/// as a consequence of calling [`handle_alloc_error`] — before the runtime aborts.
///
/// The allocation error hook is a global resource. [`take_alloc_error_hook`] may be used to
/// retrieve a previously registered hook and wrap or discard it.
///
/// # What the provided `hook` function should expect
///
/// The hook function is provided with a [`Layout`] struct which contains information
/// about the allocation that failed.
///
/// The hook function may choose to panic or abort; in the event that it returns normally, this
/// will cause an immediate abort.
///
/// Since [`take_alloc_error_hook`] is a safe function that allows retrieving the hook, the hook
/// function must be _sound_ to call even if no memory allocations were attempted.
///
/// # The default hook
///
/// The default hook, used if [`set_alloc_error_hook`] is never called, prints a message to
/// standard error (and then returns, causing the runtime to abort the process).
/// Compiler options may cause it to panic instead, and the default behavior may be changed
/// to panicking in future versions of Rust.
///
/// # Examples
///
/// ```
/// #![feature(alloc_error_hook)]
///
/// use std::alloc::{Layout, set_alloc_error_hook};
///
/// fn custom_alloc_error_hook(layout: Layout) {
///     panic!("memory allocation of {} bytes failed", layout.size());
/// }
///
/// set_alloc_error_hook(custom_alloc_error_hook);
/// ```
// NOTE(review): the doc above references `take_alloc_error_hook`, which is
// currently commented out below in this file — confirm the intra-doc links
// still resolve, or keep that function enabled alongside this one.
#[unstable(feature = "alloc_error_hook", issue = "51245")]
pub fn set_alloc_error_hook(hook: fn(Layout)) {
    // `Release` store: a reader loading the hook with `Acquire` (e.g. at the
    // point the hook is invoked) observes a fully published function pointer.
    HOOK.store(hook as *mut (), Ordering::Release);
}
// /// Unregisters the current allocation error hook, returning it.
// ///
// /// *See also the function [`set_alloc_error_hook`].*
// ///
// /// If no custom hook is registered, the default hook will be returned.
// #[unstable(feature = "alloc_error_hook", issue = "51245")]
// pub fn take_alloc_error_hook() -> fn(Layout) {
// let hook = HOOK.swap(ptr::null_mut(), Ordering::Acquire);
// if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } }
// }
// #[optimize(size)]
// fn default_alloc_error_hook(layout: Layout) {
// if cfg!(panic = "immediate-abort") {
// return;
// }
// // This is the default path taken on OOM, and the only path taken on stable with std.
// // Crucially, it does *not* call any user-defined code, and therefore users do not have to
// // worry about allocation failure causing reentrancy issues. That makes it different from
// // the default `__rdl_alloc_error_handler` defined in alloc (i.e., the default alloc error
// // handler that is called when there is no `#[alloc_error_handler]`), which triggers a
// // regular panic and thus can invoke a user-defined panic hook, executing arbitrary
// // user-defined code.
// static PREV_ALLOC_FAILURE: AtomicBool = AtomicBool::new(false);
// if PREV_ALLOC_FAILURE.swap(true, Ordering::Relaxed) {
// // Don't try to print a backtrace if a previous alloc error happened. This likely means
// // there is not enough memory to print a backtrace, although it could also mean that two
// // threads concurrently run out of memory.
// rtprintpanic!(
// "memory allocation of {} bytes failed\nskipping backtrace printing to avoid potential recursion\n",
// layout.size()
// );
// return;
// } else {
// rtprintpanic!("memory allocation of {} bytes failed\n", layout.size());
// }
// let Some(mut out) = crate::sys::stdio::panic_output() else {
// return;
// };
// // Use a lock to prevent mixed output in multithreading context.
// // Some platforms also require it when printing a backtrace, like `SymFromAddr` on Windows.
// // Make sure to not take this lock until after checking PREV_ALLOC_FAILURE to avoid deadlocks
// // when there is too little memory to print a backtrace.
// let mut lock = crate::sys::backtrace::lock();
// match crate::panic::get_backtrace_style() {
// Some(crate::panic::BacktraceStyle::Short) => {
// drop(lock.print(&mut out, crate::backtrace_rs::PrintFmt::Short))
// }
// Some(crate::panic::BacktraceStyle::Full) => {
// drop(lock.print(&mut out, crate::backtrace_rs::PrintFmt::Full))
// }
// Some(crate::panic::BacktraceStyle::Off) => {
// use crate::io::Write;
// let _ = writeln!(
// out,
// "note: run with `RUST_BACKTRACE=1` environment variable to display a \
// backtrace"
// );
// if cfg!(miri) {
// let _ = writeln!(
// out,
// "note: in Miri, you may have to set `MIRIFLAGS=-Zmiri-env-forward=RUST_BACKTRACE` \
// for the environment variable to have an effect"
// );
// }
// }
// // If backtraces aren't supported or are forced-off, do nothing.
// None => {}
// }
// }
// #[cfg(not(test))]
// #[doc(hidden)]
// #[alloc_error_handler]
// #[unstable(feature = "alloc_internals", issue = "none")]
// pub fn rust_oom(layout: Layout) -> ! {
// crate::sys::backtrace::__rust_end_short_backtrace(|| {
// let hook = HOOK.load(Ordering::Acquire);
// let hook: fn(Layout) =
// if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } };
// hook(layout);
// crate::process::abort()
// })
// }
#[cfg(not(test))]
#[doc(hidden)]
#[allow(unused_attributes)]
#[unstable(feature = "alloc_internals", issue = "none")]
pub mod __default_lib_allocator {
    use super::{GlobalAlloc, Layout, System};
    // These magic symbol names are used as a fallback for implementing the
    // `__rust_alloc` etc symbols (see `src/liballoc/alloc.rs`) when there is
    // no `#[global_allocator]` attribute.
    // for symbol names src/librustc_ast/expand/allocator.rs
    // for signatures src/librustc_allocator/lib.rs
    // linkage directives are provided as part of the current compiler allocator
    // ABI
    // Fallback for `__rust_alloc`: builds a `Layout` from the raw size/align
    // the compiler passes and forwards to the `System` allocator.
    #[rustc_std_internal_symbol]
    pub unsafe extern "C" fn __rdl_alloc(size: usize, align: usize) -> *mut u8 {
        // SAFETY: see the guarantees expected by `Layout::from_size_align` and
        // `GlobalAlloc::alloc`.
        unsafe {
            let layout = Layout::from_size_align_unchecked(size, align);
            System.alloc(layout)
        }
    }
    // Fallback for `__rust_dealloc`.
    #[rustc_std_internal_symbol]
    pub unsafe extern "C" fn __rdl_dealloc(ptr: *mut u8, size: usize, align: usize) {
        // SAFETY: see the guarantees expected by `Layout::from_size_align` and
        // `GlobalAlloc::dealloc`.
        unsafe { System.dealloc(ptr, Layout::from_size_align_unchecked(size, align)) }
    }
    // Fallback for `__rust_realloc`: `align` is the alignment of the *old*
    // layout; the new layout reuses it per the `GlobalAlloc::realloc` contract.
    #[rustc_std_internal_symbol]
    pub unsafe extern "C" fn __rdl_realloc(
        ptr: *mut u8,
        old_size: usize,
        align: usize,
        new_size: usize,
    ) -> *mut u8 {
        // SAFETY: see the guarantees expected by `Layout::from_size_align` and
        // `GlobalAlloc::realloc`.
        unsafe {
            let old_layout = Layout::from_size_align_unchecked(old_size, align);
            System.realloc(ptr, old_layout, new_size)
        }
    }
    // Fallback for `__rust_alloc_zeroed`.
    #[rustc_std_internal_symbol]
    pub unsafe extern "C" fn __rdl_alloc_zeroed(size: usize, align: usize) -> *mut u8 {
        // SAFETY: see the guarantees expected by `Layout::from_size_align` and
        // `GlobalAlloc::alloc_zeroed`.
        unsafe {
            let layout = Layout::from_size_align_unchecked(size, align);
            System.alloc_zeroed(layout)
        }
    }
}

View File

@@ -1,11 +1,11 @@
//! [`CStr`], [`CString`], and related types.
#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
pub use alloc::ffi::c_str::FromVecWithNulError;
pub use alloc_crate::ffi::c_str::FromVecWithNulError;
#[stable(feature = "cstring_into", since = "1.7.0")]
pub use alloc::ffi::c_str::IntoStringError;
pub use alloc_crate::ffi::c_str::IntoStringError;
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc::ffi::c_str::{CString, NulError};
pub use alloc_crate::ffi::c_str::{CString, NulError};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::ffi::c_str::CStr;
#[stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")]

View File

@@ -6,12 +6,12 @@ mod tests;
use core::clone::CloneToUninit;
use crate::borrow::{Borrow, Cow};
use alloc::collections::TryReserveError;
use alloc_crate::collections::TryReserveError;
use crate::hash::{Hash, Hasher};
use crate::ops::{self, Range};
use crate::rc::Rc;
use crate::str::FromStr;
use alloc::sync::Arc;
use alloc_crate::sync::Arc;
use crate::sys::os_str::{Buf, Slice};
use crate::sys::{AsInner, FromInner, IntoInner};
use crate::{cmp, fmt, slice};
@@ -1658,7 +1658,7 @@ impl fmt::Display for Display<'_> {
}
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<S: Borrow<OsStr>> alloc::slice::Join<&OsStr> for [S] {
impl<S: Borrow<OsStr>> alloc_crate::slice::Join<&OsStr> for [S] {
type Output = OsString;
fn join(slice: &Self, sep: &OsStr) -> OsString {

View File

@@ -80,12 +80,12 @@
//! s.finish()
//! }
//! ```
// #![stable(feature = "rust1", since = "1.0.0")]
#![stable(feature = "rust1", since = "1.0.0")]
// pub(crate) mod random;
pub(crate) mod random;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::hash::*;
// #[stable(feature = "std_hash_exports", since = "1.76.0")]
// pub use self::random::{DefaultHasher, RandomState};
#[stable(feature = "std_hash_exports", since = "1.76.0")]
pub use self::random::{DefaultHasher, RandomState};

View File

@@ -0,0 +1,159 @@
//! This module exists to isolate [`RandomState`] and [`DefaultHasher`] outside of the
//! [`collections`] module without actually publicly exporting them, so that parts of that
//! implementation can more easily be moved to the [`alloc`] crate.
//!
//! Although its items are public and contain stability attributes, they can't actually be accessed
//! outside this crate.
//!
//! [`collections`]: crate::collections
use super::{BuildHasher, Hasher, SipHasher13};
use crate::cell::Cell;
use crate::fmt;
use crate::sys::random::hashmap_random_keys;
/// `RandomState` is the default state for [`HashMap`] types.
///
/// A particular instance `RandomState` will create the same instances of
/// [`Hasher`], but the hashers created by two different `RandomState`
/// instances are unlikely to produce the same result for the same values.
///
/// [`HashMap`]: crate::collections::HashMap
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// use std::hash::RandomState;
///
/// let s = RandomState::new();
/// let mut map = HashMap::with_hasher(s);
/// map.insert(1, 2);
/// ```
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
#[derive(Clone)]
pub struct RandomState {
    // The two 64-bit keys handed to `SipHasher13::new_with_keys` by
    // `build_hasher`. Seeded per thread and perturbed per instance (see
    // `RandomState::new`) so iteration order differs between maps.
    k0: u64,
    k1: u64,
}
impl RandomState {
    /// Constructs a new `RandomState` that is initialized with random keys.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::hash::RandomState;
    ///
    /// let s = RandomState::new();
    /// ```
    #[inline]
    #[allow(deprecated)]
    // rand
    #[must_use]
    #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
    pub fn new() -> RandomState {
        // Historically this function did not cache keys from the OS and instead
        // simply always called `rand::thread_rng().gen()` twice. In #31356 it
        // was discovered, however, that because we re-seed the thread-local RNG
        // from the OS periodically that this can cause excessive slowdown when
        // many hash maps are created on a thread. To solve this performance
        // trap we cache the first set of randomly generated keys per-thread.
        //
        // Later in #36481 it was discovered that exposing a deterministic
        // iteration order allows a form of DOS attack. To counter that we
        // increment one of the seeds on every RandomState creation, giving
        // every corresponding HashMap a different iteration order.
        thread_local!(static KEYS: Cell<(u64, u64)> = {
            Cell::new(hashmap_random_keys())
        });
        KEYS.with(|keys| {
            let (k0, k1) = keys.get();
            // `wrapping_add` so the per-thread counter never panics on
            // overflow in debug builds.
            keys.set((k0.wrapping_add(1), k1));
            RandomState { k0, k1 }
        })
    }
}
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
impl BuildHasher for RandomState {
    type Hasher = DefaultHasher;
    /// Creates a fresh `SipHasher13`-backed hasher seeded with this state's
    /// keys, so all hashers built from the same `RandomState` hash alike.
    #[inline]
    fn build_hasher(&self) -> DefaultHasher {
        let sip = SipHasher13::new_with_keys(self.k0, self.k1);
        DefaultHasher(sip)
    }
}
/// The default [`Hasher`] used by [`RandomState`].
///
/// The internal algorithm is not specified, and so it and its hashes should
/// not be relied upon over releases.
#[derive(Clone, Debug)]
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
pub struct DefaultHasher(SipHasher13);
impl DefaultHasher {
    /// Creates a new `DefaultHasher`.
    ///
    /// This hasher is not guaranteed to be the same as all other
    /// `DefaultHasher` instances, but is the same as all other `DefaultHasher`
    /// instances created through `new` or `default`.
    #[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
    #[inline]
    #[rustc_const_unstable(feature = "const_default", issue = "143894")]
    #[must_use]
    pub const fn new() -> DefaultHasher {
        // Fixed (0, 0) keys: unlike hashers built via `RandomState`, every
        // `DefaultHasher::new()` hashes identical input to identical output.
        DefaultHasher(SipHasher13::new_with_keys(0, 0))
    }
}
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
#[rustc_const_unstable(feature = "const_default", issue = "143894")]
// `impl const` (unstable) allows `Default::default` to be called in const
// contexts, matching the `const fn new` above.
impl const Default for DefaultHasher {
    /// Creates a new `DefaultHasher` using [`new`].
    /// See its documentation for more.
    ///
    /// [`new`]: DefaultHasher::new
    #[inline]
    fn default() -> DefaultHasher {
        DefaultHasher::new()
    }
}
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
impl Hasher for DefaultHasher {
    // Every method is a plain forward to the wrapped `SipHasher13`. That
    // hasher doesn't override the remaining `write_*` defaults, so it's ok
    // not to forward them here.
    #[inline]
    fn finish(&self) -> u64 {
        self.0.finish()
    }
    #[inline]
    fn write(&mut self, bytes: &[u8]) {
        self.0.write(bytes)
    }
    #[inline]
    fn write_str(&mut self, s: &str) {
        self.0.write_str(s);
    }
}
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
impl Default for RandomState {
    /// Constructs a new `RandomState`.
    // Delegates to `RandomState::new`, so defaults get fresh random keys too.
    #[inline]
    fn default() -> RandomState {
        RandomState::new()
    }
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for RandomState {
    /// Formats as `RandomState { .. }`, deliberately omitting the key fields
    /// so the hash seeds cannot leak into debug output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("RandomState");
        builder.finish_non_exhaustive()
    }
}

View File

@@ -186,7 +186,8 @@
#![allow(stable_features, incomplete_features, unexpected_cfgs)]
#![allow(unused)]
extern crate alloc;
#[macro_use]
extern crate alloc as alloc_crate;
pub use core::any;
pub use core::array;
@@ -235,15 +236,24 @@ pub use core::unsafe_binder;
#[allow(deprecated, deprecated_in_future)]
pub use core::usize;
pub use alloc::borrow;
pub use alloc::boxed;
pub use alloc::fmt;
pub use alloc::format;
pub use alloc::rc;
pub use alloc::slice;
pub use alloc::str;
pub use alloc::string;
pub use alloc::vec;
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::borrow;
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::boxed;
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::fmt;
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::format;
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::rc;
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::slice;
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::str;
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::string;
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::vec;
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
pub use core::{
@@ -261,9 +271,13 @@ pub mod num;
pub mod path;
pub mod prelude;
pub mod process;
#[macro_use]
pub mod rt;
pub mod alloc;
pub mod sync;
pub mod sys;
pub mod time;
pub mod thread;
pub mod time;
#[prelude_import]
#[allow(unused_imports)]

View File

@@ -84,7 +84,7 @@
use core::clone::CloneToUninit;
use crate::borrow::{Borrow, Cow};
use alloc::collections::TryReserveError;
use alloc_crate::collections::TryReserveError;
use crate::error::Error;
use crate::ffi::{OsStr, OsString, os_str};
use crate::hash::{Hash, Hasher};
@@ -92,7 +92,7 @@ use crate::iter::FusedIterator;
use crate::ops::{self, Deref};
use crate::rc::Rc;
use crate::str::FromStr;
use alloc::sync::Arc;
use alloc_crate::sync::Arc;
use crate::sys::path::{HAS_PREFIXES, MAIN_SEP_STR, is_sep_byte, is_verbatim_sep, parse_prefix};
use crate::{cmp, fmt, fs, io, sys};

View File

@@ -29,6 +29,10 @@ pub mod rust_2024 {
write, writeln,
};
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[doc(no_inline)]
pub use crate::thread_local;
#[stable(feature = "cfg_select", since = "1.95.0")]
#[doc(no_inline)]
pub use core::prelude::v1::cfg_select;

6
crates/std/src/rt.rs Normal file
View File

@@ -0,0 +1,6 @@
// Minimal stand-ins for the runtime macros std internals invoke.
// Both currently expand to an empty block — no printing, no abort.
// NOTE(review): a no-op `rtabort!` means code that "aborts" keeps running;
// confirm that is intended for this port rather than calling process abort.
macro_rules! rtabort {
    ($($t:tt)*) => {{}};
}
// Panic-time printing stub; discards its format arguments entirely.
macro_rules! rtprintpanic {
    ($($t:tt)*) => {{}};
}

14
crates/std/src/sync.rs Normal file
View File

@@ -0,0 +1,14 @@
pub mod once {
    use crate::sys::sync as sys;
    /// Public wrapper around the platform-specific `sys::sync::OnceState`.
    // The wrapped value is only constructible inside the crate (`pub(crate)`).
    pub struct OnceState {
        pub(crate) inner: sys::OnceState,
    }
    /// Used for the internal implementation of `sys::sync::once` on different platforms and the
    /// [`LazyLock`](crate::sync::LazyLock) implementation.
    // Three-way lifecycle: not yet run / initializer panicked / finished.
    pub(crate) enum OnceExclusiveState {
        Incomplete,
        Poisoned,
        Complete,
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
pub use once::OnceState;

View File

@@ -0,0 +1,113 @@
#![forbid(unsafe_op_in_unsafe_fn)]
use crate::alloc::{GlobalAlloc, Layout, System};
use crate::ptr;
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values.
// NOTE(review): the 8/16 buckets below presumably mirror each platform
// allocator's `max_align_t`-style guarantee — confirm when adding targets.
#[allow(dead_code)]
const MIN_ALIGN: usize = if cfg!(any(
    all(target_arch = "riscv32", any(target_os = "espidf", target_os = "zkvm")),
    all(target_arch = "xtensa", target_os = "espidf"),
)) {
    // The allocator on the esp-idf and zkvm platforms guarantees 4 byte alignment.
    4
} else if cfg!(any(
    // Predominantly 32-bit architectures.
    target_arch = "x86",
    target_arch = "arm",
    target_arch = "m68k",
    target_arch = "csky",
    target_arch = "loongarch32",
    target_arch = "mips",
    target_arch = "mips32r6",
    target_arch = "powerpc",
    target_arch = "powerpc64",
    target_arch = "sparc",
    target_arch = "wasm32",
    target_arch = "hexagon",
    target_arch = "riscv32",
    target_arch = "xtensa",
)) {
    8
} else if cfg!(any(
    // 64-bit architectures.
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "arm64ec",
    target_arch = "loongarch64",
    target_arch = "mips64",
    target_arch = "mips64r6",
    target_arch = "s390x",
    target_arch = "sparc64",
    target_arch = "riscv64",
    target_arch = "wasm64",
)) {
    16
} else {
    // Evaluated at compile time: an unknown architecture fails the build
    // instead of silently picking an unsound alignment.
    panic!("add a value for MIN_ALIGN")
};
/// Generic `realloc` emulation for platforms whose allocator cannot resize in
/// place: allocate a new block at the old alignment, copy the overlapping
/// prefix, and free the old block. Returns null (keeping the old block alive)
/// if the new allocation fails, matching `GlobalAlloc::realloc` semantics.
#[allow(dead_code)]
unsafe fn realloc_fallback(
    alloc: &System,
    ptr: *mut u8,
    old_layout: Layout,
    new_size: usize,
) -> *mut u8 {
    // SAFETY: Docs for GlobalAlloc::realloc require this to be valid
    unsafe {
        let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
        let new_ptr = GlobalAlloc::alloc(alloc, new_layout);
        if new_ptr.is_null() {
            return new_ptr;
        }
        // Only the bytes present in both the old and new block are preserved.
        let copy_len = usize::min(old_layout.size(), new_size);
        ptr::copy_nonoverlapping(ptr, new_ptr, copy_len);
        GlobalAlloc::dealloc(alloc, ptr, old_layout);
        new_ptr
    }
}
// Platform dispatch: compile exactly one OS-specific module providing the
// `GlobalAlloc for System` implementation for the current target.
cfg_select! {
    any(
        target_family = "unix",
        target_os = "wasi",
        target_os = "teeos",
        target_os = "trusty",
    ) => {
        mod unix;
    }
    target_os = "windows" => {
        mod windows;
    }
    target_os = "hermit" => {
        mod hermit;
    }
    target_os = "motor" => {
        mod motor;
    }
    all(target_vendor = "fortanix", target_env = "sgx") => {
        mod sgx;
    }
    target_os = "solid_asp3" => {
        mod solid;
    }
    target_os = "uefi" => {
        mod uefi;
    }
    target_os = "vexos" => {
        mod vexos;
    }
    target_family = "wasm" => {
        mod wasm;
    }
    target_os = "xous" => {
        mod xous;
    }
    target_os = "zkvm" => {
        mod zkvm;
    }
    target_os = "survos" => {
        mod survos;
    }
}

View File

@@ -0,0 +1,14 @@
use crate::alloc::{GlobalAlloc, Layout, System};
#[stable(feature = "alloc_system_type", since = "1.28.0")]
// survos backend: forwards allocation directly to kernel syscalls. Only
// `alloc`/`dealloc` are provided; `realloc` and `alloc_zeroed` fall back to
// the `GlobalAlloc` default implementations.
unsafe impl GlobalAlloc for System {
    #[inline]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // SAFETY: caller upholds the `GlobalAlloc::alloc` contract.
        unsafe { crate::syscall::alloc(layout) }
    }
    #[inline]
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // SAFETY: caller upholds the `GlobalAlloc::dealloc` contract; `ptr`
        // came from `syscall::alloc` with the same `layout`.
        unsafe { crate::syscall::dealloc(ptr, layout) }
    }
}

View File

@@ -11,7 +11,6 @@
target_os = "uefi",
target_os = "wasi",
target_os = "xous",
target_os = "survos",
))]
mod common;

View File

@@ -8,6 +8,9 @@ pub mod path;
pub mod sync;
pub mod thread;
pub mod time;
pub mod random;
pub mod thread_local;
pub mod alloc;
// pub mod fs;
/// A trait for viewing representations from std types.

View File

@@ -4,10 +4,10 @@
use core::clone::CloneToUninit;
use crate::borrow::Cow;
use alloc::bstr::ByteStr;
use alloc::collections::TryReserveError;
use alloc_crate::bstr::ByteStr;
use alloc_crate::collections::TryReserveError;
use crate::rc::Rc;
use alloc::sync::Arc;
use alloc_crate::sync::Arc;
use crate::sys::{AsInner, FromInner, IntoInner};
use crate::{fmt, mem, str};

View File

@@ -0,0 +1,134 @@
// Platform dispatch for OS randomness: compile exactly one backend module and
// re-export its `fill_bytes` (and, where the OS provides one natively,
// `hashmap_random_keys`); other targets get the generic
// `hashmap_random_keys` defined below this block.
cfg_select! {
    // Tier 1
    any(target_os = "linux", target_os = "android") => {
        mod linux;
        pub use linux::{fill_bytes, hashmap_random_keys};
    }
    target_os = "windows" => {
        mod windows;
        pub use windows::fill_bytes;
    }
    target_vendor = "apple" => {
        mod apple;
        pub use apple::fill_bytes;
        // Others, in alphabetical ordering.
    }
    any(
        target_os = "dragonfly",
        target_os = "freebsd",
        target_os = "haiku",
        target_os = "illumos",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "rtems",
        target_os = "solaris",
        target_os = "vita",
        target_os = "nuttx",
    ) => {
        mod arc4random;
        pub use arc4random::fill_bytes;
    }
    target_os = "emscripten" => {
        mod getentropy;
        pub use getentropy::fill_bytes;
    }
    target_os = "espidf" => {
        mod espidf;
        pub use espidf::fill_bytes;
    }
    target_os = "fuchsia" => {
        mod fuchsia;
        pub use fuchsia::fill_bytes;
    }
    target_os = "hermit" => {
        mod hermit;
        pub use hermit::fill_bytes;
    }
    any(target_os = "horizon", target_os = "cygwin") => {
        // FIXME(horizon): add arc4random_buf to shim-3ds
        mod getrandom;
        pub use getrandom::fill_bytes;
    }
    any(
        target_os = "aix",
        target_os = "hurd",
        target_os = "l4re",
        target_os = "nto",
    ) => {
        mod unix_legacy;
        pub use unix_legacy::fill_bytes;
    }
    target_os = "redox" => {
        mod redox;
        pub use redox::fill_bytes;
    }
    target_os = "motor" => {
        mod motor;
        pub use motor::fill_bytes;
    }
    all(target_vendor = "fortanix", target_env = "sgx") => {
        mod sgx;
        pub use sgx::fill_bytes;
    }
    target_os = "solid_asp3" => {
        mod solid;
        pub use solid::fill_bytes;
    }
    target_os = "teeos" => {
        mod teeos;
        pub use teeos::fill_bytes;
    }
    target_os = "trusty" => {
        mod trusty;
        pub use trusty::fill_bytes;
    }
    target_os = "uefi" => {
        mod uefi;
        pub use uefi::fill_bytes;
    }
    target_os = "vxworks" => {
        mod vxworks;
        pub use vxworks::fill_bytes;
    }
    all(target_os = "wasi", target_env = "p1") => {
        mod wasip1;
        pub use wasip1::fill_bytes;
    }
    all(target_os = "wasi", any(target_env = "p2", target_env = "p3")) => {
        mod wasip2;
        pub use wasip2::{fill_bytes, hashmap_random_keys};
    }
    target_os = "zkvm" => {
        mod zkvm;
        pub use zkvm::fill_bytes;
    }
    any(
        all(target_family = "wasm", target_os = "unknown"),
        target_os = "xous",
        target_os = "vexos",
        target_os = "survos",
    ) => {
        // FIXME: finally remove std support for wasm32-unknown-unknown
        // FIXME: add random data generation to xous
        mod unsupported;
        pub use unsupported::{fill_bytes, hashmap_random_keys};
    }
    _ => {}
}
#[cfg(not(any(
    target_os = "linux",
    target_os = "android",
    all(target_family = "wasm", target_os = "unknown"),
    all(target_os = "wasi", not(target_env = "p1")),
    target_os = "xous",
    target_os = "vexos",
    target_os = "survos",
)))]
pub fn hashmap_random_keys() -> (u64, u64) {
    // Request 16 bytes of entropy in one call and split them into two
    // native-endian 64-bit halves.
    let mut bytes = [0u8; 16];
    fill_bytes(&mut bytes);
    let (lo, hi) = bytes.split_at(8);
    (
        u64::from_ne_bytes(lo.try_into().unwrap()),
        u64::from_ne_bytes(hi.try_into().unwrap()),
    )
}

View File

@@ -0,0 +1,15 @@
use crate::ptr;
/// Stub for targets without any entropy source: always panics.
pub fn fill_bytes(_bytes: &mut [u8]) {
    panic!("this target does not support random data generation");
}
pub fn hashmap_random_keys() -> (u64, u64) {
    // No entropy source exists on this target, so derive the keys from the
    // addresses of a stack slot and a heap allocation. This provides a bit of
    // run-to-run variation but is NOT particularly secure; there just isn't
    // really an alternative here.
    let on_stack = 0u8;
    let on_heap = Box::new(0u8);
    (
        ptr::from_ref(&on_stack).addr() as u64,
        ptr::from_ref(&*on_heap).addr() as u64,
    )
}

View File

@@ -1,5 +1,9 @@
// Platform-backed implementations of the public synchronization primitives.
mod condvar;
mod mutex;
mod once;
mod rwlock;
// Re-export the platform-selected types under stable names.
pub use condvar::Condvar;
pub use mutex::Mutex;
pub use once::{Once, OnceState};
pub use rwlock::RwLock;

View File

@@ -0,0 +1,40 @@
// A "once" is a relatively simple primitive, and it's also typically provided
// by the OS as well (see `pthread_once` or `InitOnceExecuteOnce`). The OS
// primitives, however, tend to have surprising restrictions, such as the Unix
// one doesn't allow an argument to be passed to the function.
//
// As a result, we end up implementing it ourselves in the standard library.
// This also gives us the opportunity to optimize the implementation a bit which
// should help the fast path on call sites.
cfg_select! {
    // Targets with a futex-like wait/wake API get the compact futex-based
    // implementation (see the `futex` module).
    any(
        all(target_os = "windows", not(target_vendor="win7")),
        target_os = "linux",
        target_os = "android",
        all(target_arch = "wasm32", target_feature = "atomics"),
        target_os = "freebsd",
        target_os = "motor",
        target_os = "openbsd",
        target_os = "dragonfly",
        target_os = "fuchsia",
        target_os = "hermit",
    ) => {
        mod futex;
        pub use futex::{Once, OnceState};
    }
    // Threaded targets without futex support fall back to the generic
    // `queue`-based implementation.
    any(
        windows,
        target_family = "unix",
        all(target_vendor = "fortanix", target_env = "sgx"),
        target_os = "solid_asp3",
        target_os = "xous",
    ) => {
        mod queue;
        pub use queue::{Once, OnceState};
    }
    // Everything else is assumed to be single-threaded, so no
    // synchronization is needed at all.
    _ => {
        mod no_threads;
        pub use no_threads::{Once, OnceState};
    }
}

View File

@@ -0,0 +1,114 @@
use crate::cell::Cell;
use crate::sync as public;
use crate::sync::once::OnceExclusiveState;
/// `Once` for targets without threads: the state machine is tracked in a
/// plain `Cell`, since there can be no concurrent access.
pub struct Once {
    state: Cell<State>,
}
/// State passed to the closure run by [`Once::call`], letting it observe and
/// set the poison status.
pub struct OnceState {
    // Whether the previous initialization attempt panicked.
    poisoned: bool,
    // The state the `Once` should transition to when the closure returns
    // successfully (`Complete` by default, `Poisoned` if `poison` is called).
    set_state_to: Cell<State>,
}
#[derive(Clone, Copy, PartialEq, Eq)]
enum State {
    Incomplete,
    Poisoned,
    Running,
    Complete,
}
/// Drop guard that commits the final state even if the initialization
/// closure unwinds (in which case `set_state_on_drop_to` is still
/// `Poisoned`).
struct CompletionGuard<'a> {
    state: &'a Cell<State>,
    set_state_on_drop_to: State,
}
impl<'a> Drop for CompletionGuard<'a> {
    fn drop(&mut self) {
        self.state.set(self.set_state_on_drop_to);
    }
}
// Safety: threads are not supported on this platform.
unsafe impl Sync for Once {}
impl Once {
    /// Creates a new `Once` in the `Incomplete` state.
    #[inline]
    pub const fn new() -> Once {
        Once { state: Cell::new(State::Incomplete) }
    }
    /// Returns `true` once a `call` closure has run to completion.
    #[inline]
    pub fn is_completed(&self) -> bool {
        self.state.get() == State::Complete
    }
    /// Reads the state through exclusive access. `Running` cannot be
    /// observed here: it is only set while `call` holds a shared borrow,
    /// which cannot coexist with this `&mut self`.
    #[inline]
    pub(crate) fn state(&mut self) -> OnceExclusiveState {
        match self.state.get() {
            State::Incomplete => OnceExclusiveState::Incomplete,
            State::Poisoned => OnceExclusiveState::Poisoned,
            State::Complete => OnceExclusiveState::Complete,
            _ => unreachable!("invalid Once state"),
        }
    }
    /// Overwrites the state through exclusive access.
    #[inline]
    pub(crate) fn set_state(&mut self, new_state: OnceExclusiveState) {
        self.state.set(match new_state {
            OnceExclusiveState::Incomplete => State::Incomplete,
            OnceExclusiveState::Poisoned => State::Poisoned,
            OnceExclusiveState::Complete => State::Complete,
        });
    }
    /// Waiting cannot be implemented without threads: no other thread exists
    /// that could finish the initialization, so this always panics.
    #[cold]
    #[track_caller]
    pub fn wait(&self, _ignore_poisoning: bool) {
        panic!("not implementable on this target");
    }
    /// Runs `f` if the `Once` has not completed yet, handling poisoning and
    /// recursion. If `f` unwinds, the `Once` is left poisoned.
    #[cold]
    #[track_caller]
    pub fn call(&self, ignore_poisoning: bool, f: &mut impl FnMut(&public::OnceState)) {
        let state = self.state.get();
        match state {
            State::Poisoned if !ignore_poisoning => {
                // Panic to propagate the poison.
                panic!("Once instance has previously been poisoned");
            }
            State::Incomplete | State::Poisoned => {
                self.state.set(State::Running);
                // `guard` will set the new state on drop.
                let mut guard =
                    CompletionGuard { state: &self.state, set_state_on_drop_to: State::Poisoned };
                // Run the function, letting it know if we're poisoned or not.
                let f_state = public::OnceState {
                    inner: OnceState {
                        poisoned: state == State::Poisoned,
                        set_state_to: Cell::new(State::Complete),
                    },
                };
                f(&f_state);
                // `f` returned normally; commit whatever state it requested
                // (`Complete` unless it called `poison`).
                guard.set_state_on_drop_to = f_state.inner.set_state_to.get();
            }
            State::Running => {
                panic!("one-time initialization may not be performed recursively");
            }
            State::Complete => {}
        }
    }
}
impl OnceState {
    /// Returns `true` if a previous initialization attempt panicked.
    #[inline]
    pub fn is_poisoned(&self) -> bool {
        self.poisoned
    }
    /// Requests that the `Once` be left poisoned even though the closure
    /// returns normally.
    #[inline]
    pub fn poison(&self) {
        self.set_state_to.set(State::Poisoned)
    }
}

View File

@@ -0,0 +1,35 @@
cfg_select! {
    // Targets with a futex-like wait/wake API get the futex-based
    // implementation (see the `futex` module).
    any(
        all(target_os = "windows", not(target_vendor = "win7")),
        target_os = "linux",
        target_os = "android",
        target_os = "freebsd",
        target_os = "openbsd",
        target_os = "dragonfly",
        target_os = "fuchsia",
        all(target_family = "wasm", target_feature = "atomics"),
        target_os = "hermit",
        target_os = "motor",
    ) => {
        mod futex;
        pub use futex::RwLock;
    }
    // Threaded targets without futex support fall back to the generic
    // `queue`-based implementation.
    any(
        target_family = "unix",
        all(target_os = "windows", target_vendor = "win7"),
        all(target_vendor = "fortanix", target_env = "sgx"),
        target_os = "xous",
        target_os = "teeos",
    ) => {
        mod queue;
        pub use queue::RwLock;
    }
    // SOLID uses its own platform-specific implementation.
    target_os = "solid_asp3" => {
        mod solid;
        pub use solid::RwLock;
    }
    // Everything else is assumed to be single-threaded.
    _ => {
        mod no_threads;
        pub use no_threads::RwLock;
    }
}

View File

@@ -0,0 +1,69 @@
use crate::cell::Cell;
pub struct RwLock {
    // This platform has no threads, so we can use a Cell here.
    // Encoding: 0 = unlocked, n > 0 = n readers, -1 = write-locked.
    mode: Cell<isize>,
}
unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {} // no threads on this platform
impl RwLock {
    /// Creates an unlocked lock (`mode == 0`).
    #[inline]
    pub const fn new() -> RwLock {
        RwLock { mode: Cell::new(0) }
    }
    /// Acquires a shared lock; aborts if a writer currently holds it, since
    /// with no other threads the deadlock could never resolve.
    #[inline]
    pub fn read(&self) {
        match self.mode.get() {
            readers if readers >= 0 => self.mode.set(readers + 1),
            _ => rtabort!("rwlock locked for writing"),
        }
    }
    /// Attempts a shared lock; returns whether it was acquired.
    #[inline]
    pub fn try_read(&self) -> bool {
        match self.mode.get() {
            readers if readers >= 0 => {
                self.mode.set(readers + 1);
                true
            }
            _ => false,
        }
    }
    /// Acquires the exclusive lock; aborts if the lock is held at all.
    #[inline]
    pub fn write(&self) {
        let previous = self.mode.replace(-1);
        if previous != 0 {
            rtabort!("rwlock locked for reading")
        }
    }
    /// Attempts the exclusive lock; returns whether it was acquired.
    #[inline]
    pub fn try_write(&self) -> bool {
        match self.mode.get() {
            0 => {
                self.mode.set(-1);
                true
            }
            _ => false,
        }
    }
    /// Releases one shared lock. Caller must hold a read lock.
    #[inline]
    pub unsafe fn read_unlock(&self) {
        let readers = self.mode.get();
        self.mode.set(readers - 1);
    }
    /// Releases the exclusive lock. Caller must hold the write lock.
    #[inline]
    pub unsafe fn write_unlock(&self) {
        let previous = self.mode.replace(0);
        assert_eq!(previous, -1);
    }
    /// Atomically turns the exclusive lock into a single shared lock.
    #[inline]
    pub unsafe fn downgrade(&self) {
        let previous = self.mode.replace(1);
        assert_eq!(previous, -1);
    }
}

View File

@@ -0,0 +1,227 @@
//! Implementation of the `thread_local` macro.
//!
//! There are three different thread-local implementations:
//! * Some targets lack threading support, and hence have only one thread, so
//! the TLS data is stored in a normal `static`.
//! * Some targets support TLS natively via the dynamic linker and C runtime.
//! * On some targets, the OS provides a library-based TLS implementation. The
//! TLS data is heap-allocated and referenced using a TLS key.
//!
//! Each implementation provides a macro which generates the `LocalKey` `const`
//! used to reference the TLS variable, along with the necessary helper structs
//! to track the initialization/destruction state of the variable.
//!
//! Additionally, this module contains abstractions for the OS interfaces used
//! for these implementations.
#![cfg_attr(test, allow(unused))]
#![doc(hidden)]
#![forbid(unsafe_op_in_unsafe_fn)]
#![unstable(
feature = "thread_local_internals",
reason = "internal details of the thread_local macro",
issue = "none"
)]
cfg_select! {
    // Targets without threads: TLS data can live in plain `static`s.
    any(
        all(target_family = "wasm", not(target_feature = "atomics")),
        target_os = "uefi",
        target_os = "zkvm",
        target_os = "trusty",
        target_os = "vexos",
        target_os = "survos",
    ) => {
        mod no_threads;
        pub use no_threads::{EagerStorage, LazyStorage, thread_local_inner};
        pub(crate) use no_threads::{LocalPointer, local_pointer};
    }
    // The dynamic linker and C runtime provide native `#[thread_local]`
    // support.
    target_thread_local => {
        mod native;
        pub use native::{EagerStorage, LazyStorage, thread_local_inner};
        pub(crate) use native::{LocalPointer, local_pointer};
    }
    // Otherwise, fall back to the OS-provided, key-based TLS API.
    _ => {
        mod os;
        pub use os::{Storage, thread_local_inner, value_align};
        pub(crate) use os::{LocalPointer, local_pointer};
    }
}
/// The native TLS implementation needs a way to register destructors for its data.
/// This module contains platform-specific implementations of that register.
///
/// It turns out however that most platforms don't have a way to register a
/// destructor for each variable. On these platforms, we keep track of the
/// destructors ourselves and register (through the [`guard`] module) only a
/// single callback that runs all of the destructors in the list.
#[cfg(all(target_thread_local, not(all(target_family = "wasm", not(target_feature = "atomics")))))]
pub(crate) mod destructors {
    cfg_select! {
        // These platforms can register a destructor for each variable
        // directly with the runtime (see `linux_like`); the list is still
        // used as a fallback.
        any(
            target_os = "linux",
            target_os = "android",
            target_os = "fuchsia",
            target_os = "redox",
            target_os = "hurd",
            target_os = "netbsd",
            target_os = "dragonfly"
        ) => {
            mod linux_like;
            mod list;
            pub(super) use linux_like::register;
            pub(super) use list::run;
        }
        // Everywhere else, destructors are tracked in our own list and run
        // by a single callback registered through the `guard` module.
        _ => {
            mod list;
            pub(super) use list::register;
            pub(crate) use list::run;
        }
    }
}
/// This module provides a way to schedule the execution of the destructor list
/// and the [runtime cleanup](crate::rt::thread_cleanup) function. Calling `enable`
/// should ensure that these functions are called at the right times.
pub(crate) mod guard {
    cfg_select! {
        all(target_thread_local, target_vendor = "apple") => {
            mod apple;
            pub(crate) use apple::enable;
        }
        target_os = "windows" => {
            mod windows;
            pub(crate) use windows::enable;
        }
        any(
            all(target_family = "wasm", not(
                all(target_os = "wasi", target_env = "p1", target_feature = "atomics")
            )),
            target_os = "uefi",
            target_os = "zkvm",
            target_os = "trusty",
            target_os = "vexos",
        ) => {
            pub(crate) fn enable() {
                // FIXME: Right now there is no concept of "thread exit" on
                // wasm, but this is likely going to show up at some point in
                // the form of an exported symbol that the wasm runtime is going
                // to be expected to call. For now we just leak everything, but
                // if such a function starts to exist it will probably need to
                // iterate the destructor list with these functions:
                #[cfg(all(target_family = "wasm", target_feature = "atomics"))]
                #[allow(unused)]
                use super::destructors::run;
                #[allow(unused)]
                use crate::rt::thread_cleanup;
            }
        }
        any(
            target_os = "hermit",
            target_os = "xous",
        ) => {
            // `std` is the only runtime, so it just calls the destructor functions
            // itself when the time comes.
            pub(crate) fn enable() {}
        }
        target_os = "solid_asp3" => {
            mod solid;
            pub(crate) use solid::enable;
        }
        target_os = "survos" => {
            // TODO(survos): thread-exit cleanup is not implemented yet, so
            // nothing schedules the destructors and they are never run.
            pub(crate) fn enable() {}
        }
        // Default: register a TLS-key-based destructor (see `key`).
        _ => {
            mod key;
            pub(crate) use key::enable;
        }
    }
}
/// `const`-creatable TLS keys.
///
/// Most OSs without native TLS will provide a library-based way to create TLS
/// storage. For each TLS variable, we create a key, which can then be used to
/// reference an entry in a thread-local table. This then associates each key
/// with a pointer which we can get and set to store our data.
pub(crate) mod key {
    cfg_select! {
        // pthread-style keys on Unix-family targets (and Apple targets
        // without native TLS).
        any(
            all(
                not(target_vendor = "apple"),
                not(target_family = "wasm"),
                target_family = "unix",
            ),
            all(not(target_thread_local), target_vendor = "apple"),
            target_os = "teeos",
            all(target_os = "wasi", target_env = "p1", target_feature = "atomics"),
        ) => {
            mod racy;
            mod unix;
            #[cfg(test)]
            mod tests;
            pub(super) use racy::LazyKey;
            pub(super) use unix::{Key, set};
            #[cfg(any(not(target_thread_local), test))]
            pub(super) use unix::get;
            use unix::{create, destroy};
        }
        // Windows provides its own lazy key type (no `racy` wrapper needed).
        all(not(target_thread_local), target_os = "windows") => {
            #[cfg(test)]
            mod tests;
            mod windows;
            pub(super) use windows::{Key, LazyKey, get, run_dtors, set};
        }
        all(target_vendor = "fortanix", target_env = "sgx") => {
            mod racy;
            mod sgx;
            #[cfg(test)]
            mod tests;
            pub(super) use racy::LazyKey;
            pub(super) use sgx::{Key, get, set};
            use sgx::{create, destroy};
        }
        target_os = "xous" => {
            mod racy;
            #[cfg(test)]
            mod tests;
            mod xous;
            pub(super) use racy::LazyKey;
            pub(crate) use xous::destroy_tls;
            pub(super) use xous::{Key, get, set};
            use xous::{create, destroy};
        }
        // Motor OS exposes its TLS key API through the `moto_rt` runtime.
        target_os = "motor" => {
            mod racy;
            #[cfg(test)]
            mod tests;
            pub(super) use racy::LazyKey;
            pub(super) use moto_rt::tls::{Key, get, set};
            use moto_rt::tls::{create, destroy};
        }
        // Remaining targets use native TLS or `static`s and need no keys.
        _ => {}
    }
}
/// Run a callback in a scenario which must not unwind (such as a `extern "C"
/// fn` declared in a user crate). If the callback unwinds anyway, then
/// `rtabort` with a message about thread local panicking on drop.
#[inline]
#[allow(dead_code)]
fn abort_on_dtor_unwind(f: impl FnOnce()) {
    /// Aborts the process from its `Drop` impl unless `mem::forget`-ed first.
    struct AbortGuard;
    impl Drop for AbortGuard {
        #[inline]
        fn drop(&mut self) {
            // This is not terribly descriptive, but it doesn't need to be as
            // a panic message will already have been printed at this point.
            rtabort!("thread local panicked on drop");
        }
    }

    // Arm the guard, run the callback, then disarm it. If `f` unwinds, the
    // guard is dropped during unwinding and aborts the process.
    let armed = AbortGuard;
    f();
    core::mem::forget(armed);
}

View File

@@ -0,0 +1,150 @@
//! On some targets like wasm there are no threads, so there is no need to
//! generate thread locals and we can instead just use plain statics!
use crate::cell::{Cell, UnsafeCell};
use crate::mem::MaybeUninit;
use crate::ptr;
/// Expansion of `thread_local!` for targets without threads: the "TLS"
/// variable is simply a plain `static`.
#[doc(hidden)]
#[allow_internal_unstable(thread_local_internals)]
#[allow_internal_unsafe]
#[unstable(feature = "thread_local_internals", issue = "none")]
#[rustc_macro_transparency = "semiopaque"]
pub macro thread_local_inner {
    // used to generate the `LocalKey` value for const-initialized thread locals
    (@key $t:ty, $(#[$align_attr:meta])*, const $init:expr) => {{
        const __RUST_STD_INTERNAL_INIT: $t = $init;
        // NOTE: Please update the shadowing test in `tests/thread.rs` if these types are renamed.
        unsafe {
            $crate::thread::LocalKey::new(|_| {
                $(#[$align_attr])*
                static __RUST_STD_INTERNAL_VAL: $crate::thread::local_impl::EagerStorage<$t> =
                    $crate::thread::local_impl::EagerStorage { value: __RUST_STD_INTERNAL_INIT };
                &__RUST_STD_INTERNAL_VAL.value
            })
        }
    }},
    // used to generate the `LocalKey` value for `thread_local!`
    (@key $t:ty, $(#[$align_attr:meta])*, $init:expr) => {{
        #[inline]
        fn __rust_std_internal_init_fn() -> $t { $init }
        unsafe {
            $crate::thread::LocalKey::new(|__rust_std_internal_init| {
                $(#[$align_attr])*
                static __RUST_STD_INTERNAL_VAL: $crate::thread::local_impl::LazyStorage<$t> = $crate::thread::local_impl::LazyStorage::new();
                __RUST_STD_INTERNAL_VAL.get(__rust_std_internal_init, __rust_std_internal_init_fn)
            })
        }
    }},
}
/// Storage for a `const`-initialized thread local. With no threads, the
/// value is embedded directly in a regular `static`.
#[allow(missing_debug_implementations)]
#[repr(transparent)] // Required for correctness of `#[rustc_align_static]`
pub struct EagerStorage<T> {
    pub value: T,
}
// SAFETY: the target doesn't have threads.
unsafe impl<T> Sync for EagerStorage<T> {}
/// Lifecycle of a lazily-initialized value.
#[derive(Clone, Copy, PartialEq, Eq)]
enum State {
    /// Not initialized yet (or reset after a destruction).
    Initial,
    /// Initialized; `value` may be read.
    Alive,
    /// The destructor of the old value is running; used to detect
    /// re-initialization from within `Drop`.
    Destroying,
}
/// Storage for a lazily-initialized thread local on a threadless target:
/// a plain static holding the value plus its lifecycle state.
#[allow(missing_debug_implementations)]
#[repr(C)]
pub struct LazyStorage<T> {
    // This field must be first, for correctness of `#[rustc_align_static]`
    value: UnsafeCell<MaybeUninit<T>>,
    state: Cell<State>,
}
impl<T> LazyStorage<T> {
    pub const fn new() -> LazyStorage<T> {
        LazyStorage {
            value: UnsafeCell::new(MaybeUninit::uninit()),
            state: Cell::new(State::Initial),
        }
    }
    /// Gets a pointer to the TLS value, potentially initializing it with the
    /// provided parameters.
    ///
    /// The resulting pointer may not be used after reentrant initialization
    /// has occurred.
    #[inline]
    pub fn get(&'static self, i: Option<&mut Option<T>>, f: impl FnOnce() -> T) -> *const T {
        if self.state.get() == State::Alive {
            self.value.get() as *const T
        } else {
            self.initialize(i, f)
        }
    }
    #[cold]
    fn initialize(&'static self, i: Option<&mut Option<T>>, f: impl FnOnce() -> T) -> *const T {
        // Compute the new value first: `i` takes precedence over running `f`.
        // Note that `f` may itself reenter `get`/`initialize`.
        let value = i.and_then(Option::take).unwrap_or_else(f);
        // Destroy the old value if it is initialized
        // (possible if `f` reentrantly initialized this variable).
        // FIXME(#110897): maybe panic on recursive initialization.
        if self.state.get() == State::Alive {
            self.state.set(State::Destroying);
            // Safety: we check for no initialization during drop below
            unsafe {
                ptr::drop_in_place(self.value.get() as *mut T);
            }
            self.state.set(State::Initial);
        }
        // Guard against initialization during drop
        if self.state.get() == State::Destroying {
            panic!("Attempted to initialize thread-local while it is being dropped");
        }
        unsafe {
            self.value.get().write(MaybeUninit::new(value));
        }
        self.state.set(State::Alive);
        self.value.get() as *const T
    }
}
// SAFETY: the target doesn't have threads.
unsafe impl<T> Sync for LazyStorage<T> {}
/// Declares one or more `static` [`LocalPointer`] slots.
#[rustc_macro_transparency = "semiopaque"]
pub(crate) macro local_pointer {
    () => {},
    ($vis:vis static $name:ident; $($rest:tt)*) => {
        $vis static $name: $crate::sys::thread_local::LocalPointer = $crate::sys::thread_local::LocalPointer::__new();
        $crate::sys::thread_local::local_pointer! { $($rest)* }
    },
}
/// A single pointer-sized "thread-local" slot. With no threads on this
/// target, a plain `Cell` in a `static` suffices.
pub(crate) struct LocalPointer {
    ptr: Cell<*mut ()>,
}
impl LocalPointer {
    /// Creates a slot holding a null pointer.
    pub const fn __new() -> LocalPointer {
        LocalPointer { ptr: Cell::new(ptr::null_mut()) }
    }
    /// Returns the currently stored pointer.
    pub fn get(&self) -> *mut () {
        self.ptr.get()
    }
    /// Replaces the stored pointer with `p`.
    pub fn set(&self, p: *mut ()) {
        self.ptr.set(p)
    }
}
// SAFETY: the target doesn't have threads.
unsafe impl Sync for LocalPointer {}

View File

@@ -0,0 +1,289 @@
use super::key::{Key, LazyKey, get, set};
use super::{abort_on_dtor_unwind, guard};
use crate::alloc::{self, GlobalAlloc, Layout, System};
use crate::cell::Cell;
use crate::marker::PhantomData;
use crate::mem::ManuallyDrop;
use crate::ops::Deref;
use crate::panic::{AssertUnwindSafe, catch_unwind, resume_unwind};
use crate::ptr::{self, NonNull};
/// Expansion of `thread_local!` for targets using OS key-based TLS: each
/// variable gets a global `Storage` whose const `ALIGN` parameter is
/// computed from any `rustc_align_static`/`cfg_attr` attributes.
#[doc(hidden)]
#[allow_internal_unstable(thread_local_internals)]
#[allow_internal_unsafe]
#[unstable(feature = "thread_local_internals", issue = "none")]
#[rustc_macro_transparency = "semiopaque"]
pub macro thread_local_inner {
    // NOTE: we cannot import `Storage` or `LocalKey` with a `use` because that can shadow user
    // provided type or type alias with a matching name. Please update the shadowing test in
    // `tests/thread.rs` if these types are renamed.
    // used to generate the `LocalKey` value for `thread_local!`.
    (@key $t:ty, $($(#[$($align_attr:tt)*])+)?, $init:expr) => {{
        #[inline]
        fn __rust_std_internal_init_fn() -> $t { $init }
        // NOTE: this cannot import `LocalKey` or `Storage` with a `use` because that can shadow
        // user provided type or type alias with a matching name. Please update the shadowing test
        // in `tests/thread.rs` if these types are renamed.
        unsafe {
            $crate::thread::LocalKey::new(|__rust_std_internal_init| {
                // The const block computes the final alignment by folding all
                // `rustc_align_static` attributes (possibly behind `cfg_attr`)
                // over the natural alignment of the value.
                static __RUST_STD_INTERNAL_VAL: $crate::thread::local_impl::Storage<$t, {
                    $({
                        // Ensure that attributes have valid syntax
                        // and that the proper feature gate is enabled
                        $(#[$($align_attr)*])+
                        #[allow(unused)]
                        static DUMMY: () = ();
                    })?
                    #[allow(unused_mut)]
                    let mut final_align = $crate::thread::local_impl::value_align::<$t>();
                    $($($crate::thread::local_impl::thread_local_inner!(@align final_align, $($align_attr)*);)+)?
                    final_align
                }>
                = $crate::thread::local_impl::Storage::new();
                __RUST_STD_INTERNAL_VAL.get(__rust_std_internal_init, __rust_std_internal_init_fn)
            })
        }
    }},
    // process a single `rustc_align_static` attribute
    (@align $final_align:ident, rustc_align_static($($align:tt)*) $(, $($attr_rest:tt)+)?) => {
        let new_align: $crate::primitive::usize = $($align)*;
        if new_align > $final_align {
            $final_align = new_align;
        }
        $($crate::thread::local_impl::thread_local_inner!(@align $final_align, $($attr_rest)+);)?
    },
    // process a single `cfg_attr` attribute
    // by translating it into a `cfg`ed block and recursing.
    // https://doc.rust-lang.org/reference/conditional-compilation.html#railroad-ConfigurationPredicate
    (@align $final_align:ident, cfg_attr(true, $($cfg_rhs:tt)*) $(, $($attr_rest:tt)+)?) => {
        #[cfg(true)]
        {
            $crate::thread::local_impl::thread_local_inner!(@align $final_align, $($cfg_rhs)*);
        }
        $($crate::thread::local_impl::thread_local_inner!(@align $final_align, $($attr_rest)+);)?
    },
    (@align $final_align:ident, cfg_attr(false, $($cfg_rhs:tt)*) $(, $($attr_rest:tt)+)?) => {
        #[cfg(false)]
        {
            $crate::thread::local_impl::thread_local_inner!(@align $final_align, $($cfg_rhs)*);
        }
        $($crate::thread::local_impl::thread_local_inner!(@align $final_align, $($attr_rest)+);)?
    },
    (@align $final_align:ident, cfg_attr($cfg_pred:meta, $($cfg_rhs:tt)*) $(, $($attr_rest:tt)+)?) => {
        #[cfg($cfg_pred)]
        {
            $crate::thread::local_impl::thread_local_inner!(@align $final_align, $($cfg_rhs)*);
        }
        $($crate::thread::local_impl::thread_local_inner!(@align $final_align, $($attr_rest)+);)?
    },
}
/// Use a regular global static to store this key; the state provided will then be
/// thread-local.
/// INVARIANT: ALIGN must be a valid alignment, and no less than `value_align::<T>`.
#[allow(missing_debug_implementations)]
pub struct Storage<T, const ALIGN: usize> {
    key: LazyKey,
    // `Cell<T>` marker: the stored values are per-thread and mutable.
    marker: PhantomData<Cell<T>>,
}
// SAFETY(review): `Storage` itself only holds the key; each thread's value
// is heap-allocated and only reachable from its owning thread — confirm.
unsafe impl<T, const ALIGN: usize> Sync for Storage<T, ALIGN> {}
/// Heap-allocated payload stored under the TLS key: the user's value plus a
/// back-reference to the key it is stored under (needed by `destroy_value`).
#[repr(C)]
struct Value<T: 'static> {
    // This field must be first, for correctness of `#[rustc_align_static]`
    value: T,
    // INVARIANT: if this value is stored under a TLS key, `key` must be that `key`.
    key: Key,
}
/// The natural alignment of the heap block backing a TLS value of type `T`
/// (i.e. the alignment of the `Value<T>` wrapper); the minimum allowed
/// `ALIGN` for `Storage<T, ALIGN>`.
pub const fn value_align<T: 'static>() -> usize {
    crate::mem::align_of::<Value<T>>()
}
/// Equivalent to `Box<Value<T>, System>`, but potentially over-aligned.
struct AlignedSystemBox<T: 'static, const ALIGN: usize> {
    ptr: NonNull<Value<T>>,
}
impl<T: 'static, const ALIGN: usize> AlignedSystemBox<T, ALIGN> {
    /// Allocates a block of alignment `ALIGN` from the `System` allocator
    /// and moves `v` into it; aborts via `handle_alloc_error` on failure.
    #[inline]
    fn new(v: Value<T>) -> Self {
        let layout = Layout::new::<Value<T>>().align_to(ALIGN).unwrap();
        // We use the System allocator here to avoid interfering with a potential
        // Global allocator using thread-local storage.
        let ptr: *mut Value<T> = (unsafe { System.alloc(layout) }).cast();
        let Some(ptr) = NonNull::new(ptr) else {
            alloc::handle_alloc_error(layout);
        };
        unsafe { ptr.write(v) };
        Self { ptr }
    }
    /// Leaks the box, transferring ownership of the allocation to the caller.
    #[inline]
    fn into_raw(b: Self) -> *mut Value<T> {
        let md = ManuallyDrop::new(b);
        md.ptr.as_ptr()
    }
    /// Reclaims ownership of a pointer previously produced by `into_raw`.
    #[inline]
    unsafe fn from_raw(ptr: *mut Value<T>) -> Self {
        Self { ptr: unsafe { NonNull::new_unchecked(ptr) } }
    }
}
impl<T: 'static, const ALIGN: usize> Deref for AlignedSystemBox<T, ALIGN> {
    type Target = Value<T>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &*(self.ptr.as_ptr()) }
    }
}
impl<T: 'static, const ALIGN: usize> Drop for AlignedSystemBox<T, ALIGN> {
    #[inline]
    fn drop(&mut self) {
        let layout = Layout::new::<Value<T>>().align_to(ALIGN).unwrap();
        unsafe {
            // Even if the value's destructor unwinds, the allocation must be
            // returned to the System allocator; the panic is re-raised after.
            let unwind_result = catch_unwind(AssertUnwindSafe(|| self.ptr.drop_in_place()));
            System.dealloc(self.ptr.as_ptr().cast(), layout);
            if let Err(payload) = unwind_result {
                resume_unwind(payload);
            }
        }
    }
}
impl<T: 'static, const ALIGN: usize> Storage<T, ALIGN> {
    pub const fn new() -> Storage<T, ALIGN> {
        Storage { key: LazyKey::new(Some(destroy_value::<T, ALIGN>)), marker: PhantomData }
    }
    /// Gets a pointer to the TLS value, potentially initializing it with the
    /// provided parameters. If the TLS variable has been destroyed, a null
    /// pointer is returned.
    ///
    /// The resulting pointer may not be used after reentrant initialization
    /// or thread destruction has occurred.
    pub fn get(&'static self, i: Option<&mut Option<T>>, f: impl FnOnce() -> T) -> *const T {
        let key = self.key.force();
        let ptr = unsafe { get(key) as *mut Value<T> };
        // The slot holds one of: a real pointer, null (not yet initialized),
        // or the sentinel address 1 (set by `destroy_value` while the
        // destructor is running).
        if ptr.addr() > 1 {
            // SAFETY: the check ensured the pointer is safe (its destructor
            // is not running) + it is coming from a trusted source (self).
            unsafe { &(*ptr).value }
        } else {
            // SAFETY: trivially correct.
            unsafe { Self::try_initialize(key, ptr, i, f) }
        }
    }
    /// # Safety
    /// * `key` must be the result of calling `self.key.force()`
    /// * `ptr` must be the current value associated with `key`.
    unsafe fn try_initialize(
        key: Key,
        ptr: *mut Value<T>,
        i: Option<&mut Option<T>>,
        f: impl FnOnce() -> T,
    ) -> *const T {
        if ptr.addr() == 1 {
            // destructor is running
            return ptr::null();
        }
        // Build the new value (`i` takes precedence over calling `f`).
        let value = AlignedSystemBox::<T, ALIGN>::new(Value {
            value: i.and_then(Option::take).unwrap_or_else(f),
            key,
        });
        let ptr = AlignedSystemBox::into_raw(value);
        // SAFETY:
        // * key came from a `LazyKey` and is thus correct.
        // * `ptr` is a correct pointer that can be destroyed by the key destructor.
        // * the value is stored under the key that it contains.
        let old = unsafe {
            let old = get(key) as *mut Value<T>;
            set(key, ptr as *mut u8);
            old
        };
        if !old.is_null() {
            // If the variable was recursively initialized, drop the old value.
            // SAFETY: We cannot be inside a `LocalKey::with` scope, as the
            // initializer has already returned and the next scope only starts
            // after we return the pointer. Therefore, there can be no references
            // to the old value.
            drop(unsafe { AlignedSystemBox::<T, ALIGN>::from_raw(old) });
        }
        // SAFETY: We just created this value above.
        unsafe { &(*ptr).value }
    }
}
/// TLS-key destructor for values stored by `Storage<T, ALIGN>`; `ptr` is the
/// pointer previously stored under the key with `set`.
unsafe extern "C" fn destroy_value<T: 'static, const ALIGN: usize>(ptr: *mut u8) {
    // SAFETY:
    //
    // The OS TLS ensures that this key contains a null value when this
    // destructor starts to run. We set it back to a sentinel value of 1 to
    // ensure that any future calls to `get` for this thread will return
    // `None`.
    //
    // Note that to prevent an infinite loop we reset it back to null right
    // before we return from the destructor ourselves.
    abort_on_dtor_unwind(|| {
        let ptr = unsafe { AlignedSystemBox::<T, ALIGN>::from_raw(ptr as *mut Value<T>) };
        let key = ptr.key;
        // SAFETY: `key` is the TLS key `ptr` was stored under.
        unsafe { set(key, ptr::without_provenance_mut(1)) };
        drop(ptr);
        // SAFETY: `key` is the TLS key `ptr` was stored under.
        unsafe { set(key, ptr::null_mut()) };
        // Make sure that the runtime cleanup will be performed
        // after the next round of TLS destruction.
        guard::enable();
    });
}
/// Declares one or more `static` [`LocalPointer`] slots.
#[rustc_macro_transparency = "semiopaque"]
pub(crate) macro local_pointer {
    () => {},
    ($vis:vis static $name:ident; $($rest:tt)*) => {
        $vis static $name: $crate::sys::thread_local::LocalPointer = $crate::sys::thread_local::LocalPointer::__new();
        $crate::sys::thread_local::local_pointer! { $($rest)* }
    },
}
/// A single pointer-sized thread-local slot, backed by an OS TLS key.
pub(crate) struct LocalPointer {
    key: LazyKey,
}
impl LocalPointer {
    pub const fn __new() -> LocalPointer {
        // No destructor is registered: the slot stores a bare pointer that
        // does not own its referent.
        LocalPointer { key: LazyKey::new(None) }
    }
    /// Returns the pointer stored for the current thread (slots start out
    /// null before the first `set` on each thread).
    pub fn get(&'static self) -> *mut () {
        unsafe { get(self.key.force()) as *mut () }
    }
    /// Stores `p` in the current thread's slot.
    pub fn set(&'static self, p: *mut ()) {
        unsafe { set(self.key.force(), p as *mut u8) }
    }
}

View File

@@ -1,4 +1,15 @@
pub mod local;
pub use crate::sys::thread::*;
pub use local::LocalKey;
// Implementation details used by the thread_local!{} macro.
#[doc(hidden)]
#[unstable(feature = "thread_local_internals", issue = "none")]
pub mod local_impl {
    pub use super::local::thread_local_process_attrs;
    pub use crate::sys::thread_local::*;
}
pub struct ThreadInit {
pub handle: Thread,

View File

@@ -0,0 +1,865 @@
//! Thread local storage
#![unstable(feature = "thread_local_internals", issue = "none")]
use crate::cell::{Cell, RefCell};
use crate::error::Error;
use crate::fmt;
/// A thread local storage (TLS) key which owns its contents.
///
/// This key uses the fastest implementation available on the target platform.
/// It is instantiated with the [`thread_local!`] macro and the
/// primary method is the [`with`] method, though there are helpers to make
/// working with [`Cell`] types easier.
///
/// The [`with`] method yields a reference to the contained value which cannot
/// outlive the current thread or escape the given closure.
///
/// [`thread_local!`]: crate::thread_local
///
/// # Initialization and Destruction
///
/// Initialization is dynamically performed on the first call to a setter (e.g.
/// [`with`]) within a thread, and values that implement [`Drop`] get
/// destructed when a thread exits. Some platform-specific caveats apply, which
/// are explained below.
/// Note that, should the destructor panic, the whole process will be [aborted].
/// On platforms where initialization requires memory allocation, this is
/// performed directly through [`System`], allowing the [global allocator]
/// to make use of thread local storage.
///
/// A `LocalKey`'s initializer cannot recursively depend on itself. Using a
/// `LocalKey` in this way may cause panics, aborts, or infinite recursion on
/// the first call to `with`.
///
/// [`System`]: crate::alloc::System
/// [global allocator]: crate::alloc
/// [aborted]: crate::process::abort
///
/// # Single-thread Synchronization
///
/// Though there is no potential race with other threads, it is still possible to
/// obtain multiple references to the thread-local data in different places on
/// the call stack. For this reason, only shared (`&T`) references may be obtained.
///
/// To allow obtaining an exclusive mutable reference (`&mut T`), typically a
/// [`Cell`] or [`RefCell`] is used (see the [`std::cell`] for more information
/// on how exactly this works). To make this easier there are specialized
/// implementations for [`LocalKey<Cell<T>>`] and [`LocalKey<RefCell<T>>`].
///
/// [`std::cell`]: `crate::cell`
/// [`LocalKey<Cell<T>>`]: struct.LocalKey.html#impl-LocalKey<Cell<T>>
/// [`LocalKey<RefCell<T>>`]: struct.LocalKey.html#impl-LocalKey<RefCell<T>>
///
///
/// # Examples
///
/// ```
/// use std::cell::Cell;
/// use std::thread;
///
/// // explicit `const {}` block enables more efficient initialization
/// thread_local!(static FOO: Cell<u32> = const { Cell::new(1) });
///
/// assert_eq!(FOO.get(), 1);
/// FOO.set(2);
///
/// // each thread starts out with the initial value of 1
/// let t = thread::spawn(move || {
/// assert_eq!(FOO.get(), 1);
/// FOO.set(3);
/// });
///
/// // wait for the thread to complete and bail out on panic
/// t.join().unwrap();
///
/// // we retain our original value of 2 despite the child thread
/// assert_eq!(FOO.get(), 2);
/// ```
///
/// # Platform-specific behavior
///
/// Note that a "best effort" is made to ensure that destructors for types
/// stored in thread local storage are run, but not all platforms can guarantee
/// that destructors will be run for all types in thread local storage. For
/// example, there are a number of known caveats where destructors are not run:
///
/// 1. On Unix systems when pthread-based TLS is being used, destructors will
/// not be run for TLS values on the main thread when it exits. Note that the
/// application will exit immediately after the main thread exits as well.
/// 2. On all platforms it's possible for TLS to re-initialize other TLS slots
/// during destruction. Some platforms ensure that this cannot happen
/// infinitely by preventing re-initialization of any slot that has been
/// destroyed, but not all platforms have this guard. Those platforms that do
/// not guard typically have a synthetic limit after which point no more
/// destructors are run.
/// 3. When the process exits on Windows systems, TLS destructors may only be
/// run on the thread that causes the process to exit. This is because the
/// other threads may be forcibly terminated.
///
/// ## Synchronization in thread-local destructors
///
/// On Windows, synchronization operations (such as [`JoinHandle::join`]) in
/// thread local destructors are prone to deadlocks and so should be avoided.
/// This is because the [loader lock] is held while a destructor is run. The
/// lock is acquired whenever a thread starts or exits or when a DLL is loaded
/// or unloaded. Therefore these events are blocked for as long as a thread
/// local destructor is running.
///
/// [loader lock]: https://docs.microsoft.com/en-us/windows/win32/dlls/dynamic-link-library-best-practices
/// [`JoinHandle::join`]: crate::thread::JoinHandle::join
/// [`with`]: LocalKey::with
#[cfg_attr(not(test), rustc_diagnostic_item = "LocalKey")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct LocalKey<T: 'static> {
    // This outer `LocalKey<T>` type is what's going to be stored in statics,
    // but actual data inside will sometimes be tagged with #[thread_local].
    // It's not valid for a true static to reference a #[thread_local] static,
    // so we get around that by exposing an accessor through a layer of function
    // indirection (this thunk).
    //
    // Note that the thunk is itself unsafe because the returned lifetime of the
    // slot where data lives, `'static`, is not actually valid. The lifetime
    // here is actually slightly shorter than the currently running thread!
    //
    // Although this is an extra layer of indirection, it should in theory be
    // trivially devirtualizable by LLVM because the value of `inner` never
    // changes and the constant should be readonly within a crate. This mainly
    // only runs into problems when TLS statics are exported across crates.
    //
    // The `Option<&mut Option<T>>` argument lets the caller pass in a
    // pre-computed initial value that takes precedence over the default
    // initializer on first access.
    inner: fn(Option<&mut Option<T>>) -> *const T,
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<T: 'static> fmt::Debug for LocalKey<T> {
    /// Formats the key as `LocalKey { .. }`; the inner thunk is deliberately
    /// hidden, since it carries no information useful to callers.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("LocalKey");
        builder.finish_non_exhaustive()
    }
}
#[doc(hidden)]
#[allow_internal_unstable(thread_local_internals)]
#[unstable(feature = "thread_local_internals", issue = "none")]
#[rustc_macro_transparency = "semiopaque"]
// Implementation detail of `thread_local!`: a recursive token state machine
// that splits the attributes of each declared `static` into two buckets —
// `rustc_align_static` attributes and everything else — before delegating the
// declaration to `thread_local_inner!`. `cfg_attr` is unfolded recursively so
// that a `rustc_align_static` nested inside one still lands in the correct
// bucket, re-wrapped under an equivalent `cfg_attr` with the same predicate.
//
// Invocation shape: `([align attrs so far] [other attrs so far]; rest...)`,
// with one `@processing_cfg_attr { pred, rhs }` frame pushed per `cfg_attr`
// currently being unfolded (the accumulators before the frame belong to the
// cfg_attr body; the ones after it are the caller's, restored on completion).
pub macro thread_local_process_attrs {
    // Parse `cfg_attr` to figure out whether it's a `rustc_align_static`.
    // Each `cfg_attr` can have zero or more attributes on the RHS, and can be nested.
    // finished parsing the `cfg_attr`, it had no `rustc_align_static`:
    // re-emit everything gathered as a single equivalent `cfg_attr` on the
    // "other attrs" side and pop back to the saved accumulators.
    (
        [] [$(#[$($prev_other_attrs:tt)*])*];
        @processing_cfg_attr { pred: ($($predicate:tt)*), rhs: [] };
        [$($prev_align_attrs_ret:tt)*] [$($prev_other_attrs_ret:tt)*];
        $($rest:tt)*
    ) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [$($prev_align_attrs_ret)*] [$($prev_other_attrs_ret)* #[cfg_attr($($predicate)*, $($($prev_other_attrs)*),*)]];
            $($rest)*
        );
    ),
    // finished parsing the `cfg_attr`, it had nothing but `rustc_align_static`
    (
        [$(#[$($prev_align_attrs:tt)*])+] [];
        @processing_cfg_attr { pred: ($($predicate:tt)*), rhs: [] };
        [$($prev_align_attrs_ret:tt)*] [$($prev_other_attrs_ret:tt)*];
        $($rest:tt)*
    ) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [$($prev_align_attrs_ret)* #[cfg_attr($($predicate)*, $($($prev_align_attrs)*),+)]] [$($prev_other_attrs_ret)*];
            $($rest)*
        );
    ),
    // finished parsing the `cfg_attr`, it had a mix of `rustc_align_static` and other attrs:
    // emit one re-wrapped `cfg_attr` into each bucket.
    (
        [$(#[$($prev_align_attrs:tt)*])+] [$(#[$($prev_other_attrs:tt)*])+];
        @processing_cfg_attr { pred: ($($predicate:tt)*), rhs: [] };
        [$($prev_align_attrs_ret:tt)*] [$($prev_other_attrs_ret:tt)*];
        $($rest:tt)*
    ) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [$($prev_align_attrs_ret)* #[cfg_attr($($predicate)*, $($($prev_align_attrs)*),+)]] [$($prev_other_attrs_ret)* #[cfg_attr($($predicate)*, $($($prev_other_attrs)*),+)]];
            $($rest)*
        );
    ),
    // it's a `rustc_align_static`
    (
        [$($prev_align_attrs:tt)*] [$($prev_other_attrs:tt)*];
        @processing_cfg_attr { pred: ($($predicate:tt)*), rhs: [rustc_align_static($($align_static_args:tt)*) $(, $($attr_rhs:tt)*)?] };
        $($rest:tt)*
    ) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [$($prev_align_attrs)* #[rustc_align_static($($align_static_args)*)]] [$($prev_other_attrs)*];
            @processing_cfg_attr { pred: ($($predicate)*), rhs: [$($($attr_rhs)*)?] };
            $($rest)*
        );
    ),
    // it's a nested `cfg_attr(true, ...)`; recurse into RHS
    // (literal `true`/`false` predicates are matched explicitly because they
    // are not `meta` and would not match the general arm below)
    (
        [$($prev_align_attrs:tt)*] [$($prev_other_attrs:tt)*];
        @processing_cfg_attr { pred: ($($predicate:tt)*), rhs: [cfg_attr(true, $($cfg_rhs:tt)*) $(, $($attr_rhs:tt)*)?] };
        $($rest:tt)*
    ) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [] [];
            @processing_cfg_attr { pred: (true), rhs: [$($cfg_rhs)*] };
            [$($prev_align_attrs)*] [$($prev_other_attrs)*];
            @processing_cfg_attr { pred: ($($predicate)*), rhs: [$($($attr_rhs)*)?] };
            $($rest)*
        );
    ),
    // it's a nested `cfg_attr(false, ...)`; recurse into RHS
    (
        [$($prev_align_attrs:tt)*] [$($prev_other_attrs:tt)*];
        @processing_cfg_attr { pred: ($($predicate:tt)*), rhs: [cfg_attr(false, $($cfg_rhs:tt)*) $(, $($attr_rhs:tt)*)?] };
        $($rest:tt)*
    ) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [] [];
            @processing_cfg_attr { pred: (false), rhs: [$($cfg_rhs)*] };
            [$($prev_align_attrs)*] [$($prev_other_attrs)*];
            @processing_cfg_attr { pred: ($($predicate)*), rhs: [$($($attr_rhs)*)?] };
            $($rest)*
        );
    ),
    // it's a nested `cfg_attr(..., ...)`; recurse into RHS
    (
        [$($prev_align_attrs:tt)*] [$($prev_other_attrs:tt)*];
        @processing_cfg_attr { pred: ($($predicate:tt)*), rhs: [cfg_attr($cfg_lhs:meta, $($cfg_rhs:tt)*) $(, $($attr_rhs:tt)*)?] };
        $($rest:tt)*
    ) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [] [];
            @processing_cfg_attr { pred: ($cfg_lhs), rhs: [$($cfg_rhs)*] };
            [$($prev_align_attrs)*] [$($prev_other_attrs)*];
            @processing_cfg_attr { pred: ($($predicate)*), rhs: [$($($attr_rhs)*)?] };
            $($rest)*
        );
    ),
    // it's some other attribute
    (
        [$($prev_align_attrs:tt)*] [$($prev_other_attrs:tt)*];
        @processing_cfg_attr { pred: ($($predicate:tt)*), rhs: [$meta:meta $(, $($attr_rhs:tt)*)?] };
        $($rest:tt)*
    ) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [$($prev_align_attrs)*] [$($prev_other_attrs)* #[$meta]];
            @processing_cfg_attr { pred: ($($predicate)*), rhs: [$($($attr_rhs)*)?] };
            $($rest)*
        );
    ),
    // Separate attributes into `rustc_align_static` and everything else:
    // `rustc_align_static` attribute
    ([$($prev_align_attrs:tt)*] [$($prev_other_attrs:tt)*]; #[rustc_align_static $($attr_rest:tt)*] $($rest:tt)*) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [$($prev_align_attrs)* #[rustc_align_static $($attr_rest)*]] [$($prev_other_attrs)*];
            $($rest)*
        );
    ),
    // `cfg_attr(true, ...)` attribute; parse it
    ([$($prev_align_attrs:tt)*] [$($prev_other_attrs:tt)*]; #[cfg_attr(true, $($cfg_rhs:tt)*)] $($rest:tt)*) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [] [];
            @processing_cfg_attr { pred: (true), rhs: [$($cfg_rhs)*] };
            [$($prev_align_attrs)*] [$($prev_other_attrs)*];
            $($rest)*
        );
    ),
    // `cfg_attr(false, ...)` attribute; parse it
    ([$($prev_align_attrs:tt)*] [$($prev_other_attrs:tt)*]; #[cfg_attr(false, $($cfg_rhs:tt)*)] $($rest:tt)*) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [] [];
            @processing_cfg_attr { pred: (false), rhs: [$($cfg_rhs)*] };
            [$($prev_align_attrs)*] [$($prev_other_attrs)*];
            $($rest)*
        );
    ),
    // `cfg_attr(..., ...)` attribute; parse it
    ([$($prev_align_attrs:tt)*] [$($prev_other_attrs:tt)*]; #[cfg_attr($cfg_pred:meta, $($cfg_rhs:tt)*)] $($rest:tt)*) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [] [];
            @processing_cfg_attr { pred: ($cfg_pred), rhs: [$($cfg_rhs)*] };
            [$($prev_align_attrs)*] [$($prev_other_attrs)*];
            $($rest)*
        );
    ),
    // doc comment not followed by any other attributes; process it all at once to avoid blowing recursion limit
    ([$($prev_align_attrs:tt)*] [$($prev_other_attrs:tt)*]; $(#[doc $($doc_rhs:tt)*])+ $vis:vis static $($rest:tt)*) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [$($prev_align_attrs)*] [$($prev_other_attrs)* $(#[doc $($doc_rhs)*])+];
            $vis static $($rest)*
        );
    ),
    // 8 lines of doc comment; process them all at once to avoid blowing recursion limit
    ([$($prev_align_attrs:tt)*] [$($prev_other_attrs:tt)*];
     #[doc $($doc_rhs_1:tt)*] #[doc $($doc_rhs_2:tt)*] #[doc $($doc_rhs_3:tt)*] #[doc $($doc_rhs_4:tt)*]
     #[doc $($doc_rhs_5:tt)*] #[doc $($doc_rhs_6:tt)*] #[doc $($doc_rhs_7:tt)*] #[doc $($doc_rhs_8:tt)*]
     $($rest:tt)*) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [$($prev_align_attrs)*] [$($prev_other_attrs)*
             #[doc $($doc_rhs_1)*] #[doc $($doc_rhs_2)*] #[doc $($doc_rhs_3)*] #[doc $($doc_rhs_4)*]
             #[doc $($doc_rhs_5)*] #[doc $($doc_rhs_6)*] #[doc $($doc_rhs_7)*] #[doc $($doc_rhs_8)*]];
            $($rest)*
        );
    ),
    // other attribute
    ([$($prev_align_attrs:tt)*] [$($prev_other_attrs:tt)*]; #[$($attr:tt)*] $($rest:tt)*) => (
        $crate::thread::local_impl::thread_local_process_attrs!(
            [$($prev_align_attrs)*] [$($prev_other_attrs)* #[$($attr)*]];
            $($rest)*
        );
    ),
    // Delegate to `thread_local_inner` once attributes are fully categorized:
    // process `const` declaration and recurse
    ([$($align_attrs:tt)*] [$($other_attrs:tt)*]; $vis:vis static $name:ident: $t:ty = const $init:block $(; $($($rest:tt)+)?)?) => (
        $($other_attrs)* $vis const $name: $crate::thread::LocalKey<$t> =
            $crate::thread::local_impl::thread_local_inner!(@key $t, $($align_attrs)*, const $init);
        $($($crate::thread::local_impl::thread_local_process_attrs!([] []; $($rest)+);)?)?
    ),
    // process non-`const` declaration and recurse
    ([$($align_attrs:tt)*] [$($other_attrs:tt)*]; $vis:vis static $name:ident: $t:ty = $init:expr $(; $($($rest:tt)+)?)?) => (
        $($other_attrs)* $vis const $name: $crate::thread::LocalKey<$t> =
            $crate::thread::local_impl::thread_local_inner!(@key $t, $($align_attrs)*, $init);
        $($($crate::thread::local_impl::thread_local_process_attrs!([] []; $($rest)+);)?)?
    ),
}
/// Declare a new thread local storage key of type [`std::thread::LocalKey`].
///
/// # Syntax
///
/// The macro wraps any number of static declarations and makes them thread local.
/// Publicity and attributes for each static are allowed. Example:
///
/// ```
/// use std::cell::{Cell, RefCell};
///
/// thread_local! {
///     pub static FOO: Cell<u32> = const { Cell::new(1) };
///
///     static BAR: RefCell<Vec<f32>> = RefCell::new(vec![1.0, 2.0]);
/// }
///
/// assert_eq!(FOO.get(), 1);
/// BAR.with_borrow(|v| assert_eq!(v[1], 2.0));
/// ```
///
/// Note that only shared references (`&T`) to the inner data may be obtained, so a
/// type such as [`Cell`] or [`RefCell`] is typically used to allow mutating access.
///
/// This macro supports a special `const {}` syntax that can be used
/// when the initialization expression can be evaluated as a constant.
/// This can enable a more efficient thread local implementation that
/// can avoid lazy initialization. For types that do not
/// [need to be dropped][crate::mem::needs_drop], this can enable an
/// even more efficient implementation that does not need to
/// track any additional state.
///
/// ```
/// use std::cell::RefCell;
///
/// thread_local! {
///     pub static FOO: RefCell<Vec<u32>> = const { RefCell::new(Vec::new()) };
/// }
///
/// FOO.with_borrow(|v| assert_eq!(v.len(), 0));
/// ```
///
/// See [`LocalKey` documentation][`std::thread::LocalKey`] for more
/// information.
///
/// [`std::thread::LocalKey`]: crate::thread::LocalKey
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "thread_local_macro")]
#[allow_internal_unstable(thread_local_internals)]
macro_rules! thread_local {
    // An empty invocation expands to nothing.
    () => {};
    // One or more declarations: hand the whole token stream to the attribute
    // categorizer, which emits one `LocalKey` const per `static` declared.
    ($($tt:tt)+) => {
        $crate::thread::local_impl::thread_local_process_attrs!([] []; $($tt)+);
    };
}
/// An error returned by [`LocalKey::try_with`](struct.LocalKey.html#method.try_with).
///
/// Returned when a thread-local value is accessed while — or after — it is
/// being destroyed (e.g. from inside another TLS destructor).
#[stable(feature = "thread_local_try_with", since = "1.26.0")]
#[non_exhaustive]
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct AccessError;
#[stable(feature = "thread_local_try_with", since = "1.26.0")]
impl fmt::Debug for AccessError {
    /// Formats the error as `AccessError` (it carries no fields).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("AccessError");
        builder.finish()
    }
}
#[stable(feature = "thread_local_try_with", since = "1.26.0")]
impl fmt::Display for AccessError {
    /// Writes the fixed description `already destroyed`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `Formatter::pad` is exactly what `<str as Display>::fmt` delegates
        // to, so width/fill/alignment flags behave identically.
        f.pad("already destroyed")
    }
}
#[stable(feature = "thread_local_try_with", since = "1.26.0")]
// `AccessError` has no payload or cause, so the default `Error` methods suffice.
impl Error for AccessError {}
// This ensures the panicking code is outlined from `with` for `LocalKey`.
#[cfg_attr(not(panic = "immediate-abort"), inline(never))]
#[track_caller]
#[cold]
fn panic_access_error(err: AccessError) -> ! {
    // Positional `{:?}` keeps the message identical to the inline-capture form.
    panic!(
        "cannot access a Thread Local Storage value during or after destruction: {:?}",
        err
    )
}
impl<T: 'static> LocalKey<T> {
    #[doc(hidden)]
    #[unstable(
        feature = "thread_local_internals",
        reason = "recently added to create a key",
        issue = "none"
    )]
    // Wraps the generated accessor thunk. `unsafe` because the caller must
    // guarantee `inner` upholds the calling convention documented on the
    // `inner` field (pointer valid for the current thread, null once the
    // slot is destroyed).
    pub const unsafe fn new(inner: fn(Option<&mut Option<T>>) -> *const T) -> LocalKey<T> {
        LocalKey { inner }
    }
    /// Acquires a reference to the value in this TLS key.
    ///
    /// This will lazily initialize the value if this thread has not referenced
    /// this key yet.
    ///
    /// # Panics
    ///
    /// This function will `panic!()` if the key currently has its
    /// destructor running, and it **may** panic if the destructor has
    /// previously been run for this thread.
    ///
    /// # Examples
    ///
    /// ```
    /// thread_local! {
    ///     pub static STATIC: String = String::from("I am");
    /// }
    ///
    /// assert_eq!(
    ///     STATIC.with(|original_value| format!("{original_value} initialized")),
    ///     "I am initialized",
    /// );
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with<F, R>(&'static self, f: F) -> R
    where
        F: FnOnce(&T) -> R,
    {
        // Panicking is outlined into `panic_access_error` to keep this path small.
        match self.try_with(f) {
            Ok(r) => r,
            Err(err) => panic_access_error(err),
        }
    }
    /// Acquires a reference to the value in this TLS key.
    ///
    /// This will lazily initialize the value if this thread has not referenced
    /// this key yet. If the key has been destroyed (which may happen if this is called
    /// in a destructor), this function will return an [`AccessError`].
    ///
    /// # Panics
    ///
    /// This function will still `panic!()` if the key is uninitialized and the
    /// key's initializer panics.
    ///
    /// # Examples
    ///
    /// ```
    /// thread_local! {
    ///     pub static STATIC: String = String::from("I am");
    /// }
    ///
    /// assert_eq!(
    ///     STATIC.try_with(|original_value| format!("{original_value} initialized")),
    ///     Ok(String::from("I am initialized")),
    /// );
    /// ```
    #[stable(feature = "thread_local_try_with", since = "1.26.0")]
    #[inline]
    pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError>
    where
        F: FnOnce(&T) -> R,
    {
        // SAFETY: `inner(None)` returns either null (slot destroyed; mapped to
        // `AccessError` here) or a pointer to the initialized slot. Per the
        // comments on the `inner` field, the pointee lives almost as long as
        // the current thread, so the reference stays valid for the duration
        // of `f`, which cannot outlive this call.
        let thread_local = unsafe { (self.inner)(None).as_ref().ok_or(AccessError)? };
        Ok(f(thread_local))
    }
    /// Acquires a reference to the value in this TLS key, initializing it with
    /// `init` if it wasn't already initialized on this thread.
    ///
    /// If `init` was used to initialize the thread local variable, `None` is
    /// passed as the first argument to `f`. If it was already initialized,
    /// `Some(init)` is passed to `f`.
    ///
    /// # Panics
    ///
    /// This function will panic if the key currently has its destructor
    /// running, and it **may** panic if the destructor has previously been run
    /// for this thread.
    fn initialize_with<F, R>(&'static self, init: T, f: F) -> R
    where
        F: FnOnce(Option<T>, &T) -> R,
    {
        let mut init = Some(init);
        // SAFETY: as in `try_with`. Additionally, passing `Some(&mut init)`
        // asks the thunk to consume `init` for first-time initialization,
        // leaving `init` as `None` in that case (so `f` can tell which
        // happened). A null return means the slot was destroyed.
        let reference = unsafe {
            match (self.inner)(Some(&mut init)).as_ref() {
                Some(r) => r,
                None => panic_access_error(AccessError),
            }
        };
        f(init, reference)
    }
}
impl<T: 'static> LocalKey<Cell<T>> {
    /// Sets or initializes the contained value.
    ///
    /// Unlike the other methods, this will *not* run the lazy initializer of
    /// the thread local: if the slot has never been touched on this thread,
    /// it is initialized directly from `value` instead.
    ///
    /// # Panics
    ///
    /// Panics if the key currently has its destructor running,
    /// and it **may** panic if the destructor has previously been run for this thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cell::Cell;
    ///
    /// thread_local! {
    ///     static X: Cell<i32> = panic!("!");
    /// }
    ///
    /// // Calling X.get() here would result in a panic.
    ///
    /// X.set(123); // But X.set() is fine, as it skips the initializer above.
    ///
    /// assert_eq!(X.get(), 123);
    /// ```
    #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
    pub fn set(&'static self, value: T) {
        self.initialize_with(Cell::new(value), |leftover, slot| {
            // `leftover` is `Some` iff the slot had already been initialized,
            // in which case the freshly built cell was not consumed; store its
            // contents over the existing value instead.
            if let Some(unused) = leftover {
                slot.set(unused.into_inner());
            }
        });
    }
    /// Returns a copy of the contained value.
    ///
    /// This will lazily initialize the value if this thread has not referenced
    /// this key yet.
    ///
    /// # Panics
    ///
    /// Panics if the key currently has its destructor running,
    /// and it **may** panic if the destructor has previously been run for this thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cell::Cell;
    ///
    /// thread_local! {
    ///     static X: Cell<i32> = const { Cell::new(1) };
    /// }
    ///
    /// assert_eq!(X.get(), 1);
    /// ```
    #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
    pub fn get(&'static self) -> T
    where
        T: Copy,
    {
        self.with(|slot| slot.get())
    }
    /// Takes the contained value, leaving `Default::default()` in its place.
    ///
    /// This will lazily initialize the value if this thread has not referenced
    /// this key yet.
    ///
    /// # Panics
    ///
    /// Panics if the key currently has its destructor running,
    /// and it **may** panic if the destructor has previously been run for this thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cell::Cell;
    ///
    /// thread_local! {
    ///     static X: Cell<Option<i32>> = const { Cell::new(Some(1)) };
    /// }
    ///
    /// assert_eq!(X.take(), Some(1));
    /// assert_eq!(X.take(), None);
    /// ```
    #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
    pub fn take(&'static self) -> T
    where
        T: Default,
    {
        self.with(|slot| slot.take())
    }
    /// Replaces the contained value, returning the old value.
    ///
    /// This will lazily initialize the value if this thread has not referenced
    /// this key yet.
    ///
    /// # Panics
    ///
    /// Panics if the key currently has its destructor running,
    /// and it **may** panic if the destructor has previously been run for this thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cell::Cell;
    ///
    /// thread_local! {
    ///     static X: Cell<i32> = const { Cell::new(1) };
    /// }
    ///
    /// assert_eq!(X.replace(2), 1);
    /// assert_eq!(X.replace(3), 2);
    /// ```
    #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
    #[rustc_confusables("swap")]
    pub fn replace(&'static self, value: T) -> T {
        self.with(move |slot| slot.replace(value))
    }
    /// Updates the contained value using a function.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(local_key_cell_update)]
    /// use std::cell::Cell;
    ///
    /// thread_local! {
    ///     static X: Cell<i32> = const { Cell::new(5) };
    /// }
    ///
    /// X.update(|x| x + 1);
    /// assert_eq!(X.get(), 6);
    /// ```
    #[unstable(feature = "local_key_cell_update", issue = "143989")]
    pub fn update(&'static self, f: impl FnOnce(T) -> T)
    where
        T: Copy,
    {
        self.with(move |slot| slot.update(f))
    }
}
impl<T: 'static> LocalKey<RefCell<T>> {
    /// Acquires a reference to the contained value.
    ///
    /// This will lazily initialize the value if this thread has not referenced
    /// this key yet.
    ///
    /// # Panics
    ///
    /// Panics if the value is currently mutably borrowed.
    ///
    /// Panics if the key currently has its destructor running,
    /// and it **may** panic if the destructor has previously been run for this thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cell::RefCell;
    ///
    /// thread_local! {
    ///     static X: RefCell<Vec<i32>> = RefCell::new(Vec::new());
    /// }
    ///
    /// X.with_borrow(|v| assert!(v.is_empty()));
    /// ```
    #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
    pub fn with_borrow<F, R>(&'static self, f: F) -> R
    where
        F: FnOnce(&T) -> R,
    {
        self.with(|slot| {
            // The borrow guard lives until `f` returns, then is released.
            let guard = slot.borrow();
            f(&guard)
        })
    }
    /// Acquires a mutable reference to the contained value.
    ///
    /// This will lazily initialize the value if this thread has not referenced
    /// this key yet.
    ///
    /// # Panics
    ///
    /// Panics if the value is currently borrowed.
    ///
    /// Panics if the key currently has its destructor running,
    /// and it **may** panic if the destructor has previously been run for this thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cell::RefCell;
    ///
    /// thread_local! {
    ///     static X: RefCell<Vec<i32>> = RefCell::new(Vec::new());
    /// }
    ///
    /// X.with_borrow_mut(|v| v.push(1));
    ///
    /// X.with_borrow(|v| assert_eq!(*v, vec![1]));
    /// ```
    #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
    pub fn with_borrow_mut<F, R>(&'static self, f: F) -> R
    where
        F: FnOnce(&mut T) -> R,
    {
        self.with(|slot| {
            // The mutable borrow guard lives until `f` returns.
            let mut guard = slot.borrow_mut();
            f(&mut guard)
        })
    }
    /// Sets or initializes the contained value.
    ///
    /// Unlike the other methods, this will *not* run the lazy initializer of
    /// the thread local: if the slot has never been touched on this thread,
    /// it is initialized directly from `value` instead.
    ///
    /// # Panics
    ///
    /// Panics if the value is currently borrowed.
    ///
    /// Panics if the key currently has its destructor running,
    /// and it **may** panic if the destructor has previously been run for this thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cell::RefCell;
    ///
    /// thread_local! {
    ///     static X: RefCell<Vec<i32>> = panic!("!");
    /// }
    ///
    /// // Calling X.with() here would result in a panic.
    ///
    /// X.set(vec![1, 2, 3]); // But X.set() is fine, as it skips the initializer above.
    ///
    /// X.with_borrow(|v| assert_eq!(*v, vec![1, 2, 3]));
    /// ```
    #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
    pub fn set(&'static self, value: T) {
        self.initialize_with(RefCell::new(value), |leftover, slot| {
            // `leftover` is `Some` iff the slot had already been initialized,
            // in which case the freshly built `RefCell` was not consumed;
            // write its contents over the existing value instead.
            if let Some(unused) = leftover {
                *slot.borrow_mut() = unused.into_inner();
            }
        });
    }
    /// Takes the contained value, leaving `Default::default()` in its place.
    ///
    /// This will lazily initialize the value if this thread has not referenced
    /// this key yet.
    ///
    /// # Panics
    ///
    /// Panics if the value is currently borrowed.
    ///
    /// Panics if the key currently has its destructor running,
    /// and it **may** panic if the destructor has previously been run for this thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cell::RefCell;
    ///
    /// thread_local! {
    ///     static X: RefCell<Vec<i32>> = RefCell::new(Vec::new());
    /// }
    ///
    /// X.with_borrow_mut(|v| v.push(1));
    ///
    /// let a = X.take();
    ///
    /// assert_eq!(a, vec![1]);
    ///
    /// X.with_borrow(|v| assert!(v.is_empty()));
    /// ```
    #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
    pub fn take(&'static self) -> T
    where
        T: Default,
    {
        self.with(|slot| slot.take())
    }
    /// Replaces the contained value, returning the old value.
    ///
    /// # Panics
    ///
    /// Panics if the value is currently borrowed.
    ///
    /// Panics if the key currently has its destructor running,
    /// and it **may** panic if the destructor has previously been run for this thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cell::RefCell;
    ///
    /// thread_local! {
    ///     static X: RefCell<Vec<i32>> = RefCell::new(Vec::new());
    /// }
    ///
    /// let prev = X.replace(vec![1, 2, 3]);
    /// assert!(prev.is_empty());
    ///
    /// X.with_borrow(|v| assert_eq!(*v, vec![1, 2, 3]));
    /// ```
    #[stable(feature = "local_key_cell_methods", since = "1.73.0")]
    #[rustc_confusables("swap")]
    pub fn replace(&'static self, value: T) -> T {
        self.with(move |slot| slot.replace(value))
    }
}

View File

@@ -19,16 +19,22 @@ cp_std path:
@cp {{ rust_src / path }} {{ "crates/std/src" / path }}
@sed -i -f patches.sed {{ "crates/std/src" / path }}
insert_target line path:
@sed -i '{{ line }}a \target_os = "survos",' {{ "crates/std/src" / path }}
update_std:
@# Not copied : sys/mod.rs, io.rs, error.rs, thread.rs
@# Not copied : sys/mod.rs, io.rs, error.rs, thread.rs, sync.rs
# @just cp_std "process.rs"
# @just cp_std "fs.rs"
@just cp_std "time.rs"
@just cp_std "hash/mod.rs"
@just cp_std "hash/random.rs"
@just cp_std "num/mod.rs"
@just cp_std "ffi/c_str.rs"
@just cp_std "ffi/mod.rs"
@just cp_std "ffi/os_str.rs"
@just cp_std "ffi/os_str/tests.rs"
@just cp_std "thread/local.rs"
@just cp_std "sys/exit.rs"
@just cp_std "sys/env_consts.rs"
@just cp_std "sys/configure_builtins.rs"
@@ -37,10 +43,18 @@ update_std:
@just cp_std "sys/sync/condvar/no_threads.rs"
@just cp_std "sys/sync/mutex/mod.rs"
@just cp_std "sys/sync/mutex/no_threads.rs"
@just cp_std "sys/sync/once/mod.rs"
@just cp_std "sys/sync/once/no_threads.rs"
@just cp_std "sys/sync/rwlock/mod.rs"
@just cp_std "sys/sync/rwlock/no_threads.rs"
@just cp_std "sys/thread/mod.rs"
@just cp_std "sys/thread/unsupported.rs"
@just cp_std "sys/thread_local/no_threads.rs"
@just cp_std "sys/thread_local/os.rs"
@just cp_std "sys/time/mod.rs"
@just cp_std "sys/time/unsupported.rs"
@just cp_std "sys/random/mod.rs"
@just cp_std "sys/random/unsupported.rs"
@just cp_std "sys/env/mod.rs"
@just cp_std "sys/env/common.rs"
@just cp_std "sys/env/unsupported.rs"
@@ -51,10 +65,14 @@ update_std:
@just cp_std "sys/fs/mod.rs"
@just cp_std "sys/fs/common.rs"
@just cp_std "sys/fs/unsupported.rs"
@just insert_target 108 "sys/random/mod.rs"
@just insert_target 125 "sys/random/mod.rs"
@# Copied but edited for the moment
# @just cp_std "sys/thread_local/mod.rs"
# @just cp_std "sys/alloc/mod.rs"
# @just cp_std "alloc.rs"
# @just cp_std "path.rs"
# @just cp_std "sys/path/unix.rs"
# @just cp_std "hash/mod.rs"
build_user_prog prog:
RUSTFLAGS="-C relocation-model=pic -C link-arg=-Tuser.ld -C link-arg=-pie" cargo b {{ cargo_flags }} --package {{ prog }}

View File

@@ -1,7 +1,9 @@
s|crate::bstr::ByteStr|alloc::bstr::ByteStr|g
s|crate::collections::TryReserveError|alloc::collections::TryReserveError|g
s|crate::sync::Arc|alloc::sync::Arc|g
/target_os = "xous",/a \ target_os = "survos",
s|crate::bstr::ByteStr|alloc_crate::bstr::ByteStr|g
s|crate::collections::TryReserveError|alloc_crate::collections::TryReserveError|g
s|crate::sync::Arc|alloc_crate::sync::Arc|g
s|alloc::ffi|alloc_crate::ffi|g
s|alloc::slice::Join|alloc_crate::slice::Join|g
# /target_os = "xous",/a \ target_os = "survos",
# Easily add further substitutions here:
# s|old_text|new_text|g