Add more from the std

This commit is contained in:
2026-03-20 09:47:32 +01:00
parent 3121c0b68b
commit 48a75485b6
297 changed files with 598 additions and 60308 deletions

View File

@@ -3,8 +3,8 @@ target = "riscv64.json"
[unstable]
json-target-spec = true
build-std = ["core", "compiler_builtins", "alloc"]
build-std-features = ["compiler-builtins-mem"]
# build-std = []
# build-std-features = ["compiler-builtins-mem"]
[target.riscv64]
rustflags = [

2
.gitignore vendored
View File

@@ -7,3 +7,5 @@
disk.img
**/*.mem
mnt
sysroot/lib/rustlib/riscv64

7
.gitmodules vendored
View File

@@ -1,6 +1,3 @@
[submodule "backtrace-rs"]
path = backtrace-rs
url = https://github.com/rust-lang/backtrace-rs.git
[submodule "crates/std/crates/backtrace-rs"]
path = crates/std/crates/backtrace-rs
[submodule "library/backtrace"]
path = library/backtrace
url = https://github.com/rust-lang/backtrace-rs.git

View File

@@ -1,7 +1,7 @@
[workspace]
resolver = "3"
members = ["crates/bytes-struct","crates/io", "crates/shared", "user/*"]
exclude = ["crates/std/crates/backtrace-rs"]
members = [ "user/*"]
exclude = ["library"]
[package]
name = "kernel-rust"

View File

@@ -1,12 +0,0 @@
[package]
name = "os-std-macros"
version = "0.1.0"
edition = "2024"
[lib]
proc-macro = true
[dependencies]
proc-macro2 = "1"
quote = "1"
syn = { version = "2", features = ["full"] }

View File

@@ -1 +0,0 @@

View File

@@ -1,14 +0,0 @@
[package]
name = "std"
version = "0.1.0"
edition = "2024"
[dependencies]
cfg-if = { version = "1.0" }
rustc-demangle = { version = "0.1.27" }
std_detect = { path = "crates/std_detect" }
panic_abort = { path = "crates/panic_abort" }
hashbrown = { version = "0.16.1", default-features = false, features = ["nightly", "rustc-internal-api"] }
os-std-macros = { path = "../os-std-macros" }
shared = { path = "../shared", features = ["user"] }
io_crate = { package = "io", path = "../io", features = ["alloc"] }

View File

@@ -1,19 +0,0 @@
[package]
name = "panic_abort"
version = "0.0.0"
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-lang/rust.git"
description = "Implementation of Rust panics via process aborts"
edition = "2024"
[lib]
test = false
bench = false
doc = false
[dependencies]
[target.'cfg(target_os = "android")'.dependencies]
libc = { version = "0.2", default-features = false }
[target.'cfg(any(target_os = "android", target_os = "zkvm"))'.dependencies]

View File

@@ -1,51 +0,0 @@
use alloc::string::String;
use core::mem::transmute;
use core::panic::PanicPayload;
use core::ptr::copy_nonoverlapping;
const ANDROID_SET_ABORT_MESSAGE: &[u8] = b"android_set_abort_message\0";
type SetAbortMessageType = unsafe extern "C" fn(*const libc::c_char) -> ();
// Forward the abort message to libc's android_set_abort_message. We try our best to populate the
// message but as this function may already be called as part of a failed allocation, it might not be
// possible to do so.
//
// Some methods of core are on purpose avoided (such as try_reserve) as these rely on the correct
// resolution of rust_eh_personality which is loosely defined in panic_abort.
//
// Weakly resolve the symbol for android_set_abort_message. This function is only available
// for API >= 21.
pub(crate) unsafe fn android_set_abort_message(payload: &mut dyn PanicPayload) {
let func_addr = unsafe {
libc::dlsym(libc::RTLD_DEFAULT, ANDROID_SET_ABORT_MESSAGE.as_ptr() as *const libc::c_char)
as usize
};
if func_addr == 0 {
return;
}
let payload = payload.get();
let msg = match payload.downcast_ref::<&'static str>() {
Some(msg) => msg.as_bytes(),
None => match payload.downcast_ref::<String>() {
Some(msg) => msg.as_bytes(),
None => &[],
},
};
if msg.is_empty() {
return;
}
// Allocate a new buffer to append the null byte.
let size = msg.len() + 1usize;
let buf = unsafe { libc::malloc(size) as *mut libc::c_char };
if buf.is_null() {
return; // allocation failure
}
unsafe {
copy_nonoverlapping(msg.as_ptr(), buf as *mut u8, msg.len());
buf.add(msg.len()).write(0);
let func = transmute::<usize, SetAbortMessageType>(func_addr);
func(buf);
}
}

View File

@@ -1,94 +0,0 @@
//! Implementation of Rust panics via process aborts
//!
//! When compared to the implementation via unwinding, this crate is *much*
//! simpler! That being said, it's not quite as versatile, but here goes!
#![no_std]
#![unstable(feature = "panic_abort", issue = "32837")]
#![doc(issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")]
#![panic_runtime]
#![feature(panic_runtime)]
#![feature(std_internals)]
#![feature(staged_api)]
#![feature(rustc_attrs)]
#![allow(internal_features)]
#[cfg(target_os = "android")]
mod android;
#[cfg(target_os = "zkvm")]
mod zkvm;
use core::any::Any;
use core::panic::PanicPayload;
#[rustc_std_internal_symbol]
#[allow(improper_ctypes_definitions)]
pub unsafe extern "C" fn __rust_panic_cleanup(_: *mut u8) -> *mut (dyn Any + Send + 'static) {
unreachable!()
}
// "Leak" the payload and shim to the relevant abort on the platform in question.
#[rustc_std_internal_symbol]
pub unsafe fn __rust_start_panic(_payload: &mut dyn PanicPayload) -> u32 {
// Android has the ability to attach a message as part of the abort.
#[cfg(target_os = "android")]
unsafe {
android::android_set_abort_message(_payload);
}
#[cfg(target_os = "zkvm")]
unsafe {
zkvm::zkvm_set_abort_message(_payload);
}
unsafe extern "Rust" {
// This is defined in std::rt.
#[rustc_std_internal_symbol]
safe fn __rust_abort() -> !;
}
__rust_abort()
}
// This... is a bit of an oddity. The tl;dr; is that this is required to link
// correctly, the longer explanation is below.
//
// Right now the binaries of core/std that we ship are all compiled with
// `-C panic=unwind`. This is done to ensure that the binaries are maximally
// compatible with as many situations as possible. The compiler, however,
// requires a "personality function" for all functions compiled with `-C
// panic=unwind`. This personality function is hardcoded to the symbol
// `rust_eh_personality` and is defined by the `eh_personality` lang item.
//
// So... why not just define that lang item here? Good question! The way that
// panic runtimes are linked in is actually a little subtle in that they're
// "sort of" in the compiler's crate store, but only actually linked if another
// isn't actually linked. This ends up meaning that both this crate and the
// panic_unwind crate can appear in the compiler's crate store, and if both
// define the `eh_personality` lang item then that'll hit an error.
//
// To handle this the compiler only requires the `eh_personality` is defined if
// the panic runtime being linked in is the unwinding runtime, and otherwise
// it's not required to be defined (rightfully so). In this case, however, this
// library just defines this symbol so there's at least some personality
// somewhere.
//
// Essentially this symbol is just defined to get wired up to core/std
// binaries, but it should never be called as we don't link in an unwinding
// runtime at all.
pub mod personalities {
// In the past this module used to contain stubs for the personality
// functions of various platforms, but these where removed when personality
// functions were moved to std.
// This corresponds to the `eh_catch_typeinfo` lang item
// that's only used on Emscripten currently.
//
// Since panics don't generate exceptions and foreign exceptions are
// currently UB with -C panic=abort (although this may be subject to
// change), any catch_unwind calls will never use this typeinfo.
#[rustc_std_internal_symbol]
#[allow(non_upper_case_globals)]
#[cfg(target_os = "emscripten")]
static rust_eh_catch_typeinfo: [usize; 2] = [0; 2];
}

View File

@@ -1,26 +0,0 @@
use alloc::string::String;
use core::panic::PanicPayload;
// Forward the abort message to zkVM's sys_panic. This is implemented by RISC Zero's
// platform crate which exposes system calls specifically for the zkVM.
pub(crate) unsafe fn zkvm_set_abort_message(payload: &mut dyn PanicPayload) {
let payload = payload.get();
let msg = match payload.downcast_ref::<&'static str>() {
Some(msg) => msg.as_bytes(),
None => match payload.downcast_ref::<String>() {
Some(msg) => msg.as_bytes(),
None => &[],
},
};
if msg.is_empty() {
return;
}
unsafe extern "C" {
fn sys_panic(msg_ptr: *const u8, len: usize) -> !;
}
unsafe {
sys_panic(msg.as_ptr(), msg.len());
}
}

View File

@@ -1,24 +0,0 @@
[package]
name = "std_detect"
version = "0.1.5"
authors = [
"Alex Crichton <alex@alexcrichton.com>",
"Andrew Gallant <jamslam@gmail.com>",
"Gonzalo Brito Gadeschi <gonzalobg88@gmail.com>",
]
description = "`std::detect` - Rust's standard library run-time CPU feature detection."
license = "MIT OR Apache-2.0"
edition = "2024"
[badges]
is-it-maintained-issue-resolution = { repository = "rust-lang/stdarch" }
is-it-maintained-open-issues = { repository = "rust-lang/stdarch" }
maintenance = { status = "experimental" }
[dependencies]
[target.'cfg(not(windows))'.dependencies]
libc = { version = "0.2.0", default-features = false }
[features]
std_detect_env_override = []

View File

@@ -1,69 +0,0 @@
`std::detect` - Rust's standard library run-time CPU feature detection
=======
The private `std::detect` module implements run-time feature detection in Rust's
standard library. This allows detecting whether the CPU the binary runs on
supports certain features, like SIMD instructions.
# Usage
`std::detect` APIs are available as part of `libstd`. Prefer using it via the
standard library than through this crate. Unstable features of `std::detect` are
available on nightly Rust behind various feature-gates.
If you need run-time feature detection in `#[no_std]` environments, the Rust
`core` library cannot help you. By design, Rust `core` is platform independent, but
performing run-time feature detection requires a certain level of cooperation
from the platform.
You can then manually include `std_detect` as a dependency to get run-time
feature detection support similar to that offered by Rust's standard
library. We intend to make `std_detect` more flexible and configurable in this
regard to better serve the needs of `#[no_std]` targets.
# Platform support
* All `x86`/`x86_64` targets are supported on all platforms by querying the
`cpuid` instruction directly for the features supported by the hardware and
the operating system. `std_detect` assumes that the binary is a user-space
application.
* Linux/Android:
* `arm{32, 64}`, `mips{32,64}{,el}`, `powerpc{32,64}{,le}`, `loongarch{32,64}`, `s390x`:
`std_detect` supports these on Linux by querying ELF auxiliary vectors (using `getauxval`
when available), and if that fails, by querying `/proc/self/auxv`.
* `arm64`: partial support for doing run-time feature detection by directly
querying `mrs` is implemented for Linux >= 4.11, but not enabled by default.
* `riscv{32,64}`:
`std_detect` supports these on Linux by querying `riscv_hwprobe`, and
by querying ELF auxiliary vectors (using `getauxval` when available).
* FreeBSD:
* `arm32`, `powerpc64`: `std_detect` supports these on FreeBSD by querying ELF
auxiliary vectors using `elf_aux_info`.
* `arm64`: run-time feature detection is implemented by directly querying `mrs`.
* OpenBSD:
* `powerpc64`: `std_detect` supports these on OpenBSD by querying ELF auxiliary
vectors using `elf_aux_info`.
* `arm64`: run-time feature detection is implemented by querying `sysctl`.
* Windows:
* `arm64`: run-time feature detection is implemented by querying `IsProcessorFeaturePresent`.
# License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
http://opensource.org/licenses/MIT)
at your option.
# Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in `std_detect` by you, as defined in the Apache-2.0 license,
shall be dual licensed as above, without any additional terms or conditions.

View File

@@ -1,264 +0,0 @@
//! Aarch64 run-time features.
features! {
@TARGET: aarch64;
@CFG: any(target_arch = "aarch64", target_arch = "arm64ec");
@MACRO_NAME: is_aarch64_feature_detected;
@MACRO_ATTRS:
/// Check for the presence of a CPU feature at runtime.
///
/// When the feature is known to be enabled at compile time (e.g. via `-Ctarget-feature`)
/// the macro expands to `true`.
///
/// This macro takes one argument which is a string literal of the feature being tested for.
/// The feature names are mostly taken from their FEAT_* definitions in the [ARM Architecture
/// Reference Manual][docs].
///
/// Currently most features are only supported on linux-based platforms: on other platforms the
/// runtime check will always return `false`.
///
/// ## Supported arguments
///
/// * `"aes"` - FEAT_AES & FEAT_PMULL
/// * `"asimd"` or "neon" - FEAT_AdvSIMD
/// * `"bf16"` - FEAT_BF16
/// * `"bti"` - FEAT_BTI
/// * `"crc"` - FEAT_CRC
/// * `"cssc"` - FEAT_CSSC
/// * `"dit"` - FEAT_DIT
/// * `"dotprod"` - FEAT_DotProd
/// * `"dpb"` - FEAT_DPB
/// * `"dpb2"` - FEAT_DPB2
/// * `"ecv"` - FEAT_ECV
/// * `"f32mm"` - FEAT_F32MM
/// * `"f64mm"` - FEAT_F64MM
/// * `"faminmax"` - FEAT_FAMINMAX
/// * `"fcma"` - FEAT_FCMA
/// * `"fhm"` - FEAT_FHM
/// * `"flagm"` - FEAT_FLAGM
/// * `"flagm2"` - FEAT_FLAGM2
/// * `"fp"` - FEAT_FP
/// * `"fp16"` - FEAT_FP16
/// * `"fp8"` - FEAT_FP8
/// * `"fp8dot2"` - FEAT_FP8DOT2
/// * `"fp8dot4"` - FEAT_FP8DOT4
/// * `"fp8fma"` - FEAT_FP8FMA
/// * `"fpmr"` - FEAT_FPMR
/// * `"frintts"` - FEAT_FRINTTS
/// * `"hbc"` - FEAT_HBC
/// * `"i8mm"` - FEAT_I8MM
/// * `"jsconv"` - FEAT_JSCVT
/// * `"lse"` - FEAT_LSE
/// * `"lse128"` - FEAT_LSE128
/// * `"lse2"` - FEAT_LSE2
/// * `"lut"` - FEAT_LUT
/// * `"mops"` - FEAT_MOPS
/// * `"mte"` - FEAT_MTE & FEAT_MTE2
/// * `"paca"` - FEAT_PAuth (address authentication)
/// * `"pacg"` - FEAT_Pauth (generic authentication)
/// * `"pauth-lr"` - FEAT_PAuth_LR
/// * `"pmull"` - FEAT_PMULL
/// * `"rand"` - FEAT_RNG
/// * `"rcpc"` - FEAT_LRCPC
/// * `"rcpc2"` - FEAT_LRCPC2
/// * `"rcpc3"` - FEAT_LRCPC3
/// * `"rdm"` - FEAT_RDM
/// * `"sb"` - FEAT_SB
/// * `"sha2"` - FEAT_SHA1 & FEAT_SHA256
/// * `"sha3"` - FEAT_SHA512 & FEAT_SHA3
/// * `"sm4"` - FEAT_SM3 & FEAT_SM4
/// * `"sme"` - FEAT_SME
/// * `"sme-b16b16"` - FEAT_SME_B16B16
/// * `"sme-f16f16"` - FEAT_SME_F16F16
/// * `"sme-f64f64"` - FEAT_SME_F64F64
/// * `"sme-f8f16"` - FEAT_SME_F8F16
/// * `"sme-f8f32"` - FEAT_SME_F8F32
/// * `"sme-fa64"` - FEAT_SME_FA64
/// * `"sme-i16i64"` - FEAT_SME_I16I64
/// * `"sme-lutv2"` - FEAT_SME_LUTv2
/// * `"sme2"` - FEAT_SME2
/// * `"sme2p1"` - FEAT_SME2p1
/// * `"ssbs"` - FEAT_SSBS & FEAT_SSBS2
/// * `"ssve-fp8dot2"` - FEAT_SSVE_FP8DOT2
/// * `"ssve-fp8dot4"` - FEAT_SSVE_FP8DOT4
/// * `"ssve-fp8fma"` - FEAT_SSVE_FP8FMA
/// * `"sve"` - FEAT_SVE
/// * `"sve-b16b16"` - FEAT_SVE_B16B16 (SVE or SME Z-targeting instructions)
/// * `"sve2"` - FEAT_SVE2
/// * `"sve2-aes"` - FEAT_SVE_AES & FEAT_SVE_PMULL128 (SVE2 AES crypto)
/// * `"sve2-bitperm"` - FEAT_SVE2_BitPerm
/// * `"sve2-sha3"` - FEAT_SVE2_SHA3
/// * `"sve2-sm4"` - FEAT_SVE2_SM4
/// * `"sve2p1"` - FEAT_SVE2p1
/// * `"tme"` - FEAT_TME
/// * `"wfxt"` - FEAT_WFxT
///
/// [docs]: https://developer.arm.com/documentation/ddi0487/latest
#[stable(feature = "simd_aarch64", since = "1.60.0")]
@BIND_FEATURE_NAME: "asimd"; "neon";
@NO_RUNTIME_DETECTION: "ras";
@NO_RUNTIME_DETECTION: "v8.1a";
@NO_RUNTIME_DETECTION: "v8.2a";
@NO_RUNTIME_DETECTION: "v8.3a";
@NO_RUNTIME_DETECTION: "v8.4a";
@NO_RUNTIME_DETECTION: "v8.5a";
@NO_RUNTIME_DETECTION: "v8.6a";
@NO_RUNTIME_DETECTION: "v8.7a";
@NO_RUNTIME_DETECTION: "v8.8a";
@NO_RUNTIME_DETECTION: "v8.9a";
@NO_RUNTIME_DETECTION: "v9.1a";
@NO_RUNTIME_DETECTION: "v9.2a";
@NO_RUNTIME_DETECTION: "v9.3a";
@NO_RUNTIME_DETECTION: "v9.4a";
@NO_RUNTIME_DETECTION: "v9.5a";
@NO_RUNTIME_DETECTION: "v9a";
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] asimd: "neon";
/// FEAT_AdvSIMD (Advanced SIMD/NEON)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] pmull: "pmull";
implied by target_features: ["aes"];
/// FEAT_PMULL (Polynomial Multiply) - Implied by `aes` target_feature
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] fp: "fp";
implied by target_features: ["neon"];
/// FEAT_FP (Floating point support) - Implied by `neon` target_feature
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] aes: "aes";
/// FEAT_AES (AES SIMD instructions) & FEAT_PMULL (PMULL{2}, 64-bit operand variants)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] bf16: "bf16";
/// FEAT_BF16 (BFloat16 type, plus MM instructions, plus ASIMD support)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] bti: "bti";
/// FEAT_BTI (Branch Target Identification)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] crc: "crc";
/// FEAT_CRC32 (Cyclic Redundancy Check)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] cssc: "cssc";
/// FEAT_CSSC (Common Short Sequence Compression instructions)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] dit: "dit";
/// FEAT_DIT (Data Independent Timing instructions)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] dpb: "dpb";
/// FEAT_DPB (aka dcpop - data cache clean to point of persistence)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] dpb2: "dpb2";
/// FEAT_DPB2 (aka dcpodp - data cache clean to point of deep persistence)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] dotprod: "dotprod";
/// FEAT_DotProd (Vector Dot-Product - ASIMDDP)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] ecv: "ecv";
/// FEAT_ECV (Enhanced Counter Virtualization)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] f32mm: "f32mm";
/// FEAT_F32MM (single-precision matrix multiplication)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] f64mm: "f64mm";
/// FEAT_F64MM (double-precision matrix multiplication)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] faminmax: "faminmax";
/// FEAT_FAMINMAX (FAMIN and FAMAX SIMD/SVE/SME instructions)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] fcma: "fcma";
/// FEAT_FCMA (float complex number operations)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] fhm: "fhm";
/// FEAT_FHM (fp16 multiplication instructions)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] flagm: "flagm";
/// FEAT_FLAGM (flag manipulation instructions)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] flagm2: "flagm2";
/// FEAT_FLAGM2 (flag manipulation instructions)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] fp16: "fp16";
/// FEAT_FP16 (Half-float support)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] fp8: "fp8";
/// FEAT_FP8 (F8CVT Instructions)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] fp8dot2: "fp8dot2";
/// FEAT_FP8DOT2 (F8DP2 Instructions)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] fp8dot4: "fp8dot4";
/// FEAT_FP8DOT4 (F8DP4 Instructions)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] fp8fma: "fp8fma";
/// FEAT_FP8FMA (F8FMA Instructions)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] fpmr: "fpmr";
without cfg check: true;
/// FEAT_FPMR (Special-purpose AArch64-FPMR register)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] frintts: "frintts";
/// FEAT_FRINTTS (float to integer rounding instructions)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] hbc: "hbc";
/// FEAT_HBC (Hinted conditional branches)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] i8mm: "i8mm";
/// FEAT_I8MM (integer matrix multiplication, plus ASIMD support)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] jsconv: "jsconv";
/// FEAT_JSCVT (JavaScript float conversion instructions)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] lse: "lse";
/// FEAT_LSE (Large System Extension - atomics)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] lse128: "lse128";
/// FEAT_LSE128 (128-bit atomics)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] lse2: "lse2";
/// FEAT_LSE2 (unaligned and register-pair atomics)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] lut: "lut";
/// FEAT_LUT (Lookup Table Instructions)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] mops: "mops";
/// FEAT_MOPS (Standardization of memory operations)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] mte: "mte";
/// FEAT_MTE & FEAT_MTE2 (Memory Tagging Extension)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] paca: "paca";
/// FEAT_PAuth (address authentication)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] pacg: "pacg";
/// FEAT_PAuth (generic authentication)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] pauth_lr: "pauth-lr";
/// FEAT_PAuth_LR
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] rand: "rand";
/// FEAT_RNG (Random Number Generator)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] rcpc: "rcpc";
/// FEAT_LRCPC (Release consistent Processor consistent)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] rcpc2: "rcpc2";
/// FEAT_LRCPC2 (RCPC with immediate offsets)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] rcpc3: "rcpc3";
/// FEAT_LRCPC3 (RCPC Instructions v3)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] rdm: "rdm";
/// FEAT_RDM (Rounding Doubling Multiply - ASIMDRDM)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] sb: "sb";
/// FEAT_SB (speculation barrier)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] sha2: "sha2";
/// FEAT_SHA1 & FEAT_SHA256 (SHA1 & SHA2-256 instructions)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] sha3: "sha3";
/// FEAT_SHA512 & FEAT_SHA3 (SHA2-512 & SHA3 instructions)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] sm4: "sm4";
/// FEAT_SM3 & FEAT_SM4 (SM3 & SM4 instructions)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] sme: "sme";
/// FEAT_SME (Scalable Matrix Extension)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] sme2: "sme2";
/// FEAT_SME2 (SME Version 2)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] sme2p1: "sme2p1";
/// FEAT_SME2p1 (SME Version 2.1)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] sme_b16b16: "sme-b16b16";
/// FEAT_SME_B16B16
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] sme_f16f16: "sme-f16f16";
/// FEAT_SME_F16F16 (Non-widening half-precision FP16 to FP16 arithmetic for SME2)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] sme_f64f64: "sme-f64f64";
/// FEAT_SME_F64F64 (Double-precision floating-point outer product instructions)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] sme_f8f16: "sme-f8f16";
/// FEAT_SME_F8F16
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] sme_f8f32: "sme-f8f32";
/// FEAT_SME_F8F32
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] sme_fa64: "sme-fa64";
/// FEAT_SME_FA64 (Full A64 instruction set support in Streaming SVE mode)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] sme_i16i64: "sme-i16i64";
/// FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] sme_lutv2: "sme-lutv2";
/// FEAT_SME_LUTv2 (LUTI4 Instruction)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] ssbs: "ssbs";
/// FEAT_SSBS & FEAT_SSBS2 (speculative store bypass safe)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] ssve_fp8dot2: "ssve-fp8dot2";
/// FEAT_SSVE_FP8DOT2
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] ssve_fp8dot4: "ssve-fp8dot4";
/// FEAT_SSVE_FP8DOT4
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] ssve_fp8fma: "ssve-fp8fma";
/// FEAT_SSVE_FP8FMA
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] sve: "sve";
/// FEAT_SVE (Scalable Vector Extension)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] sve2: "sve2";
/// FEAT_SVE2 (Scalable Vector Extension 2)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] sve2p1: "sve2p1";
/// FEAT_SVE2p1 (Scalable Vector Extension 2.1)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] sve2_aes: "sve2-aes";
/// FEAT_SVE_AES & FEAT_SVE_PMULL128 (SVE2 AES crypto)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] sve_b16b16: "sve-b16b16";
/// FEAT_SVE_B16B16 (SVE or SME Z-targeting instructions)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] sve2_bitperm: "sve2-bitperm";
/// FEAT_SVE_BitPerm (SVE2 bit permutation instructions)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] sve2_sha3: "sve2-sha3";
/// FEAT_SVE_SHA3 (SVE2 SHA3 crypto)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] sve2_sm4: "sve2-sm4";
/// FEAT_SVE_SM4 (SVE2 SM4 crypto)
@FEATURE: #[stable(feature = "simd_aarch64", since = "1.60.0")] tme: "tme";
/// FEAT_TME (Transactional Memory Extensions)
@FEATURE: #[unstable(feature = "stdarch_aarch64_feature_detection", issue = "127764")] wfxt: "wfxt";
/// FEAT_WFxT (WFET and WFIT Instructions)
}

View File

@@ -1,32 +0,0 @@
//! Run-time feature detection on ARM Aarch32.
features! {
@TARGET: arm;
@CFG: target_arch = "arm";
@MACRO_NAME: is_arm_feature_detected;
@MACRO_ATTRS:
/// Check for the presence of a CPU feature at runtime.
///
/// When the feature is known to be enabled at compile time (e.g. via `-Ctarget-feature`)
/// the macro expands to `true`.
#[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")]
@NO_RUNTIME_DETECTION: "v7";
@NO_RUNTIME_DETECTION: "vfp2";
@NO_RUNTIME_DETECTION: "vfp3";
@NO_RUNTIME_DETECTION: "vfp4";
@FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] neon: "neon";
/// ARM Advanced SIMD (NEON) - Aarch32
@FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] pmull: "pmull";
without cfg check: true;
/// Polynomial Multiply
@FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] crc: "crc";
/// CRC32 (Cyclic Redundancy Check)
@FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] aes: "aes";
/// FEAT_AES (AES instructions)
@FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] sha2: "sha2";
/// FEAT_SHA1 & FEAT_SHA256 (SHA1 & SHA2-256 instructions)
@FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] i8mm: "i8mm";
/// FEAT_I8MM (integer matrix multiplication, plus ASIMD support)
@FEATURE: #[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")] dotprod: "dotprod";
/// FEAT_DotProd (Vector Dot-Product - ASIMDDP)
}

View File

@@ -1,58 +0,0 @@
//! Run-time feature detection on LoongArch.
features! {
@TARGET: loongarch;
@CFG: any(target_arch = "loongarch32", target_arch = "loongarch64");
@MACRO_NAME: is_loongarch_feature_detected;
@MACRO_ATTRS:
/// Check for the presence of a CPU feature at runtime.
///
/// When the feature is known to be enabled at compile time (e.g. via `-Ctarget-feature`)
/// the macro expands to `true`.
///
/// Supported arguments are:
///
/// * `"32s"`
/// * `"f"`
/// * `"d"`
/// * `"frecipe"`
/// * `"div32"`
/// * `"lsx"`
/// * `"lasx"`
/// * `"lam-bh"`
/// * `"lamcas"`
/// * `"ld-seq-sa"`
/// * `"scq"`
/// * `"lbt"`
/// * `"lvz"`
/// * `"ual"`
#[stable(feature = "stdarch_loongarch_feature", since = "1.89.0")]
@FEATURE: #[unstable(feature = "stdarch_loongarch_feature_detection", issue = "117425")] _32s: "32s";
/// 32S
@FEATURE: #[stable(feature = "stdarch_loongarch_feature", since = "1.89.0")] f: "f";
/// F
@FEATURE: #[stable(feature = "stdarch_loongarch_feature", since = "1.89.0")] d: "d";
/// D
@FEATURE: #[stable(feature = "stdarch_loongarch_feature", since = "1.89.0")] frecipe: "frecipe";
/// Frecipe
@FEATURE: #[unstable(feature = "stdarch_loongarch_feature_detection", issue = "117425")] div32: "div32";
/// Div32
@FEATURE: #[stable(feature = "stdarch_loongarch_feature", since = "1.89.0")] lsx: "lsx";
/// LSX
@FEATURE: #[stable(feature = "stdarch_loongarch_feature", since = "1.89.0")] lasx: "lasx";
/// LASX
@FEATURE: #[unstable(feature = "stdarch_loongarch_feature_detection", issue = "117425")] lam_bh: "lam-bh";
/// LAM-BH
@FEATURE: #[unstable(feature = "stdarch_loongarch_feature_detection", issue = "117425")] lamcas: "lamcas";
/// LAM-CAS
@FEATURE: #[unstable(feature = "stdarch_loongarch_feature_detection", issue = "117425")] ld_seq_sa: "ld-seq-sa";
/// LD-SEQ-SA
@FEATURE: #[unstable(feature = "stdarch_loongarch_feature_detection", issue = "117425")] scq: "scq";
/// SCQ
@FEATURE: #[stable(feature = "stdarch_loongarch_feature", since = "1.89.0")] lbt: "lbt";
/// LBT
@FEATURE: #[stable(feature = "stdarch_loongarch_feature", since = "1.89.0")] lvz: "lvz";
/// LVZ
@FEATURE: #[unstable(feature = "stdarch_loongarch_feature_detection", issue = "117425")] ual: "ual";
/// UAL
}

View File

@@ -1,15 +0,0 @@
//! Run-time feature detection on MIPS.
features! {
@TARGET: mips;
@CFG: target_arch = "mips";
@MACRO_NAME: is_mips_feature_detected;
@MACRO_ATTRS:
/// Check for the presence of a CPU feature at runtime.
///
/// When the feature is known to be enabled at compile time (e.g. via `-Ctarget-feature`)
/// the macro expands to `true`.
#[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")]
@FEATURE: #[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")] msa: "msa";
/// MIPS SIMD Architecture (MSA)
}

View File

@@ -1,15 +0,0 @@
//! Run-time feature detection on MIPS64.
features! {
@TARGET: mips64;
@CFG: target_arch = "mips64";
@MACRO_NAME: is_mips64_feature_detected;
@MACRO_ATTRS:
/// Check for the presence of a CPU feature at runtime.
///
/// When the feature is known to be enabled at compile time (e.g. via `-Ctarget-feature`)
/// the macro expands to `true`.
#[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")]
@FEATURE: #[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")] msa: "msa";
/// MIPS SIMD Architecture (MSA)
}

View File

@@ -1,83 +0,0 @@
#![allow(dead_code)]
// Export the macros for all supported architectures.
//
// Every architecture module is declared unconditionally — `#[macro_use]`
// requires the module to exist so its macros are exported — but only the
// module matching the current target is re-exported by the `cfg_select!`
// below; the module-wide `dead_code` allowance above silences warnings for
// the unused ones.
#[macro_use]
mod x86;
#[macro_use]
mod arm;
#[macro_use]
mod aarch64;
#[macro_use]
mod riscv;
#[macro_use]
mod powerpc;
#[macro_use]
mod powerpc64;
#[macro_use]
mod mips;
#[macro_use]
mod mips64;
#[macro_use]
mod loongarch;
#[macro_use]
mod s390x;
// Re-export the feature-detection items for the current target architecture
// only, under the stability attribute appropriate to that architecture.
cfg_select! {
any(target_arch = "x86", target_arch = "x86_64") => {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub use x86::*;
}
target_arch = "arm" => {
#[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")]
pub use arm::*;
}
any(target_arch = "aarch64", target_arch = "arm64ec") => {
#[stable(feature = "simd_aarch64", since = "1.60.0")]
pub use aarch64::*;
}
any(target_arch = "riscv32", target_arch = "riscv64") => {
#[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")]
pub use riscv::*;
}
target_arch = "powerpc" => {
#[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")]
pub use powerpc::*;
}
target_arch = "powerpc64" => {
#[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")]
pub use powerpc64::*;
}
target_arch = "mips" => {
#[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")]
pub use mips::*;
}
target_arch = "mips64" => {
#[unstable(feature = "stdarch_mips_feature_detection", issue = "111188")]
pub use mips64::*;
}
any(target_arch = "loongarch32", target_arch = "loongarch64") => {
#[stable(feature = "stdarch_loongarch_feature", since = "1.89.0")]
pub use loongarch::*;
}
target_arch = "s390x" => {
#[stable(feature = "stdarch_s390x_feature_detection", since = "1.93.0")]
pub use s390x::*;
}
_ => {
// Unimplemented architecture: provide a stub `Feature` type so the
// rest of the crate still compiles.
#[doc(hidden)]
pub(crate) enum Feature {
Null
}
#[doc(hidden)]
#[unstable(feature = "stdarch_internal", issue = "none")]
pub mod __is_feature_detected {}
impl Feature {
#[doc(hidden)]
// `from_str` always fails here, so run-time detection reports every
// feature as absent on unimplemented architectures.
pub(crate) fn from_str(_s: &str) -> Result<Feature, ()> { Err(()) }
#[doc(hidden)]
pub(crate) fn to_str(self) -> &'static str { "" }
}
}
}

View File

@@ -1,33 +0,0 @@
//! Run-time feature detection on PowerPC.
// DSL consumed by the `features!` macro (defined elsewhere in this crate):
// - @TARGET / @CFG: the architecture name and the cfg predicate gating this
//   code;
// - @MACRO_NAME / @MACRO_ATTRS: name and doc attributes of the generated
//   public macro (`is_powerpc_feature_detected!`);
// - @FEATURE: one entry per detectable feature, mapping a Rust identifier
//   (e.g. `power8_vector`) to its target-feature string ("power8-vector");
//   the `///` line after each entry becomes that feature's documentation.
// - `without cfg check: true;` — NOTE(review): presumably disables the
//   compile-time `cfg(target_feature)` shortcut for names rustc does not
//   recognize; confirm against the `features!` macro definition.
features! {
@TARGET: powerpc;
@CFG: target_arch = "powerpc";
@MACRO_NAME: is_powerpc_feature_detected;
@MACRO_ATTRS:
/// Check for the presence of a CPU feature at runtime.
///
/// When the feature is known to be enabled at compile time (e.g. via `-Ctarget-feature`)
/// the macro expands to `true`.
#[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")]
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] altivec: "altivec";
/// Altivec
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] vsx: "vsx";
/// VSX
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power8: "power8";
without cfg check: true;
/// Power8
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power8_altivec: "power8-altivec";
/// Power8 altivec
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power8_vector: "power8-vector";
/// Power8 vector
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power8_crypto: "power8-crypto";
/// Power8 crypto
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power9: "power9";
without cfg check: true;
/// Power9
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power9_altivec: "power9-altivec";
/// Power9 altivec
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power9_vector: "power9-vector";
/// Power9 vector
}

View File

@@ -1,33 +0,0 @@
//! Run-time feature detection on PowerPC64.
// DSL consumed by the `features!` macro (defined elsewhere in this crate):
// - @TARGET / @CFG: the architecture name and the cfg predicate gating this
//   code;
// - @MACRO_NAME / @MACRO_ATTRS: name and doc attributes of the generated
//   public macro (`is_powerpc64_feature_detected!`);
// - @FEATURE: one entry per detectable feature, mapping a Rust identifier
//   (e.g. `power8_vector`) to its target-feature string ("power8-vector");
//   the `///` line after each entry becomes that feature's documentation.
// - `without cfg check: true;` — NOTE(review): presumably disables the
//   compile-time `cfg(target_feature)` shortcut for names rustc does not
//   recognize; confirm against the `features!` macro definition.
features! {
@TARGET: powerpc64;
@CFG: target_arch = "powerpc64";
@MACRO_NAME: is_powerpc64_feature_detected;
@MACRO_ATTRS:
/// Check for the presence of a CPU feature at runtime.
///
/// When the feature is known to be enabled at compile time (e.g. via `-Ctarget-feature`)
/// the macro expands to `true`.
#[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")]
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] altivec: "altivec";
/// Altivec
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] vsx: "vsx";
/// VSX
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power8: "power8";
without cfg check: true;
/// Power8
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power8_altivec: "power8-altivec";
/// Power8 altivec
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power8_vector: "power8-vector";
/// Power8 vector
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power8_crypto: "power8-crypto";
/// Power8 crypto
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power9: "power9";
without cfg check: true;
/// Power9
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power9_altivec: "power9-altivec";
/// Power9 altivec
@FEATURE: #[unstable(feature = "stdarch_powerpc_feature_detection", issue = "111191")] power9_vector: "power9-vector";
/// Power9 vector
}

View File

@@ -1,380 +0,0 @@
//! Run-time feature detection on RISC-V.
// DSL consumed by the `features!` macro (defined elsewhere in this crate):
// - @TARGET / @CFG: the architecture name and the cfg predicate gating this
//   code;
// - @MACRO_NAME / @MACRO_ATTRS: name and doc attributes of the generated
//   public macro (`is_riscv_feature_detected!`);
// - @FEATURE: one entry per detectable feature, mapping a Rust identifier
//   (e.g. `zbb`) to its target-feature string ("zbb"); the `///` line(s)
//   after each entry become that feature's documentation.
// - `without cfg check: true;` — NOTE(review): presumably disables the
//   compile-time `cfg(target_feature)` shortcut for names rustc does not
//   recognize; confirm against the `features!` macro definition.
features! {
@TARGET: riscv;
@CFG: any(target_arch = "riscv32", target_arch = "riscv64");
@MACRO_NAME: is_riscv_feature_detected;
@MACRO_ATTRS:
/// Check for the presence of a CPU feature at runtime.
///
/// When the feature is known to be enabled at compile time (e.g. via `-Ctarget-feature`)
/// the macro expands to `true`.
///
/// RISC-V standard defined the base sets and the extension sets.
/// The base sets are RV32I, RV64I, RV32E or RV128I. Any RISC-V platform
/// must support one base set and/or multiple extension sets.
///
/// Any RISC-V standard instruction sets can be in state of either ratified,
/// frozen or draft. The version and status of current standard instruction
/// sets can be checked out from preface section of the [ISA manual].
///
/// Platform may define and support their own custom instruction sets with
/// ISA prefix X. These sets are highly platform specific and should be
/// detected with their own platform support crates.
///
/// [ISA manual]: https://riscv.org/specifications/ratified/
///
/// # Platform-specific/agnostic Behavior and Availability
///
/// Runtime detection depends on the platform-specific feature detection
/// facility and its availability per feature is
/// highly platform/version-specific.
///
/// Still, a best-effort attempt is performed to enable subset/dependent
/// features if a superset feature is enabled regardless of the platform.
/// For instance, if the A extension (`"a"`) is enabled, its subsets (the
/// Zalrsc and Zaamo extensions; `"zalrsc"` and `"zaamo"`) are also enabled.
/// Likewise, if the F extension (`"f"`) is enabled, one of its dependencies
/// (the Zicsr extension `"zicsr"`) is also enabled.
///
/// # Unprivileged Specification
///
/// The supported ratified RISC-V instruction sets are as follows (OS
/// columns denote runtime feature detection support with or without the
/// minimum supported version):
///
/// | Literal | Base | Linux |
/// |:---------- |:------- |:---------- |
/// | `"rv32e"` | RV32E | No |
/// | `"rv32i"` | RV32I | Yes [^ima] |
/// | `"rv64i"` | RV64I | Yes [^ima] |
///
/// | Literal | Extension | Linux |
/// |:--------------- |:----------- |:------------------- |
/// | `"a"` | A | Yes [^ima] |
/// | `"b"` | B | 6.5 |
/// | `"c"` | C | Yes |
/// | `"d"` | D | Yes |
/// | `"f"` | F | Yes |
/// | `"m"` | M | Yes [^ima] |
/// | `"q"` | Q | No |
/// | `"v"` | V | 6.5 |
/// | `"zaamo"` | Zaamo | 6.15 [^ima] [^dep] |
/// | `"zabha"` | Zabha | 6.16 |
/// | `"zacas"` | Zacas | 6.8 |
/// | `"zalrsc"` | Zalrsc | 6.15 [^ima] [^dep] |
/// | `"zawrs"` | Zawrs | 6.11 |
/// | `"zba"` | Zba | 6.5 |
/// | `"zbb"` | Zbb | 6.5 |
/// | `"zbc"` | Zbc | 6.8 |
/// | `"zbkb"` | Zbkb | 6.8 |
/// | `"zbkc"` | Zbkc | 6.8 |
/// | `"zbkx"` | Zbkx | 6.8 |
/// | `"zbs"` | Zbs | 6.5 |
/// | `"zca"` | Zca | 6.11 [^dep] |
/// | `"zcb"` | Zcb | 6.11 |
/// | `"zcd"` | Zcd | 6.11 [^dep] |
/// | `"zcf"` | Zcf | 6.11 [^dep] |
/// | `"zcmop"` | Zcmop | 6.11 |
/// | `"zdinx"` | Zdinx | No |
/// | `"zfa"` | Zfa | 6.8 |
/// | `"zfbfmin"` | Zfbfmin | 6.15 |
/// | `"zfh"` | Zfh | 6.8 |
/// | `"zfhmin"` | Zfhmin | 6.8 |
/// | `"zfinx"` | Zfinx | No |
/// | `"zhinx"` | Zhinx | No |
/// | `"zhinxmin"` | Zhinxmin | No |
/// | `"zicbom"` | Zicbom | 6.15 |
/// | `"zicboz"` | Zicboz | 6.7 |
/// | `"zicntr"` | Zicntr | 6.15 [^ima] [^cntr] |
/// | `"zicond"` | Zicond | 6.8 |
/// | `"zicsr"` | Zicsr | No [^ima] [^dep] |
/// | `"zifencei"` | Zifencei | No [^ima] |
/// | `"zihintntl"` | Zihintntl | 6.8 |
/// | `"zihintpause"` | Zihintpause | 6.10 |
/// | `"zihpm"` | Zihpm | 6.15 [^cntr] |
/// | `"zimop"` | Zimop | 6.11 |
/// | `"zk"` | Zk | No [^zkr] |
/// | `"zkn"` | Zkn | 6.8 |
/// | `"zknd"` | Zknd | 6.8 |
/// | `"zkne"` | Zkne | 6.8 |
/// | `"zknh"` | Zknh | 6.8 |
/// | `"zkr"` | Zkr | No [^zkr] |
/// | `"zks"` | Zks | 6.8 |
/// | `"zksed"` | Zksed | 6.8 |
/// | `"zksh"` | Zksh | 6.8 |
/// | `"zkt"` | Zkt | 6.8 |
/// | `"ztso"` | Ztso | 6.8 |
/// | `"zvbb"` | Zvbb | 6.8 |
/// | `"zvbc"` | Zvbc | 6.8 |
/// | `"zve32f"` | Zve32f | 6.11 [^dep] |
/// | `"zve32x"` | Zve32x | 6.11 [^dep] |
/// | `"zve64d"` | Zve64d | 6.11 [^dep] |
/// | `"zve64f"` | Zve64f | 6.11 [^dep] |
/// | `"zve64x"` | Zve64x | 6.11 [^dep] |
/// | `"zvfbfmin"` | Zvfbfmin | 6.15 |
/// | `"zvfbfwma"` | Zvfbfwma | 6.15 |
/// | `"zvfh"` | Zvfh | 6.8 |
/// | `"zvfhmin"` | Zvfhmin | 6.8 |
/// | `"zvkb"` | Zvkb | 6.8 |
/// | `"zvkg"` | Zvkg | 6.8 |
/// | `"zvkn"` | Zvkn | 6.8 |
/// | `"zvknc"` | Zvknc | 6.8 |
/// | `"zvkned"` | Zvkned | 6.8 |
/// | `"zvkng"` | Zvkng | 6.8 |
/// | `"zvknha"` | Zvknha | 6.8 |
/// | `"zvknhb"` | Zvknhb | 6.8 |
/// | `"zvks"` | Zvks | 6.8 |
/// | `"zvksc"` | Zvksc | 6.8 |
/// | `"zvksed"` | Zvksed | 6.8 |
/// | `"zvksg"` | Zvksg | 6.8 |
/// | `"zvksh"` | Zvksh | 6.8 |
/// | `"zvkt"` | Zvkt | 6.8 |
///
/// [^ima]: Or enabled when the IMA base behavior is detected on the Linux
/// kernel version 6.4 or later (for bases, the only matching one -- either
/// `"rv32i"` or `"rv64i"` -- is enabled).
///
/// [^cntr]: Even if this extension is available, it does not necessarily
/// mean all performance counters are accessible.
/// For example, accesses to all performance counters except `time`
/// (wall-clock) are blocked by default on the Linux kernel
/// version 6.6 or later.
/// Also beware that, even if performance counters like `cycle` and
/// `instret` are accessible, their value can be unreliable (e.g. returning
/// the constant value) under certain circumstances.
///
/// [^dep]: Or enabled as a dependency of another extension (a superset)
/// even if runtime detection of this feature itself is not supported (as
/// long as the runtime detection of the superset is supported).
///
/// [^zkr]: Linux does not report existence of this extension even if
/// supported by the hardware mainly because the `seed` CSR on the Zkr
/// extension (which provides hardware-based randomness) is normally
/// inaccessible from the user mode.
/// For the Zk extension features except this CSR, check existence of both
/// `"zkn"` and `"zkt"` features instead.
///
/// There's also bases and extensions marked as standard instruction set,
/// but they are in frozen or draft state. These instruction sets are also
/// reserved by this macro and can be detected in the future platforms.
///
/// Draft RISC-V instruction sets:
///
/// * RV128I: `"rv128i"`
/// * J: `"j"`
/// * P: `"p"`
/// * Zam: `"zam"`
///
/// # Performance Hints
///
/// The two features below define performance hints for unaligned
/// scalar/vector memory accesses, respectively. If enabled, it denotes that
/// corresponding unaligned memory access is reasonably fast.
///
/// * `"unaligned-scalar-mem"`
/// * Runtime detection requires Linux kernel version 6.4 or later.
/// * `"unaligned-vector-mem"`
/// * Runtime detection requires Linux kernel version 6.13 or later.
#[stable(feature = "riscv_ratified", since = "1.78.0")]
// Feature entries. The per-feature stability attribute on each entry is
// independent of the macro-level attribute above.
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] rv32i: "rv32i";
without cfg check: true;
/// RV32I Base Integer Instruction Set
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] rv32e: "rv32e";
without cfg check: true;
/// RV32E Base Integer Instruction Set
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] rv64i: "rv64i";
without cfg check: true;
/// RV64I Base Integer Instruction Set
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] rv128i: "rv128i";
without cfg check: true;
/// RV128I Base Integer Instruction Set
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] unaligned_scalar_mem: "unaligned-scalar-mem";
/// Has reasonably performant unaligned scalar
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] unaligned_vector_mem: "unaligned-vector-mem";
/// Has reasonably performant unaligned vector
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zicsr: "zicsr";
/// "Zicsr" Extension for Control and Status Register (CSR) Instructions
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zicntr: "zicntr";
/// "Zicntr" Extension for Base Counters and Timers
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zihpm: "zihpm";
/// "Zihpm" Extension for Hardware Performance Counters
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zifencei: "zifencei";
/// "Zifencei" Extension for Instruction-Fetch Fence
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zihintntl: "zihintntl";
/// "Zihintntl" Extension for Non-Temporal Locality Hints
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zihintpause: "zihintpause";
/// "Zihintpause" Extension for Pause Hint
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zimop: "zimop";
/// "Zimop" Extension for May-Be-Operations
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zicbom: "zicbom";
/// "Zicbom" Extension for Cache-Block Management Instructions
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zicboz: "zicboz";
/// "Zicboz" Extension for Cache-Block Zero Instruction
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zicond: "zicond";
/// "Zicond" Extension for Integer Conditional Operations
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] m: "m";
/// "M" Extension for Integer Multiplication and Division
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] a: "a";
/// "A" Extension for Atomic Instructions
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zalrsc: "zalrsc";
/// "Zalrsc" Extension for Load-Reserved/Store-Conditional Instructions
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zaamo: "zaamo";
/// "Zaamo" Extension for Atomic Memory Operations
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zawrs: "zawrs";
/// "Zawrs" Extension for Wait-on-Reservation-Set Instructions
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zabha: "zabha";
/// "Zabha" Extension for Byte and Halfword Atomic Memory Operations
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zacas: "zacas";
/// "Zacas" Extension for Atomic Compare-and-Swap (CAS) Instructions
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zam: "zam";
without cfg check: true;
/// "Zam" Extension for Misaligned Atomics
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] ztso: "ztso";
/// "Ztso" Extension for Total Store Ordering
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] f: "f";
/// "F" Extension for Single-Precision Floating-Point
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] d: "d";
/// "D" Extension for Double-Precision Floating-Point
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] q: "q";
without cfg check: true;
/// "Q" Extension for Quad-Precision Floating-Point
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zfh: "zfh";
/// "Zfh" Extension for Half-Precision Floating-Point
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zfhmin: "zfhmin";
/// "Zfhmin" Extension for Minimal Half-Precision Floating-Point
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zfa: "zfa";
/// "Zfa" Extension for Additional Floating-Point Instructions
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zfbfmin: "zfbfmin";
/// "Zfbfmin" Extension for Scalar BF16 Converts
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zfinx: "zfinx";
/// "Zfinx" Extension for Single-Precision Floating-Point in Integer Registers
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zdinx: "zdinx";
/// "Zdinx" Extension for Double-Precision Floating-Point in Integer Registers
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zhinx: "zhinx";
/// "Zhinx" Extension for Half-Precision Floating-Point in Integer Registers
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zhinxmin: "zhinxmin";
/// "Zhinxmin" Extension for Minimal Half-Precision Floating-Point in Integer Registers
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] c: "c";
/// "C" Extension for Compressed Instructions
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zca: "zca";
/// "Zca" Compressed Instructions excluding Floating-Point Loads/Stores
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zcf: "zcf";
without cfg check: true;
/// "Zcf" Compressed Instructions for Single-Precision Floating-Point Loads/Stores on RV32
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zcd: "zcd";
without cfg check: true;
/// "Zcd" Compressed Instructions for Double-Precision Floating-Point Loads/Stores
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zcb: "zcb";
/// "Zcb" Simple Code-size Saving Compressed Instructions
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] zcmop: "zcmop";
/// "Zcmop" Extension for Compressed May-Be-Operations
@FEATURE: #[stable(feature = "riscv_ratified_v2", since = "1.94.0")] b: "b";
/// "B" Extension for Bit Manipulation
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zba: "zba";
/// "Zba" Extension for Address Generation
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zbb: "zbb";
/// "Zbb" Extension for Basic Bit-Manipulation
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zbc: "zbc";
/// "Zbc" Extension for Carry-less Multiplication
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zbs: "zbs";
/// "Zbs" Extension for Single-Bit Instructions
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zbkb: "zbkb";
/// "Zbkb" Extension for Bit-Manipulation for Cryptography
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zbkc: "zbkc";
/// "Zbkc" Extension for Carry-less Multiplication for Cryptography
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zbkx: "zbkx";
/// "Zbkx" Extension for Crossbar Permutations
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zknd: "zknd";
/// "Zknd" Cryptography Extension for NIST Suite: AES Decryption
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zkne: "zkne";
/// "Zkne" Cryptography Extension for NIST Suite: AES Encryption
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zknh: "zknh";
/// "Zknh" Cryptography Extension for NIST Suite: Hash Function Instructions
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zksed: "zksed";
/// "Zksed" Cryptography Extension for ShangMi Suite: SM4 Block Cipher Instructions
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zksh: "zksh";
/// "Zksh" Cryptography Extension for ShangMi Suite: SM3 Hash Function Instructions
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zkr: "zkr";
/// "Zkr" Entropy Source Extension
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zkn: "zkn";
/// "Zkn" Cryptography Extension for NIST Algorithm Suite
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zks: "zks";
/// "Zks" Cryptography Extension for ShangMi Algorithm Suite
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zk: "zk";
/// "Zk" Cryptography Extension for Standard Scalar Cryptography
@FEATURE: #[stable(feature = "riscv_ratified", since = "1.78.0")] zkt: "zkt";
/// "Zkt" Cryptography Extension for Data Independent Execution Latency
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] v: "v";
/// "V" Extension for Vector Operations
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zve32x: "zve32x";
/// "Zve32x" Vector Extension for Embedded Processors (32-bit+; Integer)
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zve32f: "zve32f";
/// "Zve32f" Vector Extension for Embedded Processors (32-bit+; with Single-Precision Floating-Point)
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zve64x: "zve64x";
/// "Zve64x" Vector Extension for Embedded Processors (64-bit+; Integer)
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zve64f: "zve64f";
/// "Zve64f" Vector Extension for Embedded Processors (64-bit+; with Single-Precision Floating-Point)
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zve64d: "zve64d";
/// "Zve64d" Vector Extension for Embedded Processors (64-bit+; with Double-Precision Floating-Point)
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvfh: "zvfh";
/// "Zvfh" Vector Extension for Half-Precision Floating-Point
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvfhmin: "zvfhmin";
/// "Zvfhmin" Vector Extension for Minimal Half-Precision Floating-Point
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvfbfmin: "zvfbfmin";
/// "Zvfbfmin" Vector Extension for BF16 Converts
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvfbfwma: "zvfbfwma";
/// "Zvfbfwma" Vector Extension for BF16 Widening Multiply-Add
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvbb: "zvbb";
/// "Zvbb" Extension for Vector Basic Bit-Manipulation
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvbc: "zvbc";
/// "Zvbc" Extension for Vector Carryless Multiplication
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvkb: "zvkb";
/// "Zvkb" Extension for Vector Cryptography Bit-Manipulation
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvkg: "zvkg";
/// "Zvkg" Cryptography Extension for Vector GCM/GMAC
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvkned: "zvkned";
/// "Zvkned" Cryptography Extension for NIST Suite: Vector AES Block Cipher
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvknha: "zvknha";
/// "Zvknha" Cryptography Extension for Vector SHA-2 Secure Hash (SHA-256)
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvknhb: "zvknhb";
/// "Zvknhb" Cryptography Extension for Vector SHA-2 Secure Hash (SHA-256/512)
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvksed: "zvksed";
/// "Zvksed" Cryptography Extension for ShangMi Suite: Vector SM4 Block Cipher
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvksh: "zvksh";
/// "Zvksh" Cryptography Extension for ShangMi Suite: Vector SM3 Secure Hash
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvkn: "zvkn";
/// "Zvkn" Cryptography Extension for NIST Algorithm Suite
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvknc: "zvknc";
/// "Zvknc" Cryptography Extension for NIST Algorithm Suite with Carryless Multiply
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvkng: "zvkng";
/// "Zvkng" Cryptography Extension for NIST Algorithm Suite with GCM
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvks: "zvks";
/// "Zvks" Cryptography Extension for ShangMi Algorithm Suite
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvksc: "zvksc";
/// "Zvksc" Cryptography Extension for ShangMi Algorithm Suite with Carryless Multiply
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvksg: "zvksg";
/// "Zvksg" Cryptography Extension for ShangMi Algorithm Suite with GCM
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] zvkt: "zvkt";
/// "Zvkt" Extension for Vector Data-Independent Execution Latency
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] j: "j";
without cfg check: true;
/// "J" Extension for Dynamically Translated Languages
@FEATURE: #[unstable(feature = "stdarch_riscv_feature_detection", issue = "111192")] p: "p";
without cfg check: true;
/// "P" Extension for Packed-SIMD Instructions
}

View File

@@ -1,61 +0,0 @@
//! Run-time feature detection on s390x.
// DSL consumed by the `features!` macro (defined elsewhere in this crate):
// - @TARGET / @CFG: the architecture name and the cfg predicate gating this
//   code;
// - @MACRO_NAME / @MACRO_ATTRS: name and doc attributes of the generated
//   public macro (`is_s390x_feature_detected!`);
// - @FEATURE: one entry per detectable facility, mapping a Rust identifier
//   (e.g. `vector_enhancements_1`) to its target-feature string
//   ("vector-enhancements-1"); the `///` line after each entry becomes that
//   facility's documentation.
features! {
@TARGET: s390x;
@CFG: target_arch = "s390x";
@MACRO_NAME: is_s390x_feature_detected;
@MACRO_ATTRS:
/// Check for the presence of a CPU feature at runtime.
///
/// When the feature is known to be enabled at compile time (e.g. via `-Ctarget-feature`)
/// the macro expands to `true`.
#[stable(feature = "stdarch_s390x_feature_detection", since = "1.93.0")]
@FEATURE: #[unstable(feature = "s390x_target_feature", issue = "150259")] concurrent_functions: "concurrent-functions";
/// s390x concurrent-functions facility
@FEATURE: #[unstable(feature = "s390x_target_feature", issue = "150259")] deflate_conversion: "deflate-conversion";
/// s390x deflate-conversion facility
@FEATURE: #[unstable(feature = "s390x_target_feature", issue = "150259")] enhanced_sort: "enhanced-sort";
/// s390x enhanced-sort facility
@FEATURE: #[unstable(feature = "s390x_target_feature", issue = "150259")] guarded_storage: "guarded-storage";
/// s390x guarded-storage facility
@FEATURE: #[unstable(feature = "s390x_target_feature", issue = "150259")] high_word: "high-word";
/// s390x high-word facility
@FEATURE: #[unstable(feature = "s390x_target_feature", issue = "150259")] message_security_assist_extension3: "message-security-assist-extension3";
/// s390x message-security-assist-extension3 facility
@FEATURE: #[unstable(feature = "s390x_target_feature", issue = "150259")] message_security_assist_extension4: "message-security-assist-extension4";
/// s390x message-security-assist-extension4 facility
@FEATURE: #[unstable(feature = "s390x_target_feature", issue = "150259")] message_security_assist_extension5: "message-security-assist-extension5";
/// s390x message-security-assist-extension5 facility
@FEATURE: #[unstable(feature = "s390x_target_feature", issue = "150259")] message_security_assist_extension8: "message-security-assist-extension8";
/// s390x message-security-assist-extension8 facility
@FEATURE: #[unstable(feature = "s390x_target_feature", issue = "150259")] message_security_assist_extension9: "message-security-assist-extension9";
/// s390x message-security-assist-extension9 facility
@FEATURE: #[unstable(feature = "s390x_target_feature", issue = "150259")] message_security_assist_extension12: "message-security-assist-extension12";
/// s390x message-security-assist-extension12 facility
@FEATURE: #[stable(feature = "s390x_target_feature_vector", since = "1.93.0")] miscellaneous_extensions_2: "miscellaneous-extensions-2";
/// s390x miscellaneous-extensions-2 facility
@FEATURE: #[stable(feature = "s390x_target_feature_vector", since = "1.93.0")] miscellaneous_extensions_3: "miscellaneous-extensions-3";
/// s390x miscellaneous-extensions-3 facility
@FEATURE: #[stable(feature = "s390x_target_feature_vector", since = "1.93.0")] miscellaneous_extensions_4: "miscellaneous-extensions-4";
/// s390x miscellaneous-extensions-4 facility
@FEATURE: #[stable(feature = "s390x_target_feature_vector", since = "1.93.0")] nnp_assist: "nnp-assist";
/// s390x nnp-assist facility
@FEATURE: #[unstable(feature = "s390x_target_feature", issue = "150259")] transactional_execution: "transactional-execution";
/// s390x transactional-execution facility
@FEATURE: #[stable(feature = "s390x_target_feature_vector", since = "1.93.0")] vector: "vector";
/// s390x vector facility
@FEATURE: #[stable(feature = "s390x_target_feature_vector", since = "1.93.0")] vector_enhancements_1: "vector-enhancements-1";
/// s390x vector-enhancements-1 facility
@FEATURE: #[stable(feature = "s390x_target_feature_vector", since = "1.93.0")] vector_enhancements_2: "vector-enhancements-2";
/// s390x vector-enhancements-2 facility
@FEATURE: #[stable(feature = "s390x_target_feature_vector", since = "1.93.0")] vector_enhancements_3: "vector-enhancements-3";
/// s390x vector-enhancements-3 facility
@FEATURE: #[stable(feature = "s390x_target_feature_vector", since = "1.93.0")] vector_packed_decimal: "vector-packed-decimal";
/// s390x vector-packed-decimal facility
@FEATURE: #[stable(feature = "s390x_target_feature_vector", since = "1.93.0")] vector_packed_decimal_enhancement: "vector-packed-decimal-enhancement";
/// s390x vector-packed-decimal-enhancement facility
@FEATURE: #[stable(feature = "s390x_target_feature_vector", since = "1.93.0")] vector_packed_decimal_enhancement_2: "vector-packed-decimal-enhancement-2";
/// s390x vector-packed-decimal-enhancement-2 facility
@FEATURE: #[stable(feature = "s390x_target_feature_vector", since = "1.93.0")] vector_packed_decimal_enhancement_3: "vector-packed-decimal-enhancement-3";
/// s390x vector-packed-decimal-enhancement-3 facility
}

View File

@@ -1,280 +0,0 @@
//! This module implements minimal run-time feature detection for x86.
//!
//! The features are detected using the `detect_features` function below.
//! This function uses the CPUID instruction to read the feature flags from the
//! CPU and encodes them in a `usize` where each bit position represents
//! whether a feature is available (bit is set) or unavailable (bit is cleared).
//!
//! The enum `Feature` is used to map bit positions to feature names, and
//! the `__crate::detect::check_for!` macro is used to map string literals (e.g.,
//! "avx") to these bit positions (e.g., `Feature::avx`).
//!
//! The run-time feature detection is performed by the
//! `__crate::detect::check_for(Feature) -> bool` function. On its first call,
//! this function queries the CPU for the available features and stores them
//! in a global `AtomicUsize` variable. The query is performed by just checking
//! whether the feature bit in this global variable is set or cleared.
features! {
@TARGET: x86;
@CFG: any(target_arch = "x86", target_arch = "x86_64");
@MACRO_NAME: is_x86_feature_detected;
@MACRO_ATTRS:
/// Check for the presence of a CPU feature at runtime.
///
/// When the feature is known to be enabled at compile time (e.g. via `-Ctarget-feature`)
/// the macro expands to `true`.
///
/// Runtime detection currently relies mostly on the `cpuid` instruction.
///
/// This macro only takes one argument which is a string literal of the feature
/// being tested for. The feature names supported are the lowercase versions of
/// the ones defined by Intel in [their documentation][docs].
///
/// ## Supported arguments
///
/// This macro supports the same names that `#[target_feature]` supports. Unlike
/// `#[target_feature]`, however, this macro does not support names separated
/// with a comma. Instead testing for multiple features must be done through
/// separate macro invocations for now.
///
/// Supported arguments are:
///
/// * `"aes"`
/// * `"pclmulqdq"`
/// * `"rdrand"`
/// * `"rdseed"`
/// * `"tsc"`
/// * `"mmx"`
/// * `"sse"`
/// * `"sse2"`
/// * `"sse3"`
/// * `"ssse3"`
/// * `"sse4.1"`
/// * `"sse4.2"`
/// * `"sse4a"`
/// * `"sha"`
/// * `"avx"`
/// * `"avx2"`
/// * `"sha512"`
/// * `"sm3"`
/// * `"sm4"`
/// * `"avx512f"`
/// * `"avx512cd"`
/// * `"avx512er"`
/// * `"avx512pf"`
/// * `"avx512bw"`
/// * `"avx512dq"`
/// * `"avx512vl"`
/// * `"avx512ifma"`
/// * `"avx512vbmi"`
/// * `"avx512vpopcntdq"`
/// * `"avx512vbmi2"`
/// * `"gfni"`
/// * `"vaes"`
/// * `"vpclmulqdq"`
/// * `"avx512vnni"`
/// * `"avx512bitalg"`
/// * `"avx512bf16"`
/// * `"avx512vp2intersect"`
/// * `"avx512fp16"`
/// * `"avxvnni"`
/// * `"avxifma"`
/// * `"avxneconvert"`
/// * `"avxvnniint8"`
/// * `"avxvnniint16"`
/// * `"amx-tile"`
/// * `"amx-int8"`
/// * `"amx-bf16"`
/// * `"amx-fp16"`
/// * `"amx-complex"`
/// * `"amx-avx512"`
/// * `"amx-fp8"`
/// * `"amx-movrs"`
/// * `"amx-tf32"`
/// * `"f16c"`
/// * `"fma"`
/// * `"bmi1"`
/// * `"bmi2"`
/// * `"abm"`
/// * `"lzcnt"`
/// * `"tbm"`
/// * `"popcnt"`
/// * `"fxsr"`
/// * `"xsave"`
/// * `"xsaveopt"`
/// * `"xsaves"`
/// * `"xsavec"`
/// * `"cmpxchg16b"`
/// * `"kl"`
/// * `"widekl"`
/// * `"adx"`
/// * `"rtm"`
/// * `"movbe"`
/// * `"ermsb"`
/// * `"movrs"`
/// * `"xop"`
///
/// [docs]: https://software.intel.com/sites/landingpage/IntrinsicsGuide
#[stable(feature = "simd_x86", since = "1.27.0")]
@BIND_FEATURE_NAME: "abm"; "lzcnt"; // abm is a synonym for lzcnt
@BIND_FEATURE_NAME: "avx512gfni"; "gfni"; #[deprecated(since = "1.67.0", note = "the `avx512gfni` feature has been renamed to `gfni`")];
@BIND_FEATURE_NAME: "avx512vaes"; "vaes"; #[deprecated(since = "1.67.0", note = "the `avx512vaes` feature has been renamed to `vaes`")];
@BIND_FEATURE_NAME: "avx512vpclmulqdq"; "vpclmulqdq"; #[deprecated(since = "1.67.0", note = "the `avx512vpclmulqdq` feature has been renamed to `vpclmulqdq`")];
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] aes: "aes";
/// AES (Advanced Encryption Standard New Instructions AES-NI)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] pclmulqdq: "pclmulqdq";
/// CLMUL (Carry-less Multiplication)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] rdrand: "rdrand";
/// RDRAND
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] rdseed: "rdseed";
/// RDSEED
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] tsc: "tsc";
without cfg check: true;
/// TSC (Time Stamp Counter)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] mmx: "mmx";
without cfg check: true;
/// MMX (MultiMedia eXtensions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] sse: "sse";
/// SSE (Streaming SIMD Extensions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] sse2: "sse2";
/// SSE2 (Streaming SIMD Extensions 2)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] sse3: "sse3";
/// SSE3 (Streaming SIMD Extensions 3)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] ssse3: "ssse3";
/// SSSE3 (Supplemental Streaming SIMD Extensions 3)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] sse4_1: "sse4.1";
/// SSE4.1 (Streaming SIMD Extensions 4.1)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] sse4_2: "sse4.2";
/// SSE4.2 (Streaming SIMD Extensions 4.2)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] sse4a: "sse4a";
/// SSE4a (Streaming SIMD Extensions 4a)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] sha: "sha";
/// SHA
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx: "avx";
/// AVX (Advanced Vector Extensions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx2: "avx2";
/// AVX2 (Advanced Vector Extensions 2)
@FEATURE: #[stable(feature = "sha512_sm_x86", since = "1.89.0")] sha512: "sha512";
/// SHA512
@FEATURE: #[stable(feature = "sha512_sm_x86", since = "1.89.0")] sm3: "sm3";
/// SM3
@FEATURE: #[stable(feature = "sha512_sm_x86", since = "1.89.0")] sm4: "sm4";
/// SM4
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512f: "avx512f" ;
/// AVX-512 F (Foundation)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512cd: "avx512cd" ;
/// AVX-512 CD (Conflict Detection Instructions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512er: "avx512er";
without cfg check: true;
/// AVX-512 ER (Exponential and Reciprocal Instructions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512pf: "avx512pf";
without cfg check: true;
/// AVX-512 PF (Prefetch Instructions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512bw: "avx512bw";
/// AVX-512 BW (Byte and Word Instructions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512dq: "avx512dq";
/// AVX-512 DQ (Doubleword and Quadword)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vl: "avx512vl";
/// AVX-512 VL (Vector Length Extensions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512ifma: "avx512ifma";
/// AVX-512 IFMA (Integer Fused Multiply Add)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vbmi: "avx512vbmi";
/// AVX-512 VBMI (Vector Byte Manipulation Instructions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vpopcntdq: "avx512vpopcntdq";
/// AVX-512 VPOPCNTDQ (Vector Population Count Doubleword and Quadword)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vbmi2: "avx512vbmi2";
/// AVX-512 VBMI2 (Additional byte, word, dword and qword capabilities)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] gfni: "gfni";
/// AVX-512 GFNI (Galois Field New Instruction)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] vaes: "vaes";
/// AVX-512 VAES (Vector AES instruction)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] vpclmulqdq: "vpclmulqdq";
/// AVX-512 VPCLMULQDQ (Vector PCLMULQDQ instructions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vnni: "avx512vnni";
/// AVX-512 VNNI (Vector Neural Network Instructions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512bitalg: "avx512bitalg";
/// AVX-512 BITALG (Support for VPOPCNT\[B,W\] and VPSHUFBITQMB)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512bf16: "avx512bf16";
/// AVX-512 BF16 (BFLOAT16 instructions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vp2intersect: "avx512vp2intersect";
/// AVX-512 P2INTERSECT
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512fp16: "avx512fp16";
/// AVX-512 FP16 (FLOAT16 instructions)
@FEATURE: #[stable(feature = "avx512_target_feature", since = "1.89.0")] avxifma: "avxifma";
/// AVX-IFMA (Integer Fused Multiply Add)
@FEATURE: #[stable(feature = "avx512_target_feature", since = "1.89.0")] avxneconvert: "avxneconvert";
/// AVX-NE-CONVERT (Exceptionless Convert)
@FEATURE: #[stable(feature = "avx512_target_feature", since = "1.89.0")] avxvnni: "avxvnni";
/// AVX-VNNI (Vector Neural Network Instructions)
@FEATURE: #[stable(feature = "avx512_target_feature", since = "1.89.0")] avxvnniint16: "avxvnniint16";
/// AVX-VNNI_INT16 (VNNI with 16-bit Integers)
@FEATURE: #[stable(feature = "avx512_target_feature", since = "1.89.0")] avxvnniint8: "avxvnniint8";
/// AVX-VNNI_INT8 (VNNI with 8-bit integers)
@FEATURE: #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] amx_tile: "amx-tile";
/// AMX (Advanced Matrix Extensions) - Tile load/store
@FEATURE: #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] amx_int8: "amx-int8";
/// AMX-INT8 (Operations on 8-bit integers)
@FEATURE: #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] amx_bf16: "amx-bf16";
/// AMX-BF16 (BFloat16 Operations)
@FEATURE: #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] amx_fp16: "amx-fp16";
/// AMX-FP16 (Float16 Operations)
@FEATURE: #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] amx_complex: "amx-complex";
/// AMX-COMPLEX (Complex number Operations)
@FEATURE: #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] amx_avx512: "amx-avx512";
/// AMX-AVX512 (AVX512 operations extended to matrices)
@FEATURE: #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] amx_fp8: "amx-fp8";
/// AMX-FP8 (Float8 Operations)
@FEATURE: #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] amx_movrs: "amx-movrs";
/// AMX-MOVRS (Matrix MOVERS operations)
@FEATURE: #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] amx_tf32: "amx-tf32";
/// AMX-TF32 (TensorFloat32 Operations)
@FEATURE: #[unstable(feature = "apx_target_feature", issue = "139284")] apxf: "apxf";
/// APX-F (Advanced Performance Extensions - Foundation)
@FEATURE: #[unstable(feature = "avx10_target_feature", issue = "138843")] avx10_1: "avx10.1";
/// AVX10.1
@FEATURE: #[unstable(feature = "avx10_target_feature", issue = "138843")] avx10_2: "avx10.2";
/// AVX10.2
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] f16c: "f16c";
/// F16C (Conversions between IEEE-754 `binary16` and `binary32` formats)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] fma: "fma";
/// FMA (Fused Multiply Add)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] bmi1: "bmi1" ;
/// BMI1 (Bit Manipulation Instructions 1)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] bmi2: "bmi2" ;
/// BMI2 (Bit Manipulation Instructions 2)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] lzcnt: "lzcnt";
/// ABM (Advanced Bit Manipulation) / LZCNT (Leading Zero Count)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] tbm: "tbm";
/// TBM (Trailing Bit Manipulation)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] popcnt: "popcnt";
/// POPCNT (Population Count)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] fxsr: "fxsr";
/// FXSR (Floating-point context fast save and restore)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] xsave: "xsave";
/// XSAVE (Save Processor Extended States)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] xsaveopt: "xsaveopt";
/// XSAVEOPT (Save Processor Extended States Optimized)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] xsaves: "xsaves";
/// XSAVES (Save Processor Extended States Supervisor)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] xsavec: "xsavec";
/// XSAVEC (Save Processor Extended States Compacted)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] cmpxchg16b: "cmpxchg16b";
/// CMPXCH16B (16-byte compare-and-swap instruction)
@FEATURE: #[stable(feature = "keylocker_x86", since = "1.89.0")] kl: "kl";
/// Intel Key Locker
@FEATURE: #[stable(feature = "keylocker_x86", since = "1.89.0")] widekl: "widekl";
/// Intel Key Locker Wide
@FEATURE: #[stable(feature = "simd_x86_adx", since = "1.33.0")] adx: "adx";
/// ADX, Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] rtm: "rtm";
/// RTM, Intel (Restricted Transactional Memory)
@FEATURE: #[stable(feature = "movbe_target_feature", since = "1.67.0")] movbe: "movbe";
/// MOVBE (Move Data After Swapping Bytes)
@FEATURE: #[unstable(feature = "movrs_target_feature", issue = "137976")] movrs: "movrs";
/// MOVRS (Move data with the read-shared hint)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] ermsb: "ermsb";
/// ERMSB, Enhanced REP MOVSB and STOSB
@FEATURE: #[unstable(feature = "xop_target_feature", issue = "127208")] xop: "xop";
/// XOP: eXtended Operations (AMD)
}

View File

@@ -1,9 +0,0 @@
//! Bit manipulation utilities.
/// Returns `true` if bit number `bit` of `x` is set.
///
/// In debug builds, panics if `bit` is not a valid bit index for `usize`.
#[allow(dead_code)]
#[inline]
pub(crate) fn test(x: usize, bit: u32) -> bool {
    debug_assert!(bit < usize::BITS, "bit index out-of-bounds");
    (x >> bit) & 1 != 0
}

View File

@@ -1,203 +0,0 @@
//! Caches run-time feature detection so that it only needs to be computed
//! once.
#![allow(dead_code)] // not used on all platforms
use core::sync::atomic::{AtomicUsize, Ordering};
/// Returns `x` with the `bit`-th bit turned on.
#[inline]
const fn set_bit(x: u128, bit: u32) -> u128 {
    let mask: u128 = 1 << bit;
    x | mask
}
/// Returns whether the `bit`-th bit of `x` is set.
#[inline]
const fn test_bit(x: u128, bit: u32) -> bool {
    (x >> bit) & 1 != 0
}
/// Unsets the `bit` of `x`.
#[inline]
const fn unset_bit(x: u128, bit: u32) -> u128 {
x & !(1 << bit)
}
/// Maximum number of features that can be cached.
const CACHE_CAPACITY: u32 = 93;
/// This type is used to initialize the cache: a 128-bit bitset in which bit
/// `i` records whether feature `i` was detected.
// The derived `Default` implementation will initialize the field to zero,
// which is what we want.
#[derive(Copy, Clone, Default, PartialEq, Eq)]
pub(crate) struct Initializer(u128);
// NOTE: the `debug_assert!` would catch that we do not add more Features than
// the one fitting our cache.
// All accessors assert (in debug builds) that `bit` fits within the
// `CACHE_CAPACITY`-bit budget; bits above that are never used.
impl Initializer {
/// Tests the `bit` of the cache.
#[inline]
pub(crate) fn test(self, bit: u32) -> bool {
debug_assert!(bit < CACHE_CAPACITY, "too many features, time to increase the cache size!");
test_bit(self.0, bit)
}
/// Sets the `bit` of the cache.
#[inline]
pub(crate) fn set(&mut self, bit: u32) {
debug_assert!(bit < CACHE_CAPACITY, "too many features, time to increase the cache size!");
let v = self.0;
self.0 = set_bit(v, bit);
}
/// Unsets the `bit` of the cache.
#[inline]
pub(crate) fn unset(&mut self, bit: u32) {
debug_assert!(bit < CACHE_CAPACITY, "too many features, time to increase the cache size!");
let v = self.0;
self.0 = unset_bit(v, bit);
}
}
/// This global variable is a cache of the features supported by the CPU.
// Note: the third slot is only used in x86
// Another slot can be added if needed without any change to `Initializer`
static CACHE: [Cache; 3] = [Cache::uninitialized(), Cache::uninitialized(), Cache::uninitialized()];
/// Feature cache with capacity for `size_of::<usize>() * 8 - 1` features.
///
/// Note: 0 is used to represent an uninitialized cache, and (at least) the most
/// significant bit is set on any cache which has been initialized.
///
/// Note: we use `Relaxed` atomic operations, because we are only interested in
/// the effects of operations on a single memory location. That is, we only need
/// "modification order", and not the full-blown "happens before".
struct Cache(AtomicUsize);
impl Cache {
// Number of feature bits one slot can hold (one bit is reserved as the
// "initialized" marker below).
const CAPACITY: u32 = (core::mem::size_of::<usize>() * 8 - 1) as u32;
// Mask selecting the feature bits, i.e. everything below INITIALIZED_BIT.
const MASK: usize = (1 << Cache::CAPACITY) - 1;
// Sentinel bit marking a slot as initialized; a raw value of 0 means
// "not yet initialized".
const INITIALIZED_BIT: usize = 1usize << Cache::CAPACITY;
/// Creates an uninitialized cache.
#[allow(clippy::declare_interior_mutable_const)]
const fn uninitialized() -> Self {
Cache(AtomicUsize::new(0))
}
/// Is the `bit` in the cache set? Returns `None` if the cache has not been initialized.
#[inline]
pub(crate) fn test(&self, bit: u32) -> Option<bool> {
let cached = self.0.load(Ordering::Relaxed);
if cached == 0 { None } else { Some(test_bit(cached as u128, bit)) }
}
/// Initializes the cache.
#[inline]
fn initialize(&self, value: usize) -> usize {
debug_assert_eq!((value & !Cache::MASK), 0);
self.0.store(value | Cache::INITIALIZED_BIT, Ordering::Relaxed);
value
}
}
cfg_select! {
feature = "std_detect_env_override" => {
/// Unsets every feature named in the space-separated `disable` list.
/// Non-UTF-8 input and unknown feature names are silently ignored.
#[inline]
fn disable_features(disable: &[u8], value: &mut Initializer) {
if let Ok(disable) = core::str::from_utf8(disable) {
for v in disable.split(" ") {
let _ = super::Feature::from_str(v).map(|v| value.unset(v as u32));
}
}
}
#[inline]
fn initialize(mut value: Initializer) -> Initializer {
use core::ffi::CStr;
const RUST_STD_DETECT_UNSTABLE: &CStr = c"RUST_STD_DETECT_UNSTABLE";
cfg_select! {
windows => {
use alloc::vec;
#[link(name = "kernel32")]
unsafe extern "system" {
fn GetEnvironmentVariableA(name: *const u8, buffer: *mut u8, size: u32) -> u32;
}
// First call with a null buffer queries the required size.
let len = unsafe { GetEnvironmentVariableA(RUST_STD_DETECT_UNSTABLE.as_ptr().cast::<u8>(), core::ptr::null_mut(), 0) };
if len > 0 {
// +1 to include the null terminator.
let mut env = vec![0; len as usize + 1];
// Second call fetches the value; on success `len` is the
// length excluding the null terminator.
let len = unsafe { GetEnvironmentVariableA(RUST_STD_DETECT_UNSTABLE.as_ptr().cast::<u8>(), env.as_mut_ptr(), len + 1) };
if len > 0 {
disable_features(&env[..len as usize], &mut value);
}
}
}
_ => {
let env = unsafe {
libc::getenv(RUST_STD_DETECT_UNSTABLE.as_ptr())
};
if !env.is_null() {
let len = unsafe { libc::strlen(env) };
let env = unsafe { core::slice::from_raw_parts(env as *const u8, len) };
disable_features(env, &mut value);
}
}
}
do_initialize(value);
value
}
}
_ => {
// No env override: publish the detected value as-is.
#[inline]
fn initialize(value: Initializer) -> Initializer {
do_initialize(value);
value
}
}
}
/// Publishes `value` into the three cache slots, `Cache::CAPACITY` bits per
/// slot (slot `i` holds bits `[i * CAPACITY, (i + 1) * CAPACITY)`).
#[inline]
fn do_initialize(value: Initializer) {
    for (i, slot) in CACHE.iter().enumerate() {
        let chunk = (value.0 >> (i as u32 * Cache::CAPACITY)) as usize & Cache::MASK;
        slot.initialize(chunk);
    }
}
// We only have to detect features once, and it's fairly costly, so hint to LLVM
// that it should assume that cache hits are more common than misses (which is
// the point of caching). It's possibly unfortunate that this function needs to
// reach across modules like this to call `os::detect_features`, but it produces
// the best code out of several attempted variants.
//
// The `Initializer` that the cache was initialized with is returned, so that
// the caller can call `test()` on it without having to load the value from the
// cache again.
// Cold path: runs the full OS-specific detection, publishes the result to
// `CACHE`, and hands the fresh `Initializer` back so the caller can query it
// without re-loading the atomics.
#[cold]
fn detect_and_initialize() -> Initializer {
initialize(super::os::detect_features())
}
/// Tests the `bit` of the storage. If the storage has not been initialized,
/// initializes it with the result of `os::detect_features()`.
///
/// On its first invocation, it detects the CPU features and caches them in the
/// `CACHE` global variable as an `AtomicU64`.
///
/// It uses the `Feature` variant to index into this variable as a bitset. If
/// the bit is set, the feature is enabled, and otherwise it is disabled.
///
/// If the feature `std_detect_env_override` is enabled looks for the env
/// variable `RUST_STD_DETECT_UNSTABLE` and uses its content to disable
/// Features that would had been otherwise detected.
#[inline]
pub(crate) fn test(bit: u32) -> bool {
// Map the global bit index onto (bit-within-slot, slot index); each slot
// stores `Cache::CAPACITY` feature bits.
let (relative_bit, idx) = if bit < Cache::CAPACITY {
(bit, 0)
} else if bit < 2 * Cache::CAPACITY {
(bit - Cache::CAPACITY, 1)
} else {
(bit - 2 * Cache::CAPACITY, 2)
};
// Fast path: the slot is already initialized. Slow path: detect once and
// test the returned `Initializer` directly, avoiding a second atomic load.
CACHE[idx].test(relative_bit).unwrap_or_else(|| detect_and_initialize().test(bit))
}

View File

@@ -1,203 +0,0 @@
// Expands to `true` when the feature is statically known via
// `cfg!(target_feature)`, otherwise falls back to the run-time detector in
// `detect::__is_feature_detected`.
#[macro_export]
#[allow_internal_unstable(stdarch_internal)]
#[unstable(feature = "stdarch_internal", issue = "none")]
macro_rules! detect_feature {
// Common case: the cfg name is identical to the feature literal.
($feature:tt, $feature_lit:tt) => {
$crate::detect_feature!($feature, $feature_lit : $feature_lit)
};
// The feature may be implied by one or more `target_feature` cfg values.
($feature:tt, $feature_lit:tt : $($target_feature_lit:tt),*) => {
$(cfg!(target_feature = $target_feature_lit) ||)*
$crate::detect::__is_feature_detected::$feature()
};
// Features with no corresponding `cfg(target_feature)` value: always ask
// the run-time detector.
($feature:tt, $feature_lit:tt, without cfg check: true) => {
$crate::detect::__is_feature_detected::$feature()
};
}
// Mentions `cfg!(target_feature = ...)` for every feature literal so that
// `#[deny(unexpected_cfgs)]` (applied at the use site) rejects names rustc
// does not recognize.
#[allow(unused_macros, reason = "it's used in the features! macro below")]
macro_rules! check_cfg_feature {
($feature:tt, $feature_lit:tt) => {
check_cfg_feature!($feature, $feature_lit : $feature_lit)
};
($feature:tt, $feature_lit:tt : $($target_feature_lit:tt),*) => {
$(cfg!(target_feature = $target_feature_lit);)*
};
// Features rustc does not know as cfg values: suppress the lint for them.
($feature:tt, $feature_lit:tt, without cfg check: $feature_cfg_check:literal) => {
#[allow(unexpected_cfgs, reason = $feature_lit)]
{ cfg!(target_feature = $feature_lit) }
};
}
// For one architecture, declares: the `is_{arch}_feature_detected!` macro
// (both the real implementation and a cross-target stub that errors at
// compile time), the `Feature` enum whose discriminants index the cache
// bitset, and the per-feature `__is_feature_detected` shim functions.
#[allow(unused)]
macro_rules! features {
(
@TARGET: $target:ident;
@CFG: $cfg:meta;
@MACRO_NAME: $macro_name:ident;
@MACRO_ATTRS: $(#[$macro_attrs:meta])*
$(@BIND_FEATURE_NAME: $bind_feature:tt; $feature_impl:tt; $(#[$deprecate_attr:meta];)?)*
$(@NO_RUNTIME_DETECTION: $nort_feature:tt; )*
$(@FEATURE: #[$stability_attr:meta] $feature:ident: $feature_lit:tt;
$(without cfg check: $feature_cfg_check:tt;)?
$(implied by target_features: [$($target_feature_lit:tt),*];)?
$(#[$feature_comment:meta])*)*
) => {
#[macro_export]
$(#[$macro_attrs])*
#[allow_internal_unstable(stdarch_internal)]
#[cfg($cfg)]
#[doc(cfg($cfg))]
macro_rules! $macro_name {
$(
($feature_lit) => {
$crate::detect_feature!($feature, $feature_lit $(, without cfg check: $feature_cfg_check)? $(: $($target_feature_lit),*)?)
};
)*
$(
($bind_feature) => {
{
$(
// Trigger the deprecation warning via a dummy macro carrying
// the `#[deprecated]` attribute.
#[$deprecate_attr] macro_rules! deprecated_feature { {} => {}; }
deprecated_feature! {};
)?
$crate::$macro_name!($feature_impl)
}
};
)*
$(
($nort_feature) => {
compile_error!(
concat!(
stringify!($nort_feature),
" feature cannot be detected at run-time"
)
)
};
)*
($t:tt,) => {
$crate::$macro_name!($t);
};
($t:tt) => {
compile_error!(
concat!(
concat!("unknown ", stringify!($target)),
concat!(" target feature: ", $t)
)
)
};
}
// Cross-compilation stub: on other targets every use of the macro is a
// hard error pointing the user at a `cfg` guard.
$(#[$macro_attrs])*
#[macro_export]
#[cfg(not($cfg))]
#[doc(cfg($cfg))]
macro_rules! $macro_name {
$(
($feature_lit) => {
compile_error!(
concat!(
r#"This macro cannot be used on the current target.
You can prevent it from being used in other architectures by
guarding it behind a cfg("#,
stringify!($cfg),
")."
)
)
};
)*
$(
($bind_feature) => { $crate::$macro_name!($feature_impl) };
)*
$(
($nort_feature) => {
compile_error!(
concat!(
stringify!($nort_feature),
" feature cannot be detected at run-time"
)
)
};
)*
($t:tt,) => {
$crate::$macro_name!($t);
};
($t:tt) => {
compile_error!(
concat!(
concat!("unknown ", stringify!($target)),
concat!(" target feature: ", $t)
)
)
};
}
// Compile-time validation that every feature literal is a cfg value rustc
// accepts (see `check_cfg_feature!`).
#[deny(unexpected_cfgs)]
#[deny(unfulfilled_lint_expectations)]
const _: () = {
$(
check_cfg_feature!($feature, $feature_lit $(, without cfg check: $feature_cfg_check)? $(: $($target_feature_lit),*)?);
)*
};
/// Each variant denotes a position in a bitset for a particular feature.
///
/// PLEASE: do not use this, it is an implementation detail subject
/// to change.
#[doc(hidden)]
#[allow(non_camel_case_types)]
#[derive(Copy, Clone)]
#[repr(u8)]
#[unstable(feature = "stdarch_internal", issue = "none")]
#[cfg($cfg)]
pub(crate) enum Feature {
$(
$(#[$feature_comment])*
$feature,
)*
// Do not add variants after last:
_last
}
#[cfg($cfg)]
impl Feature {
pub(crate) fn to_str(self) -> &'static str {
match self {
$(Feature::$feature => $feature_lit,)*
Feature::_last => unreachable!(),
}
}
#[cfg(feature = "std_detect_env_override")]
pub(crate) fn from_str(s: &str) -> Result<Feature, ()> {
match s {
$($feature_lit => Ok(Feature::$feature),)*
_ => Err(())
}
}
}
/// Each function performs run-time feature detection for a single
/// feature. This allow us to use stability attributes on a per feature
/// basis.
///
/// PLEASE: do not use this, it is an implementation detail subject
/// to change.
#[doc(hidden)]
#[cfg($cfg)]
#[unstable(feature = "stdarch_internal", issue = "none")]
pub mod __is_feature_detected {
$(
/// PLEASE: do not use this, it is an implementation detail
/// subject to change.
#[inline]
#[doc(hidden)]
#[$stability_attr]
pub fn $feature() -> bool {
$crate::detect::check_for($crate::detect::Feature::$feature)
}
)*
}
};
}

View File

@@ -1,125 +0,0 @@
//! This module implements run-time feature detection.
//!
//! The `is_{arch}_feature_detected!("feature-name")` macros take the name of a
//! feature as a string-literal, and return a boolean indicating whether the
//! feature is enabled at run-time or not.
//!
//! These macros do two things:
//! * map the string-literal into an integer stored as a `Feature` enum,
//! * call a `os::check_for(x: Feature)` function that returns `true` if the
//! feature is enabled.
//!
//! The `Feature` enums are also implemented in the `arch/{target_arch}.rs`
//! modules.
//!
//! The `check_for` functions are, in general, Operating System dependent. Most
//! architectures do not allow user-space programs to query the feature bits
//! due to security concerns (x86 is the big exception). These functions are
//! implemented in the `os/{target_os}.rs` modules.
#[macro_use]
mod macros;
mod arch;
// This module needs to be public because the `is_{arch}_feature_detected!`
// macros expand calls to items within it in user crates.
#[doc(hidden)]
#[unstable(feature = "stdarch_internal", issue = "none")]
pub use self::arch::__is_feature_detected;
pub(crate) use self::arch::Feature;
mod bit;
mod cache;
cfg_select! {
miri => {
// When running under miri all target-features that are not enabled at
// compile-time are reported as disabled at run-time.
//
// For features for which `cfg(target_feature)` returns true,
// this run-time detection logic is never called.
#[path = "os/other.rs"]
mod os;
}
any(target_arch = "x86", target_arch = "x86_64") => {
// On x86/x86_64 no OS specific functionality is required.
#[path = "os/x86.rs"]
mod os;
}
any(target_os = "linux", target_os = "android") => {
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
#[path = "os/riscv.rs"]
mod riscv;
#[path = "os/linux/mod.rs"]
mod os;
}
target_os = "freebsd" => {
#[cfg(target_arch = "aarch64")]
#[path = "os/aarch64.rs"]
mod aarch64;
#[path = "os/freebsd/mod.rs"]
mod os;
}
target_os = "openbsd" => {
#[allow(dead_code)] // we don't use code that calls the mrs instruction.
#[cfg(target_arch = "aarch64")]
#[path = "os/aarch64.rs"]
mod aarch64;
#[path = "os/openbsd/mod.rs"]
mod os;
}
all(target_os = "windows", any(target_arch = "aarch64", target_arch = "arm64ec")) => {
#[path = "os/windows/aarch64.rs"]
mod os;
}
all(target_vendor = "apple", target_arch = "aarch64") => {
#[path = "os/darwin/aarch64.rs"]
mod os;
}
_ => {
#[path = "os/other.rs"]
mod os;
}
}
/// Performs run-time feature detection.
#[inline]
#[allow(dead_code)] // not every target's macro expansion calls this
fn check_for(x: Feature) -> bool {
// The `Feature` discriminant doubles as the bit index in the feature cache.
cache::test(x as u32)
}
/// Returns an `Iterator<Item=(&'static str, bool)>` where
/// `Item.0` is the feature name, and `Item.1` is a `bool` which
/// is `true` if the feature is supported by the host and `false` otherwise.
#[unstable(feature = "stdarch_internal", issue = "none")]
pub fn features() -> impl Iterator<Item = (&'static str, bool)> {
cfg_select! {
any(
target_arch = "x86",
target_arch = "x86_64",
target_arch = "arm",
target_arch = "aarch64",
target_arch = "arm64ec",
target_arch = "riscv32",
target_arch = "riscv64",
target_arch = "powerpc",
target_arch = "powerpc64",
target_arch = "mips",
target_arch = "mips64",
target_arch = "loongarch32",
target_arch = "loongarch64",
target_arch = "s390x",
) => {
(0_u8..Feature::_last as u8).map(|discriminant: u8| {
#[allow(bindings_with_variant_name)] // RISC-V has Feature::f
// SAFETY: `Feature` is `#[repr(u8)]` and `discriminant` is strictly
// below `Feature::_last`, so it is a valid discriminant.
let f: Feature = unsafe { core::mem::transmute(discriminant) };
let name: &'static str = f.to_str();
let enabled: bool = check_for(f);
(name, enabled)
})
}
// Architectures without run-time detection report no features.
_ => None.into_iter(),
}
}

View File

@@ -1,127 +0,0 @@
//! Run-time feature detection for Aarch64 on any OS that emulates the mrs instruction.
//!
//! On FreeBSD >= 12.0, Linux >= 4.11 and other operating systems, it is possible to use
//! privileged system registers from userspace to check CPU feature support.
//!
//! AArch64 system registers ID_AA64ISAR0_EL1, ID_AA64PFR0_EL1, ID_AA64ISAR1_EL1
//! have bits dedicated to features like AdvSIMD, CRC32, AES, atomics (LSE), etc.
//! Each part of the register indicates the level of support for a certain feature, e.g.
//! when ID_AA64ISAR0_EL1\[7:4\] is >= 1, AES is supported; when it's >= 2, PMULL is supported.
//!
//! For proper support of [SoCs where different cores have different capabilities](https://medium.com/@jadr2ddude/a-big-little-problem-a-tale-of-big-little-gone-wrong-e7778ce744bb),
//! the OS has to always report only the features supported by all cores, like [FreeBSD does](https://reviews.freebsd.org/D17137#393947).
//!
//! References:
//!
//! - [Zircon implementation](https://fuchsia.googlesource.com/zircon/+/master/kernel/arch/arm64/feature.cpp)
//! - [Linux documentation](https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt)
//! - [ARM documentation](https://developer.arm.com/documentation/ddi0601/2022-12/AArch64-Registers?lang=en)
use core::arch::asm;
use crate::detect::{Feature, cache};
/// Try to read the features from the system registers.
///
/// This will cause SIGILL if the current OS is not trapping the mrs instruction.
// NOTE(review): `options(pure, nomem)` tells the compiler each read is a pure
// function of no inputs, so duplicate reads may be merged — these ID registers
// are constant for the lifetime of the process.
pub(crate) fn detect_features() -> cache::Initializer {
// ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0
let aa64isar0: u64;
unsafe {
asm!(
"mrs {}, ID_AA64ISAR0_EL1",
out(reg) aa64isar0,
options(pure, nomem, preserves_flags, nostack)
);
}
// ID_AA64ISAR1_EL1 - Instruction Set Attribute Register 1
let aa64isar1: u64;
unsafe {
asm!(
"mrs {}, ID_AA64ISAR1_EL1",
out(reg) aa64isar1,
options(pure, nomem, preserves_flags, nostack)
);
}
// ID_AA64MMFR2_EL1 - AArch64 Memory Model Feature Register 2
let aa64mmfr2: u64;
unsafe {
asm!(
"mrs {}, ID_AA64MMFR2_EL1",
out(reg) aa64mmfr2,
options(pure, nomem, preserves_flags, nostack)
);
}
// ID_AA64PFR0_EL1 - Processor Feature Register 0
let aa64pfr0: u64;
unsafe {
asm!(
"mrs {}, ID_AA64PFR0_EL1",
out(reg) aa64pfr0,
options(pure, nomem, preserves_flags, nostack)
);
}
parse_system_registers(aa64isar0, aa64isar1, aa64mmfr2, Some(aa64pfr0))
}
/// Decodes the AArch64 ID register values into a feature-cache `Initializer`.
/// `aa64pfr0` is optional because some callers cannot read that register.
pub(crate) fn parse_system_registers(
aa64isar0: u64,
aa64isar1: u64,
aa64mmfr2: u64,
aa64pfr0: Option<u64>,
) -> cache::Initializer {
let mut value = cache::Initializer::default();
let mut enable_feature = |f, enable| {
if enable {
value.set(f as u32);
}
};
// ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0
enable_feature(Feature::pmull, bits_shift(aa64isar0, 7, 4) >= 2);
enable_feature(Feature::lse, bits_shift(aa64isar0, 23, 20) >= 2);
enable_feature(Feature::crc, bits_shift(aa64isar0, 19, 16) >= 1);
// ID_AA64PFR0_EL1 - Processor Feature Register 0
if let Some(aa64pfr0) = aa64pfr0 {
// A field value of 0b1111 encodes "not implemented" for FP/AdvSIMD
// (per the Arm Architecture Reference Manual), hence the `< 0xF` tests.
let fp = bits_shift(aa64pfr0, 19, 16) < 0xF;
let fphp = bits_shift(aa64pfr0, 19, 16) >= 1;
let asimd = bits_shift(aa64pfr0, 23, 20) < 0xF;
let asimdhp = bits_shift(aa64pfr0, 23, 20) >= 1;
enable_feature(Feature::fp, fp);
enable_feature(Feature::fp16, fphp);
// SIMD support requires float support - if half-floats are
// supported, it also requires half-float support:
enable_feature(Feature::asimd, fp && asimd && (!fphp | asimdhp));
// SIMD extensions require SIMD support:
enable_feature(Feature::aes, asimd && bits_shift(aa64isar0, 7, 4) >= 2);
let sha1 = bits_shift(aa64isar0, 11, 8) >= 1;
let sha2 = bits_shift(aa64isar0, 15, 12) >= 1;
enable_feature(Feature::sha2, asimd && sha1 && sha2);
enable_feature(Feature::rdm, asimd && bits_shift(aa64isar0, 31, 28) >= 1);
enable_feature(Feature::dotprod, asimd && bits_shift(aa64isar0, 47, 44) >= 1);
enable_feature(Feature::sve, asimd && bits_shift(aa64pfr0, 35, 32) >= 1);
}
// ID_AA64ISAR1_EL1 - Instruction Set Attribute Register 1
// Check for either APA or API field
enable_feature(Feature::paca, bits_shift(aa64isar1, 11, 4) >= 1);
enable_feature(Feature::rcpc, bits_shift(aa64isar1, 23, 20) >= 1);
// Check for either GPA or GPI field
enable_feature(Feature::pacg, bits_shift(aa64isar1, 31, 24) >= 1);
// ID_AA64MMFR2_EL1 - AArch64 Memory Model Feature Register 2
enable_feature(Feature::lse2, bits_shift(aa64mmfr2, 35, 32) >= 1);
value
}
/// Extracts the bit-field `x[high:low]` (both bounds inclusive).
#[inline]
fn bits_shift(x: u64, high: usize, low: usize) -> u64 {
    let width = high - low + 1;
    let mask = (1 << width) - 1;
    (x >> low) & mask
}

View File

@@ -1,166 +0,0 @@
//! Run-time feature detection for aarch64 on Darwin (macOS/iOS/tvOS/watchOS/visionOS).
//!
//! <https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics>
use core::ffi::CStr;
use crate::detect::{Feature, cache};
/// Reads the boolean sysctl named `name`; returns `false` on any error or if
/// the value is zero.
#[inline]
fn _sysctlbyname(name: &CStr) -> bool {
use libc;
let mut enabled: i32 = 0;
// The kernel writes a 4-byte integer; `enabled_len` is an in/out size.
let mut enabled_len: usize = 4;
let enabled_ptr = &mut enabled as *mut i32 as *mut libc::c_void;
let ret = unsafe {
libc::sysctlbyname(name.as_ptr(), enabled_ptr, &mut enabled_len, core::ptr::null_mut(), 0)
};
// sysctlbyname returns 0 on success; treat any failure as "feature absent".
match ret {
0 => enabled != 0,
_ => false,
}
}
/// Try to read the features using sysctlbyname.
///
/// Each `hw.optional.*` key is queried independently; feature bits that
/// depend on several keys (e.g. `aes`/`pmull`, `sha2`, `mte`) are only set
/// when all of their constituent keys report true.
pub(crate) fn detect_features() -> cache::Initializer {
    let mut value = cache::Initializer::default();
    // Sets the cache bit for feature `f` when `enable` is true.
    let mut enable_feature = |f, enable| {
        if enable {
            value.set(f as u32);
        }
    };
    // Armv8.0 features not using the standard identifiers
    let fp = _sysctlbyname(c"hw.optional.floatingpoint");
    let asimd = _sysctlbyname(c"hw.optional.AdvSIMD");
    let crc_old = _sysctlbyname(c"hw.optional.armv8_crc32");
    // Armv8 and Armv9 features using the standard identifiers
    let aes = _sysctlbyname(c"hw.optional.arm.FEAT_AES");
    let bf16 = _sysctlbyname(c"hw.optional.arm.FEAT_BF16");
    let bti = _sysctlbyname(c"hw.optional.arm.FEAT_BTI");
    let crc = _sysctlbyname(c"hw.optional.arm.FEAT_CRC32");
    let cssc = _sysctlbyname(c"hw.optional.arm.FEAT_CSSC");
    let dit = _sysctlbyname(c"hw.optional.arm.FEAT_DIT");
    let dotprod = _sysctlbyname(c"hw.optional.arm.FEAT_DotProd");
    let dpb = _sysctlbyname(c"hw.optional.arm.FEAT_DPB");
    let dpb2 = _sysctlbyname(c"hw.optional.arm.FEAT_DPB2");
    let ecv = _sysctlbyname(c"hw.optional.arm.FEAT_ECV");
    let fcma = _sysctlbyname(c"hw.optional.arm.FEAT_FCMA");
    let fhm = _sysctlbyname(c"hw.optional.arm.FEAT_FHM");
    let flagm = _sysctlbyname(c"hw.optional.arm.FEAT_FlagM");
    let flagm2 = _sysctlbyname(c"hw.optional.arm.FEAT_FlagM2");
    let fp16 = _sysctlbyname(c"hw.optional.arm.FEAT_FP16");
    let frintts = _sysctlbyname(c"hw.optional.arm.FEAT_FRINTTS");
    let hbc = _sysctlbyname(c"hw.optional.arm.FEAT_HBC");
    let i8mm = _sysctlbyname(c"hw.optional.arm.FEAT_I8MM");
    let jsconv = _sysctlbyname(c"hw.optional.arm.FEAT_JSCVT");
    let rcpc = _sysctlbyname(c"hw.optional.arm.FEAT_LRCPC");
    let rcpc2 = _sysctlbyname(c"hw.optional.arm.FEAT_LRCPC2");
    let lse = _sysctlbyname(c"hw.optional.arm.FEAT_LSE");
    let lse2 = _sysctlbyname(c"hw.optional.arm.FEAT_LSE2");
    let mte = _sysctlbyname(c"hw.optional.arm.FEAT_MTE");
    let mte2 = _sysctlbyname(c"hw.optional.arm.FEAT_MTE2");
    let pauth = _sysctlbyname(c"hw.optional.arm.FEAT_PAuth");
    let pmull = _sysctlbyname(c"hw.optional.arm.FEAT_PMULL");
    let rdm = _sysctlbyname(c"hw.optional.arm.FEAT_RDM");
    let sb = _sysctlbyname(c"hw.optional.arm.FEAT_SB");
    let sha1 = _sysctlbyname(c"hw.optional.arm.FEAT_SHA1");
    let sha256 = _sysctlbyname(c"hw.optional.arm.FEAT_SHA256");
    let sha3 = _sysctlbyname(c"hw.optional.arm.FEAT_SHA3");
    let sha512 = _sysctlbyname(c"hw.optional.arm.FEAT_SHA512");
    let sme = _sysctlbyname(c"hw.optional.arm.FEAT_SME");
    let sme2 = _sysctlbyname(c"hw.optional.arm.FEAT_SME2");
    let sme2p1 = _sysctlbyname(c"hw.optional.arm.FEAT_SME2p1");
    let sme_b16b16 = _sysctlbyname(c"hw.optional.arm.FEAT_SME_B16B16");
    let sme_f16f16 = _sysctlbyname(c"hw.optional.arm.FEAT_SME_F16F16");
    let sme_f64f64 = _sysctlbyname(c"hw.optional.arm.FEAT_SME_F64F64");
    let sme_i16i64 = _sysctlbyname(c"hw.optional.arm.FEAT_SME_I16I64");
    let ssbs = _sysctlbyname(c"hw.optional.arm.FEAT_SSBS");
    let wfxt = _sysctlbyname(c"hw.optional.arm.FEAT_WFxT");
    // The following features are not exposed by `is_aarch64_feature_detected`,
    // but *are* reported by `sysctl`. They are here as documentation that they
    // exist, and may potentially be exposed later.
    /*
    let afp = _sysctlbyname(c"hw.optional.arm.FEAT_AFP");
    let csv2 = _sysctlbyname(c"hw.optional.arm.FEAT_CSV2");
    let csv3 = _sysctlbyname(c"hw.optional.arm.FEAT_CSV3");
    let ebf16 = _sysctlbyname(c"hw.optional.arm.FEAT_EBF16");
    let fpac = _sysctlbyname(c"hw.optional.arm.FEAT_FPAC");
    let fpaccombine = _sysctlbyname(c"hw.optional.arm.FEAT_FPACCOMBINE");
    let mte_async = _sysctlbyname(c"hw.optional.arm.FEAT_MTE_ASYNC");
    let mte_canonical_tags = _sysctlbyname(c"hw.optional.arm.FEAT_MTE_CANONICAL_TAGS");
    let mte_no_address_tags = _sysctlbyname(c"hw.optional.arm.FEAT_MTE_NO_ADDRESS_TAGS");
    let mte_store_only = _sysctlbyname(c"hw.optional.arm.FEAT_MTE_STORE_ONLY");
    let mte3 = _sysctlbyname(c"hw.optional.arm.FEAT_MTE3");
    let mte4 = _sysctlbyname(c"hw.optional.arm.FEAT_MTE4");
    let pacimp = _sysctlbyname(c"hw.optional.arm.FEAT_PACIMP");
    let pauth2 = _sysctlbyname(c"hw.optional.arm.FEAT_PAuth2");
    let rpres = _sysctlbyname(c"hw.optional.arm.FEAT_RPRES");
    let specres = _sysctlbyname(c"hw.optional.arm.FEAT_SPECRES");
    let specres2 = _sysctlbyname(c"hw.optional.arm.FEAT_SPECRES2");
    */
    // The following "features" are reported by `sysctl` but are mandatory parts
    // of SME or SME2, and so are not exposed separately by
    // `is_aarch64_feature_detected`. They are here to document their
    // existence, in case they're needed in the future.
    /*
    let sme_b16f32 = _sysctlbyname(c"hw.optional.arm.SME_B16F32");
    let sme_bi32i32 = _sysctlbyname(c"hw.optional.arm.SME_BI32I32");
    let sme_f16f32 = _sysctlbyname(c"hw.optional.arm.SME_F16F32");
    let sme_f32f32 = _sysctlbyname(c"hw.optional.arm.SME_F32F32");
    let sme_i16i32 = _sysctlbyname(c"hw.optional.arm.SME_I16I32");
    let sme_i8i32 = _sysctlbyname(c"hw.optional.arm.SME_I8I32");
    */
    // Both `aes` and `pmull` are reported only when the AES and PMULL keys
    // are both set (they share one feature set in the detection API).
    enable_feature(Feature::aes, aes && pmull);
    enable_feature(Feature::asimd, asimd);
    enable_feature(Feature::bf16, bf16);
    enable_feature(Feature::bti, bti);
    // Accept either the legacy `armv8_crc32` key or the FEAT_CRC32 key.
    enable_feature(Feature::crc, crc_old || crc);
    enable_feature(Feature::cssc, cssc);
    enable_feature(Feature::dit, dit);
    enable_feature(Feature::dotprod, dotprod);
    enable_feature(Feature::dpb, dpb);
    enable_feature(Feature::dpb2, dpb2);
    enable_feature(Feature::ecv, ecv);
    enable_feature(Feature::fcma, fcma);
    enable_feature(Feature::fhm, fhm);
    enable_feature(Feature::flagm, flagm);
    enable_feature(Feature::flagm2, flagm2);
    enable_feature(Feature::fp, fp);
    enable_feature(Feature::fp16, fp16);
    enable_feature(Feature::frintts, frintts);
    enable_feature(Feature::hbc, hbc);
    enable_feature(Feature::i8mm, i8mm);
    enable_feature(Feature::jsconv, jsconv);
    enable_feature(Feature::lse, lse);
    enable_feature(Feature::lse2, lse2);
    // `mte` requires both FEAT_MTE and FEAT_MTE2 keys.
    enable_feature(Feature::mte, mte && mte2);
    // FEAT_PAuth covers both address (paca) and generic (pacg) auth.
    enable_feature(Feature::paca, pauth);
    enable_feature(Feature::pacg, pauth);
    enable_feature(Feature::pmull, aes && pmull);
    enable_feature(Feature::rcpc, rcpc);
    enable_feature(Feature::rcpc2, rcpc2);
    enable_feature(Feature::rdm, rdm);
    enable_feature(Feature::sb, sb);
    // SHA2/SHA3 additionally require ASIMD support.
    enable_feature(Feature::sha2, sha1 && sha256 && asimd);
    enable_feature(Feature::sha3, sha512 && sha3 && asimd);
    enable_feature(Feature::sme, sme);
    enable_feature(Feature::sme2, sme2);
    enable_feature(Feature::sme2p1, sme2p1);
    enable_feature(Feature::sme_b16b16, sme_b16b16);
    enable_feature(Feature::sme_f16f16, sme_f16f16);
    enable_feature(Feature::sme_f64f64, sme_f64f64);
    enable_feature(Feature::sme_i16i64, sme_i16i64);
    enable_feature(Feature::ssbs, ssbs);
    enable_feature(Feature::wfxt, wfxt);
    value
}

View File

@@ -1,3 +0,0 @@
//! Run-time feature detection for Aarch64 on FreeBSD.
// FreeBSD reuses the shared aarch64 detection logic from the parent
// module instead of providing an OS-specific implementation.
pub(crate) use super::super::aarch64::detect_features;

View File

@@ -1,36 +0,0 @@
//! Run-time feature detection for ARM on FreeBSD
use super::auxvec;
use crate::detect::{Feature, cache};
// Capability masks defined in machine/elf.h.
// https://github.com/freebsd/freebsd-src/blob/deb63adf945d446ed91a9d84124c71f15ae571d1/sys/arm/include/elf.h
const HWCAP_NEON: usize = 0x00001000;
const HWCAP2_AES: usize = 0x00000001;
const HWCAP2_PMULL: usize = 0x00000002;
const HWCAP2_SHA1: usize = 0x00000004;
const HWCAP2_SHA2: usize = 0x00000008;
const HWCAP2_CRC32: usize = 0x00000010;
/// Try to read the features from the auxiliary vector
pub(crate) fn detect_features() -> cache::Initializer {
    let mut value = cache::Initializer::default();
    let Ok(auxv) = auxvec::auxv() else {
        // Without an auxiliary vector nothing can be confirmed, so report
        // every feature as disabled.
        return value;
    };
    // Flip the cache bit for `f` when the corresponding mask is present.
    let mut enable_feature = |f, enable: bool| {
        if enable {
            value.set(f as u32);
        }
    };
    enable_feature(Feature::neon, auxv.hwcap & HWCAP_NEON != 0);
    enable_feature(Feature::pmull, auxv.hwcap2 & HWCAP2_PMULL != 0);
    enable_feature(Feature::crc, auxv.hwcap2 & HWCAP2_CRC32 != 0);
    enable_feature(Feature::aes, auxv.hwcap2 & HWCAP2_AES != 0);
    // SHA2 requires SHA1 & SHA2 features
    let has_sha1 = auxv.hwcap2 & HWCAP2_SHA1 != 0;
    let has_sha2 = auxv.hwcap2 & HWCAP2_SHA2 != 0;
    enable_feature(Feature::sha2, has_sha1 && has_sha2);
    value
}

View File

@@ -1,63 +0,0 @@
//! Parses ELF auxiliary vectors.
#![cfg_attr(
    any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "powerpc64",
        target_arch = "riscv64"
    ),
    allow(dead_code)
)]
/// Cache HWCAP bitfields of the ELF Auxiliary Vector.
///
/// If an entry cannot be read all the bits in the bitfield are set to zero.
/// This should be interpreted as all the features being disabled.
#[derive(Debug, Copy, Clone)]
pub(crate) struct AuxVec {
    pub hwcap: usize,
    pub hwcap2: usize,
}
/// ELF Auxiliary Vector
///
/// The auxiliary vector is a memory region in a running ELF program's stack
/// composed of (key: usize, value: usize) pairs.
///
/// The keys used in the aux vector are platform dependent. For FreeBSD, they are
/// defined in [sys/elf_common.h][elf_common.h]. The hardware capabilities of a given
/// CPU can be queried with the `AT_HWCAP` and `AT_HWCAP2` keys.
///
/// Note that run-time feature detection is not invoked for features that can
/// be detected at compile-time.
///
/// [elf_common.h]: https://svnweb.freebsd.org/base/release/12.0.0/sys/sys/elf_common.h?revision=341707
pub(crate) fn auxv() -> Result<AuxVec, ()> {
    let hwcap = archauxv(libc::AT_HWCAP);
    let hwcap2 = archauxv(libc::AT_HWCAP2);
    // Zero could indicate that no features were detected, but it's also used to
    // indicate an error. In particular, on many platforms AT_HWCAP2 will be
    // legitimately zero, since it contains the most recent feature flags.
    if hwcap != 0 || hwcap2 != 0 {
        return Ok(AuxVec { hwcap, hwcap2 });
    }
    Err(())
}
/// Tries to read the `key` from the auxiliary vector.
fn archauxv(key: libc::c_int) -> usize {
    const OUT_LEN: libc::c_int = core::mem::size_of::<libc::c_ulong>() as libc::c_int;
    let mut out: libc::c_ulong = 0;
    // SAFETY: `out` is a live `c_ulong` and `OUT_LEN` matches its size, as
    // `elf_aux_info` requires.
    unsafe {
        // elf_aux_info is available on FreeBSD 12.0+ and 11.4+:
        // https://github.com/freebsd/freebsd-src/commit/0b08ae2120cdd08c20a2b806e2fcef4d0a36c470
        // https://github.com/freebsd/freebsd-src/blob/release/11.4.0/sys/sys/auxv.h
        // FreeBSD 11 support in std has been removed in Rust 1.75 (https://github.com/rust-lang/rust/pull/114521),
        // so we can safely use this function.
        let res =
            libc::elf_aux_info(key, &mut out as *mut libc::c_ulong as *mut libc::c_void, OUT_LEN);
        // If elf_aux_info fails, `out` will be left at zero (which is the proper default value).
        debug_assert!(res == 0 || out == 0);
    }
    out as usize
}

View File

@@ -1,25 +0,0 @@
//! Run-time feature detection on FreeBSD
mod auxvec;
// Dispatch to the architecture-specific detector at compile time; targets
// without a detector fall back to a stub that reports nothing.
cfg_select! {
    target_arch = "aarch64" => {
        mod aarch64;
        pub(crate) use self::aarch64::detect_features;
    }
    target_arch = "arm" => {
        mod arm;
        pub(crate) use self::arm::detect_features;
    }
    target_arch = "powerpc64" => {
        mod powerpc;
        pub(crate) use self::powerpc::detect_features;
    }
    _ => {
        use crate::detect::cache;
        /// Performs run-time feature detection.
        pub(crate) fn detect_features() -> cache::Initializer {
            cache::Initializer::default()
        }
    }
}

View File

@@ -1,21 +0,0 @@
//! Run-time feature detection for PowerPC on FreeBSD.
use super::auxvec;
use crate::detect::{Feature, cache};
// Capability masks for AT_HWCAP / AT_HWCAP2. These mirror the
// PPC_FEATURE_* values shared by FreeBSD and Linux — TODO confirm against
// FreeBSD's powerpc machine headers.
const PPC_FEATURE_HAS_ALTIVEC: usize = 0x10000000;
const PPC_FEATURE_HAS_VSX: usize = 0x00000080;
const PPC_FEATURE2_ARCH_2_07: usize = 0x80000000;
/// Try to read the features from the ELF auxiliary vector.
pub(crate) fn detect_features() -> cache::Initializer {
    let mut value = cache::Initializer::default();
    // Sets the cache bit for feature `f` when `enable` is true.
    let enable_feature = |value: &mut cache::Initializer, f, enable| {
        if enable {
            value.set(f as u32);
        }
    };
    if let Ok(auxv) = auxvec::auxv() {
        enable_feature(&mut value, Feature::altivec, auxv.hwcap & PPC_FEATURE_HAS_ALTIVEC != 0);
        enable_feature(&mut value, Feature::vsx, auxv.hwcap & PPC_FEATURE_HAS_VSX != 0);
        // `power8` is keyed off the ISA 2.07 capability bit in HWCAP2.
        enable_feature(&mut value, Feature::power8, auxv.hwcap2 & PPC_FEATURE2_ARCH_2_07 != 0);
        return value;
    }
    // No auxiliary vector: report all features disabled.
    value
}

View File

@@ -1,409 +0,0 @@
//! Run-time feature detection for Aarch64 on Linux.
use super::auxvec;
use crate::detect::{Feature, bit, cache};
/// Try to read the features from the auxiliary vector.
pub(crate) fn detect_features() -> cache::Initializer {
    // On Android, first check whether we are running on the buggy
    // Exynos 9810 SoC; see the comment below and `AtHwcap::cache`.
    #[cfg(target_os = "android")]
    let is_exynos9810 = {
        // Samsung Exynos 9810 has a bug that big and little cores have different
        // ISAs. And on older Android (pre-9), the kernel incorrectly reports
        // that features available only on some cores are available on all cores.
        // https://reviews.llvm.org/D114523
        let mut arch = [0_u8; libc::PROP_VALUE_MAX as usize];
        // SAFETY: `arch` is a writable buffer of PROP_VALUE_MAX bytes, the
        // size `__system_property_get` expects for its output parameter.
        let len = unsafe {
            libc::__system_property_get(c"ro.arch".as_ptr(), arch.as_mut_ptr() as *mut libc::c_char)
        };
        // On Exynos, ro.arch is not available on Android 12+, but it is fine
        // because Android 9+ includes the fix.
        len > 0 && arch.starts_with(b"exynos9810")
    };
    #[cfg(not(target_os = "android"))]
    let is_exynos9810 = false;
    if let Ok(auxv) = auxvec::auxv() {
        let hwcap: AtHwcap = auxv.into();
        return hwcap.cache(is_exynos9810);
    }
    // No auxiliary vector: report all features disabled.
    cache::Initializer::default()
}
/// These values are part of the platform-specific [asm/hwcap.h][hwcap] .
///
/// The names match those used for cpuinfo.
///
/// Commented-out fields are capability bits the kernel reports but that are
/// not (yet) exposed through `is_aarch64_feature_detected`.
///
/// [hwcap]: https://github.com/torvalds/linux/blob/master/arch/arm64/include/uapi/asm/hwcap.h
#[derive(Debug, Default, PartialEq)]
struct AtHwcap {
    // AT_HWCAP
    fp: bool,
    asimd: bool,
    // evtstrm: No LLVM support.
    aes: bool,
    pmull: bool,
    sha1: bool,
    sha2: bool,
    crc32: bool,
    atomics: bool,
    fphp: bool,
    asimdhp: bool,
    // cpuid: No LLVM support.
    asimdrdm: bool,
    jscvt: bool,
    fcma: bool,
    lrcpc: bool,
    dcpop: bool,
    sha3: bool,
    sm3: bool,
    sm4: bool,
    asimddp: bool,
    sha512: bool,
    sve: bool,
    fhm: bool,
    dit: bool,
    uscat: bool,
    ilrcpc: bool,
    flagm: bool,
    ssbs: bool,
    sb: bool,
    paca: bool,
    pacg: bool,
    // AT_HWCAP2
    dcpodp: bool,
    sve2: bool,
    sveaes: bool,
    svepmull: bool,
    svebitperm: bool,
    svesha3: bool,
    svesm4: bool,
    flagm2: bool,
    frint: bool,
    // svei8mm: See i8mm feature.
    svef32mm: bool,
    svef64mm: bool,
    // svebf16: See bf16 feature.
    i8mm: bool,
    bf16: bool,
    // dgh: No LLVM support.
    rng: bool,
    bti: bool,
    mte: bool,
    ecv: bool,
    // afp: bool,
    // rpres: bool,
    // mte3: bool,
    sme: bool,
    smei16i64: bool,
    smef64f64: bool,
    // smei8i32: bool,
    // smef16f32: bool,
    // smeb16f32: bool,
    // smef32f32: bool,
    smefa64: bool,
    wfxt: bool,
    // ebf16: bool,
    // sveebf16: bool,
    cssc: bool,
    // rprfm: bool,
    sve2p1: bool,
    sme2: bool,
    sme2p1: bool,
    // smei16i32: bool,
    // smebi32i32: bool,
    smeb16b16: bool,
    smef16f16: bool,
    mops: bool,
    hbc: bool,
    sveb16b16: bool,
    lrcpc3: bool,
    lse128: bool,
    fpmr: bool,
    lut: bool,
    faminmax: bool,
    f8cvt: bool,
    f8fma: bool,
    f8dp4: bool,
    f8dp2: bool,
    f8e4m3: bool,
    f8e5m2: bool,
    smelutv2: bool,
    smef8f16: bool,
    smef8f32: bool,
    smesf8fma: bool,
    smesf8dp4: bool,
    smesf8dp2: bool,
    // pauthlr: bool,
}
impl From<auxvec::AuxVec> for AtHwcap {
    /// Reads AtHwcap from the auxiliary vector.
    ///
    /// Each field is filled from a fixed bit position of AT_HWCAP or
    /// AT_HWCAP2; bits >= 32 are only read on 64-bit targets (see below).
    fn from(auxv: auxvec::AuxVec) -> Self {
        let mut cap = AtHwcap {
            fp: bit::test(auxv.hwcap, 0),
            asimd: bit::test(auxv.hwcap, 1),
            // evtstrm: bit::test(auxv.hwcap, 2),
            aes: bit::test(auxv.hwcap, 3),
            pmull: bit::test(auxv.hwcap, 4),
            sha1: bit::test(auxv.hwcap, 5),
            sha2: bit::test(auxv.hwcap, 6),
            crc32: bit::test(auxv.hwcap, 7),
            atomics: bit::test(auxv.hwcap, 8),
            fphp: bit::test(auxv.hwcap, 9),
            asimdhp: bit::test(auxv.hwcap, 10),
            // cpuid: bit::test(auxv.hwcap, 11),
            asimdrdm: bit::test(auxv.hwcap, 12),
            jscvt: bit::test(auxv.hwcap, 13),
            fcma: bit::test(auxv.hwcap, 14),
            lrcpc: bit::test(auxv.hwcap, 15),
            dcpop: bit::test(auxv.hwcap, 16),
            sha3: bit::test(auxv.hwcap, 17),
            sm3: bit::test(auxv.hwcap, 18),
            sm4: bit::test(auxv.hwcap, 19),
            asimddp: bit::test(auxv.hwcap, 20),
            sha512: bit::test(auxv.hwcap, 21),
            sve: bit::test(auxv.hwcap, 22),
            fhm: bit::test(auxv.hwcap, 23),
            dit: bit::test(auxv.hwcap, 24),
            uscat: bit::test(auxv.hwcap, 25),
            ilrcpc: bit::test(auxv.hwcap, 26),
            flagm: bit::test(auxv.hwcap, 27),
            ssbs: bit::test(auxv.hwcap, 28),
            sb: bit::test(auxv.hwcap, 29),
            paca: bit::test(auxv.hwcap, 30),
            pacg: bit::test(auxv.hwcap, 31),
            // AT_HWCAP2
            dcpodp: bit::test(auxv.hwcap2, 0),
            sve2: bit::test(auxv.hwcap2, 1),
            sveaes: bit::test(auxv.hwcap2, 2),
            svepmull: bit::test(auxv.hwcap2, 3),
            svebitperm: bit::test(auxv.hwcap2, 4),
            svesha3: bit::test(auxv.hwcap2, 5),
            svesm4: bit::test(auxv.hwcap2, 6),
            flagm2: bit::test(auxv.hwcap2, 7),
            frint: bit::test(auxv.hwcap2, 8),
            // svei8mm: bit::test(auxv.hwcap2, 9),
            svef32mm: bit::test(auxv.hwcap2, 10),
            svef64mm: bit::test(auxv.hwcap2, 11),
            // svebf16: bit::test(auxv.hwcap2, 12),
            i8mm: bit::test(auxv.hwcap2, 13),
            bf16: bit::test(auxv.hwcap2, 14),
            // dgh: bit::test(auxv.hwcap2, 15),
            rng: bit::test(auxv.hwcap2, 16),
            bti: bit::test(auxv.hwcap2, 17),
            mte: bit::test(auxv.hwcap2, 18),
            ecv: bit::test(auxv.hwcap2, 19),
            // afp: bit::test(auxv.hwcap2, 20),
            // rpres: bit::test(auxv.hwcap2, 21),
            // mte3: bit::test(auxv.hwcap2, 22),
            sme: bit::test(auxv.hwcap2, 23),
            smei16i64: bit::test(auxv.hwcap2, 24),
            smef64f64: bit::test(auxv.hwcap2, 25),
            // smei8i32: bit::test(auxv.hwcap2, 26),
            // smef16f32: bit::test(auxv.hwcap2, 27),
            // smeb16f32: bit::test(auxv.hwcap2, 28),
            // smef32f32: bit::test(auxv.hwcap2, 29),
            smefa64: bit::test(auxv.hwcap2, 30),
            wfxt: bit::test(auxv.hwcap2, 31),
            ..Default::default()
        };
        // Hardware capabilities from bits 32 to 63 should only
        // be tested on LP64 targets with 64 bits `usize`.
        // On ILP32 targets like `aarch64-unknown-linux-gnu_ilp32`,
        // these hardware capabilities will default to `false`.
        // https://github.com/rust-lang/rust/issues/146230
        #[cfg(target_pointer_width = "64")]
        {
            // cap.ebf16: bit::test(auxv.hwcap2, 32);
            // cap.sveebf16: bit::test(auxv.hwcap2, 33);
            cap.cssc = bit::test(auxv.hwcap2, 34);
            // cap.rprfm: bit::test(auxv.hwcap2, 35);
            cap.sve2p1 = bit::test(auxv.hwcap2, 36);
            cap.sme2 = bit::test(auxv.hwcap2, 37);
            cap.sme2p1 = bit::test(auxv.hwcap2, 38);
            // cap.smei16i32 = bit::test(auxv.hwcap2, 39);
            // cap.smebi32i32 = bit::test(auxv.hwcap2, 40);
            cap.smeb16b16 = bit::test(auxv.hwcap2, 41);
            cap.smef16f16 = bit::test(auxv.hwcap2, 42);
            cap.mops = bit::test(auxv.hwcap2, 43);
            cap.hbc = bit::test(auxv.hwcap2, 44);
            cap.sveb16b16 = bit::test(auxv.hwcap2, 45);
            cap.lrcpc3 = bit::test(auxv.hwcap2, 46);
            cap.lse128 = bit::test(auxv.hwcap2, 47);
            cap.fpmr = bit::test(auxv.hwcap2, 48);
            cap.lut = bit::test(auxv.hwcap2, 49);
            cap.faminmax = bit::test(auxv.hwcap2, 50);
            cap.f8cvt = bit::test(auxv.hwcap2, 51);
            cap.f8fma = bit::test(auxv.hwcap2, 52);
            cap.f8dp4 = bit::test(auxv.hwcap2, 53);
            cap.f8dp2 = bit::test(auxv.hwcap2, 54);
            cap.f8e4m3 = bit::test(auxv.hwcap2, 55);
            cap.f8e5m2 = bit::test(auxv.hwcap2, 56);
            cap.smelutv2 = bit::test(auxv.hwcap2, 57);
            cap.smef8f16 = bit::test(auxv.hwcap2, 58);
            cap.smef8f32 = bit::test(auxv.hwcap2, 59);
            cap.smesf8fma = bit::test(auxv.hwcap2, 60);
            cap.smesf8dp4 = bit::test(auxv.hwcap2, 61);
            cap.smesf8dp2 = bit::test(auxv.hwcap2, 62);
            // cap.pauthlr = bit::test(auxv.hwcap2, ??);
        }
        cap
    }
}
impl AtHwcap {
    /// Initializes the cache from the feature bits.
    ///
    /// The feature dependencies here come directly from LLVM's feature definitions:
    /// https://github.com/llvm/llvm-project/blob/main/llvm/lib/Target/AArch64/AArch64.td
    fn cache(self, is_exynos9810: bool) -> cache::Initializer {
        let mut value = cache::Initializer::default();
        {
            // Sets the cache bit for feature `f` when `enable` is true.
            let mut enable_feature = |f, enable| {
                if enable {
                    value.set(f as u32);
                }
            };
            // Samsung Exynos 9810 has a bug that big and little cores have different
            // ISAs. And on older Android (pre-9), the kernel incorrectly reports
            // that features available only on some cores are available on all cores.
            // So, only check features that are known to be available on exynos-m3:
            // $ rustc --print cfg --target aarch64-linux-android -C target-cpu=exynos-m3 | grep target_feature
            // See also https://github.com/rust-lang/stdarch/pull/1378#discussion_r1103748342.
            if is_exynos9810 {
                enable_feature(Feature::fp, self.fp);
                enable_feature(Feature::crc, self.crc32);
                // ASIMD support requires float support - if half-floats are
                // supported, it also requires half-float support:
                let asimd = self.fp && self.asimd && (!self.fphp | self.asimdhp);
                enable_feature(Feature::asimd, asimd);
                // Cryptographic extensions require ASIMD
                // AES also covers FEAT_PMULL
                enable_feature(Feature::aes, self.aes && self.pmull && asimd);
                enable_feature(Feature::sha2, self.sha1 && self.sha2 && asimd);
                // Early return: no other feature is trusted on this SoC.
                return value;
            }
            enable_feature(Feature::fp, self.fp);
            // Half-float support requires float support
            enable_feature(Feature::fp16, self.fp && self.fphp);
            // FHM (fp16fml in LLVM) requires half float support
            enable_feature(Feature::fhm, self.fphp && self.fhm);
            enable_feature(Feature::pmull, self.pmull);
            enable_feature(Feature::crc, self.crc32);
            enable_feature(Feature::lse, self.atomics);
            enable_feature(Feature::lse2, self.uscat);
            enable_feature(Feature::lse128, self.lse128 && self.atomics);
            enable_feature(Feature::rcpc, self.lrcpc);
            // RCPC2 (rcpc-immo in LLVM) requires RCPC support
            let rcpc2 = self.ilrcpc && self.lrcpc;
            enable_feature(Feature::rcpc2, rcpc2);
            enable_feature(Feature::rcpc3, self.lrcpc3 && rcpc2);
            enable_feature(Feature::dit, self.dit);
            enable_feature(Feature::flagm, self.flagm);
            enable_feature(Feature::flagm2, self.flagm2);
            enable_feature(Feature::ssbs, self.ssbs);
            enable_feature(Feature::sb, self.sb);
            enable_feature(Feature::paca, self.paca);
            enable_feature(Feature::pacg, self.pacg);
            // enable_feature(Feature::pauth_lr, self.pauthlr);
            enable_feature(Feature::dpb, self.dcpop);
            enable_feature(Feature::dpb2, self.dcpodp);
            enable_feature(Feature::rand, self.rng);
            enable_feature(Feature::bti, self.bti);
            enable_feature(Feature::mte, self.mte);
            // jsconv requires float support
            enable_feature(Feature::jsconv, self.jscvt && self.fp);
            enable_feature(Feature::rdm, self.asimdrdm);
            enable_feature(Feature::dotprod, self.asimddp);
            enable_feature(Feature::frintts, self.frint);
            // FEAT_I8MM & FEAT_BF16 also include optional SVE components which linux exposes
            // separately. We ignore that distinction here.
            enable_feature(Feature::i8mm, self.i8mm);
            enable_feature(Feature::bf16, self.bf16);
            // ASIMD support requires float support - if half-floats are
            // supported, it also requires half-float support:
            let asimd = self.fp && self.asimd && (!self.fphp | self.asimdhp);
            enable_feature(Feature::asimd, asimd);
            // ASIMD extensions require ASIMD support:
            enable_feature(Feature::fcma, self.fcma && asimd);
            enable_feature(Feature::sve, self.sve && asimd);
            // SVE extensions require SVE & ASIMD
            enable_feature(Feature::f32mm, self.svef32mm && self.sve && asimd);
            enable_feature(Feature::f64mm, self.svef64mm && self.sve && asimd);
            // Cryptographic extensions require ASIMD
            enable_feature(Feature::aes, self.aes && asimd);
            enable_feature(Feature::sha2, self.sha1 && self.sha2 && asimd);
            // SHA512/SHA3 require SHA1 & SHA256
            enable_feature(
                Feature::sha3,
                self.sha512 && self.sha3 && self.sha1 && self.sha2 && asimd,
            );
            enable_feature(Feature::sm4, self.sm3 && self.sm4 && asimd);
            // SVE2 requires SVE
            let sve2 = self.sve2 && self.sve && asimd;
            enable_feature(Feature::sve2, sve2);
            enable_feature(Feature::sve2p1, self.sve2p1 && sve2);
            // SVE2 extensions require SVE2 and crypto features
            enable_feature(Feature::sve2_aes, self.sveaes && self.svepmull && sve2 && self.aes);
            enable_feature(Feature::sve2_sm4, self.svesm4 && sve2 && self.sm3 && self.sm4);
            enable_feature(
                Feature::sve2_sha3,
                self.svesha3 && sve2 && self.sha512 && self.sha3 && self.sha1 && self.sha2,
            );
            // NOTE(review): this gate uses the raw `self.sve2` hwcap rather
            // than the derived `sve2` value used by its neighbours — confirm
            // this is intentional.
            enable_feature(Feature::sve2_bitperm, self.svebitperm && self.sve2);
            enable_feature(Feature::sve_b16b16, self.bf16 && self.sveb16b16);
            enable_feature(Feature::hbc, self.hbc);
            enable_feature(Feature::mops, self.mops);
            enable_feature(Feature::ecv, self.ecv);
            enable_feature(Feature::lut, self.lut);
            enable_feature(Feature::cssc, self.cssc);
            enable_feature(Feature::fpmr, self.fpmr);
            enable_feature(Feature::faminmax, self.faminmax);
            let fp8 = self.f8cvt && self.faminmax && self.lut && self.bf16;
            enable_feature(Feature::fp8, fp8);
            let fp8fma = self.f8fma && fp8;
            enable_feature(Feature::fp8fma, fp8fma);
            let fp8dot4 = self.f8dp4 && fp8fma;
            enable_feature(Feature::fp8dot4, fp8dot4);
            enable_feature(Feature::fp8dot2, self.f8dp2 && fp8dot4);
            enable_feature(Feature::wfxt, self.wfxt);
            let sme = self.sme && self.bf16;
            enable_feature(Feature::sme, sme);
            enable_feature(Feature::sme_i16i64, self.smei16i64 && sme);
            enable_feature(Feature::sme_f64f64, self.smef64f64 && sme);
            enable_feature(Feature::sme_fa64, self.smefa64 && sme && sve2);
            let sme2 = self.sme2 && sme;
            enable_feature(Feature::sme2, sme2);
            enable_feature(Feature::sme2p1, self.sme2p1 && sme2);
            enable_feature(
                Feature::sme_b16b16,
                sme2 && self.bf16 && self.sveb16b16 && self.smeb16b16,
            );
            enable_feature(Feature::sme_f16f16, self.smef16f16 && sme2);
            // NOTE(review): unlike the other SME extensions, lutv2 is not
            // gated on `sme2` here — confirm this matches LLVM's definition.
            enable_feature(Feature::sme_lutv2, self.smelutv2);
            let sme_f8f32 = self.smef8f32 && sme2 && fp8;
            enable_feature(Feature::sme_f8f32, sme_f8f32);
            enable_feature(Feature::sme_f8f16, self.smef8f16 && sme_f8f32);
            let ssve_fp8fma = self.smesf8fma && sme2 && fp8;
            enable_feature(Feature::ssve_fp8fma, ssve_fp8fma);
            let ssve_fp8dot4 = self.smesf8dp4 && ssve_fp8fma;
            enable_feature(Feature::ssve_fp8dot4, ssve_fp8dot4);
            enable_feature(Feature::ssve_fp8dot2, self.smesf8dp2 && ssve_fp8dot4);
        }
        value
    }
}
#[cfg(target_endian = "little")]
#[cfg(test)]
mod tests;

View File

@@ -1,70 +0,0 @@
use super::auxvec::auxv_from_file;
use super::*;
// The baseline hwcaps used in the (artificial) auxv test files.
// Every fixture in src/detect/test_data is expected to contain at least
// these capability bits; individual tests extend this set.
fn baseline_hwcaps() -> AtHwcap {
    AtHwcap {
        fp: true,
        asimd: true,
        aes: true,
        pmull: true,
        sha1: true,
        sha2: true,
        crc32: true,
        atomics: true,
        fphp: true,
        asimdhp: true,
        asimdrdm: true,
        lrcpc: true,
        dcpop: true,
        asimddp: true,
        ssbs: true,
        ..AtHwcap::default()
    }
}
#[test]
fn linux_empty_hwcap2_aarch64() {
let file = concat!(
env!("CARGO_MANIFEST_DIR"),
"/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv"
);
println!("file: {file}");
let v = auxv_from_file(file).unwrap();
println!("HWCAP : 0x{:0x}", v.hwcap);
println!("HWCAP2: 0x{:0x}", v.hwcap2);
assert_eq!(AtHwcap::from(v), baseline_hwcaps());
}
#[test]
fn linux_no_hwcap2_aarch64() {
let file =
concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv");
println!("file: {file}");
let v = auxv_from_file(file).unwrap();
println!("HWCAP : 0x{:0x}", v.hwcap);
println!("HWCAP2: 0x{:0x}", v.hwcap2);
assert_eq!(AtHwcap::from(v), baseline_hwcaps());
}
#[test]
fn linux_hwcap2_aarch64() {
let file =
concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-hwcap2-aarch64.auxv");
println!("file: {file}");
let v = auxv_from_file(file).unwrap();
println!("HWCAP : 0x{:0x}", v.hwcap);
println!("HWCAP2: 0x{:0x}", v.hwcap2);
assert_eq!(
AtHwcap::from(v),
AtHwcap {
// Some other HWCAP bits.
paca: true,
pacg: true,
// HWCAP2-only bits.
dcpodp: true,
frint: true,
rng: true,
bti: true,
mte: true,
..baseline_hwcaps()
}
);
}

View File

@@ -1,34 +0,0 @@
//! Run-time feature detection for ARM on Linux.
use super::auxvec;
use crate::detect::{Feature, bit, cache};
// Bit positions in the auxiliary vector, part of the platform-specific
// [asm/hwcap.h][hwcap].
//
// [hwcap]: https://github.com/torvalds/linux/blob/master/arch/arm/include/uapi/asm/hwcap.h
const HWCAP_NEON_BIT: u32 = 12;
const HWCAP_ASIMDDP_BIT: u32 = 24;
const HWCAP_I8MM_BIT: u32 = 27;
const HWCAP2_AES_BIT: u32 = 0;
const HWCAP2_PMULL_BIT: u32 = 1;
const HWCAP2_SHA1_BIT: u32 = 2;
const HWCAP2_SHA2_BIT: u32 = 3;
const HWCAP2_CRC32_BIT: u32 = 4;
/// Try to read the features from the auxiliary vector.
pub(crate) fn detect_features() -> cache::Initializer {
    let mut value = cache::Initializer::default();
    // Sets the cache bit for feature `f` when `enable` is true.
    let enable_feature = |value: &mut cache::Initializer, f, enable| {
        if enable {
            value.set(f as u32);
        }
    };
    if let Ok(auxv) = auxvec::auxv() {
        enable_feature(&mut value, Feature::i8mm, bit::test(auxv.hwcap, HWCAP_I8MM_BIT));
        enable_feature(&mut value, Feature::dotprod, bit::test(auxv.hwcap, HWCAP_ASIMDDP_BIT));
        enable_feature(&mut value, Feature::neon, bit::test(auxv.hwcap, HWCAP_NEON_BIT));
        enable_feature(&mut value, Feature::pmull, bit::test(auxv.hwcap2, HWCAP2_PMULL_BIT));
        enable_feature(&mut value, Feature::crc, bit::test(auxv.hwcap2, HWCAP2_CRC32_BIT));
        enable_feature(&mut value, Feature::aes, bit::test(auxv.hwcap2, HWCAP2_AES_BIT));
        // SHA2 requires SHA1 & SHA2 features
        enable_feature(
            &mut value,
            Feature::sha2,
            bit::test(auxv.hwcap2, HWCAP2_SHA1_BIT) && bit::test(auxv.hwcap2, HWCAP2_SHA2_BIT),
        );
        return value;
    }
    // No auxiliary vector: report all features disabled.
    value
}

View File

@@ -1,221 +0,0 @@
//! Parses ELF auxiliary vectors.
#![allow(dead_code)]
/// Terminator key of the auxiliary vector.
pub(crate) const AT_NULL: usize = 0;
/// Key to access the CPU Hardware capabilities bitfield.
pub(crate) const AT_HWCAP: usize = 16;
/// Key to access the CPU Hardware capabilities 2 bitfield.
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "powerpc",
    target_arch = "powerpc64",
    target_arch = "s390x",
))]
pub(crate) const AT_HWCAP2: usize = 26;
/// Cache HWCAP bitfields of the ELF Auxiliary Vector.
///
/// If an entry cannot be read all the bits in the bitfield are set to zero.
/// This should be interpreted as all the features being disabled.
///
/// The `hwcap2` field only exists on architectures whose kernels define
/// AT_HWCAP2.
#[derive(Debug, Copy, Clone)]
#[cfg_attr(test, derive(PartialEq))]
pub(crate) struct AuxVec {
    pub hwcap: usize,
    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "powerpc",
        target_arch = "powerpc64",
        target_arch = "s390x",
    ))]
    pub hwcap2: usize,
}
/// ELF Auxiliary Vector
///
/// The auxiliary vector is a memory region in a running ELF program's stack
/// composed of (key: usize, value: usize) pairs.
///
/// The keys used in the aux vector are platform dependent. For Linux, they are
/// defined in [linux/auxvec.h][auxvec_h]. The hardware capabilities of a given
/// CPU can be queried with the `AT_HWCAP` and `AT_HWCAP2` keys.
///
/// There is no perfect way of reading the auxiliary vector.
///
/// - If [`getauxval`] is linked to the binary we use it, and otherwise it will
///   try to read `/proc/self/auxv`.
/// - If that fails, this function returns an error.
///
/// Note that run-time feature detection is not invoked for features that can
/// be detected at compile-time.
///
/// Note: We always directly use `getauxval` on `*-linux-{gnu,musl,ohos}*` and
/// `*-android*` targets rather than `dlsym` it because we can safely assume
/// `getauxval` is linked to the binary.
/// - `*-linux-gnu*` targets ([since Rust 1.64](https://blog.rust-lang.org/2022/08/01/Increasing-glibc-kernel-requirements.html))
///   have glibc requirements higher than [glibc 2.16 that added `getauxval`](https://sourceware.org/legacy-ml/libc-announce/2012/msg00000.html).
/// - `*-linux-musl*` targets ([at least since Rust 1.15](https://github.com/rust-lang/rust/blob/1.15.0/src/ci/docker/x86_64-musl/build-musl.sh#L15))
///   use musl newer than [musl 1.1.0 that added `getauxval`](https://git.musl-libc.org/cgit/musl/tree/WHATSNEW?h=v1.1.0#n1197)
/// - `*-linux-ohos*` targets use a [fork of musl 1.2](https://gitee.com/openharmony/docs/blob/master/en/application-dev/reference/native-lib/musl.md)
/// - `*-android*` targets ([since Rust 1.68](https://blog.rust-lang.org/2023/01/09/android-ndk-update-r25.html))
///   have the minimum supported API level higher than [Android 4.3 (API level 18) that added `getauxval`](https://github.com/aosp-mirror/platform_bionic/blob/d3ebc2f7c49a9893b114124d4a6b315f3a328764/libc/include/sys/auxv.h#L49).
///
/// For more information about when `getauxval` is available check the great
/// [`auxv` crate documentation][auxv_docs].
///
/// [auxvec_h]: https://github.com/torvalds/linux/blob/master/include/uapi/linux/auxvec.h
/// [auxv_docs]: https://docs.rs/auxv/0.3.3/auxv/
/// [`getauxval`]: https://man7.org/linux/man-pages/man3/getauxval.3.html
pub(crate) fn auxv() -> Result<AuxVec, ()> {
    // Try to call a getauxval function.
    if let Ok(hwcap) = getauxval(AT_HWCAP) {
        // Targets with only AT_HWCAP:
        #[cfg(any(
            target_arch = "riscv32",
            target_arch = "riscv64",
            target_arch = "mips",
            target_arch = "mips64",
            target_arch = "loongarch32",
            target_arch = "loongarch64",
        ))]
        {
            // Zero could indicate that no features were detected, but it's also used to indicate
            // an error. In either case, try the fallback.
            if hwcap != 0 {
                return Ok(AuxVec { hwcap });
            }
        }
        // Targets with AT_HWCAP and AT_HWCAP2:
        #[cfg(any(
            target_arch = "aarch64",
            target_arch = "arm",
            target_arch = "powerpc",
            target_arch = "powerpc64",
            target_arch = "s390x",
        ))]
        {
            if let Ok(hwcap2) = getauxval(AT_HWCAP2) {
                // Zero could indicate that no features were detected, but it's also used to indicate
                // an error. In particular, on many platforms AT_HWCAP2 will be legitimately zero,
                // since it contains the most recent feature flags. Use the fallback only if no
                // features were detected at all.
                if hwcap != 0 || hwcap2 != 0 {
                    return Ok(AuxVec { hwcap, hwcap2 });
                }
            }
        }
        // Intentionally unused on targets not matched by either cfg above.
        let _ = hwcap;
    }
    // If calling getauxval fails, try to read the auxiliary vector from
    // its file:
    auxv_from_file("/proc/self/auxv").map_err(|_| ())
}
/// Tries to read the `key` from the auxiliary vector by calling the
/// `getauxval` function. If the function is not linked, this function returns `Err`.
fn getauxval(key: usize) -> Result<usize, ()> {
    type F = unsafe extern "C" fn(libc::c_ulong) -> libc::c_ulong;
    cfg_select! {
        // On these targets libc is guaranteed to provide `getauxval`, so
        // call it directly (see the doc comment on `auxv` above).
        any(
            all(
                target_os = "linux",
                any(target_env = "gnu", target_env = "musl", target_env = "ohos"),
            ),
            target_os = "android",
        ) => {
            let ffi_getauxval: F = libc::getauxval;
        }
        // Elsewhere, look the symbol up at run time; a null result means the
        // libc in use does not provide it.
        _ => {
            // SAFETY: a non-null `dlsym` result for "getauxval" is assumed to
            // have the `F` signature — TODO confirm for exotic libcs.
            let ffi_getauxval: F = unsafe {
                let ptr = libc::dlsym(libc::RTLD_DEFAULT, c"getauxval".as_ptr());
                if ptr.is_null() {
                    return Err(());
                }
                core::mem::transmute(ptr)
            };
        }
    }
    // SAFETY: `ffi_getauxval` is a valid `getauxval` entry point per above.
    Ok(unsafe { ffi_getauxval(key as libc::c_ulong) as usize })
}
/// Tries to read the auxiliary vector from the `file`. If this fails, this
/// function returns `Err`.
///
/// Used both as the `/proc/self/auxv` fallback and by the fixture tests.
pub(super) fn auxv_from_file(file: &str) -> Result<AuxVec, alloc::string::String> {
    let file = super::read_file(file)?;
    auxv_from_file_bytes(&file)
}
/// Read auxiliary vector from a slice of bytes.
///
/// Copies `bytes` into a zero-initialized `usize` buffer (rounded up to a
/// whole number of words) before interpreting it as (key, value) pairs.
pub(super) fn auxv_from_file_bytes(bytes: &[u8]) -> Result<AuxVec, alloc::string::String> {
    // See <https://github.com/torvalds/linux/blob/v5.15/include/uapi/linux/auxvec.h>.
    //
    // The auxiliary vector contains at most 34 (key,value) fields: from
    // `AT_MINSIGSTKSZ` to `AT_NULL`, but its number may increase.
    let len = bytes.len();
    let mut buf = alloc::vec![0_usize; 1 + len / core::mem::size_of::<usize>()];
    // SAFETY: `buf` holds at least `len` bytes (it has
    // `(1 + len / size_of::<usize>()) * size_of::<usize>()` of them), and the
    // source and destination do not overlap.
    unsafe {
        core::ptr::copy_nonoverlapping(bytes.as_ptr(), buf.as_mut_ptr() as *mut u8, len);
    }
    auxv_from_buf(&buf)
}
/// Tries to interpret the `buffer` as an auxiliary vector. If that fails, this
/// function returns `Err`.
fn auxv_from_buf(buf: &[usize]) -> Result<AuxVec, alloc::string::String> {
    // The buffer is a flat sequence of (key, value) word pairs terminated by
    // an `AT_NULL` key.
    // Targets with only AT_HWCAP:
    #[cfg(any(
        target_arch = "riscv32",
        target_arch = "riscv64",
        target_arch = "mips",
        target_arch = "mips64",
        target_arch = "loongarch32",
        target_arch = "loongarch64",
    ))]
    {
        for el in buf.chunks(2) {
            match el[0] {
                AT_NULL => break,
                AT_HWCAP => return Ok(AuxVec { hwcap: el[1] }),
                _ => (),
            }
        }
    }
    // Targets with AT_HWCAP and AT_HWCAP2:
    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "powerpc",
        target_arch = "powerpc64",
        target_arch = "s390x",
    ))]
    {
        let mut hwcap = None;
        // For some platforms, AT_HWCAP2 was added recently, so let it default to zero.
        let mut hwcap2 = 0;
        for el in buf.chunks(2) {
            match el[0] {
                AT_NULL => break,
                AT_HWCAP => hwcap = Some(el[1]),
                AT_HWCAP2 => hwcap2 = el[1],
                _ => (),
            }
        }
        // AT_HWCAP is mandatory; a missing AT_HWCAP2 is tolerated (stays 0).
        if let Some(hwcap) = hwcap {
            return Ok(AuxVec { hwcap, hwcap2 });
        }
    }
    // Suppress unused variable
    let _ = buf;
    Err(alloc::string::String::from("hwcap not found"))
}
#[cfg(test)]
mod tests;

View File

@@ -1,108 +0,0 @@
use super::*;
// FIXME: on mips/mips64 getauxval returns 0, and /proc/self/auxv
// does not always contain the AT_HWCAP key under qemu.
#[cfg(any(
    target_arch = "arm",
    target_arch = "powerpc",
    target_arch = "powerpc64",
    target_arch = "s390x",
))]
#[test]
fn auxv_crate() {
    // Cross-check the parsed auxiliary vector against direct `getauxval`
    // queries: both sources must report the same HWCAP value(s).
    let v = auxv();
    if let Ok(hwcap) = getauxval(AT_HWCAP) {
        let rt_hwcap = v.expect("failed to find hwcap key").hwcap;
        assert_eq!(rt_hwcap, hwcap);
    }
    // Targets with AT_HWCAP and AT_HWCAP2:
    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "powerpc",
        target_arch = "powerpc64",
        target_arch = "s390x",
    ))]
    {
        if let Ok(hwcap2) = getauxval(AT_HWCAP2) {
            let rt_hwcap2 = v.expect("failed to find hwcap2 key").hwcap2;
            assert_eq!(rt_hwcap2, hwcap2);
        }
    }
}
#[test]
fn auxv_dump() {
    // Diagnostic test: prints whatever the detection found. Never fails.
    match auxv() {
        Ok(v) => println!("{:?}", v),
        Err(_) => println!("both getauxval() and reading /proc/self/auxv failed!"),
    }
}
cfg_select! {
    target_arch = "arm" => {
        // The tests below can be executed under qemu, where we do not have access to the test
        // files on disk, so we need to embed them with `include_bytes!`.
        // Expected values were captured from the corresponding machines.
        #[test]
        fn linux_rpi3() {
            let auxv = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-rpi3.auxv"));
            let v = auxv_from_file_bytes(auxv).unwrap();
            assert_eq!(v.hwcap, 4174038);
            assert_eq!(v.hwcap2, 16);
        }
        #[test]
        fn linux_macos_vb() {
            let auxv = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/macos-virtualbox-linux-x86-4850HQ.auxv"));
            // The file contains HWCAP but not HWCAP2. In that case, we treat HWCAP2 as zero.
            let v = auxv_from_file_bytes(auxv).unwrap();
            assert_eq!(v.hwcap, 126614527);
            assert_eq!(v.hwcap2, 0);
        }
    }
    target_arch = "aarch64" => {
        // Endian-specific: the fixture bytes encode little-endian words.
        #[cfg(target_endian = "little")]
        #[test]
        fn linux_artificial_aarch64() {
            let auxv = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-artificial-aarch64.auxv"));
            let v = auxv_from_file_bytes(auxv).unwrap();
            assert_eq!(v.hwcap, 0x0123456789abcdef);
            assert_eq!(v.hwcap2, 0x02468ace13579bdf);
        }
        #[cfg(target_endian = "little")]
        #[test]
        fn linux_no_hwcap2_aarch64() {
            let auxv = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv"));
            let v = auxv_from_file_bytes(auxv).unwrap();
            // An absent HWCAP2 is treated as zero, and does not prevent acceptance of HWCAP.
            assert_ne!(v.hwcap, 0);
            assert_eq!(v.hwcap2, 0);
        }
    }
    _ => {}
}
#[test]
fn auxv_dump_procfs() {
    // Diagnostic test: prints the procfs-based parse result. Never fails.
    match auxv_from_file("/proc/self/auxv") {
        Ok(v) => println!("{:?}", v),
        Err(_) => println!("reading /proc/self/auxv failed!"),
    }
}
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "powerpc",
    target_arch = "powerpc64",
    target_arch = "s390x",
))]
#[test]
fn auxv_crate_procfs() {
    // The syscall-based path and the procfs-based path must agree whenever
    // procfs is readable.
    if let Ok(procfs_auxv) = auxv_from_file("/proc/self/auxv") {
        assert_eq!(auxv().unwrap(), procfs_auxv);
    }
}

View File

@@ -1,60 +0,0 @@
//! Run-time feature detection for LoongArch on Linux.
use core::arch::asm;
use super::auxvec;
use crate::detect::{Feature, bit, cache};
/// Try to read the features from the auxiliary vector.
pub(crate) fn detect_features() -> cache::Initializer {
    let mut value = cache::Initializer::default();
    // Small helper: set the feature bit in the cache when `enable` holds.
    let enable_feature = |value: &mut cache::Initializer, feature, enable| {
        if enable {
            value.set(feature as u32);
        }
    };
    // The values are part of the platform-specific [cpucfg]
    //
    // [cpucfg]: LoongArch Reference Manual Volume 1: Basic Architecture v1.1
    let cpucfg1: usize;
    let cpucfg2: usize;
    let cpucfg3: usize;
    // SAFETY: `cpucfg` only reads CPU configuration words 1..=3 into
    // registers; the options reflect that it has no memory side effects.
    unsafe {
        asm!(
            "cpucfg {}, {}",
            "cpucfg {}, {}",
            "cpucfg {}, {}",
            out(reg) cpucfg1, in(reg) 1,
            out(reg) cpucfg2, in(reg) 2,
            out(reg) cpucfg3, in(reg) 3,
            options(pure, nomem, preserves_flags, nostack)
        );
    }
    // Features detectable purely from cpucfg bits.
    enable_feature(&mut value, Feature::_32s, bit::test(cpucfg1, 0) || bit::test(cpucfg1, 1));
    enable_feature(&mut value, Feature::frecipe, bit::test(cpucfg2, 25));
    enable_feature(&mut value, Feature::div32, bit::test(cpucfg2, 26));
    enable_feature(&mut value, Feature::lam_bh, bit::test(cpucfg2, 27));
    enable_feature(&mut value, Feature::lamcas, bit::test(cpucfg2, 28));
    enable_feature(&mut value, Feature::scq, bit::test(cpucfg2, 30));
    enable_feature(&mut value, Feature::ld_seq_sa, bit::test(cpucfg3, 23));
    // The values are part of the platform-specific [asm/hwcap.h][hwcap]
    //
    // [hwcap]: https://github.com/torvalds/linux/blob/master/arch/loongarch/include/uapi/asm/hwcap.h
    if let Ok(auxv) = auxvec::auxv() {
        // FP/SIMD features additionally require kernel support, so they are
        // gated on both cpucfg and the HWCAP bits.
        enable_feature(&mut value, Feature::f, bit::test(cpucfg2, 1) && bit::test(auxv.hwcap, 3));
        enable_feature(&mut value, Feature::d, bit::test(cpucfg2, 2) && bit::test(auxv.hwcap, 3));
        enable_feature(&mut value, Feature::lsx, bit::test(auxv.hwcap, 4));
        enable_feature(&mut value, Feature::lasx, bit::test(auxv.hwcap, 5));
        enable_feature(
            &mut value,
            Feature::lbt,
            bit::test(auxv.hwcap, 10) && bit::test(auxv.hwcap, 11) && bit::test(auxv.hwcap, 12),
        );
        enable_feature(&mut value, Feature::lvz, bit::test(auxv.hwcap, 9));
        enable_feature(&mut value, Feature::ual, bit::test(auxv.hwcap, 2));
        return value;
    }
    value
}

View File

@@ -1,23 +0,0 @@
//! Run-time feature detection for MIPS on Linux.
use super::auxvec;
use crate::detect::{Feature, bit, cache};
/// Try to read the features from the auxiliary vector.
pub(crate) fn detect_features() -> cache::Initializer {
    let mut value = cache::Initializer::default();
    // The values are part of the platform-specific [asm/hwcap.h][hwcap]
    //
    // [hwcap]: https://github.com/torvalds/linux/blob/master/arch/mips/include/uapi/asm/hwcap.h
    if let Ok(auxv) = auxvec::auxv() {
        // HWCAP bit 1 is the MSA (MIPS SIMD Architecture) capability.
        if bit::test(auxv.hwcap, 1) {
            value.set(Feature::msa as u32);
        }
    }
    value
}

View File

@@ -1,74 +0,0 @@
//! Run-time feature detection on Linux
use alloc::vec::Vec;
mod auxvec;
/// Reads the whole file at `orig_path` into a byte vector using raw libc
/// calls (this crate cannot use `std`'s file API).
fn read_file(orig_path: &str) -> Result<Vec<u8>, alloc::string::String> {
    use alloc::format;
    // Build a NUL-terminated copy of the path for the C API.
    let mut path = Vec::from(orig_path.as_bytes());
    path.push(0);
    unsafe {
        let file = libc::open(path.as_ptr() as *const libc::c_char, libc::O_RDONLY);
        if file == -1 {
            return Err(format!("Cannot open file at {orig_path}"));
        }
        let mut data = Vec::new();
        loop {
            // Read directly into the vector's spare capacity, then commit
            // the bytes actually read via `set_len`.
            data.reserve(4096);
            let spare = data.spare_capacity_mut();
            match libc::read(file, spare.as_mut_ptr() as *mut _, spare.len()) {
                -1 => {
                    libc::close(file);
                    return Err(format!("Error while reading from file at {orig_path}"));
                }
                // Zero bytes read means end of file.
                0 => break,
                // SAFETY: `read` initialized exactly `n` bytes of the spare capacity.
                n => data.set_len(data.len() + n as usize),
            }
        }
        libc::close(file);
        Ok(data)
    }
}
// Select the architecture-specific detection backend; architectures without
// one fall back to a stub that reports no run-time-detected features.
cfg_select! {
    target_arch = "aarch64" => {
        mod aarch64;
        pub(crate) use self::aarch64::detect_features;
    }
    target_arch = "arm" => {
        mod arm;
        pub(crate) use self::arm::detect_features;
    }
    any(target_arch = "riscv32", target_arch = "riscv64") => {
        mod riscv;
        pub(crate) use self::riscv::detect_features;
    }
    any(target_arch = "mips", target_arch = "mips64") => {
        mod mips;
        pub(crate) use self::mips::detect_features;
    }
    any(target_arch = "powerpc", target_arch = "powerpc64") => {
        mod powerpc;
        pub(crate) use self::powerpc::detect_features;
    }
    any(target_arch = "loongarch32", target_arch = "loongarch64") => {
        mod loongarch;
        pub(crate) use self::loongarch::detect_features;
    }
    target_arch = "s390x" => {
        mod s390x;
        pub(crate) use self::s390x::detect_features;
    }
    _ => {
        use crate::detect::cache;
        /// Performs run-time feature detection.
        pub(crate) fn detect_features() -> cache::Initializer {
            cache::Initializer::default()
        }
    }
}

View File

@@ -1,35 +0,0 @@
//! Run-time feature detection for PowerPC on Linux.
use super::auxvec;
use crate::detect::{Feature, cache};
/// Try to read the features from the auxiliary vector.
pub(crate) fn detect_features() -> cache::Initializer {
    let mut value = cache::Initializer::default();
    // Small helper: set the feature bit in the cache when `enable` holds.
    let enable_feature = |value: &mut cache::Initializer, f, enable| {
        if enable {
            value.set(f as u32);
        }
    };
    // The values are part of the platform-specific [asm/cputable.h][cputable]
    //
    // [cputable]: https://github.com/torvalds/linux/blob/master/arch/powerpc/include/uapi/asm/cputable.h
    if let Ok(auxv) = auxvec::auxv() {
        // note: the PowerPC values are the mask to do the test (instead of the
        // index of the bit to test like in ARM and Aarch64)
        enable_feature(&mut value, Feature::altivec, auxv.hwcap & 0x10000000 != 0);
        enable_feature(&mut value, Feature::vsx, auxv.hwcap & 0x00000080 != 0);
        // All POWER8-level features are gated on a single HWCAP2 mask.
        let power8_features = auxv.hwcap2 & 0x80000000 != 0;
        enable_feature(&mut value, Feature::power8, power8_features);
        enable_feature(&mut value, Feature::power8_altivec, power8_features);
        enable_feature(&mut value, Feature::power8_crypto, power8_features);
        enable_feature(&mut value, Feature::power8_vector, power8_features);
        // Likewise for POWER9.
        let power9_features = auxv.hwcap2 & 0x00800000 != 0;
        enable_feature(&mut value, Feature::power9, power9_features);
        enable_feature(&mut value, Feature::power9_altivec, power9_features);
        enable_feature(&mut value, Feature::power9_vector, power9_features);
        return value;
    }
    value
}

View File

@@ -1,323 +0,0 @@
//! Run-time feature detection for RISC-V on Linux.
//!
//! On RISC-V, detection using auxv only supports single-letter extensions.
//! So, we use riscv_hwprobe that supports multi-letter extensions if available.
//! <https://www.kernel.org/doc/html/latest/arch/riscv/hwprobe.html>
use core::ptr;
use super::super::riscv::imply_features;
use super::auxvec;
use crate::detect::{Feature, bit, cache};
// See <https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/prctl.h?h=v6.16>
// for runtime status query constants.
const PR_RISCV_V_GET_CONTROL: libc::c_int = 70;
const PR_RISCV_V_VSTATE_CTRL_ON: libc::c_int = 2;
const PR_RISCV_V_VSTATE_CTRL_CUR_MASK: libc::c_int = 3;
// See <https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/riscv/include/uapi/asm/hwprobe.h?h=v6.16>
// for riscv_hwprobe struct and hardware probing constants.
#[repr(C)]
struct riscv_hwprobe {
    key: i64,
    value: u64,
}
impl riscv_hwprobe {
    /// Returns the probed value, or `None` when this key was not supported
    /// (the syscall overwrites `key` with -1 for unsupported keys).
    pub fn get(&self) -> Option<u64> {
        if self.key == -1 { None } else { Some(self.value) }
    }
}
#[allow(non_upper_case_globals)]
const __NR_riscv_hwprobe: libc::c_long = 258;
const RISCV_HWPROBE_KEY_BASE_BEHAVIOR: i64 = 3;
const RISCV_HWPROBE_BASE_BEHAVIOR_IMA: u64 = 1 << 0;
const RISCV_HWPROBE_KEY_IMA_EXT_0: i64 = 4;
const RISCV_HWPROBE_IMA_FD: u64 = 1 << 0;
const RISCV_HWPROBE_IMA_C: u64 = 1 << 1;
const RISCV_HWPROBE_IMA_V: u64 = 1 << 2;
const RISCV_HWPROBE_EXT_ZBA: u64 = 1 << 3;
const RISCV_HWPROBE_EXT_ZBB: u64 = 1 << 4;
const RISCV_HWPROBE_EXT_ZBS: u64 = 1 << 5;
const RISCV_HWPROBE_EXT_ZICBOZ: u64 = 1 << 6;
const RISCV_HWPROBE_EXT_ZBC: u64 = 1 << 7;
const RISCV_HWPROBE_EXT_ZBKB: u64 = 1 << 8;
const RISCV_HWPROBE_EXT_ZBKC: u64 = 1 << 9;
const RISCV_HWPROBE_EXT_ZBKX: u64 = 1 << 10;
const RISCV_HWPROBE_EXT_ZKND: u64 = 1 << 11;
const RISCV_HWPROBE_EXT_ZKNE: u64 = 1 << 12;
const RISCV_HWPROBE_EXT_ZKNH: u64 = 1 << 13;
const RISCV_HWPROBE_EXT_ZKSED: u64 = 1 << 14;
const RISCV_HWPROBE_EXT_ZKSH: u64 = 1 << 15;
const RISCV_HWPROBE_EXT_ZKT: u64 = 1 << 16;
const RISCV_HWPROBE_EXT_ZVBB: u64 = 1 << 17;
const RISCV_HWPROBE_EXT_ZVBC: u64 = 1 << 18;
const RISCV_HWPROBE_EXT_ZVKB: u64 = 1 << 19;
const RISCV_HWPROBE_EXT_ZVKG: u64 = 1 << 20;
const RISCV_HWPROBE_EXT_ZVKNED: u64 = 1 << 21;
const RISCV_HWPROBE_EXT_ZVKNHA: u64 = 1 << 22;
const RISCV_HWPROBE_EXT_ZVKNHB: u64 = 1 << 23;
const RISCV_HWPROBE_EXT_ZVKSED: u64 = 1 << 24;
const RISCV_HWPROBE_EXT_ZVKSH: u64 = 1 << 25;
const RISCV_HWPROBE_EXT_ZVKT: u64 = 1 << 26;
const RISCV_HWPROBE_EXT_ZFH: u64 = 1 << 27;
const RISCV_HWPROBE_EXT_ZFHMIN: u64 = 1 << 28;
const RISCV_HWPROBE_EXT_ZIHINTNTL: u64 = 1 << 29;
const RISCV_HWPROBE_EXT_ZVFH: u64 = 1 << 30;
const RISCV_HWPROBE_EXT_ZVFHMIN: u64 = 1 << 31;
const RISCV_HWPROBE_EXT_ZFA: u64 = 1 << 32;
const RISCV_HWPROBE_EXT_ZTSO: u64 = 1 << 33;
const RISCV_HWPROBE_EXT_ZACAS: u64 = 1 << 34;
const RISCV_HWPROBE_EXT_ZICOND: u64 = 1 << 35;
const RISCV_HWPROBE_EXT_ZIHINTPAUSE: u64 = 1 << 36;
const RISCV_HWPROBE_EXT_ZVE32X: u64 = 1 << 37;
const RISCV_HWPROBE_EXT_ZVE32F: u64 = 1 << 38;
const RISCV_HWPROBE_EXT_ZVE64X: u64 = 1 << 39;
const RISCV_HWPROBE_EXT_ZVE64F: u64 = 1 << 40;
const RISCV_HWPROBE_EXT_ZVE64D: u64 = 1 << 41;
const RISCV_HWPROBE_EXT_ZIMOP: u64 = 1 << 42;
const RISCV_HWPROBE_EXT_ZCA: u64 = 1 << 43;
const RISCV_HWPROBE_EXT_ZCB: u64 = 1 << 44;
const RISCV_HWPROBE_EXT_ZCD: u64 = 1 << 45;
const RISCV_HWPROBE_EXT_ZCF: u64 = 1 << 46;
const RISCV_HWPROBE_EXT_ZCMOP: u64 = 1 << 47;
const RISCV_HWPROBE_EXT_ZAWRS: u64 = 1 << 48;
// Excluded because it only reports the existence of `prctl`-based pointer masking control.
// const RISCV_HWPROBE_EXT_SUPM: u64 = 1 << 49;
const RISCV_HWPROBE_EXT_ZICNTR: u64 = 1 << 50;
const RISCV_HWPROBE_EXT_ZIHPM: u64 = 1 << 51;
const RISCV_HWPROBE_EXT_ZFBFMIN: u64 = 1 << 52;
const RISCV_HWPROBE_EXT_ZVFBFMIN: u64 = 1 << 53;
const RISCV_HWPROBE_EXT_ZVFBFWMA: u64 = 1 << 54;
const RISCV_HWPROBE_EXT_ZICBOM: u64 = 1 << 55;
const RISCV_HWPROBE_EXT_ZAAMO: u64 = 1 << 56;
const RISCV_HWPROBE_EXT_ZALRSC: u64 = 1 << 57;
const RISCV_HWPROBE_EXT_ZABHA: u64 = 1 << 58;
const RISCV_HWPROBE_KEY_CPUPERF_0: i64 = 5;
const RISCV_HWPROBE_MISALIGNED_FAST: u64 = 3;
const RISCV_HWPROBE_MISALIGNED_MASK: u64 = 7;
const RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF: i64 = 9;
const RISCV_HWPROBE_MISALIGNED_SCALAR_FAST: u64 = 3;
const RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF: i64 = 10;
const RISCV_HWPROBE_MISALIGNED_VECTOR_FAST: u64 = 3;
// syscall returns an unsupported error if riscv_hwprobe is not supported,
// so we can safely use this function on older versions of Linux.
fn _riscv_hwprobe(out: &mut [riscv_hwprobe]) -> bool {
    /// Raw wrapper over the `riscv_hwprobe` system call.
    unsafe fn __riscv_hwprobe(
        pairs: *mut riscv_hwprobe,
        pair_count: libc::size_t,
        cpu_set_size: libc::size_t,
        cpus: *mut libc::c_ulong,
        flags: libc::c_uint,
    ) -> libc::c_long {
        unsafe { libc::syscall(__NR_riscv_hwprobe, pairs, pair_count, cpu_set_size, cpus, flags) }
    }
    // Query with an empty cpu set and no flags; a zero return value means the
    // syscall succeeded.
    // SAFETY: `out` is a valid, exclusively borrowed slice of `out.len()`
    // probe pairs for the kernel to fill.
    unsafe { __riscv_hwprobe(out.as_mut_ptr(), out.len(), 0, ptr::null_mut(), 0) == 0 }
}
/// Read list of supported features from (1) the auxiliary vector
/// and (2) the results of `riscv_hwprobe` and `prctl` system calls.
pub(crate) fn detect_features() -> cache::Initializer {
    let mut value = cache::Initializer::default();
    let mut enable_feature = |feature, enable| {
        if enable {
            value.set(feature as u32);
        }
    };
    // Use auxiliary vector to enable single-letter ISA extensions.
    // The values are part of the platform-specific [asm/hwcap.h][hwcap]
    //
    // [hwcap]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/riscv/include/uapi/asm/hwcap.h?h=v6.16
    let auxv = auxvec::auxv().expect("read auxvec"); // should not fail on RISC-V platform
    let mut has_i = bit::test(auxv.hwcap, (b'i' - b'a').into());
    #[allow(clippy::eq_op)]
    enable_feature(Feature::a, bit::test(auxv.hwcap, (b'a' - b'a').into()));
    enable_feature(Feature::c, bit::test(auxv.hwcap, (b'c' - b'a').into()));
    enable_feature(Feature::d, bit::test(auxv.hwcap, (b'd' - b'a').into()));
    enable_feature(Feature::f, bit::test(auxv.hwcap, (b'f' - b'a').into()));
    enable_feature(Feature::m, bit::test(auxv.hwcap, (b'm' - b'a').into()));
    let has_v = bit::test(auxv.hwcap, (b'v' - b'a').into());
    let mut is_v_set = false;
    // Use riscv_hwprobe syscall to query more extensions and
    // performance-related capabilities.
    'hwprobe: {
        // Declares the probe request array plus named accessors over it:
        // each key below can then be read back as `query![Name]`.
        macro_rules! init {
            { $($name: ident : $key: expr),* $(,)? } => {
                #[repr(usize)]
                enum Indices { $($name),* }
                let mut t = [$(riscv_hwprobe { key: $key, value: 0 }),*];
                macro_rules! data_mut { () => { &mut t } }
                macro_rules! query { [$idx: ident] => { t[Indices::$idx as usize].get() } }
            }
        }
        init! {
            BaseBehavior: RISCV_HWPROBE_KEY_BASE_BEHAVIOR,
            Extensions: RISCV_HWPROBE_KEY_IMA_EXT_0,
            MisalignedScalarPerf: RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF,
            MisalignedVectorPerf: RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF,
            MisalignedScalarPerfFallback: RISCV_HWPROBE_KEY_CPUPERF_0,
        };
        // If the syscall itself is unavailable, skip everything below and
        // rely on auxv alone.
        if !_riscv_hwprobe(data_mut!()) {
            break 'hwprobe;
        }
        // Query scalar misaligned behavior.
        if let Some(value) = query![MisalignedScalarPerf] {
            enable_feature(
                Feature::unaligned_scalar_mem,
                value == RISCV_HWPROBE_MISALIGNED_SCALAR_FAST,
            );
        } else if let Some(value) = query![MisalignedScalarPerfFallback] {
            // Deprecated method for fallback
            enable_feature(
                Feature::unaligned_scalar_mem,
                value & RISCV_HWPROBE_MISALIGNED_MASK == RISCV_HWPROBE_MISALIGNED_FAST,
            );
        }
        // Query vector misaligned behavior.
        if let Some(value) = query![MisalignedVectorPerf] {
            enable_feature(
                Feature::unaligned_vector_mem,
                value == RISCV_HWPROBE_MISALIGNED_VECTOR_FAST,
            );
        }
        // Query whether "I" base and extensions "M" and "A" (as in the ISA
        // manual version 2.2) are enabled. "I" base at that time corresponds
        // to "I", "Zicsr", "Zicntr" and "Zifencei" (as in the ISA manual version
        // 20240411).
        // This is a current requirement of
        // `RISCV_HWPROBE_KEY_IMA_EXT_0`-based tests.
        if query![BaseBehavior].is_none_or(|value| value & RISCV_HWPROBE_BASE_BEHAVIOR_IMA == 0) {
            break 'hwprobe;
        }
        has_i = true;
        enable_feature(Feature::zicsr, true);
        enable_feature(Feature::zicntr, true);
        enable_feature(Feature::zifencei, true);
        enable_feature(Feature::m, true);
        enable_feature(Feature::a, true);
        // Enable features based on `RISCV_HWPROBE_KEY_IMA_EXT_0`.
        let Some(ima_ext_0) = query![Extensions] else {
            break 'hwprobe;
        };
        let test = |mask| (ima_ext_0 & mask) != 0;
        enable_feature(Feature::d, test(RISCV_HWPROBE_IMA_FD)); // F is implied.
        enable_feature(Feature::c, test(RISCV_HWPROBE_IMA_C));
        enable_feature(Feature::zicntr, test(RISCV_HWPROBE_EXT_ZICNTR));
        enable_feature(Feature::zihpm, test(RISCV_HWPROBE_EXT_ZIHPM));
        enable_feature(Feature::zihintntl, test(RISCV_HWPROBE_EXT_ZIHINTNTL));
        enable_feature(Feature::zihintpause, test(RISCV_HWPROBE_EXT_ZIHINTPAUSE));
        enable_feature(Feature::zimop, test(RISCV_HWPROBE_EXT_ZIMOP));
        enable_feature(Feature::zicbom, test(RISCV_HWPROBE_EXT_ZICBOM));
        enable_feature(Feature::zicboz, test(RISCV_HWPROBE_EXT_ZICBOZ));
        enable_feature(Feature::zicond, test(RISCV_HWPROBE_EXT_ZICOND));
        enable_feature(Feature::zalrsc, test(RISCV_HWPROBE_EXT_ZALRSC));
        enable_feature(Feature::zaamo, test(RISCV_HWPROBE_EXT_ZAAMO));
        enable_feature(Feature::zawrs, test(RISCV_HWPROBE_EXT_ZAWRS));
        enable_feature(Feature::zabha, test(RISCV_HWPROBE_EXT_ZABHA));
        enable_feature(Feature::zacas, test(RISCV_HWPROBE_EXT_ZACAS));
        enable_feature(Feature::ztso, test(RISCV_HWPROBE_EXT_ZTSO));
        enable_feature(Feature::zba, test(RISCV_HWPROBE_EXT_ZBA));
        enable_feature(Feature::zbb, test(RISCV_HWPROBE_EXT_ZBB));
        enable_feature(Feature::zbs, test(RISCV_HWPROBE_EXT_ZBS));
        enable_feature(Feature::zbc, test(RISCV_HWPROBE_EXT_ZBC));
        enable_feature(Feature::zbkb, test(RISCV_HWPROBE_EXT_ZBKB));
        enable_feature(Feature::zbkc, test(RISCV_HWPROBE_EXT_ZBKC));
        enable_feature(Feature::zbkx, test(RISCV_HWPROBE_EXT_ZBKX));
        enable_feature(Feature::zknd, test(RISCV_HWPROBE_EXT_ZKND));
        enable_feature(Feature::zkne, test(RISCV_HWPROBE_EXT_ZKNE));
        enable_feature(Feature::zknh, test(RISCV_HWPROBE_EXT_ZKNH));
        enable_feature(Feature::zksed, test(RISCV_HWPROBE_EXT_ZKSED));
        enable_feature(Feature::zksh, test(RISCV_HWPROBE_EXT_ZKSH));
        enable_feature(Feature::zkt, test(RISCV_HWPROBE_EXT_ZKT));
        enable_feature(Feature::zcmop, test(RISCV_HWPROBE_EXT_ZCMOP));
        enable_feature(Feature::zca, test(RISCV_HWPROBE_EXT_ZCA));
        enable_feature(Feature::zcf, test(RISCV_HWPROBE_EXT_ZCF));
        enable_feature(Feature::zcd, test(RISCV_HWPROBE_EXT_ZCD));
        enable_feature(Feature::zcb, test(RISCV_HWPROBE_EXT_ZCB));
        enable_feature(Feature::zfh, test(RISCV_HWPROBE_EXT_ZFH));
        enable_feature(Feature::zfhmin, test(RISCV_HWPROBE_EXT_ZFHMIN));
        enable_feature(Feature::zfa, test(RISCV_HWPROBE_EXT_ZFA));
        enable_feature(Feature::zfbfmin, test(RISCV_HWPROBE_EXT_ZFBFMIN));
        // Use prctl (if any) to determine whether the vector extension
        // is enabled on the current thread (assuming the entire process
        // share the same status). If prctl fails (e.g. QEMU userland emulator
        // as of version 9.2.3), use auxiliary vector to retrieve the default
        // vector status on the process startup.
        let has_vectors = {
            let v_status = unsafe { libc::prctl(PR_RISCV_V_GET_CONTROL) };
            if v_status >= 0 {
                (v_status & PR_RISCV_V_VSTATE_CTRL_CUR_MASK) == PR_RISCV_V_VSTATE_CTRL_ON
            } else {
                has_v
            }
        };
        if has_vectors {
            enable_feature(Feature::v, test(RISCV_HWPROBE_IMA_V));
            enable_feature(Feature::zve32x, test(RISCV_HWPROBE_EXT_ZVE32X));
            enable_feature(Feature::zve32f, test(RISCV_HWPROBE_EXT_ZVE32F));
            enable_feature(Feature::zve64x, test(RISCV_HWPROBE_EXT_ZVE64X));
            enable_feature(Feature::zve64f, test(RISCV_HWPROBE_EXT_ZVE64F));
            enable_feature(Feature::zve64d, test(RISCV_HWPROBE_EXT_ZVE64D));
            enable_feature(Feature::zvbb, test(RISCV_HWPROBE_EXT_ZVBB));
            enable_feature(Feature::zvbc, test(RISCV_HWPROBE_EXT_ZVBC));
            enable_feature(Feature::zvkb, test(RISCV_HWPROBE_EXT_ZVKB));
            enable_feature(Feature::zvkg, test(RISCV_HWPROBE_EXT_ZVKG));
            enable_feature(Feature::zvkned, test(RISCV_HWPROBE_EXT_ZVKNED));
            enable_feature(Feature::zvknha, test(RISCV_HWPROBE_EXT_ZVKNHA));
            enable_feature(Feature::zvknhb, test(RISCV_HWPROBE_EXT_ZVKNHB));
            enable_feature(Feature::zvksed, test(RISCV_HWPROBE_EXT_ZVKSED));
            enable_feature(Feature::zvksh, test(RISCV_HWPROBE_EXT_ZVKSH));
            enable_feature(Feature::zvkt, test(RISCV_HWPROBE_EXT_ZVKT));
            enable_feature(Feature::zvfh, test(RISCV_HWPROBE_EXT_ZVFH));
            enable_feature(Feature::zvfhmin, test(RISCV_HWPROBE_EXT_ZVFHMIN));
            enable_feature(Feature::zvfbfmin, test(RISCV_HWPROBE_EXT_ZVFBFMIN));
            enable_feature(Feature::zvfbfwma, test(RISCV_HWPROBE_EXT_ZVFBFWMA));
        }
        is_v_set = true;
    };
    // Set V purely depending on the auxiliary vector
    // only if no fine-grained vector extension detection is available.
    if !is_v_set {
        enable_feature(Feature::v, has_v);
    }
    // Handle base ISA.
    // If future RV128I is supported, implement with `enable_feature` here.
    // Note that we should use `target_arch` instead of `target_pointer_width`
    // to avoid misdetection caused by experimental ABIs such as RV64ILP32.
    #[cfg(target_arch = "riscv64")]
    enable_feature(Feature::rv64i, has_i);
    #[cfg(target_arch = "riscv32")]
    enable_feature(Feature::rv32i, has_i);
    imply_features(value)
}

View File

@@ -1,152 +0,0 @@
//! Run-time feature detection for s390x on Linux.
use super::auxvec;
use crate::detect::{Feature, bit, cache};
/// Try to read the features from the auxiliary vector
pub(crate) fn detect_features() -> cache::Initializer {
let opt_hwcap: Option<AtHwcap> = auxvec::auxv().ok().map(Into::into);
let facilities = ExtendedFacilityList::new();
cache(opt_hwcap, facilities)
}
/// The `AT_HWCAP` bitfield decoded into one boolean per capability bit
/// (the bit-to-field mapping is in the `From<auxvec::AuxVec>` impl).
#[derive(Debug, Default, PartialEq)]
struct AtHwcap {
    esan3: bool,
    zarch: bool,
    stfle: bool,
    msa: bool,
    ldisp: bool,
    eimm: bool,
    dfp: bool,
    hpage: bool,
    etf3eh: bool,
    high_gprs: bool,
    te: bool,
    vxrs: bool,
    vxrs_bcd: bool,
    vxrs_ext: bool,
    gs: bool,
    vxrs_ext2: bool,
    vxrs_pde: bool,
    sort: bool,
    dflt: bool,
    vxrs_pde2: bool,
    nnpa: bool,
    pci_mio: bool,
    sie: bool,
}
impl From<auxvec::AuxVec> for AtHwcap {
    /// Reads AtHwcap from the auxiliary vector.
    // NOTE(review): bit positions appear to follow the kernel's
    // HWCAP_S390_* order in asm/elf.h — confirm when bumping kernels.
    fn from(auxv: auxvec::AuxVec) -> Self {
        AtHwcap {
            esan3: bit::test(auxv.hwcap, 0),
            zarch: bit::test(auxv.hwcap, 1),
            stfle: bit::test(auxv.hwcap, 2),
            msa: bit::test(auxv.hwcap, 3),
            ldisp: bit::test(auxv.hwcap, 4),
            eimm: bit::test(auxv.hwcap, 5),
            dfp: bit::test(auxv.hwcap, 6),
            hpage: bit::test(auxv.hwcap, 7),
            etf3eh: bit::test(auxv.hwcap, 8),
            high_gprs: bit::test(auxv.hwcap, 9),
            te: bit::test(auxv.hwcap, 10),
            vxrs: bit::test(auxv.hwcap, 11),
            vxrs_bcd: bit::test(auxv.hwcap, 12),
            vxrs_ext: bit::test(auxv.hwcap, 13),
            gs: bit::test(auxv.hwcap, 14),
            vxrs_ext2: bit::test(auxv.hwcap, 15),
            vxrs_pde: bit::test(auxv.hwcap, 16),
            sort: bit::test(auxv.hwcap, 17),
            dflt: bit::test(auxv.hwcap, 18),
            vxrs_pde2: bit::test(auxv.hwcap, 19),
            nnpa: bit::test(auxv.hwcap, 20),
            pci_mio: bit::test(auxv.hwcap, 21),
            sie: bit::test(auxv.hwcap, 22),
        }
    }
}
/// Facility list doublewords retrieved with the `stfle` instruction.
struct ExtendedFacilityList([u64; 4]);
impl ExtendedFacilityList {
    /// Queries the facility list from the CPU via `stfle`.
    fn new() -> Self {
        let mut result: [u64; 4] = [0; 4];
        // SAFETY: rust/llvm only support s390x version with the `stfle` instruction.
        unsafe {
            core::arch::asm!(
                // equivalently ".insn s, 0xb2b00000, 0({1})",
                "stfle 0({})",
                in(reg_addr) result.as_mut_ptr() ,
                // r0 carries (number of doublewords - 1) on input and is
                // clobbered on output.
                inout("r0") result.len() as u64 - 1 => _,
                options(nostack)
            );
        }
        Self(result)
    }
    /// Returns facility bit `n` of the stored list.
    const fn get_bit(&self, n: usize) -> bool {
        // NOTE: bits are numbered from the left.
        self.0[n / 64] & (1 << (63 - (n % 64))) != 0
    }
}
/// Initializes the cache from the feature bits.
///
/// These values are part of the platform-specific [asm/elf.h][kernel], and are a selection of the
/// fields found in the [Facility Indications].
///
/// [Facility Indications]: https://www.ibm.com/support/pages/sites/default/files/2021-05/SA22-7871-10.pdf#page=63
/// [kernel]: https://github.com/torvalds/linux/blob/b62cef9a5c673f1b8083159f5dc03c1c5daced2f/arch/s390/include/asm/elf.h#L129
fn cache(hwcap: Option<AtHwcap>, facilities: ExtendedFacilityList) -> cache::Initializer {
    let mut value = cache::Initializer::default();
    {
        // Set the feature when the corresponding facility bit is present.
        let mut enable_if_set = |bit_index, f| {
            if facilities.get_bit(bit_index) {
                value.set(f as u32);
            }
        };
        // We use HWCAP for `vector` because it requires both hardware and kernel support.
        if let Some(AtHwcap { vxrs: true, .. }) = hwcap {
            // vector and related
            enable_if_set(129, Feature::vector);
            enable_if_set(135, Feature::vector_enhancements_1);
            enable_if_set(148, Feature::vector_enhancements_2);
            enable_if_set(198, Feature::vector_enhancements_3);
            enable_if_set(134, Feature::vector_packed_decimal);
            enable_if_set(152, Feature::vector_packed_decimal_enhancement);
            enable_if_set(192, Feature::vector_packed_decimal_enhancement_2);
            enable_if_set(199, Feature::vector_packed_decimal_enhancement_3);
            enable_if_set(165, Feature::nnp_assist);
        }
        // others
        enable_if_set(76, Feature::message_security_assist_extension3);
        enable_if_set(77, Feature::message_security_assist_extension4);
        enable_if_set(57, Feature::message_security_assist_extension5);
        enable_if_set(146, Feature::message_security_assist_extension8);
        enable_if_set(155, Feature::message_security_assist_extension9);
        enable_if_set(86, Feature::message_security_assist_extension12);
        enable_if_set(58, Feature::miscellaneous_extensions_2);
        enable_if_set(61, Feature::miscellaneous_extensions_3);
        enable_if_set(84, Feature::miscellaneous_extensions_4);
        enable_if_set(45, Feature::high_word);
        enable_if_set(73, Feature::transactional_execution);
        enable_if_set(133, Feature::guarded_storage);
        enable_if_set(150, Feature::enhanced_sort);
        enable_if_set(151, Feature::deflate_conversion);
        enable_if_set(201, Feature::concurrent_functions);
    }
    value
}

View File

@@ -1,57 +0,0 @@
//! Run-time feature detection for Aarch64 on OpenBSD.
//!
//! OpenBSD doesn't trap the mrs instruction, but exposes the system registers through sysctl.
//! https://github.com/openbsd/src/commit/d335af936b9d7dd9cf655cae1ce19560c45de6c8
//! https://github.com/golang/go/commit/cd54ef1f61945459486e9eea2f016d99ef1da925
use core::mem::MaybeUninit;
use core::ptr;
use crate::detect::cache;
// Defined in machine/cpu.h.
// https://github.com/openbsd/src/blob/72ccc03bd11da614f31f7ff76e3f6fce99bc1c79/sys/arch/arm64/include/cpu.h#L25-L40
const CPU_ID_AA64ISAR0: libc::c_int = 2;
const CPU_ID_AA64ISAR1: libc::c_int = 3;
const CPU_ID_AA64MMFR2: libc::c_int = 7;
const CPU_ID_AA64PFR0: libc::c_int = 8;
/// Try to read the features from the system registers.
///
/// Registers that cannot be queried (older OpenBSD releases) are handled
/// per the comments below.
pub(crate) fn detect_features() -> cache::Initializer {
    // ID_AA64ISAR0_EL1 and ID_AA64ISAR1_EL1 are supported on OpenBSD 7.1+.
    // https://github.com/openbsd/src/commit/d335af936b9d7dd9cf655cae1ce19560c45de6c8
    // Others are supported on OpenBSD 7.3+.
    // https://github.com/openbsd/src/commit/c7654cd65262d532212f65123ee3905ba200365c
    // sysctl returns an unsupported error if operation is not supported,
    // so we can safely use this function on older versions of OpenBSD.
    let aa64isar0 = sysctl64(&[libc::CTL_MACHDEP, CPU_ID_AA64ISAR0]).unwrap_or(0);
    let aa64isar1 = sysctl64(&[libc::CTL_MACHDEP, CPU_ID_AA64ISAR1]).unwrap_or(0);
    let aa64mmfr2 = sysctl64(&[libc::CTL_MACHDEP, CPU_ID_AA64MMFR2]).unwrap_or(0);
    // Do not use unwrap_or(0) because in fp and asimd fields, 0 indicates that
    // the feature is available.
    let aa64pfr0 = sysctl64(&[libc::CTL_MACHDEP, CPU_ID_AA64PFR0]);
    crate::detect::aarch64::parse_system_registers(aa64isar0, aa64isar1, aa64mmfr2, aa64pfr0)
}
/// Reads a 64-bit value from sysctl under the given `mib` name, returning
/// `None` if the call fails or yields an unexpected length.
#[inline]
fn sysctl64(mib: &[libc::c_int]) -> Option<u64> {
    const OUT_LEN: libc::size_t = core::mem::size_of::<u64>();
    let mut out = MaybeUninit::<u64>::uninit();
    let mut out_len = OUT_LEN;
    // SAFETY: `out` provides `OUT_LEN` writable bytes, `out_len` is a valid
    // pointer, and no new value is set (null `newp`, zero `newlen`).
    let res = unsafe {
        libc::sysctl(
            mib.as_ptr(),
            mib.len() as libc::c_uint,
            out.as_mut_ptr() as *mut libc::c_void,
            &mut out_len,
            ptr::null_mut(),
            0,
        )
    };
    if res == -1 || out_len != OUT_LEN {
        return None;
    }
    // SAFETY: we've checked that sysctl was successful and `out` was filled.
    Some(unsafe { out.assume_init() })
}

View File

@@ -1,54 +0,0 @@
//! Parses ELF auxiliary vectors.
#![cfg_attr(
any(target_arch = "aarch64", target_arch = "powerpc64", target_arch = "riscv64"),
allow(dead_code)
)]
/// Cache HWCAP bitfields of the ELF Auxiliary Vector.
///
/// If an entry cannot be read all the bits in the bitfield are set to zero.
/// This should be interpreted as all the features being disabled.
#[derive(Debug, Copy, Clone)]
pub(crate) struct AuxVec {
    /// Raw `AT_HWCAP` value.
    pub hwcap: usize,
    /// Raw `AT_HWCAP2` value.
    pub hwcap2: usize,
}
/// ELF Auxiliary Vector
///
/// The auxiliary vector is a memory region in a running ELF program's stack
/// composed of (key: usize, value: usize) pairs.
///
/// On OpenBSD the keys are platform dependent and defined in
/// [machine/elf.h][elfh]; the hardware capabilities of a given CPU are
/// queried with the `AT_HWCAP` and `AT_HWCAP2` keys.
///
/// Note that run-time feature detection is not invoked for features that can
/// be detected at compile-time.
///
/// [elfh]: https://github.com/openbsd/src/blob/master/sys/arch/arm64/include/elf.h
/// [elfh]: https://github.com/openbsd/src/blob/master/sys/arch/powerpc64/include/elf.h
pub(crate) fn auxv() -> Result<AuxVec, ()> {
    let hwcap = archauxv(libc::AT_HWCAP);
    let hwcap2 = archauxv(libc::AT_HWCAP2);
    // Both values being zero is indistinguishable from a read error, so it is
    // reported as `Err`; any non-zero value means the read succeeded (a
    // legitimately zero AT_HWCAP2 is then kept as-is).
    if hwcap == 0 && hwcap2 == 0 {
        Err(())
    } else {
        Ok(AuxVec { hwcap, hwcap2 })
    }
}
/// Tries to read the `key` from the auxiliary vector.
///
/// Returns 0 when the key cannot be read (callers treat 0 as "disabled").
fn archauxv(key: libc::c_int) -> usize {
    const OUT_LEN: libc::c_int = core::mem::size_of::<libc::c_ulong>() as libc::c_int;
    let mut out: libc::c_ulong = 0;
    // SAFETY: `out` is a valid destination of exactly `OUT_LEN` bytes for
    // `elf_aux_info` to write into.
    unsafe {
        let res =
            libc::elf_aux_info(key, &mut out as *mut libc::c_ulong as *mut libc::c_void, OUT_LEN);
        // If elf_aux_info fails, `out` will be left at zero (which is the proper default value).
        debug_assert!(res == 0 || out == 0);
    }
    out as usize
}

View File

@@ -1,21 +0,0 @@
//! Run-time feature detection on OpenBSD
mod auxvec;
cfg_select! {
target_arch = "aarch64" => {
mod aarch64;
pub(crate) use self::aarch64::detect_features;
}
target_arch = "powerpc64" => {
mod powerpc;
pub(crate) use self::powerpc::detect_features;
}
_ => {
use crate::detect::cache;
/// Performs run-time feature detection.
pub(crate) fn detect_features() -> cache::Initializer {
cache::Initializer::default()
}
}
}

View File

@@ -1,21 +0,0 @@
//! Run-time feature detection for PowerPC on OpenBSD.
use super::auxvec;
use crate::detect::{Feature, cache};
pub(crate) fn detect_features() -> cache::Initializer {
let mut value = cache::Initializer::default();
let enable_feature = |value: &mut cache::Initializer, f, enable| {
if enable {
value.set(f as u32);
}
};
if let Ok(auxv) = auxvec::auxv() {
enable_feature(&mut value, Feature::altivec, auxv.hwcap & 0x10000000 != 0);
enable_feature(&mut value, Feature::vsx, auxv.hwcap & 0x00000080 != 0);
enable_feature(&mut value, Feature::power8, auxv.hwcap2 & 0x80000000 != 0);
return value;
}
value
}

View File

@@ -1,8 +0,0 @@
//! Other operating systems
use crate::detect::cache;
#[allow(dead_code)]
pub(crate) fn detect_features() -> cache::Initializer {
cache::Initializer::default()
}

View File

@@ -1,159 +0,0 @@
//! Run-time feature detection utility for RISC-V.
//!
//! On RISC-V, full feature detection needs a help of one or more
//! feature detection mechanisms (usually provided by the operating system).
//!
//! RISC-V architecture defines many extensions and some have dependency to others.
//! More importantly, some of them cannot be enabled without resolving such
//! dependencies due to limited set of features that such mechanisms provide.
//!
//! This module provides an OS-independent utility to process such relations
//! between RISC-V extensions.
use crate::detect::{Feature, cache};
/// Imply features by the given set of enabled features.
///
/// Note that it does not perform any consistency checks including existence of
/// conflicting extensions and/or complicated requirements. Eliminating such
/// inconsistencies is the responsibility of the feature detection logic and
/// its provider(s).
pub(crate) fn imply_features(mut value: cache::Initializer) -> cache::Initializer {
loop {
// Check convergence of the feature flags later.
let prev = value;
// Expect that the optimizer turns repeated operations into
// a fewer number of bit-manipulation operations.
macro_rules! imply {
// Regular implication:
// A1 => (B1[, B2...]), A2 => (B1[, B2...]) and so on.
($($from: ident)|+ => $($to: ident)&+) => {
if [$(Feature::$from as u32),+].iter().any(|&x| value.test(x)) {
$(
value.set(Feature::$to as u32);
)+
}
};
// Implication with multiple requirements:
// A1 && A2 ... => (B1[, B2...]).
($($from: ident)&+ => $($to: ident)&+) => {
if [$(Feature::$from as u32),+].iter().all(|&x| value.test(x)) {
$(
value.set(Feature::$to as u32);
)+
}
};
}
macro_rules! group {
($group: ident == $($member: ident)&+) => {
// Forward implication as defined in the specifications.
imply!($group => $($member)&+);
// Reverse implication to "group extension" from its members.
// This is not a part of specifications but convenient for
// feature detection and implemented in e.g. LLVM.
imply!($($member)&+ => $group);
};
}
/*
If a dependency/implication is not explicitly stated in the
specification, it is denoted as a comment as follows:
"defined as subset":
The latter extension is described as a subset of the former
(but the evidence is weak).
"functional":
The former extension is functionally a superset of the latter
(no direct references though).
*/
imply!(zvbb => zvkb);
// Certain set of vector cryptography extensions form a group.
group!(zvkn == zvkned & zvknhb & zvkb & zvkt);
group!(zvknc == zvkn & zvbc);
group!(zvkng == zvkn & zvkg);
group!(zvks == zvksed & zvksh & zvkb & zvkt);
group!(zvksc == zvks & zvbc);
group!(zvksg == zvks & zvkg);
imply!(zvknhb => zvknha); // functional
// For vector cryptography, Zvknhb and Zvbc require integer arithmetic
// with EEW=64 (Zve64x) while others not depending on them
// require EEW=32 (Zve32x).
imply!(zvknhb | zvbc => zve64x);
imply!(zvbb | zvkb | zvkg | zvkned | zvknha | zvksed | zvksh => zve32x);
imply!(zbc => zbkc); // defined as subset
group!(zkn == zbkb & zbkc & zbkx & zkne & zknd & zknh);
group!(zks == zbkb & zbkc & zbkx & zksed & zksh);
group!(zk == zkn & zkr & zkt);
imply!(zabha | zacas => zaamo);
group!(a == zalrsc & zaamo);
group!(b == zba & zbb & zbs);
imply!(zcf => zca & f);
imply!(zcd => zca & d);
imply!(zcmop | zcb => zca);
imply!(zhinx => zhinxmin);
imply!(zdinx | zhinxmin => zfinx);
imply!(zvfh => zvfhmin); // functional
imply!(zvfh => zve32f & zfhmin);
imply!(zvfhmin => zve32f);
imply!(zvfbfwma => zvfbfmin & zfbfmin);
imply!(zvfbfmin => zve32f);
imply!(v => zve64d);
imply!(zve64d => zve64f & d);
imply!(zve64f => zve64x & zve32f);
imply!(zve64x => zve32x);
imply!(zve32f => zve32x & f);
imply!(zfh => zfhmin);
imply!(q => d);
imply!(d | zfhmin | zfa => f);
imply!(zfbfmin => f); // and some of (not all) "Zfh" instructions.
// Relatively complex implication rules around the "C" extension.
// (from "C" and some others)
imply!(c => zca);
imply!(c & d => zcd);
#[cfg(target_arch = "riscv32")]
imply!(c & f => zcf);
// (to "C"; defined as superset)
cfg_select! {
target_arch = "riscv32" => {
if value.test(Feature::d as u32) {
imply!(zcf & zcd => c);
} else if value.test(Feature::f as u32) {
imply!(zcf => c);
} else {
imply!(zca => c);
}
}
_ => {
if value.test(Feature::d as u32) {
imply!(zcd => c);
} else {
imply!(zca => c);
}
}
}
imply!(zicntr | zihpm | f | zfinx | zve32x => zicsr);
// Loop until the feature flags converge.
if prev == value {
return value;
}
}
}
#[cfg(test)]
#[path = "riscv/tests.rs"]
mod tests;

View File

@@ -1,64 +0,0 @@
use super::*;
#[test]
fn simple_direct() {
let mut value = cache::Initializer::default();
value.set(Feature::f as u32);
// F (and other extensions with CSRs) -> Zicsr
assert!(imply_features(value).test(Feature::zicsr as u32));
}
#[test]
fn simple_indirect() {
let mut value = cache::Initializer::default();
value.set(Feature::q as u32);
// Q -> D, D -> F, F -> Zicsr
assert!(imply_features(value).test(Feature::zicsr as u32));
}
#[test]
fn complex_zcd() {
let mut value = cache::Initializer::default();
// C & D -> Zcd
value.set(Feature::c as u32);
assert!(!imply_features(value).test(Feature::zcd as u32));
value.set(Feature::d as u32);
assert!(imply_features(value).test(Feature::zcd as u32));
}
#[test]
fn group_simple_forward() {
let mut value = cache::Initializer::default();
// A -> Zalrsc & Zaamo (forward implication)
value.set(Feature::a as u32);
let value = imply_features(value);
assert!(value.test(Feature::zalrsc as u32));
assert!(value.test(Feature::zaamo as u32));
}
#[test]
fn group_simple_backward() {
let mut value = cache::Initializer::default();
// Zalrsc & Zaamo -> A (reverse implication)
value.set(Feature::zalrsc as u32);
value.set(Feature::zaamo as u32);
assert!(imply_features(value).test(Feature::a as u32));
}
#[test]
fn group_complex_convergence() {
let mut value = cache::Initializer::default();
// Needs 3 iterations to converge
// (and 4th iteration for convergence checking):
// 1. [Zvksc] -> Zvks & Zvbc
// 2. Zvks -> Zvksed & Zvksh & Zvkb & Zvkt
// 3a. [Zvkned] & [Zvknhb] & [Zvkb] & Zvkt -> {Zvkn}
// 3b. Zvkn & Zvbc -> {Zvknc}
value.set(Feature::zvksc as u32);
value.set(Feature::zvkned as u32);
value.set(Feature::zvknhb as u32);
value.set(Feature::zvkb as u32);
let value = imply_features(value);
assert!(value.test(Feature::zvkn as u32));
assert!(value.test(Feature::zvknc as u32));
}

View File

@@ -1,125 +0,0 @@
//! Run-time feature detection for Aarch64 on Windows.
use crate::detect::{Feature, cache};
/// Try to read the features using IsProcessorFeaturePresent.
pub(crate) fn detect_features() -> cache::Initializer {
type DWORD = u32;
type BOOL = i32;
const FALSE: BOOL = 0;
// The following Microsoft documents isn't updated for aarch64.
// https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-isprocessorfeaturepresent
// These are defined in winnt.h of Windows SDK
const PF_ARM_VFP_32_REGISTERS_AVAILABLE: u32 = 18;
const PF_ARM_NEON_INSTRUCTIONS_AVAILABLE: u32 = 19;
const PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE: u32 = 30;
const PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE: u32 = 31;
const PF_ARM_V81_ATOMIC_INSTRUCTIONS_AVAILABLE: u32 = 34;
const PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE: u32 = 43;
const PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE: u32 = 44;
const PF_ARM_V83_LRCPC_INSTRUCTIONS_AVAILABLE: u32 = 45;
const PF_ARM_SVE_INSTRUCTIONS_AVAILABLE: u32 = 46;
const PF_ARM_SVE2_INSTRUCTIONS_AVAILABLE: u32 = 47;
const PF_ARM_SVE2_1_INSTRUCTIONS_AVAILABLE: u32 = 48;
const PF_ARM_SVE_AES_INSTRUCTIONS_AVAILABLE: u32 = 49;
const PF_ARM_SVE_PMULL128_INSTRUCTIONS_AVAILABLE: u32 = 50;
const PF_ARM_SVE_BITPERM_INSTRUCTIONS_AVAILABLE: u32 = 51;
// const PF_ARM_SVE_BF16_INSTRUCTIONS_AVAILABLE: u32 = 52;
// const PF_ARM_SVE_EBF16_INSTRUCTIONS_AVAILABLE: u32 = 53;
const PF_ARM_SVE_B16B16_INSTRUCTIONS_AVAILABLE: u32 = 54;
const PF_ARM_SVE_SHA3_INSTRUCTIONS_AVAILABLE: u32 = 55;
const PF_ARM_SVE_SM4_INSTRUCTIONS_AVAILABLE: u32 = 56;
// const PF_ARM_SVE_I8MM_INSTRUCTIONS_AVAILABLE: u32 = 57;
// const PF_ARM_SVE_F32MM_INSTRUCTIONS_AVAILABLE: u32 = 58;
// const PF_ARM_SVE_F64MM_INSTRUCTIONS_AVAILABLE: u32 = 59;
unsafe extern "system" {
fn IsProcessorFeaturePresent(ProcessorFeature: DWORD) -> BOOL;
}
let mut value = cache::Initializer::default();
{
let mut enable_feature = |f, enable| {
if enable {
value.set(f as u32);
}
};
// Some features may be supported on current CPU,
// but no way to detect it by OS API.
// Also, we require unsafe block for the extern "system" calls.
unsafe {
enable_feature(
Feature::fp,
IsProcessorFeaturePresent(PF_ARM_VFP_32_REGISTERS_AVAILABLE) != FALSE,
);
enable_feature(
Feature::asimd,
IsProcessorFeaturePresent(PF_ARM_NEON_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
Feature::crc,
IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
Feature::lse,
IsProcessorFeaturePresent(PF_ARM_V81_ATOMIC_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
Feature::dotprod,
IsProcessorFeaturePresent(PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
Feature::jsconv,
IsProcessorFeaturePresent(PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
Feature::rcpc,
IsProcessorFeaturePresent(PF_ARM_V83_LRCPC_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
Feature::sve,
IsProcessorFeaturePresent(PF_ARM_SVE_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
Feature::sve2,
IsProcessorFeaturePresent(PF_ARM_SVE2_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
Feature::sve2p1,
IsProcessorFeaturePresent(PF_ARM_SVE2_1_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
Feature::sve2_aes,
IsProcessorFeaturePresent(PF_ARM_SVE_AES_INSTRUCTIONS_AVAILABLE) != FALSE
&& IsProcessorFeaturePresent(PF_ARM_SVE_PMULL128_INSTRUCTIONS_AVAILABLE)
!= FALSE,
);
enable_feature(
Feature::sve2_bitperm,
IsProcessorFeaturePresent(PF_ARM_SVE_BITPERM_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
Feature::sve_b16b16,
IsProcessorFeaturePresent(PF_ARM_SVE_B16B16_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
Feature::sve2_sha3,
IsProcessorFeaturePresent(PF_ARM_SVE_SHA3_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
Feature::sve2_sm4,
IsProcessorFeaturePresent(PF_ARM_SVE_SM4_INSTRUCTIONS_AVAILABLE) != FALSE,
);
// PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE means aes, sha1, sha2 and
// pmull support
let crypto =
IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) != FALSE;
enable_feature(Feature::aes, crypto);
enable_feature(Feature::pmull, crypto);
enable_feature(Feature::sha2, crypto);
}
}
value
}

View File

@@ -1,330 +0,0 @@
//! x86 run-time feature detection is OS independent.
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
use core::mem;
use crate::detect::{Feature, bit, cache};
/// Run-time feature detection on x86 works by using the CPUID instruction.
///
/// The [CPUID Wikipedia page][wiki_cpuid] contains
/// all the information about which flags to set to query which values, and in
/// which registers these are reported.
///
/// The definitive references are:
/// - [Intel 64 and IA-32 Architectures Software Developer's Manual Volume 2:
/// Instruction Set Reference, A-Z][intel64_ref].
/// - [AMD64 Architecture Programmer's Manual, Volume 3: General-Purpose and
/// System Instructions][amd64_ref].
///
/// [wiki_cpuid]: https://en.wikipedia.org/wiki/CPUID
/// [intel64_ref]: http://www.intel.de/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
/// [amd64_ref]: http://support.amd.com/TechDocs/24594.pdf
#[allow(clippy::similar_names)]
pub(crate) fn detect_features() -> cache::Initializer {
let mut value = cache::Initializer::default();
if cfg!(target_env = "sgx") {
// doesn't support this because it is untrusted data
return value;
}
// Calling `__cpuid`/`__cpuid_count` from here on is safe because the CPU
// has `cpuid` support.
// 0. EAX = 0: Basic Information:
// - EAX returns the "Highest Function Parameter", that is, the maximum
// leaf value for subsequent calls of `cpuinfo` in range [0,
// 0x8000_0000]. - The vendor ID is stored in 12 u8 ascii chars,
// returned in EBX, EDX, and ECX (in that order):
let (max_basic_leaf, vendor_id) = {
let CpuidResult { eax: max_basic_leaf, ebx, ecx, edx } = __cpuid(0);
let vendor_id: [[u8; 4]; 3] = [ebx.to_ne_bytes(), edx.to_ne_bytes(), ecx.to_ne_bytes()];
let vendor_id: [u8; 12] = unsafe { mem::transmute(vendor_id) };
(max_basic_leaf, vendor_id)
};
if max_basic_leaf < 1 {
// Earlier Intel 486, CPUID not implemented
return value;
}
// EAX = 1, ECX = 0: Queries "Processor Info and Feature Bits";
// Contains information about most x86 features.
let CpuidResult { ecx: proc_info_ecx, edx: proc_info_edx, .. } = __cpuid(0x0000_0001_u32);
// EAX = 7: Queries "Extended Features";
// Contains information about bmi,bmi2, and avx2 support.
let (
extended_features_ebx,
extended_features_ecx,
extended_features_edx,
extended_features_eax_leaf_1,
extended_features_edx_leaf_1,
) = if max_basic_leaf >= 7 {
let CpuidResult { ebx, ecx, edx, .. } = __cpuid(0x0000_0007_u32);
let CpuidResult { eax: eax_1, edx: edx_1, .. } =
__cpuid_count(0x0000_0007_u32, 0x0000_0001_u32);
(ebx, ecx, edx, eax_1, edx_1)
} else {
(0, 0, 0, 0, 0) // CPUID does not support "Extended Features"
};
// EAX = 0x8000_0000, ECX = 0: Get Highest Extended Function Supported
// - EAX returns the max leaf value for extended information, that is,
// `cpuid` calls in range [0x8000_0000; u32::MAX]:
let CpuidResult { eax: extended_max_basic_leaf, .. } = __cpuid(0x8000_0000_u32);
// EAX = 0x8000_0001, ECX=0: Queries "Extended Processor Info and Feature
// Bits"
let extended_proc_info_ecx = if extended_max_basic_leaf >= 1 {
let CpuidResult { ecx, .. } = __cpuid(0x8000_0001_u32);
ecx
} else {
0
};
{
// borrows value till the end of this scope:
let mut enable = |r, rb, f| {
let present = bit::test(r as usize, rb);
if present {
value.set(f as u32);
}
present
};
enable(proc_info_ecx, 0, Feature::sse3);
enable(proc_info_ecx, 1, Feature::pclmulqdq);
enable(proc_info_ecx, 9, Feature::ssse3);
enable(proc_info_ecx, 13, Feature::cmpxchg16b);
enable(proc_info_ecx, 19, Feature::sse4_1);
enable(proc_info_ecx, 20, Feature::sse4_2);
enable(proc_info_ecx, 22, Feature::movbe);
enable(proc_info_ecx, 23, Feature::popcnt);
enable(proc_info_ecx, 25, Feature::aes);
let f16c = enable(proc_info_ecx, 29, Feature::f16c);
enable(proc_info_ecx, 30, Feature::rdrand);
enable(extended_features_ebx, 18, Feature::rdseed);
enable(extended_features_ebx, 19, Feature::adx);
enable(extended_features_ebx, 11, Feature::rtm);
enable(proc_info_edx, 4, Feature::tsc);
enable(proc_info_edx, 23, Feature::mmx);
enable(proc_info_edx, 24, Feature::fxsr);
enable(proc_info_edx, 25, Feature::sse);
enable(proc_info_edx, 26, Feature::sse2);
enable(extended_features_ebx, 29, Feature::sha);
enable(extended_features_ecx, 8, Feature::gfni);
enable(extended_features_ecx, 9, Feature::vaes);
enable(extended_features_ecx, 10, Feature::vpclmulqdq);
enable(extended_features_ebx, 3, Feature::bmi1);
enable(extended_features_ebx, 8, Feature::bmi2);
enable(extended_features_ebx, 9, Feature::ermsb);
enable(extended_features_eax_leaf_1, 31, Feature::movrs);
// Detect if CPUID.19h available
if bit::test(extended_features_ecx as usize, 23) {
let CpuidResult { ebx, .. } = __cpuid(0x19);
enable(ebx, 0, Feature::kl);
enable(ebx, 2, Feature::widekl);
}
// This detects ABM on AMD CPUs and LZCNT on Intel CPUs.
// On intel CPUs with popcnt, lzcnt implements the
// "missing part" of ABM, so we map both to the same
// internal feature.
//
// The `is_x86_feature_detected!("lzcnt")` macro then
// internally maps to Feature::abm.
enable(extended_proc_info_ecx, 5, Feature::lzcnt);
// As Hygon Dhyana originates from AMD technology and shares most of the architecture with
// AMD's family 17h, but with different CPU Vendor ID("HygonGenuine")/Family series
// number(Family 18h).
//
// For CPUID feature bits, Hygon Dhyana(family 18h) share the same definition with AMD
// family 17h.
//
// Related AMD CPUID specification is https://www.amd.com/system/files/TechDocs/25481.pdf.
// Related Hygon kernel patch can be found on
// http://lkml.kernel.org/r/5ce86123a7b9dad925ac583d88d2f921040e859b.1538583282.git.puwen@hygon.cn
if vendor_id == *b"AuthenticAMD" || vendor_id == *b"HygonGenuine" {
// These features are available on AMD arch CPUs:
enable(extended_proc_info_ecx, 6, Feature::sse4a);
enable(extended_proc_info_ecx, 21, Feature::tbm);
enable(extended_proc_info_ecx, 11, Feature::xop);
}
// `XSAVE` and `AVX` support:
let cpu_xsave = bit::test(proc_info_ecx as usize, 26);
if cpu_xsave {
// 0. Here the CPU supports `XSAVE`.
// 1. Detect `OSXSAVE`, that is, whether the OS is AVX enabled and
// supports saving the state of the AVX/AVX2 vector registers on
// context-switches, see:
//
// - [intel: is avx enabled?][is_avx_enabled],
// - [mozilla: sse.cpp][mozilla_sse_cpp].
//
// [is_avx_enabled]: https://software.intel.com/en-us/blogs/2011/04/14/is-avx-enabled
// [mozilla_sse_cpp]: https://hg.mozilla.org/mozilla-central/file/64bab5cbb9b6/mozglue/build/SSE.cpp#l190
let cpu_osxsave = bit::test(proc_info_ecx as usize, 27);
if cpu_osxsave {
// 2. The OS must have signaled the CPU that it supports saving and
// restoring the:
//
// * SSE -> `XCR0.SSE[1]`
// * AVX -> `XCR0.AVX[2]`
// * AVX-512 -> `XCR0.AVX-512[7:5]`.
// * AMX -> `XCR0.AMX[18:17]`
// * APX -> `XCR0.APX[19]`
//
// by setting the corresponding bits of `XCR0` to `1`.
//
// This is safe because the CPU supports `xsave`
// and the OS has set `osxsave`.
let xcr0 = unsafe { _xgetbv(0) };
// Test `XCR0.SSE[1]` and `XCR0.AVX[2]` with the mask `0b110 == 6`:
let os_avx_support = xcr0 & 6 == 6;
// Test `XCR0.AVX-512[7:5]` with the mask `0b1110_0000 == 0xe0`:
let os_avx512_support = xcr0 & 0xe0 == 0xe0;
// Test `XCR0.AMX[18:17]` with the mask `0b110_0000_0000_0000_0000 == 0x60000`
let os_amx_support = xcr0 & 0x60000 == 0x60000;
// Test `XCR0.APX[19]` with the mask `0b1000_0000_0000_0000_0000 == 0x80000`
let os_apx_support = xcr0 & 0x80000 == 0x80000;
// Only if the OS and the CPU support saving/restoring the AVX
// registers we enable `xsave` support:
if os_avx_support {
// See "13.3 ENABLING THE XSAVE FEATURE SET AND XSAVE-ENABLED
// FEATURES" in the "Intel® 64 and IA-32 Architectures Software
// Developers Manual, Volume 1: Basic Architecture":
//
// "Software enables the XSAVE feature set by setting
// CR4.OSXSAVE[bit 18] to 1 (e.g., with the MOV to CR4
// instruction). If this bit is 0, execution of any of XGETBV,
// XRSTOR, XRSTORS, XSAVE, XSAVEC, XSAVEOPT, XSAVES, and XSETBV
// causes an invalid-opcode exception (#UD)"
//
enable(proc_info_ecx, 26, Feature::xsave);
// For `xsaveopt`, `xsavec`, and `xsaves` we need to query:
// Processor Extended State Enumeration Sub-leaf (EAX = 0DH,
// ECX = 1):
if max_basic_leaf >= 0xd {
let CpuidResult { eax: proc_extended_state1_eax, .. } =
__cpuid_count(0xd_u32, 1);
enable(proc_extended_state1_eax, 0, Feature::xsaveopt);
enable(proc_extended_state1_eax, 1, Feature::xsavec);
enable(proc_extended_state1_eax, 3, Feature::xsaves);
}
// FMA (uses 256-bit wide registers):
let fma = enable(proc_info_ecx, 12, Feature::fma);
// And AVX/AVX2:
enable(proc_info_ecx, 28, Feature::avx);
enable(extended_features_ebx, 5, Feature::avx2);
// "Short" versions of AVX512 instructions
enable(extended_features_eax_leaf_1, 4, Feature::avxvnni);
enable(extended_features_eax_leaf_1, 23, Feature::avxifma);
enable(extended_features_edx_leaf_1, 4, Feature::avxvnniint8);
enable(extended_features_edx_leaf_1, 5, Feature::avxneconvert);
enable(extended_features_edx_leaf_1, 10, Feature::avxvnniint16);
enable(extended_features_eax_leaf_1, 0, Feature::sha512);
enable(extended_features_eax_leaf_1, 1, Feature::sm3);
enable(extended_features_eax_leaf_1, 2, Feature::sm4);
// For AVX-512 the OS also needs to support saving/restoring
// the extended state, only then we enable AVX-512 support:
// Also, Rust makes `avx512f` imply `fma` and `f16c`, because
// otherwise the assembler is broken. But Intel doesn't guarantee
// that `fma` and `f16c` are available with `avx512f`, so we
// need to check for them separately.
if os_avx512_support && f16c && fma {
enable(extended_features_ebx, 16, Feature::avx512f);
enable(extended_features_ebx, 17, Feature::avx512dq);
enable(extended_features_ebx, 21, Feature::avx512ifma);
enable(extended_features_ebx, 26, Feature::avx512pf);
enable(extended_features_ebx, 27, Feature::avx512er);
enable(extended_features_ebx, 28, Feature::avx512cd);
enable(extended_features_ebx, 30, Feature::avx512bw);
enable(extended_features_ebx, 31, Feature::avx512vl);
enable(extended_features_ecx, 1, Feature::avx512vbmi);
enable(extended_features_ecx, 6, Feature::avx512vbmi2);
enable(extended_features_ecx, 11, Feature::avx512vnni);
enable(extended_features_ecx, 12, Feature::avx512bitalg);
enable(extended_features_ecx, 14, Feature::avx512vpopcntdq);
enable(extended_features_edx, 8, Feature::avx512vp2intersect);
enable(extended_features_edx, 23, Feature::avx512fp16);
enable(extended_features_eax_leaf_1, 5, Feature::avx512bf16);
}
}
if os_amx_support {
enable(extended_features_edx, 24, Feature::amx_tile);
enable(extended_features_edx, 25, Feature::amx_int8);
enable(extended_features_edx, 22, Feature::amx_bf16);
enable(extended_features_eax_leaf_1, 21, Feature::amx_fp16);
enable(extended_features_edx_leaf_1, 8, Feature::amx_complex);
if max_basic_leaf >= 0x1e {
let CpuidResult { eax: amx_feature_flags_eax, .. } =
__cpuid_count(0x1e_u32, 1);
enable(amx_feature_flags_eax, 4, Feature::amx_fp8);
enable(amx_feature_flags_eax, 6, Feature::amx_tf32);
enable(amx_feature_flags_eax, 7, Feature::amx_avx512);
enable(amx_feature_flags_eax, 8, Feature::amx_movrs);
}
}
if os_apx_support {
enable(extended_features_edx_leaf_1, 21, Feature::apxf);
}
let avx10_1 = enable(extended_features_edx_leaf_1, 19, Feature::avx10_1);
if avx10_1 {
let CpuidResult { ebx, .. } = __cpuid(0x24);
let avx10_version = ebx & 0xff;
if avx10_version >= 2 {
value.set(Feature::avx10_2 as u32);
}
}
}
}
}
// Unfortunately, some Skylake chips erroneously report support for BMI1 and
// BMI2 without actual support. These chips don't support AVX, and it seems
// that all Intel chips with non-erroneous support BMI do (I didn't check
// other vendors), so we can disable these flags for chips that don't also
// report support for AVX.
//
// It's possible this will pessimize future chips that do support BMI and
// not AVX, but this seems minor compared to a hard crash you get when
// executing an unsupported instruction (to put it another way, it's safe
// for us to under-report CPU features, but not to over-report them). Still,
// to limit any impact this may have in the future, we only do this for
// Intel chips, as it's a bug only present in their chips.
//
// This bug is documented as `SKL052` in the errata section of this document:
// http://www.intel.com/content/dam/www/public/us/en/documents/specification-updates/desktop-6th-gen-core-family-spec-update.pdf
if vendor_id == *b"GenuineIntel" && !value.test(Feature::avx as u32) {
value.unset(Feature::bmi1 as u32);
value.unset(Feature::bmi2 as u32);
}
value
}

View File

@@ -1,35 +0,0 @@
//! Run-time feature detection for the Rust standard library.
//!
//! To detect whether a feature is enabled in the system running the binary
//! use one of the appropriate macro for the target:
//!
//! * `x86` and `x86_64`: [`is_x86_feature_detected`]
//! * `arm`: [`is_arm_feature_detected`]
//! * `aarch64`: [`is_aarch64_feature_detected`]
//! * `riscv`: [`is_riscv_feature_detected`]
//! * `mips`: [`is_mips_feature_detected`]
//! * `mips64`: [`is_mips64_feature_detected`]
//! * `powerpc`: [`is_powerpc_feature_detected`]
//! * `powerpc64`: [`is_powerpc64_feature_detected`]
//! * `loongarch`: [`is_loongarch_feature_detected`]
//! * `s390x`: [`is_s390x_feature_detected`]
#![unstable(feature = "stdarch_internal", issue = "none")]
#![feature(staged_api, doc_cfg, allow_internal_unstable)]
#![deny(rust_2018_idioms)]
#![allow(clippy::shadow_reuse)]
#![cfg_attr(test, allow(unused_imports))]
#![no_std]
#![allow(internal_features)]
#[cfg(test)]
#[macro_use]
extern crate std;
// rust-lang/rust#83888: removing `extern crate` gives an error that `vec_spare>
#[allow(unused_extern_crates)]
extern crate alloc;
#[doc(hidden)]
#[unstable(feature = "stdarch_internal", issue = "none")]
pub mod detect;

View File

@@ -1,335 +0,0 @@
#![allow(internal_features, unused_features)]
#![feature(stdarch_internal)]
#![cfg_attr(target_arch = "arm", feature(stdarch_arm_feature_detection))]
#![cfg_attr(
any(target_arch = "aarch64", target_arch = "arm64ec"),
feature(stdarch_aarch64_feature_detection)
)]
#![cfg_attr(
any(target_arch = "riscv32", target_arch = "riscv64"),
feature(stdarch_riscv_feature_detection)
)]
#![cfg_attr(target_arch = "powerpc", feature(stdarch_powerpc_feature_detection))]
#![cfg_attr(target_arch = "powerpc64", feature(stdarch_powerpc_feature_detection))]
#![allow(clippy::unwrap_used, clippy::use_debug, clippy::print_stdout)]
#[cfg_attr(
any(
target_arch = "arm",
target_arch = "aarch64",
target_arch = "arm64ec",
target_arch = "riscv32",
target_arch = "riscv64",
target_arch = "powerpc",
target_arch = "powerpc64",
target_arch = "s390x",
),
macro_use
)]
#[cfg(any(
target_arch = "arm",
target_arch = "aarch64",
target_arch = "arm64ec",
target_arch = "riscv32",
target_arch = "riscv64",
target_arch = "powerpc",
target_arch = "powerpc64",
target_arch = "s390x",
))]
extern crate std_detect;
#[test]
fn all() {
for (f, e) in std_detect::detect::features() {
println!("{f}: {e}");
}
}
#[test]
#[cfg(all(target_arch = "arm", target_os = "freebsd"))]
fn arm_freebsd() {
println!("neon: {}", is_arm_feature_detected!("neon"));
println!("pmull: {}", is_arm_feature_detected!("pmull"));
println!("crc: {}", is_arm_feature_detected!("crc"));
println!("aes: {}", is_arm_feature_detected!("aes"));
println!("sha2: {}", is_arm_feature_detected!("sha2"));
}
#[test]
#[cfg(all(target_arch = "arm", any(target_os = "linux", target_os = "android")))]
fn arm_linux() {
println!("neon: {}", is_arm_feature_detected!("neon"));
println!("pmull: {}", is_arm_feature_detected!("pmull"));
println!("crc: {}", is_arm_feature_detected!("crc"));
println!("aes: {}", is_arm_feature_detected!("aes"));
println!("sha2: {}", is_arm_feature_detected!("sha2"));
println!("dotprod: {}", is_arm_feature_detected!("dotprod"));
println!("i8mm: {}", is_arm_feature_detected!("i8mm"));
}
#[test]
#[cfg(all(target_arch = "aarch64", any(target_os = "linux", target_os = "android")))]
fn aarch64_linux() {
println!("asimd: {}", is_aarch64_feature_detected!("asimd"));
println!("neon: {}", is_aarch64_feature_detected!("neon"));
println!("pmull: {}", is_aarch64_feature_detected!("pmull"));
println!("fp: {}", is_aarch64_feature_detected!("fp"));
println!("fp16: {}", is_aarch64_feature_detected!("fp16"));
println!("sve: {}", is_aarch64_feature_detected!("sve"));
println!("crc: {}", is_aarch64_feature_detected!("crc"));
println!("lse: {}", is_aarch64_feature_detected!("lse"));
println!("lse2: {}", is_aarch64_feature_detected!("lse2"));
println!("lse128: {}", is_aarch64_feature_detected!("lse128"));
println!("rdm: {}", is_aarch64_feature_detected!("rdm"));
println!("rcpc: {}", is_aarch64_feature_detected!("rcpc"));
println!("rcpc2: {}", is_aarch64_feature_detected!("rcpc2"));
println!("rcpc3: {}", is_aarch64_feature_detected!("rcpc3"));
println!("dotprod: {}", is_aarch64_feature_detected!("dotprod"));
println!("fhm: {}", is_aarch64_feature_detected!("fhm"));
println!("dit: {}", is_aarch64_feature_detected!("dit"));
println!("flagm: {}", is_aarch64_feature_detected!("flagm"));
println!("flagm2: {}", is_aarch64_feature_detected!("flagm2"));
println!("ssbs: {}", is_aarch64_feature_detected!("ssbs"));
println!("sb: {}", is_aarch64_feature_detected!("sb"));
println!("paca: {}", is_aarch64_feature_detected!("paca"));
println!("pacg: {}", is_aarch64_feature_detected!("pacg"));
// println!("pauth-lr: {}", is_aarch64_feature_detected!("pauth-lr"));
println!("dpb: {}", is_aarch64_feature_detected!("dpb"));
println!("dpb2: {}", is_aarch64_feature_detected!("dpb2"));
println!("sve-b16b16: {}", is_aarch64_feature_detected!("sve-b16b16"));
println!("sve2: {}", is_aarch64_feature_detected!("sve2"));
println!("sve2p1: {}", is_aarch64_feature_detected!("sve2p1"));
println!("sve2-aes: {}", is_aarch64_feature_detected!("sve2-aes"));
println!("sve2-sm4: {}", is_aarch64_feature_detected!("sve2-sm4"));
println!("sve2-sha3: {}", is_aarch64_feature_detected!("sve2-sha3"));
println!("sve2-bitperm: {}", is_aarch64_feature_detected!("sve2-bitperm"));
println!("frintts: {}", is_aarch64_feature_detected!("frintts"));
println!("i8mm: {}", is_aarch64_feature_detected!("i8mm"));
println!("f32mm: {}", is_aarch64_feature_detected!("f32mm"));
println!("f64mm: {}", is_aarch64_feature_detected!("f64mm"));
println!("bf16: {}", is_aarch64_feature_detected!("bf16"));
println!("rand: {}", is_aarch64_feature_detected!("rand"));
println!("bti: {}", is_aarch64_feature_detected!("bti"));
println!("mte: {}", is_aarch64_feature_detected!("mte"));
println!("jsconv: {}", is_aarch64_feature_detected!("jsconv"));
println!("fcma: {}", is_aarch64_feature_detected!("fcma"));
println!("aes: {}", is_aarch64_feature_detected!("aes"));
println!("sha2: {}", is_aarch64_feature_detected!("sha2"));
println!("sha3: {}", is_aarch64_feature_detected!("sha3"));
println!("sm4: {}", is_aarch64_feature_detected!("sm4"));
println!("hbc: {}", is_aarch64_feature_detected!("hbc"));
println!("mops: {}", is_aarch64_feature_detected!("mops"));
println!("ecv: {}", is_aarch64_feature_detected!("ecv"));
println!("cssc: {}", is_aarch64_feature_detected!("cssc"));
println!("fpmr: {}", is_aarch64_feature_detected!("fpmr"));
println!("lut: {}", is_aarch64_feature_detected!("lut"));
println!("faminmax: {}", is_aarch64_feature_detected!("faminmax"));
println!("fp8: {}", is_aarch64_feature_detected!("fp8"));
println!("fp8fma: {}", is_aarch64_feature_detected!("fp8fma"));
println!("fp8dot4: {}", is_aarch64_feature_detected!("fp8dot4"));
println!("fp8dot2: {}", is_aarch64_feature_detected!("fp8dot2"));
println!("wfxt: {}", is_aarch64_feature_detected!("wfxt"));
println!("sme: {}", is_aarch64_feature_detected!("sme"));
println!("sme-b16b16: {}", is_aarch64_feature_detected!("sme-b16b16"));
println!("sme-i16i64: {}", is_aarch64_feature_detected!("sme-i16i64"));
println!("sme-f64f64: {}", is_aarch64_feature_detected!("sme-f64f64"));
println!("sme-fa64: {}", is_aarch64_feature_detected!("sme-fa64"));
println!("sme2: {}", is_aarch64_feature_detected!("sme2"));
println!("sme2p1: {}", is_aarch64_feature_detected!("sme2p1"));
println!("sme-f16f16: {}", is_aarch64_feature_detected!("sme-f16f16"));
println!("sme-lutv2: {}", is_aarch64_feature_detected!("sme-lutv2"));
println!("sme-f8f16: {}", is_aarch64_feature_detected!("sme-f8f16"));
println!("sme-f8f32: {}", is_aarch64_feature_detected!("sme-f8f32"));
println!("ssve-fp8fma: {}", is_aarch64_feature_detected!("ssve-fp8fma"));
println!("ssve-fp8dot4: {}", is_aarch64_feature_detected!("ssve-fp8dot4"));
println!("ssve-fp8dot2: {}", is_aarch64_feature_detected!("ssve-fp8dot2"));
}
// Print-only dump of AArch64 runtime feature detection on Windows.
// Nothing is asserted: the output is meant to be read from the test logs to
// see what the detection backend reports on the machine running the tests.
#[test]
#[cfg(all(any(target_arch = "aarch64", target_arch = "arm64ec"), target_os = "windows"))]
fn aarch64_windows() {
    println!("asimd: {:?}", is_aarch64_feature_detected!("asimd"));
    println!("fp: {:?}", is_aarch64_feature_detected!("fp"));
    println!("crc: {:?}", is_aarch64_feature_detected!("crc"));
    println!("lse: {:?}", is_aarch64_feature_detected!("lse"));
    println!("dotprod: {:?}", is_aarch64_feature_detected!("dotprod"));
    println!("jsconv: {:?}", is_aarch64_feature_detected!("jsconv"));
    println!("rcpc: {:?}", is_aarch64_feature_detected!("rcpc"));
    println!("aes: {:?}", is_aarch64_feature_detected!("aes"));
    println!("pmull: {:?}", is_aarch64_feature_detected!("pmull"));
    println!("sha2: {:?}", is_aarch64_feature_detected!("sha2"));
}

// Same print-only dump for the FreeBSD/OpenBSD detection backend.
#[test]
#[cfg(all(target_arch = "aarch64", any(target_os = "freebsd", target_os = "openbsd")))]
fn aarch64_bsd() {
    println!("asimd: {:?}", is_aarch64_feature_detected!("asimd"));
    println!("pmull: {:?}", is_aarch64_feature_detected!("pmull"));
    println!("fp: {:?}", is_aarch64_feature_detected!("fp"));
    println!("fp16: {:?}", is_aarch64_feature_detected!("fp16"));
    println!("sve: {:?}", is_aarch64_feature_detected!("sve"));
    println!("crc: {:?}", is_aarch64_feature_detected!("crc"));
    println!("lse: {:?}", is_aarch64_feature_detected!("lse"));
    println!("lse2: {:?}", is_aarch64_feature_detected!("lse2"));
    println!("rdm: {:?}", is_aarch64_feature_detected!("rdm"));
    println!("rcpc: {:?}", is_aarch64_feature_detected!("rcpc"));
    println!("dotprod: {:?}", is_aarch64_feature_detected!("dotprod"));
    println!("paca: {:?}", is_aarch64_feature_detected!("paca"));
    println!("pacg: {:?}", is_aarch64_feature_detected!("pacg"));
    println!("aes: {:?}", is_aarch64_feature_detected!("aes"));
    println!("sha2: {:?}", is_aarch64_feature_detected!("sha2"));
}

// Same print-only dump for the Apple (sysctl-based) detection backend.
#[test]
#[cfg(all(target_arch = "aarch64", target_vendor = "apple"))]
fn aarch64_darwin() {
    println!("asimd: {:?}", is_aarch64_feature_detected!("asimd"));
    println!("fp: {:?}", is_aarch64_feature_detected!("fp"));
    println!("fp16: {:?}", is_aarch64_feature_detected!("fp16"));
    println!("pmull: {:?}", is_aarch64_feature_detected!("pmull"));
    println!("crc: {:?}", is_aarch64_feature_detected!("crc"));
    println!("lse: {:?}", is_aarch64_feature_detected!("lse"));
    println!("lse2: {:?}", is_aarch64_feature_detected!("lse2"));
    println!("rdm: {:?}", is_aarch64_feature_detected!("rdm"));
    println!("rcpc: {:?}", is_aarch64_feature_detected!("rcpc"));
    println!("rcpc2: {:?}", is_aarch64_feature_detected!("rcpc2"));
    println!("dotprod: {:?}", is_aarch64_feature_detected!("dotprod"));
    println!("fhm: {:?}", is_aarch64_feature_detected!("fhm"));
    println!("flagm: {:?}", is_aarch64_feature_detected!("flagm"));
    println!("ssbs: {:?}", is_aarch64_feature_detected!("ssbs"));
    println!("sb: {:?}", is_aarch64_feature_detected!("sb"));
    println!("paca: {:?}", is_aarch64_feature_detected!("paca"));
    println!("dpb: {:?}", is_aarch64_feature_detected!("dpb"));
    println!("dpb2: {:?}", is_aarch64_feature_detected!("dpb2"));
    println!("frintts: {:?}", is_aarch64_feature_detected!("frintts"));
    println!("i8mm: {:?}", is_aarch64_feature_detected!("i8mm"));
    println!("bf16: {:?}", is_aarch64_feature_detected!("bf16"));
    println!("bti: {:?}", is_aarch64_feature_detected!("bti"));
    println!("fcma: {:?}", is_aarch64_feature_detected!("fcma"));
    println!("jsconv: {:?}", is_aarch64_feature_detected!("jsconv"));
    println!("aes: {:?}", is_aarch64_feature_detected!("aes"));
    println!("sha2: {:?}", is_aarch64_feature_detected!("sha2"));
    println!("sha3: {:?}", is_aarch64_feature_detected!("sha3"));
}
// Print-only dump of every RISC-V extension the Linux/Android detection
// backend knows about. Nothing is asserted — the macro requires a literal
// feature name per call, hence the long flat list; the output is meant to
// be inspected in CI logs.
#[test]
#[cfg(all(
    any(target_arch = "riscv32", target_arch = "riscv64"),
    any(target_os = "linux", target_os = "android")
))]
fn riscv_linux() {
    println!("rv32i: {}", is_riscv_feature_detected!("rv32i"));
    println!("rv32e: {}", is_riscv_feature_detected!("rv32e"));
    println!("rv64i: {}", is_riscv_feature_detected!("rv64i"));
    println!("rv128i: {}", is_riscv_feature_detected!("rv128i"));
    println!("unaligned-scalar-mem: {}", is_riscv_feature_detected!("unaligned-scalar-mem"));
    println!("unaligned-vector-mem: {}", is_riscv_feature_detected!("unaligned-vector-mem"));
    println!("zicsr: {}", is_riscv_feature_detected!("zicsr"));
    println!("zicntr: {}", is_riscv_feature_detected!("zicntr"));
    println!("zihpm: {}", is_riscv_feature_detected!("zihpm"));
    println!("zifencei: {}", is_riscv_feature_detected!("zifencei"));
    println!("zihintntl: {}", is_riscv_feature_detected!("zihintntl"));
    println!("zihintpause: {}", is_riscv_feature_detected!("zihintpause"));
    println!("zimop: {}", is_riscv_feature_detected!("zimop"));
    println!("zicbom: {}", is_riscv_feature_detected!("zicbom"));
    println!("zicboz: {}", is_riscv_feature_detected!("zicboz"));
    println!("zicond: {}", is_riscv_feature_detected!("zicond"));
    println!("m: {}", is_riscv_feature_detected!("m"));
    println!("a: {}", is_riscv_feature_detected!("a"));
    println!("zalrsc: {}", is_riscv_feature_detected!("zalrsc"));
    println!("zaamo: {}", is_riscv_feature_detected!("zaamo"));
    println!("zawrs: {}", is_riscv_feature_detected!("zawrs"));
    println!("zabha: {}", is_riscv_feature_detected!("zabha"));
    println!("zacas: {}", is_riscv_feature_detected!("zacas"));
    println!("zam: {}", is_riscv_feature_detected!("zam"));
    println!("ztso: {}", is_riscv_feature_detected!("ztso"));
    println!("f: {}", is_riscv_feature_detected!("f"));
    println!("d: {}", is_riscv_feature_detected!("d"));
    println!("q: {}", is_riscv_feature_detected!("q"));
    println!("zfh: {}", is_riscv_feature_detected!("zfh"));
    println!("zfhmin: {}", is_riscv_feature_detected!("zfhmin"));
    println!("zfa: {}", is_riscv_feature_detected!("zfa"));
    println!("zfbfmin: {}", is_riscv_feature_detected!("zfbfmin"));
    println!("zfinx: {}", is_riscv_feature_detected!("zfinx"));
    println!("zdinx: {}", is_riscv_feature_detected!("zdinx"));
    println!("zhinx: {}", is_riscv_feature_detected!("zhinx"));
    println!("zhinxmin: {}", is_riscv_feature_detected!("zhinxmin"));
    println!("c: {}", is_riscv_feature_detected!("c"));
    println!("zca: {}", is_riscv_feature_detected!("zca"));
    println!("zcf: {}", is_riscv_feature_detected!("zcf"));
    println!("zcd: {}", is_riscv_feature_detected!("zcd"));
    println!("zcb: {}", is_riscv_feature_detected!("zcb"));
    println!("zcmop: {}", is_riscv_feature_detected!("zcmop"));
    println!("b: {}", is_riscv_feature_detected!("b"));
    println!("zba: {}", is_riscv_feature_detected!("zba"));
    println!("zbb: {}", is_riscv_feature_detected!("zbb"));
    println!("zbc: {}", is_riscv_feature_detected!("zbc"));
    println!("zbs: {}", is_riscv_feature_detected!("zbs"));
    println!("zbkb: {}", is_riscv_feature_detected!("zbkb"));
    println!("zbkc: {}", is_riscv_feature_detected!("zbkc"));
    println!("zbkx: {}", is_riscv_feature_detected!("zbkx"));
    println!("zknd: {}", is_riscv_feature_detected!("zknd"));
    println!("zkne: {}", is_riscv_feature_detected!("zkne"));
    println!("zknh: {}", is_riscv_feature_detected!("zknh"));
    println!("zksed: {}", is_riscv_feature_detected!("zksed"));
    println!("zksh: {}", is_riscv_feature_detected!("zksh"));
    println!("zkr: {}", is_riscv_feature_detected!("zkr"));
    println!("zkn: {}", is_riscv_feature_detected!("zkn"));
    println!("zks: {}", is_riscv_feature_detected!("zks"));
    println!("zk: {}", is_riscv_feature_detected!("zk"));
    println!("zkt: {}", is_riscv_feature_detected!("zkt"));
    println!("v: {}", is_riscv_feature_detected!("v"));
    println!("zve32x: {}", is_riscv_feature_detected!("zve32x"));
    println!("zve32f: {}", is_riscv_feature_detected!("zve32f"));
    println!("zve64x: {}", is_riscv_feature_detected!("zve64x"));
    println!("zve64f: {}", is_riscv_feature_detected!("zve64f"));
    println!("zve64d: {}", is_riscv_feature_detected!("zve64d"));
    println!("zvfh: {}", is_riscv_feature_detected!("zvfh"));
    println!("zvfhmin: {}", is_riscv_feature_detected!("zvfhmin"));
    println!("zvfbfmin: {}", is_riscv_feature_detected!("zvfbfmin"));
    println!("zvfbfwma: {}", is_riscv_feature_detected!("zvfbfwma"));
    println!("zvbb: {}", is_riscv_feature_detected!("zvbb"));
    println!("zvbc: {}", is_riscv_feature_detected!("zvbc"));
    println!("zvkb: {}", is_riscv_feature_detected!("zvkb"));
    println!("zvkg: {}", is_riscv_feature_detected!("zvkg"));
    println!("zvkned: {}", is_riscv_feature_detected!("zvkned"));
    println!("zvknha: {}", is_riscv_feature_detected!("zvknha"));
    println!("zvknhb: {}", is_riscv_feature_detected!("zvknhb"));
    println!("zvksed: {}", is_riscv_feature_detected!("zvksed"));
    println!("zvksh: {}", is_riscv_feature_detected!("zvksh"));
    println!("zvkn: {}", is_riscv_feature_detected!("zvkn"));
    println!("zvknc: {}", is_riscv_feature_detected!("zvknc"));
    println!("zvkng: {}", is_riscv_feature_detected!("zvkng"));
    println!("zvks: {}", is_riscv_feature_detected!("zvks"));
    println!("zvksc: {}", is_riscv_feature_detected!("zvksc"));
    println!("zvksg: {}", is_riscv_feature_detected!("zvksg"));
    println!("zvkt: {}", is_riscv_feature_detected!("zvkt"));
    println!("j: {}", is_riscv_feature_detected!("j"));
    println!("p: {}", is_riscv_feature_detected!("p"));
}
// Print-only dump of PowerPC (32-bit) feature detection on Linux.
#[test]
#[cfg(all(target_arch = "powerpc", target_os = "linux"))]
fn powerpc_linux() {
    println!("altivec: {}", is_powerpc_feature_detected!("altivec"));
    println!("vsx: {}", is_powerpc_feature_detected!("vsx"));
    println!("power8: {}", is_powerpc_feature_detected!("power8"));
}

// Print-only dump of PowerPC64 feature detection on Linux and the BSDs.
#[test]
#[cfg(all(
    target_arch = "powerpc64",
    any(target_os = "linux", target_os = "freebsd", target_os = "openbsd"),
))]
fn powerpc64_linux_or_bsd() {
    println!("altivec: {}", is_powerpc64_feature_detected!("altivec"));
    println!("vsx: {}", is_powerpc64_feature_detected!("vsx"));
    println!("power8: {}", is_powerpc64_feature_detected!("power8"));
    println!("power9: {}", is_powerpc64_feature_detected!("power9"));
}

// Print-only dump of the single s390x feature the Linux backend reports.
#[test]
#[cfg(all(target_arch = "s390x", target_os = "linux",))]
fn s390x_linux() {
    println!("vector: {}", is_s390x_feature_detected!("vector"));
}

View File

@@ -1,110 +0,0 @@
#![allow(internal_features, unused_features)]
#![cfg_attr(
any(
target_arch = "arm",
target_arch = "aarch64",
target_arch = "arm64ec",
target_arch = "x86",
target_arch = "x86_64",
target_arch = "powerpc",
target_arch = "powerpc64",
target_arch = "s390x",
target_arch = "riscv32",
target_arch = "riscv64",
target_arch = "loongarch32",
target_arch = "loongarch64"
),
feature(stdarch_internal)
)]
#![cfg_attr(target_arch = "arm", feature(stdarch_arm_feature_detection))]
#![cfg_attr(
any(target_arch = "aarch64", target_arch = "arm64ec"),
feature(stdarch_aarch64_feature_detection)
)]
#![cfg_attr(
any(target_arch = "powerpc", target_arch = "powerpc64"),
feature(stdarch_powerpc_feature_detection)
)]
#![cfg_attr(
any(target_arch = "riscv32", target_arch = "riscv64"),
feature(stdarch_riscv_feature_detection)
)]
#![cfg_attr(
any(target_arch = "loongarch32", target_arch = "loongarch64"),
feature(stdarch_loongarch_feature_detection)
)]
#[cfg(any(
target_arch = "arm",
target_arch = "aarch64",
target_arch = "arm64ec",
target_arch = "x86",
target_arch = "x86_64",
target_arch = "powerpc",
target_arch = "powerpc64",
target_arch = "s390x",
target_arch = "riscv32",
target_arch = "riscv64",
target_arch = "loongarch32",
target_arch = "loongarch64"
))]
#[macro_use]
extern crate std_detect;
// Each test below is a macro-syntax check, not a detection check: it verifies
// that the per-architecture `is_*_feature_detected!` macro compiles with a
// known feature name, both with and without a trailing comma. The detection
// result itself is deliberately discarded via `let _ =`.
#[test]
#[cfg(target_arch = "arm")]
fn arm() {
    let _ = is_arm_feature_detected!("neon");
    let _ = is_arm_feature_detected!("neon",);
}

#[test]
#[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))]
fn aarch64() {
    let _ = is_aarch64_feature_detected!("fp");
    let _ = is_aarch64_feature_detected!("fp",);
}

#[test]
#[cfg(any(target_arch = "loongarch32", target_arch = "loongarch64"))]
fn loongarch() {
    let _ = is_loongarch_feature_detected!("32s");
    let _ = is_loongarch_feature_detected!("32s",);
    let _ = is_loongarch_feature_detected!("lsx");
    let _ = is_loongarch_feature_detected!("lsx",);
}

#[test]
#[cfg(target_arch = "powerpc")]
fn powerpc() {
    let _ = is_powerpc_feature_detected!("altivec");
    let _ = is_powerpc_feature_detected!("altivec",);
}

#[test]
#[cfg(target_arch = "powerpc64")]
fn powerpc64() {
    let _ = is_powerpc64_feature_detected!("altivec");
    let _ = is_powerpc64_feature_detected!("altivec",);
}

#[test]
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
fn riscv() {
    let _ = is_riscv_feature_detected!("zk");
    let _ = is_riscv_feature_detected!("zk",);
}

#[test]
#[cfg(target_arch = "s390x")]
fn s390x() {
    let _ = is_s390x_feature_detected!("vector");
    let _ = is_s390x_feature_detected!("vector",);
}

#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn x86() {
    let _ = is_x86_feature_detected!("sse");
    let _ = is_x86_feature_detected!("sse",);
}

View File

@@ -1,90 +0,0 @@
#![cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#![allow(internal_features)]
#![feature(stdarch_internal, x86_amx_intrinsics, xop_target_feature, movrs_target_feature)]
#[macro_use]
extern crate std_detect;
// Print-only dump of every x86/x86_64 feature the detection backend knows
// about. Nothing is asserted; the macro needs a literal feature name per
// call, hence the flat list. Note: a few printed labels intentionally differ
// slightly from the feature string they query (e.g. "avx512_ifma" vs
// "avx512ifma", "bmi" vs "bmi1") — the labels are output-only.
#[test]
fn dump() {
    println!("aes: {:?}", is_x86_feature_detected!("aes"));
    println!("pclmulqdq: {:?}", is_x86_feature_detected!("pclmulqdq"));
    println!("rdrand: {:?}", is_x86_feature_detected!("rdrand"));
    println!("rdseed: {:?}", is_x86_feature_detected!("rdseed"));
    println!("tsc: {:?}", is_x86_feature_detected!("tsc"));
    println!("sse: {:?}", is_x86_feature_detected!("sse"));
    println!("sse2: {:?}", is_x86_feature_detected!("sse2"));
    println!("sse3: {:?}", is_x86_feature_detected!("sse3"));
    println!("ssse3: {:?}", is_x86_feature_detected!("ssse3"));
    println!("sse4.1: {:?}", is_x86_feature_detected!("sse4.1"));
    println!("sse4.2: {:?}", is_x86_feature_detected!("sse4.2"));
    println!("sse4a: {:?}", is_x86_feature_detected!("sse4a"));
    println!("sha: {:?}", is_x86_feature_detected!("sha"));
    println!("f16c: {:?}", is_x86_feature_detected!("f16c"));
    println!("avx: {:?}", is_x86_feature_detected!("avx"));
    println!("avx2: {:?}", is_x86_feature_detected!("avx2"));
    println!("sha512: {:?}", is_x86_feature_detected!("sha512"));
    println!("sm3: {:?}", is_x86_feature_detected!("sm3"));
    println!("sm4: {:?}", is_x86_feature_detected!("sm4"));
    println!("avx512f: {:?}", is_x86_feature_detected!("avx512f"));
    println!("avx512cd: {:?}", is_x86_feature_detected!("avx512cd"));
    println!("avx512er: {:?}", is_x86_feature_detected!("avx512er"));
    println!("avx512pf: {:?}", is_x86_feature_detected!("avx512pf"));
    println!("avx512bw: {:?}", is_x86_feature_detected!("avx512bw"));
    println!("avx512dq: {:?}", is_x86_feature_detected!("avx512dq"));
    println!("avx512vl: {:?}", is_x86_feature_detected!("avx512vl"));
    println!("avx512_ifma: {:?}", is_x86_feature_detected!("avx512ifma"));
    println!("avx512vbmi {:?}", is_x86_feature_detected!("avx512vbmi"));
    println!("avx512_vpopcntdq: {:?}", is_x86_feature_detected!("avx512vpopcntdq"));
    println!("avx512vbmi2: {:?}", is_x86_feature_detected!("avx512vbmi2"));
    println!("gfni: {:?}", is_x86_feature_detected!("gfni"));
    println!("vaes: {:?}", is_x86_feature_detected!("vaes"));
    println!("vpclmulqdq: {:?}", is_x86_feature_detected!("vpclmulqdq"));
    println!("avx512vnni: {:?}", is_x86_feature_detected!("avx512vnni"));
    println!("avx512bitalg: {:?}", is_x86_feature_detected!("avx512bitalg"));
    println!("avx512bf16: {:?}", is_x86_feature_detected!("avx512bf16"));
    println!("avx512vp2intersect: {:?}", is_x86_feature_detected!("avx512vp2intersect"));
    println!("avx512fp16: {:?}", is_x86_feature_detected!("avx512fp16"));
    println!("fma: {:?}", is_x86_feature_detected!("fma"));
    println!("abm: {:?}", is_x86_feature_detected!("abm"));
    println!("bmi: {:?}", is_x86_feature_detected!("bmi1"));
    println!("bmi2: {:?}", is_x86_feature_detected!("bmi2"));
    println!("tbm: {:?}", is_x86_feature_detected!("tbm"));
    println!("popcnt: {:?}", is_x86_feature_detected!("popcnt"));
    println!("lzcnt: {:?}", is_x86_feature_detected!("lzcnt"));
    println!("fxsr: {:?}", is_x86_feature_detected!("fxsr"));
    println!("xsave: {:?}", is_x86_feature_detected!("xsave"));
    println!("xsaveopt: {:?}", is_x86_feature_detected!("xsaveopt"));
    println!("xsaves: {:?}", is_x86_feature_detected!("xsaves"));
    println!("xsavec: {:?}", is_x86_feature_detected!("xsavec"));
    println!("cmpxchg16b: {:?}", is_x86_feature_detected!("cmpxchg16b"));
    println!("adx: {:?}", is_x86_feature_detected!("adx"));
    println!("rtm: {:?}", is_x86_feature_detected!("rtm"));
    println!("movbe: {:?}", is_x86_feature_detected!("movbe"));
    println!("avxvnni: {:?}", is_x86_feature_detected!("avxvnni"));
    println!("avxvnniint8: {:?}", is_x86_feature_detected!("avxvnniint8"));
    println!("avxneconvert: {:?}", is_x86_feature_detected!("avxneconvert"));
    println!("avxifma: {:?}", is_x86_feature_detected!("avxifma"));
    println!("avxvnniint16: {:?}", is_x86_feature_detected!("avxvnniint16"));
    println!("amx-bf16: {:?}", is_x86_feature_detected!("amx-bf16"));
    println!("amx-tile: {:?}", is_x86_feature_detected!("amx-tile"));
    println!("amx-int8: {:?}", is_x86_feature_detected!("amx-int8"));
    println!("amx-fp16: {:?}", is_x86_feature_detected!("amx-fp16"));
    println!("amx-complex: {:?}", is_x86_feature_detected!("amx-complex"));
    println!("xop: {:?}", is_x86_feature_detected!("xop"));
    println!("kl: {:?}", is_x86_feature_detected!("kl"));
    println!("widekl: {:?}", is_x86_feature_detected!("widekl"));
    println!("movrs: {:?}", is_x86_feature_detected!("movrs"));
    println!("amx-fp8: {:?}", is_x86_feature_detected!("amx-fp8"));
    println!("amx-tf32: {:?}", is_x86_feature_detected!("amx-tf32"));
    println!("amx-avx512: {:?}", is_x86_feature_detected!("amx-avx512"));
    println!("amx-movrs: {:?}", is_x86_feature_detected!("amx-movrs"));
}
// Exercise feature names that are marked deprecated upstream (presumably
// renamed aliases — confirm against std_detect's feature list);
// `#[allow(deprecated)]` silences the lint the macro expansion triggers.
#[test]
#[allow(deprecated)]
fn x86_deprecated() {
    println!("avx512gfni {:?}", is_x86_feature_detected!("avx512gfni"));
    println!("avx512vaes {:?}", is_x86_feature_detected!("avx512vaes"));
    println!("avx512vpclmulqdq {:?}", is_x86_feature_detected!("avx512vpclmulqdq"));
}

View File

@@ -1,6 +0,0 @@
# Rewrite `alloc::`/`crate::` paths in the vendored std sources so they
# resolve through the `alloc_crate` extern-crate alias used by this build.
s|alloc::ffi|alloc_crate::ffi|g
s|alloc::slice::Join|alloc_crate::slice::Join|g
s|alloc::bstr|alloc_crate::bstr|g
s|alloc::collections::TryReserveError|alloc_crate::collections::TryReserveError|g
s|crate::collections::VecDeque|alloc_crate::collections::VecDeque|g
# The `include_str!`-based doc attributes reference files that are not
# vendored; replace any such attribute line with a placeholder comment.
# (Fixed typo in the emitted comment: "retreive" -> "retrieve".)
/\[doc = include_str!/c \ // todo retrieve docs

View File

@@ -1,2 +0,0 @@
# Swap the stability attribute on line 362 for a custom_std unstable marker:
# delete the original line, then append the replacement after line 361.
362d
361a \ #[rustc_const_unstable(feature = "custom_std", issue = "none")]

View File

@@ -1,2 +0,0 @@
# Swap the stability attribute on line 234 for a custom_std unstable marker:
# delete the original line, then append the replacement after line 233.
234d
233a \ #[rustc_const_unstable(feature = "custom_std", issue = "none")]

View File

@@ -1,3 +0,0 @@
# After line 109, insert a match arm routing `target_os = "survos"` to the
# custom `survos` platform module. (The trailing backslashes continue the
# single `a` append command across three lines.)
109a \ target_os = "survos" => { \
mod survos; \
}

View File

@@ -1,98 +0,0 @@
use super::*;
use crate::panic::RefUnwindSafe;
/// Builds the fixed three-frame fake backtrace shared by the formatting
/// tests below: a `Backtrace::create` frame with full source info, a
/// `__rust_maybe_catch_panic` frame with no source info, and a frame
/// carrying two symbols (presumably modelling an inlined call — confirm
/// against the resolver that produces multi-symbol frames).
fn generate_fake_frames() -> Vec<BacktraceFrame> {
    vec![
        // Frame with name, file and line (no column).
        BacktraceFrame {
            frame: RawFrame::Fake,
            symbols: vec![BacktraceSymbol {
                name: Some(b"std::backtrace::Backtrace::create".to_vec()),
                filename: Some(BytesOrWide::Bytes(b"rust/backtrace.rs".to_vec())),
                lineno: Some(100),
                colno: None,
            }],
        },
        // Frame with a symbol name only — exercises the "no file/line" path.
        BacktraceFrame {
            frame: RawFrame::Fake,
            symbols: vec![BacktraceSymbol {
                name: Some(b"__rust_maybe_catch_panic".to_vec()),
                filename: None,
                lineno: None,
                colno: None,
            }],
        },
        // Frame with two symbols; the first also carries a column number.
        BacktraceFrame {
            frame: RawFrame::Fake,
            symbols: vec![
                BacktraceSymbol {
                    name: Some(b"std::rt::lang_start_internal".to_vec()),
                    filename: Some(BytesOrWide::Bytes(b"rust/rt.rs".to_vec())),
                    lineno: Some(300),
                    colno: Some(5),
                },
                BacktraceSymbol {
                    name: Some(b"std::rt::lang_start".to_vec()),
                    filename: Some(BytesOrWide::Bytes(b"rust/rt.rs".to_vec())),
                    lineno: Some(400),
                    colno: None,
                },
            ],
        },
    ]
}
/// `{:#?}` on a `Backtrace` must skip everything before `actual_start`
/// (the first fake frame here) and print one `{ fn: ..., file: ..., line: ... }`
/// entry per symbol, omitting fields that are `None`.
#[test]
fn test_debug() {
    let backtrace = Backtrace {
        inner: Inner::Captured(
            (Capture { actual_start: 1, frames: generate_fake_frames() }).into(),
        ),
    };
    #[rustfmt::skip]
    let expected = "Backtrace [\
    \n    { fn: \"__rust_maybe_catch_panic\" },\
    \n    { fn: \"std::rt::lang_start_internal\", file: \"rust/rt.rs\", line: 300 },\
    \n    { fn: \"std::rt::lang_start\", file: \"rust/rt.rs\", line: 400 },\
    \n]";
    assert_eq!(format!("{backtrace:#?}"), expected);
    // Format the backtrace a second time, just to make sure lazily resolved state is stable
    assert_eq!(format!("{backtrace:#?}"), expected);
}
/// `Backtrace::frames()` must expose *all* frames (unlike the `Debug` output,
/// which trims to `actual_start`), and each frame's `{:#?}` must render its
/// symbol list.
#[test]
fn test_frames() {
    let backtrace = Backtrace {
        inner: Inner::Captured(
            (Capture { actual_start: 1, frames: generate_fake_frames() }).into(),
        ),
    };
    let frames = backtrace.frames();
    #[rustfmt::skip]
    let expected = vec![
        "[
    { fn: \"std::backtrace::Backtrace::create\", file: \"rust/backtrace.rs\", line: 100 },
]",
        "[
    { fn: \"__rust_maybe_catch_panic\" },
]",
        "[
    { fn: \"std::rt::lang_start_internal\", file: \"rust/rt.rs\", line: 300 },
    { fn: \"std::rt::lang_start\", file: \"rust/rt.rs\", line: 400 },
]"
    ];
    // `zip` stops at the shorter side, so the old `zip(...).all(...)` check
    // would pass vacuously if `frames()` dropped frames. Pin the count first.
    assert_eq!(frames.len(), expected.len());
    // Per-element assert_eq! reports the offending frame and the actual
    // rendering on failure, instead of a bare `false` from `Iterator::all`.
    for (frame, expected) in frames.iter().zip(expected.iter()) {
        assert_eq!(format!("{frame:#?}"), *expected);
    }
}
/// Compile-time check: `Backtrace` must be usable across `catch_unwind`
/// boundaries both by value and by reference, i.e. implement `UnwindSafe`
/// and `RefUnwindSafe`. The test body does nothing at runtime.
#[test]
fn backtrace_unwind_safe() {
    fn require_unwind_safe<T>()
    where
        T: UnwindSafe + RefUnwindSafe,
    {
    }
    require_unwind_safe::<Backtrace>();
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +0,0 @@
//! Unordered containers, implemented as hash-tables
pub mod map;
pub mod set;

File diff suppressed because it is too large Load Diff

View File

@@ -1,529 +0,0 @@
use super::HashSet;
use crate::hash::RandomState;
use crate::panic::{AssertUnwindSafe, catch_unwind};
use crate::sync::Arc;
use crate::sync::atomic::{AtomicU32, Ordering};
// Every constructor must start at capacity 0 (no allocation until first
// insert), and an emptied set must shrink back down to capacity 0.
#[test]
fn test_zero_capacities() {
    type HS = HashSet<i32>;
    let s = HS::new();
    assert_eq!(s.capacity(), 0);
    let s = HS::default();
    assert_eq!(s.capacity(), 0);
    let s = HS::with_hasher(RandomState::new());
    assert_eq!(s.capacity(), 0);
    let s = HS::with_capacity(0);
    assert_eq!(s.capacity(), 0);
    let s = HS::with_capacity_and_hasher(0, RandomState::new());
    assert_eq!(s.capacity(), 0);
    // Insert then remove everything: shrink_to_fit must release the storage.
    let mut s = HS::new();
    s.insert(1);
    s.insert(2);
    s.remove(&1);
    s.remove(&2);
    s.shrink_to_fit();
    assert_eq!(s.capacity(), 0);
    // reserve(0) must not allocate.
    let mut s = HS::new();
    s.reserve(0);
    assert_eq!(s.capacity(), 0);
}

// `is_disjoint` must be symmetric and flip to false as soon as the two sets
// share a single element.
#[test]
fn test_disjoint() {
    let mut xs = HashSet::new();
    let mut ys = HashSet::new();
    assert!(xs.is_disjoint(&ys));
    assert!(ys.is_disjoint(&xs));
    assert!(xs.insert(5));
    assert!(ys.insert(11));
    assert!(xs.is_disjoint(&ys));
    assert!(ys.is_disjoint(&xs));
    assert!(xs.insert(7));
    assert!(xs.insert(19));
    assert!(xs.insert(4));
    assert!(ys.insert(2));
    assert!(ys.insert(-11));
    assert!(xs.is_disjoint(&ys));
    assert!(ys.is_disjoint(&xs));
    // 7 is now in both sets.
    assert!(ys.insert(7));
    assert!(!xs.is_disjoint(&ys));
    assert!(!ys.is_disjoint(&xs));
}

// Subset/superset must only hold once every element of `a` is in `b`
// (inserting the last missing element, 5, flips the relation).
#[test]
fn test_subset_and_superset() {
    let mut a = HashSet::new();
    assert!(a.insert(0));
    assert!(a.insert(5));
    assert!(a.insert(11));
    assert!(a.insert(7));
    let mut b = HashSet::new();
    assert!(b.insert(0));
    assert!(b.insert(7));
    assert!(b.insert(19));
    assert!(b.insert(250));
    assert!(b.insert(11));
    assert!(b.insert(200));
    assert!(!a.is_subset(&b));
    assert!(!a.is_superset(&b));
    assert!(!b.is_subset(&a));
    assert!(!b.is_superset(&a));
    assert!(b.insert(5));
    assert!(a.is_subset(&b));
    assert!(!a.is_superset(&b));
    assert!(!b.is_subset(&a));
    assert!(b.is_superset(&a));
}
/// Inserting 0..32 and then iterating must visit every element exactly once:
/// each visited value sets one bit of a 32-bit mask, which must end up full.
#[test]
fn test_iterate() {
    let mut set = HashSet::new();
    assert!((0..32).all(|value| set.insert(value)));
    let seen = set.iter().fold(0u32, |bits, &value| bits | (1 << value));
    assert_eq!(seen, 0xFFFF_FFFF);
}
// `intersection` must yield exactly the shared elements, regardless of which
// set is larger or which side the iterator is called on.
#[test]
fn test_intersection() {
    let mut a = HashSet::new();
    let mut b = HashSet::new();
    assert!(a.intersection(&b).next().is_none());
    assert!(a.insert(11));
    assert!(a.insert(1));
    assert!(a.insert(3));
    assert!(a.insert(77));
    assert!(a.insert(103));
    assert!(a.insert(5));
    assert!(a.insert(-5));
    assert!(b.insert(2));
    assert!(b.insert(11));
    assert!(b.insert(77));
    assert!(b.insert(-9));
    assert!(b.insert(-42));
    assert!(b.insert(5));
    assert!(b.insert(3));
    let mut i = 0;
    let expected = [3, 5, 11, 77];
    for x in a.intersection(&b) {
        assert!(expected.contains(x));
        i += 1
    }
    assert_eq!(i, expected.len());
    assert!(a.insert(9)); // make a bigger than b
    i = 0;
    for x in a.intersection(&b) {
        assert!(expected.contains(x));
        i += 1
    }
    assert_eq!(i, expected.len());
    i = 0;
    for x in b.intersection(&a) {
        assert!(expected.contains(x));
        i += 1
    }
    assert_eq!(i, expected.len());
}

// `difference` yields elements of `a` not in `b` — and only those.
#[test]
fn test_difference() {
    let mut a = HashSet::new();
    let mut b = HashSet::new();
    assert!(a.insert(1));
    assert!(a.insert(3));
    assert!(a.insert(5));
    assert!(a.insert(9));
    assert!(a.insert(11));
    assert!(b.insert(3));
    assert!(b.insert(9));
    let mut i = 0;
    let expected = [1, 5, 11];
    for x in a.difference(&b) {
        assert!(expected.contains(x));
        i += 1
    }
    assert_eq!(i, expected.len());
}

// `symmetric_difference` yields elements in exactly one of the two sets.
#[test]
fn test_symmetric_difference() {
    let mut a = HashSet::new();
    let mut b = HashSet::new();
    assert!(a.insert(1));
    assert!(a.insert(3));
    assert!(a.insert(5));
    assert!(a.insert(9));
    assert!(a.insert(11));
    assert!(b.insert(-2));
    assert!(b.insert(3));
    assert!(b.insert(9));
    assert!(b.insert(14));
    assert!(b.insert(22));
    let mut i = 0;
    let expected = [-2, 1, 5, 11, 14, 22];
    for x in a.symmetric_difference(&b) {
        assert!(expected.contains(x));
        i += 1
    }
    assert_eq!(i, expected.len());
}
// `union` must yield each element of either set exactly once, independent of
// which set is larger or which side the iterator is called on.
#[test]
fn test_union() {
    let mut a = HashSet::new();
    let mut b = HashSet::new();
    assert!(a.union(&b).next().is_none());
    assert!(b.union(&a).next().is_none());
    assert!(a.insert(1));
    assert!(a.insert(3));
    assert!(a.insert(11));
    assert!(a.insert(16));
    assert!(a.insert(19));
    assert!(a.insert(24));
    assert!(b.insert(-2));
    assert!(b.insert(1));
    assert!(b.insert(5));
    assert!(b.insert(9));
    assert!(b.insert(13));
    assert!(b.insert(19));
    let mut i = 0;
    let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
    for x in a.union(&b) {
        assert!(expected.contains(x));
        i += 1
    }
    assert_eq!(i, expected.len());
    assert!(a.insert(9)); // make a bigger than b
    assert!(a.insert(5));
    i = 0;
    for x in a.union(&b) {
        assert!(expected.contains(x));
        i += 1
    }
    assert_eq!(i, expected.len());
    i = 0;
    for x in b.union(&a) {
        assert!(expected.contains(x));
        i += 1
    }
    assert_eq!(i, expected.len());
}
/// Collecting from an iterator must keep every distinct value and collapse
/// duplicates: the source has one repeated element (`2`), so the resulting
/// set holds `len - 1` elements and contains each source value.
#[test]
fn test_from_iter() {
    let source = [1, 2, 2, 3, 4, 5, 6, 7, 8, 9];
    let set: HashSet<_> = source.iter().copied().collect();
    assert!(source.iter().all(|item| set.contains(item)));
    assert_eq!(set.iter().len(), source.len() - 1);
}
// `into_iter` consumes the set and yields owned elements (in either order).
#[test]
fn test_move_iter() {
    let hs = {
        let mut hs = HashSet::new();
        hs.insert('a');
        hs.insert('b');
        hs
    };
    let v = hs.into_iter().collect::<Vec<char>>();
    assert!(v == ['a', 'b'] || v == ['b', 'a']);
}

// Equality compares contents, not insertion order or history.
#[test]
fn test_eq() {
    // These constants once happened to expose a bug in insert().
    // I'm keeping them around to prevent a regression.
    let mut s1 = HashSet::new();
    s1.insert(1);
    s1.insert(2);
    s1.insert(3);
    let mut s2 = HashSet::new();
    s2.insert(1);
    s2.insert(2);
    assert!(s1 != s2);
    s2.insert(3);
    assert_eq!(s1, s2);
}
// Debug formatting: `{a, b}` in some order for a populated set, `{}` when
// empty.
#[test]
fn test_show() {
    let mut set = HashSet::new();
    let empty = HashSet::<i32>::new();
    set.insert(1);
    set.insert(2);
    let set_str = format!("{set:?}");
    assert!(set_str == "{1, 2}" || set_str == "{2, 1}");
    assert_eq!(format!("{empty:?}"), "{}");
}

// Draining an already-empty set must be a no-op, whether the drain iterator
// is consumed or just dropped.
#[test]
fn test_trivial_drain() {
    let mut s = HashSet::<i32>::new();
    for _ in s.drain() {}
    assert!(s.is_empty());
    drop(s);
    let mut s = HashSet::<i32>::new();
    drop(s.drain());
    assert!(s.is_empty());
}

// Dropping a partially-consumed drain iterator must still empty the set and
// leave it reusable.
#[test]
fn test_drain() {
    let mut s: HashSet<_> = (1..100).collect();
    // try this a bunch of times to make sure we don't screw up internal state.
    for _ in 0..20 {
        assert_eq!(s.len(), 99);
        {
            let mut last_i = 0;
            let mut d = s.drain();
            // Consume only half of the drain iterator before dropping it.
            for (i, x) in d.by_ref().take(50).enumerate() {
                last_i = i;
                assert!(x != 0);
            }
            assert_eq!(last_i, 49);
        }
        for _ in &s {
            panic!("s should be empty!");
        }
        // reset to try again.
        s.extend(1..100);
    }
}
// `replace` must swap in the new value even when it compares equal to the
// stored one (Foo's Eq/Hash only look at the first field, so Foo("a", 1)
// and Foo("a", 2) collide), returning the old value.
#[test]
fn test_replace() {
    use crate::hash;
    #[derive(Debug)]
    struct Foo(&'static str, #[allow(dead_code)] i32);
    impl PartialEq for Foo {
        fn eq(&self, other: &Self) -> bool {
            self.0 == other.0
        }
    }
    impl Eq for Foo {}
    impl hash::Hash for Foo {
        fn hash<H: hash::Hasher>(&self, h: &mut H) {
            self.0.hash(h);
        }
    }
    let mut s = HashSet::new();
    assert_eq!(s.replace(Foo("a", 1)), None);
    assert_eq!(s.len(), 1);
    assert_eq!(s.replace(Foo("a", 2)), Some(Foo("a", 1)));
    assert_eq!(s.len(), 1);
    let mut it = s.iter();
    assert_eq!(it.next(), Some(&Foo("a", 2)));
    assert_eq!(it.next(), None);
}

// `Extend<&T>` (extending from references) must behave like extending from
// owned values, for both slices and other sets.
#[test]
fn test_extend_ref() {
    let mut a = HashSet::new();
    a.insert(1);
    a.extend(&[2, 3, 4]);
    assert_eq!(a.len(), 4);
    assert!(a.contains(&1));
    assert!(a.contains(&2));
    assert!(a.contains(&3));
    assert!(a.contains(&4));
    let mut b = HashSet::new();
    b.insert(5);
    b.insert(6);
    a.extend(&b);
    assert_eq!(a.len(), 6);
    assert!(a.contains(&1));
    assert!(a.contains(&2));
    assert!(a.contains(&3));
    assert!(a.contains(&4));
    assert!(a.contains(&5));
    assert!(a.contains(&6));
}

// `retain` keeps exactly the elements matching the predicate.
#[test]
fn test_retain() {
    let xs = [1, 2, 3, 4, 5, 6];
    let mut set: HashSet<i32> = xs.iter().cloned().collect();
    set.retain(|&k| k % 2 == 0);
    assert_eq!(set.len(), 3);
    assert!(set.contains(&2));
    assert!(set.contains(&4));
    assert!(set.contains(&6));
}

// `extract_if` removes only the elements the predicate accepts.
#[test]
fn test_extract_if() {
    let mut x: HashSet<_> = [1].iter().copied().collect();
    let mut y: HashSet<_> = [1].iter().copied().collect();
    x.extract_if(|_| true).for_each(drop);
    y.extract_if(|_| false).for_each(drop);
    assert_eq!(x.len(), 0);
    assert_eq!(y.len(), 1);
}
// If an element's destructor panics mid-`extract_if`, the remaining elements
// must still be dropped (no leak): the counters show the predicate stopped
// at the panic (2 calls) but all 3 values were dropped.
#[test]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn test_extract_if_drop_panic_leak() {
    static PREDS: AtomicU32 = AtomicU32::new(0);
    static DROPS: AtomicU32 = AtomicU32::new(0);
    // Panics on the second drop (fetch_add returns the previous count).
    #[derive(PartialEq, Eq, PartialOrd, Hash)]
    struct D(i32);
    impl Drop for D {
        fn drop(&mut self) {
            if DROPS.fetch_add(1, Ordering::SeqCst) == 1 {
                panic!("panic in `drop`");
            }
        }
    }
    let mut set = (0..3).map(|i| D(i)).collect::<HashSet<_>>();
    catch_unwind(move || {
        set.extract_if(|_| {
            PREDS.fetch_add(1, Ordering::SeqCst);
            true
        })
        .for_each(drop)
    })
    .ok();
    assert_eq!(PREDS.load(Ordering::SeqCst), 2);
    assert_eq!(DROPS.load(Ordering::SeqCst), 3);
}

// If the *predicate* panics mid-`extract_if`, dropping the iterator must not
// keep consuming elements, and the set's remaining elements are dropped with
// the set itself (3 drops total, predicate called only twice: one true, one
// panic).
#[test]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn test_extract_if_pred_panic_leak() {
    static PREDS: AtomicU32 = AtomicU32::new(0);
    static DROPS: AtomicU32 = AtomicU32::new(0);
    #[derive(PartialEq, Eq, PartialOrd, Hash)]
    struct D;
    impl Drop for D {
        fn drop(&mut self) {
            DROPS.fetch_add(1, Ordering::SeqCst);
        }
    }
    let mut set: HashSet<_> = (0..3).map(|_| D).collect();
    catch_unwind(AssertUnwindSafe(|| {
        set.extract_if(|_| match PREDS.fetch_add(1, Ordering::SeqCst) {
            0 => true,
            _ => panic!(),
        })
        .for_each(drop)
    }))
    .ok();
    assert_eq!(PREDS.load(Ordering::SeqCst), 1);
    assert_eq!(DROPS.load(Ordering::SeqCst), 3);
    assert_eq!(set.len(), 0);
}
// `HashSet::from([...])` deduplicates and ignores order, and must infer the
// default hasher without type annotations.
#[test]
fn from_array() {
    let set = HashSet::from([1, 2, 3, 4]);
    let unordered_duplicates = HashSet::from([4, 1, 4, 3, 2]);
    assert_eq!(set, unordered_duplicates);
    // This next line must infer the hasher type parameter.
    // If you make a change that causes this line to no longer infer,
    // that's a problem!
    let _must_not_require_type_annotation = HashSet::from([1, 2]);
}

// `with_hasher` must be usable in const context (here with a `()` hasher),
// as must `Default`.
#[test]
fn const_with_hasher() {
    const X: HashSet<(), ()> = HashSet::with_hasher(());
    const Y: HashSet<(), ()> = Default::default();
    assert_eq!(X.len(), 0);
    assert_eq!(Y.len(), 0);
}

// Inserting an equal value must NOT overwrite the stored one: the set keeps
// the first `Arc`, verified by pointer identity.
#[test]
fn test_insert_does_not_overwrite_the_value() {
    let first_value = Arc::new(17);
    let second_value = Arc::new(17);
    let mut set = HashSet::new();
    let inserted = set.insert(first_value.clone());
    assert!(inserted);
    let inserted = set.insert(second_value);
    assert!(!inserted);
    assert!(
        Arc::ptr_eq(set.iter().next().unwrap(), &first_value),
        "Insert must not overwrite the value, so the contained value pointer \
        must be the same as first value pointer we inserted"
    );
}
View File

@@ -1,459 +0,0 @@
//! Collection types.
//!
//! Rust's standard collection library provides efficient implementations of the
//! most common general purpose programming data structures. By using the
//! standard implementations, it should be possible for two libraries to
//! communicate without significant data conversion.
//!
//! To get this out of the way: you should probably just use [`Vec`] or [`HashMap`].
//! These two collections cover most use cases for generic data storage and
//! processing. They are exceptionally good at doing what they do. All the other
//! collections in the standard library have specific use cases where they are
//! the optimal choice, but these cases are borderline *niche* in comparison.
//! Even when `Vec` and `HashMap` are technically suboptimal, they're probably a
//! good enough choice to get started.
//!
//! Rust's collections can be grouped into four major categories:
//!
//! * Sequences: [`Vec`], [`VecDeque`], [`LinkedList`]
//! * Maps: [`HashMap`], [`BTreeMap`]
//! * Sets: [`HashSet`], [`BTreeSet`]
//! * Misc: [`BinaryHeap`]
//!
//! # When Should You Use Which Collection?
//!
//! These are fairly high-level and quick break-downs of when each collection
//! should be considered. Detailed discussions of strengths and weaknesses of
//! individual collections can be found on their own documentation pages.
//!
//! ### Use a [`Vec`] when:
//! * You want to collect items up to be processed or sent elsewhere later, and
//! don't care about any properties of the actual values being stored.
//! * You want a sequence of elements in a particular order, and will only be
//! appending to (or near) the end.
//! * You want a stack.
//! * You want a resizable array.
//! * You want a heap-allocated array.
//!
//! ### Use a [`VecDeque`] when:
//! * You want a [`Vec`] that supports efficient insertion at both ends of the
//! sequence.
//! * You want a queue.
//! * You want a double-ended queue (deque).
//!
//! ### Use a [`LinkedList`] when:
//! * You want a [`Vec`] or [`VecDeque`] of unknown size, and can't tolerate
//! amortization.
//! * You want to efficiently split and append lists.
//! * You are *absolutely* certain you *really*, *truly*, want a doubly linked
//! list.
//!
//! ### Use a [`HashMap`] when:
//! * You want to associate arbitrary keys with an arbitrary value.
//! * You want a cache.
//! * You want a map, with no extra functionality.
//!
//! ### Use a [`BTreeMap`] when:
//! * You want a map sorted by its keys.
//! * You want to be able to get a range of entries on-demand.
//! * You're interested in what the smallest or largest key-value pair is.
//! * You want to find the largest or smallest key that is smaller or larger
//! than something.
//!
//! ### Use the `Set` variant of any of these `Map`s when:
//! * You just want to remember which keys you've seen.
//! * There is no meaningful value to associate with your keys.
//! * You just want a set.
//!
//! ### Use a [`BinaryHeap`] when:
//!
//! * You want to store a bunch of elements, but only ever want to process the
//! "biggest" or "most important" one at any given time.
//! * You want a priority queue.
//!
//! # Performance
//!
//! Choosing the right collection for the job requires an understanding of what
//! each collection is good at. Here we briefly summarize the performance of
//! different collections for certain important operations. For further details,
//! see each type's documentation, and note that the names of actual methods may
//! differ from the tables below on certain collections.
//!
//! Throughout the documentation, we will adhere to the following conventions
//! for operation notation:
//!
//! * The collection's size is denoted by `n`.
//! * If a second collection is involved, its size is denoted by `m`.
//! * Item indices are denoted by `i`.
//! * Operations which have an *amortized* cost are suffixed with a `*`.
//! * Operations with an *expected* cost are suffixed with a `~`.
//!
//! Calling operations that add to a collection will occasionally require a
//! collection to be resized - an extra operation that takes *O*(*n*) time.
//!
//! *Amortized* costs are calculated to account for the time cost of such resize
//! operations *over a sufficiently large series of operations*. An individual
//! operation may be slower or faster due to the sporadic nature of collection
//! resizing, however the average cost per operation will approach the amortized
//! cost.
//!
//! Rust's collections never automatically shrink, so removal operations aren't
//! amortized.
//!
//! [`HashMap`] uses *expected* costs. It is theoretically possible, though very
//! unlikely, for [`HashMap`] to experience significantly worse performance than
//! the expected cost. This is due to the probabilistic nature of hashing - i.e.
//! it is possible to generate a duplicate hash given some input key that will
//! require extra computation to correct.
//!
//! ## Cost of Collection Operations
//!
//!
//! | | get(i) | insert(i) | remove(i) | append(Vec(m)) | split_off(i) | range | append |
//! |----------------|------------------------|-------------------------|------------------------|-------------------|------------------------|-----------------|--------------|
//! | [`Vec`] | *O*(1) | *O*(*n*-*i*)* | *O*(*n*-*i*) | *O*(*m*)* | *O*(*n*-*i*) | N/A | N/A |
//! | [`VecDeque`] | *O*(1) | *O*(min(*i*, *n*-*i*))* | *O*(min(*i*, *n*-*i*)) | *O*(*m*)* | *O*(min(*i*, *n*-*i*)) | N/A | N/A |
//! | [`LinkedList`] | *O*(min(*i*, *n*-*i*)) | *O*(min(*i*, *n*-*i*)) | *O*(min(*i*, *n*-*i*)) | *O*(1) | *O*(min(*i*, *n*-*i*)) | N/A | N/A |
//! | [`HashMap`] | *O*(1)~ | *O*(1)~* | *O*(1)~ | N/A | N/A | N/A | N/A |
//! | [`BTreeMap`] | *O*(log(*n*)) | *O*(log(*n*)) | *O*(log(*n*)) | N/A | N/A | *O*(log(*n*)) | *O*(*n*+*m*) |
//!
//! Note that where ties occur, [`Vec`] is generally going to be faster than
//! [`VecDeque`], and [`VecDeque`] is generally going to be faster than
//! [`LinkedList`].
//!
//! For Sets, all operations have the cost of the equivalent Map operation.
//!
//! # Correct and Efficient Usage of Collections
//!
//! Of course, knowing which collection is the right one for the job doesn't
//! instantly permit you to use it correctly. Here are some quick tips for
//! efficient and correct usage of the standard collections in general. If
//! you're interested in how to use a specific collection in particular, consult
//! its documentation for detailed discussion and code examples.
//!
//! ## Capacity Management
//!
//! Many collections provide several constructors and methods that refer to
//! "capacity". These collections are generally built on top of an array.
//! Optimally, this array would be exactly the right size to fit only the
//! elements stored in the collection, but for the collection to do this would
//! be very inefficient. If the backing array was exactly the right size at all
//! times, then every time an element is inserted, the collection would have to
//! grow the array to fit it. Due to the way memory is allocated and managed on
//! most computers, this would almost surely require allocating an entirely new
//! array and copying every single element from the old one into the new one.
//! Hopefully you can see that this wouldn't be very efficient to do on every
//! operation.
//!
//! Most collections therefore use an *amortized* allocation strategy. They
//! generally let themselves have a fair amount of unoccupied space so that they
//! only have to grow on occasion. When they do grow, they allocate a
//! substantially larger array to move the elements into so that it will take a
//! while for another grow to be required. While this strategy is great in
//! general, it would be even better if the collection *never* had to resize its
//! backing array. Unfortunately, the collection itself doesn't have enough
//! information to do this itself. Therefore, it is up to us programmers to give
//! it hints.
//!
//! Any `with_capacity` constructor will instruct the collection to allocate
//! enough space for the specified number of elements. Ideally this will be for
//! exactly that many elements, but some implementation details may prevent
//! this. See collection-specific documentation for details. In general, use
//! `with_capacity` when you know exactly how many elements will be inserted, or
//! at least have a reasonable upper-bound on that number.
//!
//! When anticipating a large influx of elements, the `reserve` family of
//! methods can be used to hint to the collection how much room it should make
//! for the coming items. As with `with_capacity`, the precise behavior of
//! these methods will be specific to the collection of interest.
//!
//! For optimal performance, collections will generally avoid shrinking
//! themselves. If you believe that a collection will not soon contain any more
//! elements, or just really need the memory, the `shrink_to_fit` method prompts
//! the collection to shrink the backing array to the minimum size capable of
//! holding its elements.
//!
//! Finally, if ever you're interested in what the actual capacity of the
//! collection is, most collections provide a `capacity` method to query this
//! information on demand. This can be useful for debugging purposes, or for
//! use with the `reserve` methods.
//!
//! ## Iterators
//!
//! [Iterators][crate::iter]
//! are a powerful and robust mechanism used throughout Rust's
//! standard libraries. Iterators provide a sequence of values in a generic,
//! safe, efficient and convenient way. The contents of an iterator are usually
//! *lazily* evaluated, so that only the values that are actually needed are
//! ever actually produced, and no allocation need be done to temporarily store
//! them. Iterators are primarily consumed using a `for` loop, although many
//! functions also take iterators where a collection or sequence of values is
//! desired.
//!
//! All of the standard collections provide several iterators for performing
//! bulk manipulation of their contents. The three primary iterators almost
//! every collection should provide are `iter`, `iter_mut`, and `into_iter`.
//! Some of these are not provided on collections where it would be unsound or
//! unreasonable to provide them.
//!
//! `iter` provides an iterator of immutable references to all the contents of a
//! collection in the most "natural" order. For sequence collections like [`Vec`],
//! this means the items will be yielded in increasing order of index starting
//! at 0. For ordered collections like [`BTreeMap`], this means that the items
//! will be yielded in sorted order. For unordered collections like [`HashMap`],
//! the items will be yielded in whatever order the internal representation made
//! most convenient. This is great for reading through all the contents of the
//! collection.
//!
//! ```
//! let vec = vec![1, 2, 3, 4];
//! for x in vec.iter() {
//! println!("vec contained {x:?}");
//! }
//! ```
//!
//! `iter_mut` provides an iterator of *mutable* references in the same order as
//! `iter`. This is great for mutating all the contents of the collection.
//!
//! ```
//! let mut vec = vec![1, 2, 3, 4];
//! for x in vec.iter_mut() {
//! *x += 1;
//! }
//! ```
//!
//! `into_iter` transforms the actual collection into an iterator over its
//! contents by-value. This is great when the collection itself is no longer
//! needed, and the values are needed elsewhere. Using `extend` with `into_iter`
//! is the main way that contents of one collection are moved into another.
//! `extend` automatically calls `into_iter`, and takes any <code>T: [IntoIterator]</code>.
//! Calling `collect` on an iterator itself is also a great way to convert one
//! collection into another. Both of these methods should internally use the
//! capacity management tools discussed in the previous section to do this as
//! efficiently as possible.
//!
//! ```
//! let mut vec1 = vec![1, 2, 3, 4];
//! let vec2 = vec![10, 20, 30, 40];
//! vec1.extend(vec2);
//! ```
//!
//! ```
//! use std::collections::VecDeque;
//!
//! let vec = [1, 2, 3, 4];
//! let buf: VecDeque<_> = vec.into_iter().collect();
//! ```
//!
//! Iterators also provide a series of *adapter* methods for performing common
//! tasks with sequences. Among the adapters are functional favorites like `map`,
//! `fold`, `skip` and `take`. Of particular interest to collections is the
//! `rev` adapter, which reverses any iterator that supports this operation. Most
//! collections provide reversible iterators as the way to iterate over them in
//! reverse order.
//!
//! ```
//! let vec = vec![1, 2, 3, 4];
//! for x in vec.iter().rev() {
//! println!("vec contained {x:?}");
//! }
//! ```
//!
//! Several other collection methods also return iterators to yield a sequence
//! of results but avoid allocating an entire collection to store the result in.
//! This provides maximum flexibility as
//! [`collect`][crate::iter::Iterator::collect] or
//! [`extend`][crate::iter::Extend::extend] can be called to
//! "pipe" the sequence into any collection if desired. Otherwise, the sequence
//! can be looped over with a `for` loop. The iterator can also be discarded
//! after partial use, preventing the computation of the unused items.
//!
//! ## Entries
//!
//! The `entry` API is intended to provide an efficient mechanism for
//! manipulating the contents of a map conditionally on the presence of a key or
//! not. The primary motivating use case for this is to provide efficient
//! accumulator maps. For instance, if one wishes to maintain a count of the
//! number of times each key has been seen, they will have to perform some
//! conditional logic on whether this is the first time the key has been seen or
//! not. Normally, this would require a `find` followed by an `insert`,
//! effectively duplicating the search effort on each insertion.
//!
//! When a user calls `map.entry(key)`, the map will search for the key and
//! then yield a variant of the `Entry` enum.
//!
//! If a `Vacant(entry)` is yielded, then the key *was not* found. In this case
//! the only valid operation is to `insert` a value into the entry. When this is
//! done, the vacant entry is consumed and converted into a mutable reference to
//! the value that was inserted. This allows for further manipulation of the
//! value beyond the lifetime of the search itself. This is useful if complex
//! logic needs to be performed on the value regardless of whether the value was
//! just inserted.
//!
//! If an `Occupied(entry)` is yielded, then the key *was* found. In this case,
//! the user has several options: they can `get`, `insert` or `remove` the
//! value of the occupied entry. Additionally, they can convert the occupied
//! entry into a mutable reference to its value, providing symmetry to the
//! vacant `insert` case.
//!
//! ### Examples
//!
//! Here are the two primary ways in which `entry` is used. First, a simple
//! example where the logic performed on the values is trivial.
//!
//! #### Counting the number of times each character in a string occurs
//!
//! ```
//! use std::collections::btree_map::BTreeMap;
//!
//! let mut count = BTreeMap::new();
//! let message = "she sells sea shells by the sea shore";
//!
//! for c in message.chars() {
//! *count.entry(c).or_insert(0) += 1;
//! }
//!
//! assert_eq!(count.get(&'s'), Some(&8));
//!
//! println!("Number of occurrences of each character");
//! for (char, count) in &count {
//! println!("{char}: {count}");
//! }
//! ```
//!
//! When the logic to be performed on the value is more complex, we may simply
//! use the `entry` API to ensure that the value is initialized and perform the
//! logic afterwards.
//!
//! #### Tracking the inebriation of customers at a bar
//!
//! ```
//! use std::collections::btree_map::BTreeMap;
//!
//! // A client of the bar. They have a blood alcohol level.
//! struct Person { blood_alcohol: f32 }
//!
//! // All the orders made to the bar, by client ID.
//! let orders = vec![1, 2, 1, 2, 3, 4, 1, 2, 2, 3, 4, 1, 1, 1];
//!
//! // Our clients.
//! let mut blood_alcohol = BTreeMap::new();
//!
//! for id in orders {
//! // If this is the first time we've seen this customer, initialize them
//! // with no blood alcohol. Otherwise, just retrieve them.
//! let person = blood_alcohol.entry(id).or_insert(Person { blood_alcohol: 0.0 });
//!
//! // Reduce their blood alcohol level. It takes time to order and drink a beer!
//! person.blood_alcohol *= 0.9;
//!
//! // Check if they're sober enough to have another beer.
//! if person.blood_alcohol > 0.3 {
//! // Too drunk... for now.
//! println!("Sorry {id}, I have to cut you off");
//! } else {
//! // Have another!
//! person.blood_alcohol += 0.1;
//! }
//! }
//! ```
//!
//! # Insert and complex keys
//!
//! If we have a more complex key, calls to `insert` will
//! not update the value of the key. For example:
//!
//! ```
//! use std::cmp::Ordering;
//! use std::collections::BTreeMap;
//! use std::hash::{Hash, Hasher};
//!
//! #[derive(Debug)]
//! struct Foo {
//! a: u32,
//! b: &'static str,
//! }
//!
//! // we will compare `Foo`s by their `a` value only.
//! impl PartialEq for Foo {
//! fn eq(&self, other: &Self) -> bool { self.a == other.a }
//! }
//!
//! impl Eq for Foo {}
//!
//! // we will hash `Foo`s by their `a` value only.
//! impl Hash for Foo {
//! fn hash<H: Hasher>(&self, h: &mut H) { self.a.hash(h); }
//! }
//!
//! impl PartialOrd for Foo {
//! fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.a.partial_cmp(&other.a) }
//! }
//!
//! impl Ord for Foo {
//! fn cmp(&self, other: &Self) -> Ordering { self.a.cmp(&other.a) }
//! }
//!
//! let mut map = BTreeMap::new();
//! map.insert(Foo { a: 1, b: "baz" }, 99);
//!
//! // We already have a Foo with an a of 1, so this will be updating the value.
//! map.insert(Foo { a: 1, b: "xyz" }, 100);
//!
//! // The value has been updated...
//! assert_eq!(map.values().next().unwrap(), &100);
//!
//! // ...but the key hasn't changed. b is still "baz", not "xyz".
//! assert_eq!(map.keys().next().unwrap().b, "baz");
//! ```
#![stable(feature = "rust1", since = "1.0.0")]

// Reservation-error types re-exported from `alloc_crate` (presumably this
// std's alias for the `alloc` crate — TODO confirm against Cargo.toml).
#[stable(feature = "try_reserve", since = "1.57.0")]
pub use alloc_crate::collections::TryReserveError;
#[unstable(
    feature = "try_reserve_kind",
    reason = "Uncertain how much info should be exposed",
    issue = "48043"
)]
pub use alloc_crate::collections::TryReserveErrorKind;

// Tree- and sequence-based collections are re-exported verbatim.
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::collections::{BTreeMap, BTreeSet, BinaryHeap};
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::collections::{LinkedList, VecDeque};
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::collections::{binary_heap, btree_map, btree_set};
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::collections::{linked_list, vec_deque};

// Hash-based collections live in the private `hash` module below and are
// inlined into this module's docs.
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use self::hash_map::HashMap;
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use self::hash_set::HashSet;

#[stable(feature = "rust1", since = "1.0.0")]
// FIXME(#82080) The deprecation here is only theoretical, and does not actually produce a warning.
#[deprecated(note = "moved to `std::ops::Bound`", since = "1.26.0")]
#[doc(hidden)]
pub use crate::ops::Bound;

// Private implementation of the hash map and set; exposed only through the
// `hash_map`/`hash_set` facade modules below.
mod hash;

#[stable(feature = "rust1", since = "1.0.0")]
pub mod hash_map {
    //! A hash map implemented with quadratic probing and SIMD lookup.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub use super::hash::map::*;
    #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
    pub use crate::hash::random::DefaultHasher;
    #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
    pub use crate::hash::random::RandomState;
}

#[stable(feature = "rust1", since = "1.0.0")]
pub mod hash_set {
    //! A hash set implemented as a `HashMap` where the value is `()`.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub use super::hash::set::*;
}

View File

@@ -1,14 +0,0 @@
//! [`CStr`], [`CString`], and related types.
//!
//! This module is re-exports only: owned types and their construction errors
//! come from `alloc_crate`, the borrowed `CStr` and its errors from `core`.

// Owned C strings (require allocation).
#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
pub use alloc_crate::ffi::c_str::FromVecWithNulError;
#[stable(feature = "cstring_into", since = "1.7.0")]
pub use alloc_crate::ffi::c_str::IntoStringError;
#[stable(feature = "rust1", since = "1.0.0")]
pub use alloc_crate::ffi::c_str::{CString, NulError};

// Borrowed C strings (allocation-free, from `core`).
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::ffi::c_str::CStr;
#[stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")]
pub use core::ffi::c_str::FromBytesUntilNulError;
#[stable(feature = "cstr_from_bytes", since = "1.10.0")]
pub use core::ffi::c_str::FromBytesWithNulError;
View File

@@ -1,207 +0,0 @@
//! Utilities related to FFI bindings.
//!
//! This module provides utilities to handle data across non-Rust
//! interfaces, like other programming languages and the underlying
//! operating system. It is mainly of use for FFI (Foreign Function
//! Interface) bindings and code that needs to exchange C-like strings
//! with other languages.
//!
//! # Overview
//!
//! Rust represents owned strings with the [`String`] type, and
//! borrowed slices of strings with the [`str`] primitive. Both are
//! always in UTF-8 encoding, and may contain nul bytes in the middle,
//! i.e., if you look at the bytes that make up the string, there may
//! be a `\0` among them. Both `String` and `str` store their length
//! explicitly; there are no nul terminators at the end of strings
//! like in C.
//!
//! C strings are different from Rust strings:
//!
//! * **Encodings** - Rust strings are UTF-8, but C strings may use
//! other encodings. If you are using a string from C, you should
//! check its encoding explicitly, rather than just assuming that it
//! is UTF-8 like you can do in Rust.
//!
//! * **Character size** - C strings may use `char` or `wchar_t`-sized
//! characters; please **note** that C's `char` is different from Rust's.
//! The C standard leaves the actual sizes of those types open to
//! interpretation, but defines different APIs for strings made up of
//! each character type. Rust strings are always UTF-8, so different
//! Unicode characters will be encoded in a variable number of bytes
//! each. The Rust type [`char`] represents a '[Unicode scalar
//! value]', which is similar to, but not the same as, a '[Unicode
//! code point]'.
//!
//! * **Nul terminators and implicit string lengths** - Often, C
//! strings are nul-terminated, i.e., they have a `\0` character at the
//! end. The length of a string buffer is not stored, but has to be
//! calculated; to compute the length of a string, C code must
//! manually call a function like `strlen()` for `char`-based strings,
//! or `wcslen()` for `wchar_t`-based ones. Those functions return
//! the number of characters in the string excluding the nul
//! terminator, so the buffer length is really `len+1` characters.
//! Rust strings don't have a nul terminator; their length is always
//! stored and does not need to be calculated. While in Rust
//! accessing a string's length is an *O*(1) operation (because the
//! length is stored); in C it is an *O*(*n*) operation because the
//! length needs to be computed by scanning the string for the nul
//! terminator.
//!
//! * **Internal nul characters** - When C strings have a nul
//! terminator character, this usually means that they cannot have nul
//! characters in the middle — a nul character would essentially
//! truncate the string. Rust strings *can* have nul characters in
//! the middle, because nul does not have to mark the end of the
//! string in Rust.
//!
//! # Representations of non-Rust strings
//!
//! [`CString`] and [`CStr`] are useful when you need to transfer
//! UTF-8 strings to and from languages with a C ABI, like Python.
//!
//! * **From Rust to C:** [`CString`] represents an owned, C-friendly
//! string: it is nul-terminated, and has no internal nul characters.
//! Rust code can create a [`CString`] out of a normal string (provided
//! that the string doesn't have nul characters in the middle), and
//! then use a variety of methods to obtain a raw <code>\*mut [u8]</code> that can
//! then be passed as an argument to functions which use the C
//! conventions for strings.
//!
//! * **From C to Rust:** [`CStr`] represents a borrowed C string; it
//! is what you would use to wrap a raw <code>\*const [u8]</code> that you got from
//! a C function. A [`CStr`] is guaranteed to be a nul-terminated array
//! of bytes. Once you have a [`CStr`], you can convert it to a Rust
//! <code>&[str]</code> if it's valid UTF-8, or lossily convert it by adding
//! replacement characters.
//!
//! [`OsString`] and [`OsStr`] are useful when you need to transfer
//! strings to and from the operating system itself, or when capturing
//! the output of external commands. Conversions between [`OsString`],
//! [`OsStr`] and Rust strings work similarly to those for [`CString`]
//! and [`CStr`].
//!
//! * [`OsString`] losslessly represents an owned platform string. However, this
//! representation is not necessarily in a form native to the platform.
//! In the Rust standard library, various APIs that transfer strings to/from the operating
//! system use [`OsString`] instead of plain strings. For example,
//! [`env::var_os()`] is used to query environment variables; it
//! returns an <code>[Option]<[OsString]></code>. If the environment variable
//! exists you will get a <code>[Some]\(os_string)</code>, which you can
//! *then* try to convert to a Rust string. This yields a [`Result`], so that
//! your code can detect errors in case the environment variable did
//! not in fact contain valid Unicode data.
//!
//! * [`OsStr`] losslessly represents a borrowed reference to a platform string.
//! However, this representation is not necessarily in a form native to the platform.
//! It can be converted into a UTF-8 Rust string slice in a similar way to
//! [`OsString`].
//!
//! # Conversions
//!
//! ## On Unix
//!
//! On Unix, [`OsStr`] implements the
//! <code>std::os::unix::ffi::[OsStrExt][unix.OsStrExt]</code> trait, which
//! augments it with two methods, [`from_bytes`] and [`as_bytes`].
//! These do inexpensive conversions from and to byte slices.
//!
//! Additionally, on Unix [`OsString`] implements the
//! <code>std::os::unix::ffi::[OsStringExt][unix.OsStringExt]</code> trait,
//! which provides [`from_vec`] and [`into_vec`] methods that consume
//! their arguments, and take or produce vectors of [`u8`].
//!
//! ## On Windows
//!
//! An [`OsStr`] can be losslessly converted to a native Windows string. And
//! a native Windows string can be losslessly converted to an [`OsString`].
//!
//! On Windows, [`OsStr`] implements the
//! <code>std::os::windows::ffi::[OsStrExt][windows.OsStrExt]</code> trait,
//! which provides an [`encode_wide`] method. This provides an
//! iterator that can be [`collect`]ed into a vector of [`u16`]. After a nul
//! characters is appended, this is the same as a native Windows string.
//!
//! Additionally, on Windows [`OsString`] implements the
//! <code>std::os::windows::ffi::[OsStringExt][windows.OsStringExt]</code>
//! trait, which provides a [`from_wide`] method to convert a native Windows
//! string (without the terminating nul character) to an [`OsString`].
//!
//! ## Other platforms
//!
//! Many other platforms provide their own extension traits in a
//! `std::os::*::ffi` module.
//!
//! ## On all platforms
//!
//! On all platforms, [`OsStr`] consists of a sequence of bytes that is encoded as a superset of
//! UTF-8; see [`OsString`] for more details on its encoding on different platforms.
//!
//! For limited, inexpensive conversions from and to bytes, see [`OsStr::as_encoded_bytes`] and
//! [`OsStr::from_encoded_bytes_unchecked`].
//!
//! For basic string processing, see [`OsStr::slice_encoded_bytes`].
//!
//! [Unicode scalar value]: https://www.unicode.org/glossary/#unicode_scalar_value
//! [Unicode code point]: https://www.unicode.org/glossary/#code_point
//! [`env::set_var()`]: crate::env::set_var "env::set_var"
//! [`env::var_os()`]: crate::env::var_os "env::var_os"
//! [unix.OsStringExt]: crate::os::unix::ffi::OsStringExt "os::unix::ffi::OsStringExt"
//! [`from_vec`]: crate::os::unix::ffi::OsStringExt::from_vec "os::unix::ffi::OsStringExt::from_vec"
//! [`into_vec`]: crate::os::unix::ffi::OsStringExt::into_vec "os::unix::ffi::OsStringExt::into_vec"
//! [unix.OsStrExt]: crate::os::unix::ffi::OsStrExt "os::unix::ffi::OsStrExt"
//! [`from_bytes`]: crate::os::unix::ffi::OsStrExt::from_bytes "os::unix::ffi::OsStrExt::from_bytes"
//! [`as_bytes`]: crate::os::unix::ffi::OsStrExt::as_bytes "os::unix::ffi::OsStrExt::as_bytes"
//! [`OsStrExt`]: crate::os::unix::ffi::OsStrExt "os::unix::ffi::OsStrExt"
//! [windows.OsStrExt]: crate::os::windows::ffi::OsStrExt "os::windows::ffi::OsStrExt"
//! [`encode_wide`]: crate::os::windows::ffi::OsStrExt::encode_wide "os::windows::ffi::OsStrExt::encode_wide"
//! [`collect`]: crate::iter::Iterator::collect "iter::Iterator::collect"
//! [windows.OsStringExt]: crate::os::windows::ffi::OsStringExt "os::windows::ffi::OsStringExt"
//! [`from_wide`]: crate::os::windows::ffi::OsStringExt::from_wide "os::windows::ffi::OsStringExt::from_wide"
#![stable(feature = "rust1", since = "1.0.0")]

// C string types (`CStr`/`CString` and their errors) live in a submodule and
// are additionally re-exported inline below.
#[stable(feature = "c_str_module", since = "1.88.0")]
pub mod c_str;

// Primitive C type aliases and variadic-argument support from `core::ffi`.
#[stable(feature = "core_c_void", since = "1.30.0")]
pub use core::ffi::c_void;
#[unstable(
    feature = "c_variadic",
    reason = "the `c_variadic` feature has not been properly tested on \
              all supported platforms",
    issue = "44930"
)]
pub use core::ffi::{VaArgSafe, VaList};
#[stable(feature = "core_ffi_c", since = "1.64.0")]
pub use core::ffi::{
    c_char, c_double, c_float, c_int, c_long, c_longlong, c_schar, c_short, c_uchar, c_uint,
    c_ulong, c_ulonglong, c_ushort,
};
#[unstable(feature = "c_size_t", issue = "88345")]
pub use core::ffi::{c_ptrdiff_t, c_size_t, c_ssize_t};

// Inline re-exports so the C-string items also appear directly under
// `std::ffi`, matching the historical flat layout.
#[doc(inline)]
#[stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")]
pub use self::c_str::FromBytesUntilNulError;
#[doc(inline)]
#[stable(feature = "cstr_from_bytes", since = "1.10.0")]
pub use self::c_str::FromBytesWithNulError;
#[doc(inline)]
#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
pub use self::c_str::FromVecWithNulError;
#[doc(inline)]
#[stable(feature = "cstring_into", since = "1.7.0")]
pub use self::c_str::IntoStringError;
#[doc(inline)]
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::c_str::NulError;
#[doc(inline)]
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::c_str::{CStr, CString};

// Platform strings; the module itself is public for the `Display` helpers.
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use self::os_str::{OsStr, OsString};
#[stable(feature = "os_str_display", since = "1.87.0")]
pub mod os_str;

File diff suppressed because it is too large Load Diff

View File

@@ -1,311 +0,0 @@
use super::*;
use crate::mem::MaybeUninit;
use crate::ptr;
#[test]
fn test_os_string_with_capacity() {
    // `with_capacity(0)` must not allocate: the private `inner` buffer
    // reports capacity 0.
    let os_string = OsString::with_capacity(0);
    assert_eq!(0, os_string.inner.into_inner().capacity());
    // An explicit capacity request is reflected by the inner buffer.
    let os_string = OsString::with_capacity(10);
    assert_eq!(10, os_string.inner.into_inner().capacity());
    // Pushing into a zero-capacity string grows the inner buffer to fit.
    let mut os_string = OsString::with_capacity(0);
    os_string.push("abc");
    assert!(os_string.inner.into_inner().capacity() >= 3);
}
#[test]
fn test_os_string_clear() {
    // `clear` must remove all content, observable both through the public
    // comparison with "" and through the private `inner` buffer's length.
    let mut os_string = OsString::from("abc");
    assert_eq!(3, os_string.inner.as_inner().len());
    os_string.clear();
    assert_eq!(&os_string, "");
    assert_eq!(0, os_string.inner.as_inner().len());
}
#[test]
fn test_os_string_leak() {
    let os_string = OsString::from("have a cake");
    // Capture length and capacity before `leak` consumes the value.
    let (len, cap) = (os_string.len(), os_string.capacity());
    // `leak` hands out a `&'static mut OsStr` backed by the original allocation.
    let leaked = os_string.leak();
    assert_eq!(leaked.as_encoded_bytes(), b"have a cake");
    // SAFETY: rebuild the original allocation from the leaked pointer and the
    // length/capacity captured above, then drop it so the test does not leak.
    // Assumes the `OsStr` backing store is a plain byte buffer compatible
    // with `String`'s layout — TODO confirm for this std's encoding.
    unsafe { drop(String::from_raw_parts(leaked as *mut OsStr as _, len, cap)) }
}
#[test]
fn test_os_string_capacity() {
    // Zero requested capacity yields a capacity of exactly zero.
    let zero = OsString::with_capacity(0);
    assert_eq!(0, zero.capacity());

    // An explicit request is reported back through `capacity`.
    let ten = OsString::with_capacity(10);
    assert_eq!(10, ten.capacity());

    // Pushing content grows capacity at least to the content length.
    let mut grown = OsString::with_capacity(0);
    grown.push("abc");
    assert!(grown.capacity() >= 3);
}
#[test]
fn test_os_string_reserve() {
    let mut s = OsString::new();
    assert_eq!(s.capacity(), 0);

    // `reserve` guarantees room for at least the requested extra bytes.
    s.reserve(2);
    assert!(s.capacity() >= 2);

    (0..16).for_each(|_| s.push("a"));
    assert!(s.capacity() >= 16);

    // Reserving on a non-empty string accounts for the existing length.
    s.reserve(16);
    assert!(s.capacity() >= 32);

    s.push("a");
    s.reserve(16);
    assert!(s.capacity() >= 33)
}
#[test]
fn test_os_string_reserve_exact() {
    let mut buf = OsString::new();
    assert_eq!(buf.capacity(), 0);

    // Even the "exact" variant only guarantees *at least* the requested room.
    buf.reserve_exact(2);
    assert!(buf.capacity() >= 2);

    for _ in 0..16 {
        buf.push("a");
    }
    assert!(buf.capacity() >= 16);

    // Reservations on a non-empty string are relative to the current length.
    buf.reserve_exact(16);
    assert!(buf.capacity() >= 32);

    buf.push("a");
    buf.reserve_exact(16);
    assert!(buf.capacity() >= 33)
}
#[test]
fn test_os_string_join() {
    let strings = [OsStr::new("hello"), OsStr::new("dear"), OsStr::new("world")];
    // A single-element slice yields itself, with no separator emitted.
    assert_eq!("hello", strings[..1].join(OsStr::new(" ")));
    assert_eq!("hello dear world", strings.join(OsStr::new(" ")));
    // An empty separator plainly concatenates the parts.
    assert_eq!("hellodearworld", strings.join(OsStr::new("")));
    // Multi-character separators are inserted whole between elements.
    assert_eq!("hello.\n dear.\n world", strings.join(OsStr::new(".\n ")));
    // Joining a sub-slice, with an owned separator (`&OsString` also works).
    assert_eq!("dear world", strings[1..].join(&OsString::from(" ")));
    // Owned `OsString` elements join the same way as borrowed `OsStr`s.
    let strings_abc = [OsString::from("a"), OsString::from("b"), OsString::from("c")];
    assert_eq!("a b c", strings_abc.join(OsStr::new(" ")));
}
#[test]
fn test_os_string_default() {
    // The default `OsString` is empty.
    let empty = OsString::default();
    assert_eq!("", &empty);
}
#[test]
fn test_os_str_is_empty() {
    // Emptiness tracks push/clear.
    let mut buf = OsString::new();
    assert!(buf.is_empty());

    buf.push("abc");
    assert!(!buf.is_empty());

    buf.clear();
    assert!(buf.is_empty());
}
#[test]
fn test_os_str_len() {
    // Length tracks push/clear (in encoded bytes).
    let mut buf = OsString::new();
    assert_eq!(buf.len(), 0);

    buf.push("abc");
    assert_eq!(buf.len(), 3);

    buf.clear();
    assert_eq!(buf.len(), 0);
}
#[test]
fn test_os_str_default() {
    // The default `&OsStr` is the empty string.
    let empty: &OsStr = Default::default();
    assert_eq!("", empty);
}
#[test]
fn into_boxed() {
    let source = "Hello, world!";
    let os_str = OsStr::new(source);
    // Round-trip through `Box<OsStr>` and through
    // `OsString -> Box<OsStr> -> OsString`; all views must stay equal.
    let boxed: Box<OsStr> = Box::from(os_str);
    let round_tripped = os_str.to_owned().into_boxed_os_str().into_os_string();
    assert_eq!(os_str, &*boxed);
    assert_eq!(&*boxed, &*round_tripped);
    assert_eq!(&*round_tripped, os_str);
}
#[test]
fn boxed_default() {
    // A default `Box<OsStr>` is the empty string.
    let empty: Box<OsStr> = Default::default();
    assert!(empty.is_empty());
}
#[test]
fn test_os_str_clone_into() {
    // `clone_into` overwrites the destination's contents while reusing its
    // (larger) allocation.
    let mut dst = OsString::with_capacity(123);
    dst.push("hello");
    let src = OsStr::new("bonjour");
    src.clone_into(&mut dst);
    assert_eq!(src, dst);
    assert!(dst.capacity() >= 123);
}
#[test]
fn into_rc() {
    let os_str = OsStr::new("Hello, world!");

    // Conversion from a borrowed `&OsStr`.
    let rc: Rc<OsStr> = Rc::from(os_str);
    let arc: Arc<OsStr> = Arc::from(os_str);
    assert_eq!(&*rc, os_str);
    assert_eq!(&*arc, os_str);

    // Conversion from an owned `OsString`.
    let rc_owned: Rc<OsStr> = Rc::from(os_str.to_owned());
    let arc_owned: Arc<OsStr> = Arc::from(os_str.to_owned());
    assert_eq!(&*rc_owned, os_str);
    assert_eq!(&*arc_owned, os_str);
}
#[test]
fn slice_encoded_bytes() {
    // "123" = 3 bytes, 'θ' = 2 bytes, 'გ' = 3 bytes, '🦀' = 4 bytes.
    let os_str = OsStr::new("123θგ🦀");
    // ASCII
    let digits = os_str.slice_encoded_bytes(..3);
    assert_eq!(digits, "123");
    let three = os_str.slice_encoded_bytes(2..3);
    assert_eq!(three, "3");
    // 2-byte UTF-8
    let theta = os_str.slice_encoded_bytes(3..5);
    assert_eq!(theta, "θ");
    // 3-byte UTF-8. The expected value is the Georgian letter gan, which had
    // been dropped (leaving "") by an encoding mishap; bytes 5..8 of the
    // input are its 3-byte encoding, so the slice cannot be empty.
    let gani = os_str.slice_encoded_bytes(5..8);
    assert_eq!(gani, "გ");
    // 4-byte UTF-8
    let crab = os_str.slice_encoded_bytes(8..);
    assert_eq!(crab, "🦀");
}
#[test]
#[should_panic]
fn slice_out_of_bounds() {
    // Slicing past the end of the string must panic ('🦀' is only 4 bytes).
    let input = OsStr::new("🦀");
    let _ = input.slice_encoded_bytes(..5);
}
#[test]
#[should_panic]
fn slice_mid_char() {
    // Slicing in the middle of a multi-byte character must panic.
    let input = OsStr::new("🦀");
    let _ = input.slice_encoded_bytes(..2);
}
#[cfg(unix)]
#[test]
#[should_panic(expected = "byte index 1 is not an OsStr boundary")]
fn slice_invalid_data() {
    use crate::os::unix::ffi::OsStrExt;

    // Splitting inside a run of invalid (non-UTF-8) bytes must panic.
    let invalid = OsStr::from_bytes(b"\xFF\xFF");
    let _ = invalid.slice_encoded_bytes(1..);
}
#[cfg(unix)]
#[test]
#[should_panic(expected = "byte index 1 is not an OsStr boundary")]
fn slice_partial_utf8() {
    use crate::os::unix::ffi::{OsStrExt, OsStringExt};

    // An invalid byte immediately followed by a truncated UTF-8 sequence:
    // the position after the invalid byte is still not a valid boundary.
    let part_crab = OsStr::from_bytes(&"🦀".as_bytes()[..3]);
    let mut input = OsString::from_vec(vec![0xFF]);
    input.push(part_crab);
    let _ = input.slice_encoded_bytes(1..);
}
#[cfg(unix)]
#[test]
fn slice_invalid_edge() {
    use crate::os::unix::ffi::{OsStrExt, OsStringExt};

    // Slicing is allowed at either edge of an invalid (non-UTF-8) byte.
    let sandwich = OsStr::from_bytes(b"a\xFFa");
    assert_eq!(sandwich.slice_encoded_bytes(..1), "a");
    assert_eq!(sandwich.slice_encoded_bytes(1..), OsStr::from_bytes(b"\xFFa"));
    assert_eq!(sandwich.slice_encoded_bytes(..2), OsStr::from_bytes(b"a\xFF"));
    assert_eq!(sandwich.slice_encoded_bytes(2..), "a");

    // A truncated multi-byte sequence may be split off after valid ASCII.
    let truncated = OsStr::from_bytes(&"abc🦀".as_bytes()[..6]);
    assert_eq!(truncated.slice_encoded_bytes(..3), "abc");
    assert_eq!(truncated.slice_encoded_bytes(3..), OsStr::from_bytes(b"\xF0\x9F\xA6"));

    // Valid data directly after an invalid byte may be split off as well.
    let mut mixed = OsString::from_vec(vec![0xFF]);
    mixed.push("🦀");
    assert_eq!(mixed.slice_encoded_bytes(..1), OsStr::from_bytes(b"\xFF"));
    assert_eq!(mixed.slice_encoded_bytes(1..), "🦀");
}
#[cfg(windows)]
#[test]
#[should_panic(expected = "byte index 3 lies between surrogate codepoints")]
fn slice_between_surrogates() {
    use crate::os::windows::ffi::OsStringExt;

    // Two unpaired high surrogates encode as two 3-byte WTF-8 sequences;
    // splitting at the seam between them must panic.
    let surrogates = OsString::from_wide(&[0xD800, 0xD800]);
    assert_eq!(surrogates.as_encoded_bytes(), &[0xED, 0xA0, 0x80, 0xED, 0xA0, 0x80]);
    let _ = surrogates.slice_encoded_bytes(..3);
}
#[cfg(windows)]
#[test]
fn slice_surrogate_edge() {
    use crate::os::windows::ffi::OsStringExt;

    let surrogate = OsString::from_wide(&[0xD800]);

    // A surrogate followed by valid data may be split at the boundary.
    let mut pre_crab = surrogate.clone();
    pre_crab.push("🦀");
    assert_eq!(pre_crab.slice_encoded_bytes(..3), surrogate);
    assert_eq!(pre_crab.slice_encoded_bytes(3..), "🦀");

    // ...and so may valid data followed by a surrogate.
    let mut post_crab = OsString::from("🦀");
    post_crab.push(&surrogate);
    assert_eq!(post_crab.slice_encoded_bytes(..4), "🦀");
    assert_eq!(post_crab.slice_encoded_bytes(4..), surrogate);
}
#[test]
fn clone_to_uninit() {
    let a = OsStr::new("hello.txt");
    // Clone into raw uninitialized storage of exactly the right size and
    // check the bytes arrived intact.
    let mut storage = vec![MaybeUninit::<u8>::uninit(); size_of_val::<OsStr>(a)];
    unsafe { a.clone_to_uninit(ptr::from_mut::<[_]>(storage.as_mut_slice()).cast()) };
    assert_eq!(a.as_encoded_bytes(), unsafe { storage.assume_init_ref() });
    // Clone over an existing boxed OsStr of the same length, overwriting it.
    let mut b: Box<OsStr> = OsStr::new("world.exe").into();
    assert_eq!(size_of_val::<OsStr>(a), size_of_val::<OsStr>(&b));
    assert_ne!(a, &*b);
    unsafe { a.clone_to_uninit(ptr::from_mut::<OsStr>(&mut b).cast()) };
    assert_eq!(a, &*b);
}
#[test]
fn debug() {
let s = "'single quotes'";
assert_eq!(format!("{:?}", OsStr::new(s)), format!("{:?}", s));
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,91 +0,0 @@
//! Generic hashing support.
//!
//! This module provides a generic way to compute the [hash] of a value.
//! Hashes are most commonly used with [`HashMap`] and [`HashSet`].
//!
//! [hash]: https://en.wikipedia.org/wiki/Hash_function
//! [`HashMap`]: ../../std/collections/struct.HashMap.html
//! [`HashSet`]: ../../std/collections/struct.HashSet.html
//!
//! The simplest way to make a type hashable is to use `#[derive(Hash)]`:
//!
//! # Examples
//!
//! ```rust
//! use std::hash::{DefaultHasher, Hash, Hasher};
//!
//! #[derive(Hash)]
//! struct Person {
//! id: u32,
//! name: String,
//! phone: u64,
//! }
//!
//! let person1 = Person {
//! id: 5,
//! name: "Janet".to_string(),
//! phone: 555_666_7777,
//! };
//! let person2 = Person {
//! id: 5,
//! name: "Bob".to_string(),
//! phone: 555_666_7777,
//! };
//!
//! assert!(calculate_hash(&person1) != calculate_hash(&person2));
//!
//! fn calculate_hash<T: Hash>(t: &T) -> u64 {
//! let mut s = DefaultHasher::new();
//! t.hash(&mut s);
//! s.finish()
//! }
//! ```
//!
//! If you need more control over how a value is hashed, you need to implement
//! the [`Hash`] trait:
//!
//! ```rust
//! use std::hash::{DefaultHasher, Hash, Hasher};
//!
//! struct Person {
//! id: u32,
//! # #[allow(dead_code)]
//! name: String,
//! phone: u64,
//! }
//!
//! impl Hash for Person {
//! fn hash<H: Hasher>(&self, state: &mut H) {
//! self.id.hash(state);
//! self.phone.hash(state);
//! }
//! }
//!
//! let person1 = Person {
//! id: 5,
//! name: "Janet".to_string(),
//! phone: 555_666_7777,
//! };
//! let person2 = Person {
//! id: 5,
//! name: "Bob".to_string(),
//! phone: 555_666_7777,
//! };
//!
//! assert_eq!(calculate_hash(&person1), calculate_hash(&person2));
//!
//! fn calculate_hash<T: Hash>(t: &T) -> u64 {
//! let mut s = DefaultHasher::new();
//! t.hash(&mut s);
//! s.finish()
//! }
//! ```
#![stable(feature = "rust1", since = "1.0.0")]
pub(crate) mod random;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::hash::*;
#[stable(feature = "std_hash_exports", since = "1.76.0")]
pub use self::random::{DefaultHasher, RandomState};

View File

@@ -1,159 +0,0 @@
//! This module exists to isolate [`RandomState`] and [`DefaultHasher`] outside of the
//! [`collections`] module without actually publicly exporting them, so that parts of that
//! implementation can more easily be moved to the [`alloc`] crate.
//!
//! Although its items are public and contain stability attributes, they can't actually be accessed
//! outside this crate.
//!
//! [`collections`]: crate::collections
use super::{BuildHasher, Hasher, SipHasher13};
use crate::cell::Cell;
use crate::fmt;
use crate::sys::random::hashmap_random_keys;
/// `RandomState` is the default state for [`HashMap`] types.
///
/// A particular instance `RandomState` will create the same instances of
/// [`Hasher`], but the hashers created by two different `RandomState`
/// instances are unlikely to produce the same result for the same values.
///
/// [`HashMap`]: crate::collections::HashMap
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// use std::hash::RandomState;
///
/// let s = RandomState::new();
/// let mut map = HashMap::with_hasher(s);
/// map.insert(1, 2);
/// ```
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
#[derive(Clone)]
pub struct RandomState {
    // Key pair handed to `SipHasher13::new_with_keys` by `build_hasher`.
    k0: u64,
    k1: u64,
}
impl RandomState {
    /// Constructs a new `RandomState` that is initialized with random keys.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::hash::RandomState;
    ///
    /// let s = RandomState::new();
    /// ```
    #[inline]
    #[allow(deprecated)]
    // rand
    #[must_use]
    #[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
    pub fn new() -> RandomState {
        // Historically this function did not cache keys from the OS and instead
        // simply always called `rand::thread_rng().gen()` twice. In #31356 it
        // was discovered, however, that because we re-seed the thread-local RNG
        // from the OS periodically that this can cause excessive slowdown when
        // many hash maps are created on a thread. To solve this performance
        // trap we cache the first set of randomly generated keys per-thread.
        //
        // Later in #36481 it was discovered that exposing a deterministic
        // iteration order allows a form of DOS attack. To counter that we
        // increment one of the seeds on every RandomState creation, giving
        // every corresponding HashMap a different iteration order.
        thread_local!(static KEYS: Cell<(u64, u64)> = {
            Cell::new(hashmap_random_keys())
        });
        KEYS.with(|keys| {
            let (k0, k1) = keys.get();
            // Bump `k0` so the next `RandomState` on this thread differs.
            keys.set((k0.wrapping_add(1), k1));
            RandomState { k0, k1 }
        })
    }
}
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
impl BuildHasher for RandomState {
    type Hasher = DefaultHasher;

    // Every hasher built from the same `RandomState` is seeded with the same
    // key pair, so they produce identical hashes for identical input.
    #[inline]
    fn build_hasher(&self) -> DefaultHasher {
        let (k0, k1) = (self.k0, self.k1);
        DefaultHasher(SipHasher13::new_with_keys(k0, k1))
    }
}
/// The default [`Hasher`] used by [`RandomState`].
///
/// The internal algorithm is not specified, and so it and its hashes should
/// not be relied upon over releases.
#[derive(Clone, Debug)]
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
// Newtype wrapper around `SipHasher13`; the field stays private.
pub struct DefaultHasher(SipHasher13);
impl DefaultHasher {
/// Creates a new `DefaultHasher`.
///
/// This hasher is not guaranteed to be the same as all other
/// `DefaultHasher` instances, but is the same as all other `DefaultHasher`
/// instances created through `new` or `default`.
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
#[inline]
#[rustc_const_unstable(feature = "const_default", issue = "143894")]
#[must_use]
pub const fn new() -> DefaultHasher {
DefaultHasher(SipHasher13::new_with_keys(0, 0))
}
}
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
#[rustc_const_unstable(feature = "const_default", issue = "143894")]
impl const Default for DefaultHasher {
    /// Creates a new `DefaultHasher` using [`new`].
    /// See its documentation for more.
    ///
    /// [`new`]: DefaultHasher::new
    #[inline]
    fn default() -> DefaultHasher {
        Self::new()
    }
}
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
impl Hasher for DefaultHasher {
    // Every call is forwarded verbatim to the wrapped `SipHasher13`. It does
    // not override the remaining `write_*` methods, so inheriting their
    // defaults here is fine.
    #[inline]
    fn finish(&self) -> u64 {
        self.0.finish()
    }

    #[inline]
    fn write(&mut self, msg: &[u8]) {
        self.0.write(msg)
    }

    #[inline]
    fn write_str(&mut self, s: &str) {
        self.0.write_str(s);
    }
}
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
impl Default for RandomState {
    /// Constructs a new `RandomState`, equivalent to [`RandomState::new`].
    #[inline]
    fn default() -> RandomState {
        Self::new()
    }
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for RandomState {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The keys are deliberately not printed; the state is shown opaque.
        f.debug_struct("RandomState").finish_non_exhaustive()
    }
}

View File

@@ -1,601 +0,0 @@
mod buffer;
use buffer::Buffer;
use crate::fmt;
use crate::io::{
self, BorrowedCursor, BufRead, DEFAULT_BUF_SIZE, IoSliceMut, Read, Seek, SeekFrom, SizeHint,
SpecReadByte, uninlined_slow_read_byte,
};
/// The `BufReader<R>` struct adds buffering to any reader.
///
/// It can be excessively inefficient to work directly with a [`Read`] instance.
/// For example, every call to [`read`][`TcpStream::read`] on [`TcpStream`]
/// results in a system call. A `BufReader<R>` performs large, infrequent reads on
/// the underlying [`Read`] and maintains an in-memory buffer of the results.
///
/// `BufReader<R>` can improve the speed of programs that make *small* and
/// *repeated* read calls to the same file or network socket. It does not
/// help when reading very large amounts at once, or reading just one or a few
/// times. It also provides no advantage when reading from a source that is
/// already in memory, like a <code>[Vec]\<u8></code>.
///
/// When the `BufReader<R>` is dropped, the contents of its buffer will be
/// discarded. Creating multiple instances of a `BufReader<R>` on the same
/// stream can cause data loss. Reading from the underlying reader after
/// unwrapping the `BufReader<R>` with [`BufReader::into_inner`] can also cause
/// data loss.
///
/// [`TcpStream::read`]: crate::net::TcpStream::read
/// [`TcpStream`]: crate::net::TcpStream
///
/// # Examples
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::BufReader;
/// use std::fs::File;
///
/// fn main() -> std::io::Result<()> {
///     let f = File::open("log.txt")?;
///     let mut reader = BufReader::new(f);
///
///     let mut line = String::new();
///     let len = reader.read_line(&mut line)?;
///     println!("First line is {len} bytes long");
///     Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct BufReader<R: ?Sized> {
    // Read-ahead storage for bytes pulled from `inner`.
    buf: Buffer,
    // Declared last so `R: ?Sized` (an unsized reader) is allowed.
    inner: R,
}
impl<R: Read> BufReader<R> {
    /// Creates a new `BufReader<R>` with a default buffer capacity. The default is currently 8 KiB,
    /// but may change in the future.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufReader;
    /// use std::fs::File;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let f = File::open("log.txt")?;
    ///     let reader = BufReader::new(f);
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(inner: R) -> BufReader<R> {
        Self::with_capacity(DEFAULT_BUF_SIZE, inner)
    }

    // Allocates a default-sized buffer, reporting allocation failure as an
    // error instead of aborting.
    pub(crate) fn try_new_buffer() -> io::Result<Buffer> {
        Buffer::try_with_capacity(DEFAULT_BUF_SIZE)
    }

    // Assembles a `BufReader` from a reader and an already-allocated buffer.
    pub(crate) fn with_buffer(inner: R, buf: Buffer) -> Self {
        Self { buf, inner }
    }

    /// Creates a new `BufReader<R>` with the specified buffer capacity.
    ///
    /// # Examples
    ///
    /// Creating a buffer with ten bytes of capacity:
    ///
    /// ```no_run
    /// use std::io::BufReader;
    /// use std::fs::File;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let f = File::open("log.txt")?;
    ///     let reader = BufReader::with_capacity(10, f);
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with_capacity(capacity: usize, inner: R) -> BufReader<R> {
        let buf = Buffer::with_capacity(capacity);
        Self { buf, inner }
    }
}
impl<R: Read + ?Sized> BufReader<R> {
    /// Attempt to look ahead `n` bytes.
    ///
    /// `n` must be less than or equal to `capacity`.
    ///
    /// The returned slice may be less than `n` bytes long if
    /// end of file is reached.
    ///
    /// After calling this method, you may call [`consume`](BufRead::consume)
    /// with a value less than or equal to `n` to advance over some or all of
    /// the returned bytes.
    ///
    /// ## Examples
    ///
    /// ```rust
    /// #![feature(bufreader_peek)]
    /// use std::io::{Read, BufReader};
    ///
    /// let mut bytes = &b"oh, hello there"[..];
    /// let mut rdr = BufReader::with_capacity(6, &mut bytes);
    /// assert_eq!(rdr.peek(2).unwrap(), b"oh");
    /// let mut buf = [0; 4];
    /// rdr.read(&mut buf[..]).unwrap();
    /// assert_eq!(&buf, b"oh, ");
    /// assert_eq!(rdr.peek(5).unwrap(), b"hello");
    /// let mut s = String::new();
    /// rdr.read_to_string(&mut s).unwrap();
    /// assert_eq!(&s, "hello there");
    /// assert_eq!(rdr.peek(1).unwrap().len(), 0);
    /// ```
    #[unstable(feature = "bufreader_peek", issue = "128405")]
    pub fn peek(&mut self, n: usize) -> io::Result<&[u8]> {
        assert!(n <= self.capacity());
        // Keep reading until at least `n` unread bytes are buffered or EOF.
        while n > self.buf.buffer().len() {
            if self.buf.pos() > 0 {
                // Move the unread bytes to the front to make room for more.
                self.buf.backshift();
            }
            let new = self.buf.read_more(&mut self.inner)?;
            if new == 0 {
                // end of file, no more bytes to read
                return Ok(&self.buf.buffer()[..]);
            }
            debug_assert_eq!(self.buf.pos(), 0);
        }
        Ok(&self.buf.buffer()[..n])
    }
}
impl<R: ?Sized> BufReader<R> {
    /// Gets a reference to the underlying reader.
    ///
    /// It is inadvisable to directly read from the underlying reader.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufReader;
    /// use std::fs::File;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let f1 = File::open("log.txt")?;
    ///     let reader = BufReader::new(f1);
    ///
    ///     let f2 = reader.get_ref();
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_ref(&self) -> &R {
        &self.inner
    }

    /// Gets a mutable reference to the underlying reader.
    ///
    /// It is inadvisable to directly read from the underlying reader.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufReader;
    /// use std::fs::File;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let f1 = File::open("log.txt")?;
    ///     let mut reader = BufReader::new(f1);
    ///
    ///     let f2 = reader.get_mut();
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut(&mut self) -> &mut R {
        &mut self.inner
    }

    /// Returns a reference to the internally buffered data.
    ///
    /// Unlike [`fill_buf`], this will not attempt to fill the buffer if it is empty.
    ///
    /// [`fill_buf`]: BufRead::fill_buf
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::{BufReader, BufRead};
    /// use std::fs::File;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let f = File::open("log.txt")?;
    ///     let mut reader = BufReader::new(f);
    ///     assert!(reader.buffer().is_empty());
    ///
    ///     if reader.fill_buf()?.len() > 0 {
    ///         assert!(!reader.buffer().is_empty());
    ///     }
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "bufreader_buffer", since = "1.37.0")]
    pub fn buffer(&self) -> &[u8] {
        self.buf.buffer()
    }

    /// Returns the number of bytes the internal buffer can hold at once.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::{BufReader, BufRead};
    /// use std::fs::File;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let f = File::open("log.txt")?;
    ///     let mut reader = BufReader::new(f);
    ///
    ///     let capacity = reader.capacity();
    ///     let buffer = reader.fill_buf()?;
    ///     assert!(buffer.len() <= capacity);
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "buffered_io_capacity", since = "1.46.0")]
    pub fn capacity(&self) -> usize {
        self.buf.capacity()
    }

    /// Unwraps this `BufReader<R>`, returning the underlying reader.
    ///
    /// Note that any leftover data in the internal buffer is lost. Therefore,
    /// a following read from the underlying reader may lead to data loss.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufReader;
    /// use std::fs::File;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let f1 = File::open("log.txt")?;
    ///     let reader = BufReader::new(f1);
    ///
    ///     let f2 = reader.into_inner();
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_inner(self) -> R
    where
        R: Sized,
    {
        self.inner
    }

    /// Invalidates all data in the internal buffer.
    #[inline]
    pub(in crate::io) fn discard_buffer(&mut self) {
        self.buf.discard_buffer()
    }
}
// This is only used by a test which asserts that the initialization-tracking is correct.
#[cfg(test)]
impl<R: ?Sized> BufReader<R> {
    #[allow(missing_docs)]
    pub fn initialized(&self) -> usize {
        // How many bytes of the buffer have ever been initialized by a read.
        self.buf.initialized()
    }
}
impl<R: ?Sized + Seek> BufReader<R> {
    /// Seeks relative to the current position. If the new position lies within the buffer,
    /// the buffer will not be flushed, allowing for more efficient seeks.
    /// This method does not return the location of the underlying reader, so the caller
    /// must track this information themselves if it is required.
    #[stable(feature = "bufreader_seek_relative", since = "1.53.0")]
    pub fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
        let pos = self.buf.pos() as u64;
        if offset < 0 {
            // `unsigned_abs` avoids the negation overflow that `-offset`
            // would hit for `offset == i64::MIN` (a panic under overflow
            // checks); the value itself is identical for all other inputs.
            let back = offset.unsigned_abs();
            if pos.checked_sub(back).is_some() {
                // Target is still inside the buffer: just rewind `pos`.
                self.buf.unconsume(back as usize);
                return Ok(());
            }
        } else if let Some(new_pos) = pos.checked_add(offset as u64) {
            if new_pos <= self.buf.filled() as u64 {
                // Target is within the already-buffered bytes: advance `pos`.
                self.buf.consume(offset as usize);
                return Ok(());
            }
        }
        // Out-of-buffer target: fall back to a real seek, which discards the buffer.
        self.seek(SeekFrom::Current(offset)).map(drop)
    }
}
impl<R> SpecReadByte for BufReader<R>
where
    Self: Read,
{
    #[inline]
    fn spec_read_byte(&mut self) -> Option<io::Result<u8>> {
        // Fast path: a byte is already sitting in the buffer.
        let mut first = 0;
        match self.buf.consume_with(1, |bytes| first = bytes[0]) {
            true => Some(Ok(first)),
            // Slow path, reached at most once per buffer refill.
            false => uninlined_slow_read_byte(self),
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<R: ?Sized + Read> Read for BufReader<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // If we don't have any buffered data and we're doing a massive read
        // (larger than our internal buffer), bypass our internal buffer
        // entirely.
        if self.buf.pos() == self.buf.filled() && buf.len() >= self.capacity() {
            self.discard_buffer();
            return self.inner.read(buf);
        }
        let mut rem = self.fill_buf()?;
        let nread = rem.read(buf)?;
        self.consume(nread);
        Ok(nread)
    }
    fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
        // If we don't have any buffered data and we're doing a massive read
        // (larger than our internal buffer), bypass our internal buffer
        // entirely.
        if self.buf.pos() == self.buf.filled() && cursor.capacity() >= self.capacity() {
            self.discard_buffer();
            return self.inner.read_buf(cursor);
        }
        let prev = cursor.written();
        let mut rem = self.fill_buf()?;
        rem.read_buf(cursor.reborrow())?; // actually never fails
        self.consume(cursor.written() - prev); //slice impl of read_buf known to never unfill buf
        Ok(())
    }
    // Small read_exacts from a BufReader are extremely common when used with a deserializer.
    // The default implementation calls read in a loop, which results in surprisingly poor code
    // generation for the common path where the buffer has enough bytes to fill the passed-in
    // buffer.
    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        // Fast path: the whole request is already buffered.
        if self
            .buf
            .consume_with(buf.len(), |claimed| buf.copy_from_slice(claimed))
        {
            return Ok(());
        }
        crate::io::default_read_exact(self, buf)
    }
    fn read_buf_exact(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
        if self
            .buf
            .consume_with(cursor.capacity(), |claimed| cursor.append(claimed))
        {
            return Ok(());
        }
        crate::io::default_read_buf_exact(self, cursor)
    }
    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        // Same bypass as `read`, but keyed on the combined length of all bufs.
        let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
        if self.buf.pos() == self.buf.filled() && total_len >= self.capacity() {
            self.discard_buffer();
            return self.inner.read_vectored(bufs);
        }
        let mut rem = self.fill_buf()?;
        let nread = rem.read_vectored(bufs)?;
        self.consume(nread);
        Ok(nread)
    }
    fn is_read_vectored(&self) -> bool {
        self.inner.is_read_vectored()
    }
    // The inner reader might have an optimized `read_to_end`. Drain our buffer and then
    // delegate to the inner implementation.
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        let inner_buf = self.buffer();
        buf.try_reserve(inner_buf.len())?;
        buf.extend_from_slice(inner_buf);
        let nread = inner_buf.len();
        self.discard_buffer();
        Ok(nread + self.inner.read_to_end(buf)?)
    }
    // The inner reader might have an optimized `read_to_end`. Drain our buffer and then
    // delegate to the inner implementation.
    fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
        // In the general `else` case below we must read bytes into a side buffer, check
        // that they are valid UTF-8, and then append them to `buf`. This requires a
        // potentially large memcpy.
        //
        // If `buf` is empty--the most common case--we can leverage `append_to_string`
        // to read directly into `buf`'s internal byte buffer, saving an allocation and
        // a memcpy.
        if buf.is_empty() {
            // `append_to_string`'s safety relies on the buffer only being appended to since
            // it only checks the UTF-8 validity of new data. If there were existing content in
            // `buf` then an untrustworthy reader (i.e. `self.inner`) could not only append
            // bytes but also modify existing bytes and render them invalid. On the other hand,
            // if `buf` is empty then by definition any writes must be appends and
            // `append_to_string` will validate all of the new bytes.
            unsafe { crate::io::append_to_string(buf, |b| self.read_to_end(b)) }
        } else {
            // We cannot append our byte buffer directly onto the `buf` String as there could
            // be an incomplete UTF-8 sequence that has only been partially read. We must read
            // everything into a side buffer first and then call `from_utf8` on the complete
            // buffer.
            let mut bytes = Vec::new();
            self.read_to_end(&mut bytes)?;
            let string = crate::str::from_utf8(&bytes).map_err(|_| io::Error::INVALID_UTF8)?;
            *buf += string;
            Ok(string.len())
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<R: ?Sized + Read> BufRead for BufReader<R> {
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        // `Buffer` handles the refill-when-empty logic.
        self.buf.fill_buf(&mut self.inner)
    }
    fn consume(&mut self, amt: usize) {
        self.buf.consume(amt)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<R> fmt::Debug for BufReader<R>
where
    R: ?Sized + fmt::Debug,
{
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("BufReader")
            .field("reader", &&self.inner)
            .field(
                "buffer",
                // Rendered as "unread bytes / total capacity".
                &format_args!("{}/{}", self.buf.filled() - self.buf.pos(), self.capacity()),
            )
            .finish()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<R: ?Sized + Seek> Seek for BufReader<R> {
    /// Seek to an offset, in bytes, in the underlying reader.
    ///
    /// The position used for seeking with <code>[SeekFrom::Current]\(_)</code> is the
    /// position the underlying reader would be at if the `BufReader<R>` had no
    /// internal buffer.
    ///
    /// Seeking always discards the internal buffer, even if the seek position
    /// would otherwise fall within it. This guarantees that calling
    /// [`BufReader::into_inner()`] immediately after a seek yields the underlying reader
    /// at the same position.
    ///
    /// To seek without discarding the internal buffer, use [`BufReader::seek_relative`].
    ///
    /// See [`std::io::Seek`] for more details.
    ///
    /// Note: In the edge case where you're seeking with <code>[SeekFrom::Current]\(n)</code>
    /// where `n` minus the internal buffer length overflows an `i64`, two
    /// seeks will be performed instead of one. If the second seek returns
    /// [`Err`], the underlying reader will be left at the same position it would
    /// have if you called `seek` with <code>[SeekFrom::Current]\(0)</code>.
    ///
    /// [`std::io::Seek`]: Seek
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        let result: u64;
        if let SeekFrom::Current(n) = pos {
            let remainder = (self.buf.filled() - self.buf.pos()) as i64;
            // it should be safe to assume that remainder fits within an i64 as the alternative
            // means we managed to allocate 8 exbibytes and that's absurd.
            // But it's not out of the realm of possibility for some weird underlying reader to
            // support seeking by i64::MIN so we need to handle underflow when subtracting
            // remainder.
            if let Some(offset) = n.checked_sub(remainder) {
                result = self.inner.seek(SeekFrom::Current(offset))?;
            } else {
                // seek backwards by our remainder, and then by the offset
                self.inner.seek(SeekFrom::Current(-remainder))?;
                self.discard_buffer();
                result = self.inner.seek(SeekFrom::Current(n))?;
            }
        } else {
            // Seeking with Start/End doesn't care about our buffer length.
            result = self.inner.seek(pos)?;
        }
        self.discard_buffer();
        Ok(result)
    }

    /// Returns the current seek position from the start of the stream.
    ///
    /// The value returned is equivalent to `self.seek(SeekFrom::Current(0))`
    /// but does not flush the internal buffer. Due to this optimization the
    /// function does not guarantee that calling `.into_inner()` immediately
    /// afterwards will yield the underlying reader at the same position. Use
    /// [`BufReader::seek`] instead if you require that guarantee.
    ///
    /// # Panics
    ///
    /// This function will panic if the position of the inner reader is smaller
    /// than the amount of buffered data. That can happen if the inner reader
    /// has an incorrect implementation of [`Seek::stream_position`], or if the
    /// position has gone out of sync due to calling [`Seek::seek`] directly on
    /// the underlying reader.
    ///
    /// # Example
    ///
    /// ```no_run
    /// use std::{
    ///     io::{self, BufRead, BufReader, Seek},
    ///     fs::File,
    /// };
    ///
    /// fn main() -> io::Result<()> {
    ///     let mut f = BufReader::new(File::open("foo.txt")?);
    ///
    ///     let before = f.stream_position()?;
    ///     f.read_line(&mut String::new())?;
    ///     let after = f.stream_position()?;
    ///
    ///     println!("The first line was {} bytes long", after - before);
    ///     Ok(())
    /// }
    /// ```
    fn stream_position(&mut self) -> io::Result<u64> {
        // Logical position = inner position minus bytes buffered but unread.
        let remainder = (self.buf.filled() - self.buf.pos()) as u64;
        self.inner.stream_position().map(|pos| {
            pos.checked_sub(remainder).expect(
                "overflow when subtracting remaining buffer size from inner stream position",
            )
        })
    }

    /// Seeks relative to the current position.
    ///
    /// If the new position lies within the buffer, the buffer will not be
    /// flushed, allowing for more efficient seeks. This method does not return
    /// the location of the underlying reader, so the caller must track this
    /// information themselves if it is required.
    fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
        // Forwards to the inherent method, which keeps the buffer when possible.
        self.seek_relative(offset)
    }
}
impl<T: ?Sized> SizeHint for BufReader<T> {
    #[inline]
    fn lower_bound(&self) -> usize {
        // Buffered bytes are guaranteed available on top of whatever the
        // inner reader promises.
        self.buffer().len() + SizeHint::lower_bound(self.get_ref())
    }

    #[inline]
    fn upper_bound(&self) -> Option<usize> {
        // Unknown inner bound (or overflow) means no bound overall.
        let inner = SizeHint::upper_bound(self.get_ref())?;
        inner.checked_add(self.buffer().len())
    }
}

View File

@@ -1,155 +0,0 @@
//! An encapsulation of `BufReader`'s buffer management logic.
//!
//! This module factors out the basic functionality of `BufReader` in order to protect two core
//! invariants:
//! * `filled` bytes of `buf` are always initialized
//! * `pos` is always <= `filled`
//! Since this module encapsulates the buffer management logic, we can ensure that the range
//! `pos..filled` is always a valid index into the initialized region of the buffer. This means
//! that user code which wants to do reads from a `BufReader` via `buffer` + `consume` can do so
//! without encountering any runtime bounds checks.
use crate::cmp;
use crate::io::{self, BorrowedBuf, ErrorKind, Read};
use crate::mem::MaybeUninit;
pub struct Buffer {
    // The buffer.
    buf: Box<[MaybeUninit<u8>]>,
    // The current seek offset into `buf`, must always be <= `filled`.
    pos: usize,
    // Each call to `fill_buf` sets `filled` to indicate how many bytes at the start of `buf` are
    // initialized with bytes from a read.
    filled: usize,
    // This is the max number of bytes returned across all `fill_buf` calls. We track this so that
    // we can accurately tell `read_buf` how many bytes of `buf` are initialized, to bypass as much
    // of its defensive initialization as possible. Note that while this is often the same as
    // `filled`, it doesn't need to be. Calls to `fill_buf` are not required to actually fill the
    // buffer, and omitting this is a huge perf regression for `Read` impls that do not.
    initialized: usize,
}
impl Buffer {
    // Creates a buffer of `capacity` uninitialized bytes; aborts on OOM.
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        let buf = Box::new_uninit_slice(capacity);
        Self { buf, pos: 0, filled: 0, initialized: 0 }
    }
    // Fallible variant of `with_capacity`: reports OOM as an io::Error.
    #[inline]
    pub fn try_with_capacity(capacity: usize) -> io::Result<Self> {
        match Box::try_new_uninit_slice(capacity) {
            Ok(buf) => Ok(Self { buf, pos: 0, filled: 0, initialized: 0 }),
            Err(_) => {
                Err(io::const_error!(ErrorKind::OutOfMemory, "failed to allocate read buffer"))
            }
        }
    }
    // Returns the unread portion of the buffer (`pos..filled`).
    #[inline]
    pub fn buffer(&self) -> &[u8] {
        // SAFETY: self.pos and self.filled are valid, and self.filled >= self.pos, and
        // that region is initialized because those are all invariants of this type.
        unsafe { self.buf.get_unchecked(self.pos..self.filled).assume_init_ref() }
    }
    #[inline]
    pub fn capacity(&self) -> usize {
        self.buf.len()
    }
    #[inline]
    pub fn filled(&self) -> usize {
        self.filled
    }
    #[inline]
    pub fn pos(&self) -> usize {
        self.pos
    }
    // This is only used by a test which asserts that the initialization-tracking is correct.
    #[cfg(test)]
    pub fn initialized(&self) -> usize {
        self.initialized
    }
    // Drops all buffered data; `initialized` is deliberately kept.
    #[inline]
    pub fn discard_buffer(&mut self) {
        self.pos = 0;
        self.filled = 0;
    }
    // Advances `pos`, clamped so the `pos <= filled` invariant holds.
    #[inline]
    pub fn consume(&mut self, amt: usize) {
        self.pos = cmp::min(self.pos + amt, self.filled);
    }
    /// If there are `amt` bytes available in the buffer, pass a slice containing those bytes to
    /// `visitor` and return true. If there are not enough bytes available, return false.
    #[inline]
    pub fn consume_with<V>(&mut self, amt: usize, mut visitor: V) -> bool
    where
        V: FnMut(&[u8]),
    {
        if let Some(claimed) = self.buffer().get(..amt) {
            visitor(claimed);
            // If the indexing into self.buffer() succeeds, amt must be a valid increment.
            self.pos += amt;
            true
        } else {
            false
        }
    }
    // Rewinds `pos`, saturating at 0 so the invariant cannot be violated.
    #[inline]
    pub fn unconsume(&mut self, amt: usize) {
        self.pos = self.pos.saturating_sub(amt);
    }
/// Read more bytes into the buffer without discarding any of its contents
pub fn read_more(&mut self, mut reader: impl Read) -> io::Result<usize> {
let mut buf = BorrowedBuf::from(&mut self.buf[self.filled..]);
let old_init = self.initialized - self.filled;
unsafe {
buf.set_init(old_init);
}
reader.read_buf(buf.unfilled())?;
self.filled += buf.len();
self.initialized += buf.init_len() - old_init;
Ok(buf.len())
}
/// Remove bytes that have already been read from the buffer.
pub fn backshift(&mut self) {
self.buf.copy_within(self.pos..self.filled, 0);
self.filled -= self.pos;
self.pos = 0;
}
#[inline]
pub fn fill_buf(&mut self, mut reader: impl Read) -> io::Result<&[u8]> {
// If we've reached the end of our internal buffer then we need to fetch
// some more data from the reader.
// Branch using `>=` instead of the more correct `==`
// to tell the compiler that the pos..cap slice is always valid.
if self.pos >= self.filled {
debug_assert!(self.pos == self.filled);
let mut buf = BorrowedBuf::from(&mut *self.buf);
// SAFETY: `self.filled` bytes will always have been initialized.
unsafe {
buf.set_init(self.initialized);
}
let result = reader.read_buf(buf.unfilled());
self.pos = 0;
self.filled = buf.len();
self.initialized = buf.init_len();
result?;
}
Ok(self.buffer())
}
}

View File

@@ -1,680 +0,0 @@
use crate::io::{
self, DEFAULT_BUF_SIZE, ErrorKind, IntoInnerError, IoSlice, Seek, SeekFrom, Write,
};
use crate::mem::{self, ManuallyDrop};
use crate::{error, fmt, ptr};
/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`Write`]. For example, every call to
/// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A
/// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying
/// writer in large, infrequent batches.
///
/// `BufWriter<W>` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a <code>[Vec]\<u8></code>.
///
/// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
/// dropping will attempt to flush the contents of the buffer, any errors
/// that happen in the process of dropping will be ignored. Calling [`flush`]
/// ensures that the buffer is empty and thus dropping will not even attempt
/// file operations.
///
/// # Examples
///
/// Let's write the numbers one through ten to a [`TcpStream`]:
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::net::TcpStream;
///
/// let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap();
///
/// for i in 0..10 {
/// stream.write(&[i+1]).unwrap();
/// }
/// ```
///
/// Because we're not buffering, we write each one in turn, incurring the
/// overhead of a system call per byte written. We can fix this with a
/// `BufWriter<W>`:
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::BufWriter;
/// use std::net::TcpStream;
///
/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
///
/// for i in 0..10 {
/// stream.write(&[i+1]).unwrap();
/// }
/// stream.flush().unwrap();
/// ```
///
/// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped
/// together by the buffer and will all be written out in one system call when
/// the `stream` is flushed.
///
/// [`TcpStream::write`]: crate::net::TcpStream::write
/// [`TcpStream`]: crate::net::TcpStream
/// [`flush`]: BufWriter::flush
#[stable(feature = "rust1", since = "1.0.0")]
pub struct BufWriter<W: ?Sized + Write> {
    // The buffer. Avoid using this like a normal `Vec` in common code paths.
    // That is, don't use `buf.push`, `buf.extend_from_slice`, or any other
    // methods that require bounds checking or the like. This makes an enormous
    // difference to performance (we may want to stop using a `Vec` entirely).
    buf: Vec<u8>,
    // #30888: If the inner writer panics in a call to write, we don't want to
    // write the buffered data a second time in BufWriter's destructor. This
    // flag tells the Drop impl if it should skip the flush.
    panicked: bool,
    // The wrapped writer. Declared last because `W: ?Sized` — a possibly
    // unsized field must be the final field of a struct.
    inner: W,
}
impl<W: Write> BufWriter<W> {
    /// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KiB,
    /// but may change in the future.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(inner: W) -> BufWriter<W> {
        BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
    }

    /// Allocates a default-sized write buffer fallibly, mapping allocation
    /// failure to an `OutOfMemory` I/O error (paired with `with_buffer`).
    pub(crate) fn try_new_buffer() -> io::Result<Vec<u8>> {
        Vec::try_with_capacity(DEFAULT_BUF_SIZE).map_err(|_| {
            io::const_error!(ErrorKind::OutOfMemory, "failed to allocate write buffer")
        })
    }

    /// Builds a `BufWriter` around a caller-supplied buffer.
    pub(crate) fn with_buffer(inner: W, buf: Vec<u8>) -> Self {
        Self { inner, buf, panicked: false }
    }

    /// Creates a new `BufWriter<W>` with at least the specified buffer capacity.
    ///
    /// # Examples
    ///
    /// Creating a buffer with a buffer of at least a hundred bytes.
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let stream = TcpStream::connect("127.0.0.1:34254").unwrap();
    /// let mut buffer = BufWriter::with_capacity(100, stream);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
        BufWriter { inner, buf: Vec::with_capacity(capacity), panicked: false }
    }

    /// Unwraps this `BufWriter<W>`, returning the underlying writer.
    ///
    /// The buffer is written out before returning the writer.
    ///
    /// # Errors
    ///
    /// An [`Err`] will be returned if an error occurs while flushing the buffer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // unwrap the TcpStream and flush the buffer
    /// let stream = buffer.into_inner().unwrap();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
        match self.flush_buf() {
            // On flush failure, hand `self` back inside the error so the
            // caller can retry or recover the buffered data.
            Err(e) => Err(IntoInnerError::new(self, e)),
            Ok(()) => Ok(self.into_parts().0),
        }
    }

    /// Disassembles this `BufWriter<W>`, returning the underlying writer, and any buffered but
    /// unwritten data.
    ///
    /// If the underlying writer panicked, it is not known what portion of the data was written.
    /// In this case, we return `WriterPanicked` for the buffered data (from which the buffer
    /// contents can still be recovered).
    ///
    /// `into_parts` makes no attempt to flush data and cannot fail.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::io::{BufWriter, Write};
    ///
    /// let mut buffer = [0u8; 10];
    /// let mut stream = BufWriter::new(buffer.as_mut());
    /// write!(stream, "too much data").unwrap();
    /// stream.flush().expect_err("it doesn't fit");
    /// let (recovered_writer, buffered_data) = stream.into_parts();
    /// assert_eq!(recovered_writer.len(), 0);
    /// assert_eq!(&buffered_data.unwrap(), b"ata");
    /// ```
    #[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
    pub fn into_parts(self) -> (W, Result<Vec<u8>, WriterPanicked>) {
        // Suppress `Drop` (which would try to flush) so the fields can be
        // moved out by hand.
        let mut this = ManuallyDrop::new(self);
        let buf = mem::take(&mut this.buf);
        let buf = if !this.panicked { Ok(buf) } else { Err(WriterPanicked { buf }) };

        // SAFETY: double-drops are prevented by putting `this` in a ManuallyDrop that is never dropped
        let inner = unsafe { ptr::read(&this.inner) };

        (inner, buf)
    }
}
impl<W: ?Sized + Write> BufWriter<W> {
    /// Send data in our local buffer into the inner writer, looping as
    /// necessary until either it's all been sent or an error occurs.
    ///
    /// Because all the data in the buffer has been reported to our owner as
    /// "successfully written" (by returning nonzero success values from
    /// `write`), any 0-length writes from `inner` must be reported as i/o
    /// errors from this method.
    pub(in crate::io) fn flush_buf(&mut self) -> io::Result<()> {
        /// Helper struct to ensure the buffer is updated after all the writes
        /// are complete. It tracks the number of written bytes and drains them
        /// all from the front of the buffer when dropped.
        struct BufGuard<'a> {
            buffer: &'a mut Vec<u8>,
            written: usize,
        }

        impl<'a> BufGuard<'a> {
            fn new(buffer: &'a mut Vec<u8>) -> Self {
                Self { buffer, written: 0 }
            }

            /// The unwritten part of the buffer
            fn remaining(&self) -> &[u8] {
                &self.buffer[self.written..]
            }

            /// Flag some bytes as removed from the front of the buffer
            fn consume(&mut self, amt: usize) {
                self.written += amt;
            }

            /// true if all of the bytes have been written
            fn done(&self) -> bool {
                self.written >= self.buffer.len()
            }
        }

        impl Drop for BufGuard<'_> {
            // Runs even on early error return, so bytes that *were* written
            // are drained and never re-sent by a later flush.
            fn drop(&mut self) {
                if self.written > 0 {
                    self.buffer.drain(..self.written);
                }
            }
        }

        let mut guard = BufGuard::new(&mut self.buf);
        while !guard.done() {
            // Bracket the inner `write` with the panic flag so `Drop` skips
            // re-flushing if the writer unwinds (see the `panicked` field).
            self.panicked = true;
            let r = self.inner.write(guard.remaining());
            self.panicked = false;

            match r {
                Ok(0) => {
                    return Err(io::const_error!(
                        ErrorKind::WriteZero,
                        "failed to write the buffered data",
                    ));
                }
                Ok(n) => guard.consume(n),
                // Interrupted writes are transient: retry in the next loop turn.
                Err(ref e) if e.is_interrupted() => {}
                Err(e) => return Err(e),
            }
        }
        Ok(())
    }

    /// Buffer some data without flushing it, regardless of the size of the
    /// data. Writes as much as possible without exceeding capacity. Returns
    /// the number of bytes written.
    pub(super) fn write_to_buf(&mut self, buf: &[u8]) -> usize {
        let available = self.spare_capacity();
        let amt_to_buffer = available.min(buf.len());

        // SAFETY: `amt_to_buffer` is <= buffer's spare capacity by construction.
        unsafe {
            self.write_to_buffer_unchecked(&buf[..amt_to_buffer]);
        }

        amt_to_buffer
    }

    /// Gets a reference to the underlying writer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // we can use reference just like buffer
    /// let reference = buffer.get_ref();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_ref(&self) -> &W {
        &self.inner
    }

    /// Gets a mutable reference to the underlying writer.
    ///
    /// It is inadvisable to directly write to the underlying writer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // we can use reference just like buffer
    /// let reference = buffer.get_mut();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut(&mut self) -> &mut W {
        &mut self.inner
    }

    /// Returns a reference to the internally buffered data.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // See how many bytes are currently buffered
    /// let bytes_buffered = buf_writer.buffer().len();
    /// ```
    #[stable(feature = "bufreader_buffer", since = "1.37.0")]
    pub fn buffer(&self) -> &[u8] {
        &self.buf
    }

    /// Returns a mutable reference to the internal buffer.
    ///
    /// This can be used to write data directly into the buffer without triggering writers
    /// to the underlying writer.
    ///
    /// That the buffer is a `Vec` is an implementation detail.
    /// Callers should not modify the capacity as there currently is no public API to do so
    /// and thus any capacity changes would be unexpected by the user.
    pub(in crate::io) fn buffer_mut(&mut self) -> &mut Vec<u8> {
        &mut self.buf
    }

    /// Returns the number of bytes the internal buffer can hold without flushing.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::BufWriter;
    /// use std::net::TcpStream;
    ///
    /// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
    ///
    /// // Check the capacity of the inner buffer
    /// let capacity = buf_writer.capacity();
    /// // Calculate how many bytes can be written without flushing
    /// let without_flush = capacity - buf_writer.buffer().len();
    /// ```
    #[stable(feature = "buffered_io_capacity", since = "1.46.0")]
    pub fn capacity(&self) -> usize {
        self.buf.capacity()
    }

    // Ensure this function does not get inlined into `write`, so that it
    // remains inlineable and its common path remains as short as possible.
    // If this function ends up being called frequently relative to `write`,
    // it's likely a sign that the client is using an improperly sized buffer
    // or their write patterns are somewhat pathological.
    #[cold]
    #[inline(never)]
    fn write_cold(&mut self, buf: &[u8]) -> io::Result<usize> {
        if buf.len() > self.spare_capacity() {
            self.flush_buf()?;
        }

        // Why not len > capacity? To avoid a needless trip through the buffer when the input
        // exactly fills it. We'd just need to flush it to the underlying writer anyway.
        if buf.len() >= self.buf.capacity() {
            self.panicked = true;
            let r = self.get_mut().write(buf);
            self.panicked = false;
            r
        } else {
            // Write to the buffer. In this case, we write to the buffer even if it fills it
            // exactly. Doing otherwise would mean flushing the buffer, then writing this
            // input to the inner writer, which in many cases would be a worse strategy.

            // SAFETY: There was either enough spare capacity already, or there wasn't and we
            // flushed the buffer to ensure that there is. In the latter case, we know that there
            // is because flushing ensured that our entire buffer is spare capacity, and we entered
            // this block because the input buffer length is less than that capacity. In either
            // case, it's safe to write the input buffer to our buffer.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(buf.len())
        }
    }

    // Ensure this function does not get inlined into `write_all`, so that it
    // remains inlineable and its common path remains as short as possible.
    // If this function ends up being called frequently relative to `write_all`,
    // it's likely a sign that the client is using an improperly sized buffer
    // or their write patterns are somewhat pathological.
    #[cold]
    #[inline(never)]
    fn write_all_cold(&mut self, buf: &[u8]) -> io::Result<()> {
        // Normally, `write_all` just calls `write` in a loop. We can do better
        // by calling `self.get_mut().write_all()` directly, which avoids
        // round trips through the buffer in the event of a series of partial
        // writes in some circumstances.
        if buf.len() > self.spare_capacity() {
            self.flush_buf()?;
        }

        // Why not len > capacity? To avoid a needless trip through the buffer when the input
        // exactly fills it. We'd just need to flush it to the underlying writer anyway.
        if buf.len() >= self.buf.capacity() {
            self.panicked = true;
            let r = self.get_mut().write_all(buf);
            self.panicked = false;
            r
        } else {
            // Write to the buffer. In this case, we write to the buffer even if it fills it
            // exactly. Doing otherwise would mean flushing the buffer, then writing this
            // input to the inner writer, which in many cases would be a worse strategy.

            // SAFETY: There was either enough spare capacity already, or there wasn't and we
            // flushed the buffer to ensure that there is. In the latter case, we know that there
            // is because flushing ensured that our entire buffer is spare capacity, and we entered
            // this block because the input buffer length is less than that capacity. In either
            // case, it's safe to write the input buffer to our buffer.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(())
        }
    }

    // SAFETY: Requires `buf.len() <= self.buf.capacity() - self.buf.len()`,
    // i.e., that input buffer length is less than or equal to spare capacity.
    #[inline]
    unsafe fn write_to_buffer_unchecked(&mut self, buf: &[u8]) {
        debug_assert!(buf.len() <= self.spare_capacity());
        let old_len = self.buf.len();
        let buf_len = buf.len();
        let src = buf.as_ptr();
        unsafe {
            let dst = self.buf.as_mut_ptr().add(old_len);
            ptr::copy_nonoverlapping(src, dst, buf_len);
            self.buf.set_len(old_len + buf_len);
        }
    }

    /// Number of bytes that can be appended to the buffer before it is full.
    #[inline]
    fn spare_capacity(&self) -> usize {
        self.buf.capacity() - self.buf.len()
    }
}
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
/// Error returned for the buffered data from `BufWriter::into_parts`, when the underlying
/// writer has previously panicked. Contains the (possibly partly written) buffered data.
///
/// # Example
///
/// ```
/// use std::io::{self, BufWriter, Write};
/// use std::panic::{catch_unwind, AssertUnwindSafe};
///
/// struct PanickingWriter;
/// impl Write for PanickingWriter {
/// fn write(&mut self, buf: &[u8]) -> io::Result<usize> { panic!() }
/// fn flush(&mut self) -> io::Result<()> { panic!() }
/// }
///
/// let mut stream = BufWriter::new(PanickingWriter);
/// write!(stream, "some data").unwrap();
/// let result = catch_unwind(AssertUnwindSafe(|| {
/// stream.flush().unwrap()
/// }));
/// assert!(result.is_err());
/// let (recovered_writer, buffered_data) = stream.into_parts();
/// assert!(matches!(recovered_writer, PanickingWriter));
/// assert_eq!(buffered_data.unwrap_err().into_inner(), b"some data");
/// ```
pub struct WriterPanicked {
    // The buffered data at the time of the panic. Per `into_inner`'s docs,
    // some of it may already have reached the underlying writer.
    buf: Vec<u8>,
}
impl WriterPanicked {
    /// Returns the perhaps-unwritten data. Some of this data may have been written by the
    /// panicking call(s) to the underlying writer, so simply writing it again is not a good idea.
    #[must_use = "`self` will be dropped if the result is not used"]
    #[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
    pub fn into_inner(self) -> Vec<u8> {
        // Destructure rather than field-access; same move, made explicit.
        let Self { buf } = self;
        buf
    }
}
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
// `WriterPanicked` has no underlying source error, so the default
// `Error` trait methods are sufficient.
impl error::Error for WriterPanicked {}
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
impl fmt::Display for WriterPanicked {
    /// A fixed, human-readable description; the buffered bytes themselves are
    /// only exposed through `into_inner`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "BufWriter inner writer panicked, what data remains unwritten is not known")
    }
}
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
impl fmt::Debug for WriterPanicked {
    /// Shows `len/capacity` of the recovered buffer rather than its contents.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut out = f.debug_struct("WriterPanicked");
        out.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()));
        out.finish()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: ?Sized + Write> Write for BufWriter<W> {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Use < instead of <= to avoid a needless trip through the buffer in some cases.
        // See `write_cold` for details.
        if buf.len() < self.spare_capacity() {
            // SAFETY: safe by above conditional.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(buf.len())
        } else {
            self.write_cold(buf)
        }
    }

    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        // Use < instead of <= to avoid a needless trip through the buffer in some cases.
        // See `write_all_cold` for details.
        if buf.len() < self.spare_capacity() {
            // SAFETY: safe by above conditional.
            unsafe {
                self.write_to_buffer_unchecked(buf);
            }

            Ok(())
        } else {
            self.write_all_cold(buf)
        }
    }

    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        // FIXME: Consider applying `#[inline]` / `#[inline(never)]` optimizations already applied
        // to `write` and `write_all`. The performance benefits can be significant. See #79930.
        if self.get_ref().is_write_vectored() {
            // We have to handle the possibility that the total length of the buffers overflows
            // `usize` (even though this can only happen if multiple `IoSlice`s reference the
            // same underlying buffer, as otherwise the buffers wouldn't fit in memory). If the
            // computation overflows, then surely the input cannot fit in our buffer, so we forward
            // to the inner writer's `write_vectored` method to let it handle it appropriately.
            let mut saturated_total_len: usize = 0;

            for buf in bufs {
                saturated_total_len = saturated_total_len.saturating_add(buf.len());

                if saturated_total_len > self.spare_capacity() && !self.buf.is_empty() {
                    // Flush if the total length of the input exceeds our buffer's spare capacity.
                    // If we would have overflowed, this condition also holds, and we need to flush.
                    self.flush_buf()?;
                }

                if saturated_total_len >= self.buf.capacity() {
                    // Forward to our inner writer if the total length of the input is greater than or
                    // equal to our buffer capacity. If we would have overflowed, this condition also
                    // holds, and we punt to the inner writer.
                    self.panicked = true;
                    let r = self.get_mut().write_vectored(bufs);
                    self.panicked = false;
                    return r;
                }
            }

            // `saturated_total_len < self.buf.capacity()` implies that we did not saturate.

            // SAFETY: We checked whether or not the spare capacity was large enough above. If
            // it was, then we're safe already. If it wasn't, we flushed, making sufficient
            // room for any input <= the buffer size, which includes this input.
            unsafe {
                bufs.iter().for_each(|b| self.write_to_buffer_unchecked(b));
            };

            Ok(saturated_total_len)
        } else {
            // Inner writer has no real vectored support: buffer as many whole
            // slices as fit, writing at most once to the inner writer.
            let mut iter = bufs.iter();
            let mut total_written = if let Some(buf) = iter.by_ref().find(|&buf| !buf.is_empty()) {
                // This is the first non-empty slice to write, so if it does
                // not fit in the buffer, we still get to flush and proceed.
                if buf.len() > self.spare_capacity() {
                    self.flush_buf()?;
                }
                if buf.len() >= self.buf.capacity() {
                    // The slice is at least as large as the buffering capacity,
                    // so it's better to write it directly, bypassing the buffer.
                    self.panicked = true;
                    let r = self.get_mut().write(buf);
                    self.panicked = false;
                    return r;
                } else {
                    // SAFETY: We checked whether or not the spare capacity was large enough above.
                    // If it was, then we're safe already. If it wasn't, we flushed, making
                    // sufficient room for any input <= the buffer size, which includes this input.
                    unsafe {
                        self.write_to_buffer_unchecked(buf);
                    }

                    buf.len()
                }
            } else {
                return Ok(0);
            };
            debug_assert!(total_written != 0);
            for buf in iter {
                if buf.len() <= self.spare_capacity() {
                    // SAFETY: safe by above conditional.
                    unsafe {
                        self.write_to_buffer_unchecked(buf);
                    }

                    // This cannot overflow `usize`. If we are here, we've written all of the bytes
                    // so far to our buffer, and we've ensured that we never exceed the buffer's
                    // capacity. Therefore, `total_written` <= `self.buf.capacity()` <= `usize::MAX`.
                    total_written += buf.len();
                } else {
                    break;
                }
            }
            Ok(total_written)
        }
    }

    // The buffering above always accepts complete slices, so vectored writes
    // are efficiently supported regardless of the inner writer.
    fn is_write_vectored(&self) -> bool {
        true
    }

    // Drain our buffer first, then ask the inner writer to flush itself.
    fn flush(&mut self) -> io::Result<()> {
        self.flush_buf().and_then(|()| self.get_mut().flush())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: ?Sized + Write> fmt::Debug for BufWriter<W>
where
    W: fmt::Debug,
{
    /// Debug-prints the inner writer plus `len/capacity` of the buffer
    /// (contents are omitted on purpose).
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut out = fmt.debug_struct("BufWriter");
        out.field("writer", &&self.inner);
        out.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()));
        out.finish()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: ?Sized + Write + Seek> Seek for BufWriter<W> {
    /// Seek to the offset, in bytes, in the underlying writer.
    ///
    /// Seeking always writes out the internal buffer before seeking.
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        // The buffered bytes logically precede the seek target, so they must
        // reach the inner writer before the position moves.
        self.flush_buf().and_then(|()| self.get_mut().seek(pos))
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: ?Sized + Write> Drop for BufWriter<W> {
    fn drop(&mut self) {
        if self.panicked {
            // The inner writer panicked mid-write (#30888); skip the flush so
            // the buffered data is not written a second time.
            return;
        }
        // dtors should not panic, so we ignore a failed flush
        let _r = self.flush_buf();
    }
}

View File

@@ -1,235 +0,0 @@
use crate::fmt;
use crate::io::buffered::LineWriterShim;
use crate::io::{self, BufWriter, IntoInnerError, IoSlice, Write};
/// Wraps a writer and buffers output to it, flushing whenever a newline
/// (`0x0a`, `'\n'`) is detected.
///
/// The [`BufWriter`] struct wraps a writer and buffers its output.
/// But it only does this batched write when it goes out of scope, or when the
/// internal buffer is full. Sometimes, you'd prefer to write each line as it's
/// completed, rather than the entire buffer at once. Enter `LineWriter`. It
/// does exactly that.
///
/// Like [`BufWriter`], a `LineWriter`s buffer will also be flushed when the
/// `LineWriter` goes out of scope or when its internal buffer is full.
///
/// If there's still a partial line in the buffer when the `LineWriter` is
/// dropped, it will flush those contents.
///
/// # Examples
///
/// We can use `LineWriter` to write one line at a time, significantly
/// reducing the number of actual writes to the file.
///
/// ```no_run
/// use std::fs::{self, File};
/// use std::io::prelude::*;
/// use std::io::LineWriter;
///
/// fn main() -> std::io::Result<()> {
/// let road_not_taken = b"I shall be telling this with a sigh
/// Somewhere ages and ages hence:
/// Two roads diverged in a wood, and I -
/// I took the one less traveled by,
/// And that has made all the difference.";
///
/// let file = File::create("poem.txt")?;
/// let mut file = LineWriter::new(file);
///
/// file.write_all(b"I shall be telling this with a sigh")?;
///
/// // No bytes are written until a newline is encountered (or
/// // the internal buffer is filled).
/// assert_eq!(fs::read_to_string("poem.txt")?, "");
/// file.write_all(b"\n")?;
/// assert_eq!(
/// fs::read_to_string("poem.txt")?,
/// "I shall be telling this with a sigh\n",
/// );
///
/// // Write the rest of the poem.
/// file.write_all(b"Somewhere ages and ages hence:
/// Two roads diverged in a wood, and I -
/// I took the one less traveled by,
/// And that has made all the difference.")?;
///
/// // The last line of the poem doesn't end in a newline, so
/// // we have to flush or drop the `LineWriter` to finish
/// // writing.
/// file.flush()?;
///
/// // Confirm the whole poem was written.
/// assert_eq!(fs::read("poem.txt")?, &road_not_taken[..]);
/// Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct LineWriter<W: ?Sized + Write> {
    // All buffering is delegated to a `BufWriter`; the flush-on-newline
    // policy is layered on top via `LineWriterShim` in the `Write` impl.
    inner: BufWriter<W>,
}
impl<W: Write> LineWriter<W> {
    /// Creates a new `LineWriter`.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::LineWriter;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let file = File::create("poem.txt")?;
    ///     let file = LineWriter::new(file);
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(inner: W) -> LineWriter<W> {
        // Lines typically aren't that long, don't use a giant buffer
        LineWriter::with_capacity(1024, inner)
    }

    /// Creates a new `LineWriter` with at least the specified capacity for the
    /// internal buffer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::LineWriter;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let file = File::create("poem.txt")?;
    ///     let file = LineWriter::with_capacity(100, file);
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> {
        LineWriter { inner: BufWriter::with_capacity(capacity, inner) }
    }

    /// Gets a mutable reference to the underlying writer.
    ///
    /// Caution must be taken when calling methods on the mutable reference
    /// returned as extra writes could corrupt the output stream.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::LineWriter;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let file = File::create("poem.txt")?;
    ///     let mut file = LineWriter::new(file);
    ///
    ///     // we can use reference just like file
    ///     let reference = file.get_mut();
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut(&mut self) -> &mut W {
        self.inner.get_mut()
    }

    /// Unwraps this `LineWriter`, returning the underlying writer.
    ///
    /// The internal buffer is written out before returning the writer.
    ///
    /// # Errors
    ///
    /// An [`Err`] will be returned if an error occurs while flushing the buffer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::LineWriter;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let file = File::create("poem.txt")?;
    ///
    ///     let writer: LineWriter<File> = LineWriter::new(file);
    ///
    ///     let file: File = writer.into_inner()?;
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> {
        // Re-wrap a failed BufWriter unwrap so the error's payload is a
        // LineWriter, matching this method's signature.
        self.inner.into_inner().map_err(|err| err.new_wrapped(|inner| LineWriter { inner }))
    }
}
// Separate impl block: unlike the constructors, `get_ref` does not need
// `W: Sized`, so it is offered under the looser `W: ?Sized` bound.
impl<W: ?Sized + Write> LineWriter<W> {
    /// Gets a reference to the underlying writer.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::LineWriter;
    ///
    /// fn main() -> std::io::Result<()> {
    ///     let file = File::create("poem.txt")?;
    ///     let file = LineWriter::new(file);
    ///
    ///     let reference = file.get_ref();
    ///     Ok(())
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_ref(&self) -> &W {
        self.inner.get_ref()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: ?Sized + Write> Write for LineWriter<W> {
    // Every write-path method is routed through a fresh `LineWriterShim`,
    // which layers the flush-on-newline policy over the inner `BufWriter`.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        LineWriterShim::new(&mut self.inner).write(buf)
    }

    // Flushing needs no line logic, so it goes straight to the BufWriter.
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }

    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        LineWriterShim::new(&mut self.inner).write_vectored(bufs)
    }

    fn is_write_vectored(&self) -> bool {
        self.inner.is_write_vectored()
    }

    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        LineWriterShim::new(&mut self.inner).write_all(buf)
    }

    fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
        LineWriterShim::new(&mut self.inner).write_all_vectored(bufs)
    }

    fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
        LineWriterShim::new(&mut self.inner).write_fmt(fmt)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: ?Sized + Write> fmt::Debug for LineWriter<W>
where
    W: fmt::Debug,
{
    /// Shows the wrapped writer plus `len/capacity` of the internal buffer;
    /// non-exhaustive because the buffer contents are withheld.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut out = fmt.debug_struct("LineWriter");
        out.field("writer", &self.get_ref());
        out.field(
            "buffer",
            &format_args!("{}/{}", self.inner.buffer().len(), self.inner.capacity()),
        );
        out.finish_non_exhaustive()
    }
}

View File

@@ -1,297 +0,0 @@
use core::slice::memchr;
use crate::io::{self, BufWriter, IoSlice, Write};
/// Private helper struct for implementing the line-buffered writing logic.
///
/// This shim temporarily wraps a BufWriter, and uses its internals to
/// implement a line-buffered writer (specifically by using the internal
/// methods like write_to_buf and flush_buf). In this way, a more
/// efficient abstraction can be created than one that only had access to
/// `write` and `flush`, without needlessly duplicating a lot of the
/// implementation details of BufWriter. This also allows existing
/// `BufWriters` to be temporarily given line-buffering logic; this is what
/// enables Stdout to be alternately in line-buffered or block-buffered mode.
#[derive(Debug)]
pub struct LineWriterShim<'a, W: ?Sized + Write> {
    // The wrapped BufWriter; its internal helpers (`flush_buf`,
    // `write_to_buf`, `buffer`) do the heavy lifting for the shim.
    buffer: &'a mut BufWriter<W>,
}
impl<'a, W: ?Sized + Write> LineWriterShim<'a, W> {
    /// Wraps the given `BufWriter` with the line-buffering policy.
    pub fn new(buffer: &'a mut BufWriter<W>) -> Self {
        Self { buffer }
    }
    /// Borrows the writer wrapped by the `BufWriter`.
    fn inner(&self) -> &W {
        self.buffer.get_ref()
    }
    /// Mutably borrows the wrapped writer. Writes issued through this
    /// reference bypass the `BufWriter`'s buffer entirely.
    fn inner_mut(&mut self) -> &mut W {
        self.buffer.get_mut()
    }
    /// Returns the bytes currently sitting in the `BufWriter`'s buffer.
    fn buffered(&self) -> &[u8] {
        self.buffer.buffer()
    }
    /// Flushes the buffer only when its final byte is a newline — i.e. an
    /// earlier write succeeded partially, and the completed line should be
    /// retried before buffering a subsequent write.
    fn flush_if_completed_line(&mut self) -> io::Result<()> {
        if let Some(b'\n') = self.buffered().last().copied() {
            self.buffer.flush_buf()
        } else {
            Ok(())
        }
    }
}
impl<'a, W: ?Sized + Write> Write for LineWriterShim<'a, W> {
    /// Writes some data into this BufWriter with line buffering.
    ///
    /// This means that, if any newlines are present in the data, the data up to
    /// the last newline is sent directly to the underlying writer, and data
    /// after it is buffered. Returns the number of bytes written.
    ///
    /// This function operates on a "best effort basis"; in keeping with the
    /// convention of `Write::write`, it makes at most one attempt to write
    /// new data to the underlying writer. If that write only reports a partial
    /// success, the remaining data will be buffered.
    ///
    /// Because this function attempts to send completed lines to the underlying
    /// writer, it will also flush the existing buffer if it ends with a
    /// newline, even if the incoming data does not contain any newlines.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let newline_idx = match memchr::memrchr(b'\n', buf) {
            // If there are no new newlines (that is, if this write is less than
            // one line), just do a regular buffered write (which may flush if
            // we exceed the inner buffer's size)
            None => {
                self.flush_if_completed_line()?;
                return self.buffer.write(buf);
            }
            // Otherwise, arrange for the lines to be written directly to the
            // inner writer.
            Some(newline_idx) => newline_idx + 1,
        };
        // Flush existing content to prepare for our write. We have to do this
        // before attempting to write `buf` in order to maintain consistency;
        // if we add `buf` to the buffer then try to flush it all at once,
        // we're obligated to return Ok(), which would mean suppressing any
        // errors that occur during flush.
        self.buffer.flush_buf()?;
        // This is what we're going to try to write directly to the inner
        // writer. The rest will be buffered, if nothing goes wrong.
        let lines = &buf[..newline_idx];
        // Write `lines` directly to the inner writer. In keeping with the
        // `write` convention, make at most one attempt to add new (unbuffered)
        // data. Because this write doesn't touch the BufWriter state directly,
        // and the buffer is known to be empty, we don't need to worry about
        // self.buffer.panicked here.
        let flushed = self.inner_mut().write(lines)?;
        // If buffer returns Ok(0), propagate that to the caller without
        // doing additional buffering; otherwise we're just guaranteeing
        // an "ErrorKind::WriteZero" later.
        if flushed == 0 {
            return Ok(0);
        }
        // Now that the write has succeeded, buffer the rest (or as much of
        // the rest as possible). If there were any unwritten newlines, we
        // only buffer out to the last unwritten newline that fits in the
        // buffer; this helps prevent flushing partial lines on subsequent
        // calls to LineWriterShim::write.
        // Handle the cases in order of most-common to least-common, under
        // the presumption that most writes succeed in totality, and that most
        // writes are smaller than the buffer.
        // - Is this a partial line (ie, no newlines left in the unwritten tail)
        // - If not, does the data out to the last unwritten newline fit in
        // the buffer?
        // - If not, scan for the last newline that *does* fit in the buffer
        let tail = if flushed >= newline_idx {
            let tail = &buf[flushed..];
            // Avoid unnecessary short writes by not splitting the remaining
            // bytes if they're larger than the buffer.
            // They can be written in full by the next call to write.
            if tail.len() >= self.buffer.capacity() {
                return Ok(flushed);
            }
            tail
        } else if newline_idx - flushed <= self.buffer.capacity() {
            &buf[flushed..newline_idx]
        } else {
            let scan_area = &buf[flushed..];
            let scan_area = &scan_area[..self.buffer.capacity()];
            match memchr::memrchr(b'\n', scan_area) {
                Some(newline_idx) => &scan_area[..newline_idx + 1],
                None => scan_area,
            }
        };
        let buffered = self.buffer.write_to_buf(tail);
        Ok(flushed + buffered)
    }
    /// Delegates flushing to the wrapped `BufWriter`.
    fn flush(&mut self) -> io::Result<()> {
        self.buffer.flush()
    }
    /// Writes some vectored data into this BufWriter with line buffering.
    ///
    /// This means that, if any newlines are present in the data, the data up to
    /// and including the buffer containing the last newline is sent directly to
    /// the inner writer, and the data after it is buffered. Returns the number
    /// of bytes written.
    ///
    /// This function operates on a "best effort basis"; in keeping with the
    /// convention of `Write::write`, it makes at most one attempt to write
    /// new data to the underlying writer.
    ///
    /// Because this function attempts to send completed lines to the underlying
    /// writer, it will also flush the existing buffer if it contains any
    /// newlines.
    ///
    /// Because sorting through an array of `IoSlice` can be a bit convoluted,
    /// This method differs from write in the following ways:
    ///
    /// - It attempts to write the full content of all the buffers up to and
    /// including the one containing the last newline. This means that it
    /// may attempt to write a partial line, if that buffer has data past the
    /// newline.
    /// - If the write only reports partial success, it does not attempt to
    /// find the precise location of the written bytes and buffer the rest.
    ///
    /// If the underlying vector doesn't support vectored writing, we instead
    /// simply write the first non-empty buffer with `write`. This way, we
    /// get the benefits of more granular partial-line handling without losing
    /// anything in efficiency
    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        // If there's no specialized behavior for write_vectored, just use
        // write. This has the benefit of more granular partial-line handling.
        if !self.is_write_vectored() {
            return match bufs.iter().find(|buf| !buf.is_empty()) {
                Some(buf) => self.write(buf),
                None => Ok(0),
            };
        }
        // Find the buffer containing the last newline
        // FIXME: This is overly slow if there are very many bufs and none contain
        // newlines. e.g. writev() on Linux only writes up to 1024 slices, so
        // scanning the rest is wasted effort. This makes write_all_vectored()
        // quadratic.
        let last_newline_buf_idx = bufs
            .iter()
            .enumerate()
            .rev()
            .find_map(|(i, buf)| memchr::memchr(b'\n', buf).map(|_| i));
        // If there are no new newlines (that is, if this write is less than
        // one line), just do a regular buffered write
        let last_newline_buf_idx = match last_newline_buf_idx {
            // No newlines; just do a normal buffered write
            None => {
                self.flush_if_completed_line()?;
                return self.buffer.write_vectored(bufs);
            }
            Some(i) => i,
        };
        // Flush existing content to prepare for our write
        self.buffer.flush_buf()?;
        // This is what we're going to try to write directly to the inner
        // writer. The rest will be buffered, if nothing goes wrong.
        let (lines, tail) = bufs.split_at(last_newline_buf_idx + 1);
        // Write `lines` directly to the inner writer. In keeping with the
        // `write` convention, make at most one attempt to add new (unbuffered)
        // data. Because this write doesn't touch the BufWriter state directly,
        // and the buffer is known to be empty, we don't need to worry about
        // self.panicked here.
        let flushed = self.inner_mut().write_vectored(lines)?;
        // If inner returns Ok(0), propagate that to the caller without
        // doing additional buffering; otherwise we're just guaranteeing
        // an "ErrorKind::WriteZero" later.
        if flushed == 0 {
            return Ok(0);
        }
        // Don't try to reconstruct the exact amount written; just bail
        // in the event of a partial write
        let mut lines_len: usize = 0;
        for buf in lines {
            // With overlapping/duplicate slices the total length may in theory
            // exceed usize::MAX
            lines_len = lines_len.saturating_add(buf.len());
            if flushed < lines_len {
                return Ok(flushed);
            }
        }
        // Now that the write has succeeded, buffer the rest (or as much of the
        // rest as possible)
        let buffered: usize = tail
            .iter()
            .filter(|buf| !buf.is_empty())
            .map(|buf| self.buffer.write_to_buf(buf))
            .take_while(|&n| n > 0)
            .sum();
        Ok(flushed + buffered)
    }
    /// Mirrors the underlying (inner-most) writer's vectored-write support.
    fn is_write_vectored(&self) -> bool {
        self.inner().is_write_vectored()
    }
    /// Writes some data into this BufWriter with line buffering.
    ///
    /// This means that, if any newlines are present in the data, the data up to
    /// the last newline is sent directly to the underlying writer, and data
    /// after it is buffered.
    ///
    /// Because this function attempts to send completed lines to the underlying
    /// writer, it will also flush the existing buffer if it contains any
    /// newlines, even if the incoming data does not contain any newlines.
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        match memchr::memrchr(b'\n', buf) {
            // If there are no new newlines (that is, if this write is less than
            // one line), just do a regular buffered write (which may flush if
            // we exceed the inner buffer's size)
            None => {
                self.flush_if_completed_line()?;
                self.buffer.write_all(buf)
            }
            Some(newline_idx) => {
                let (lines, tail) = buf.split_at(newline_idx + 1);
                if self.buffered().is_empty() {
                    self.inner_mut().write_all(lines)?;
                } else {
                    // If there is any buffered data, we add the incoming lines
                    // to that buffer before flushing, which saves us at least
                    // one write call. We can't really do this with `write`,
                    // since we can't do this *and* not suppress errors *and*
                    // report a consistent state to the caller in a return
                    // value, but here in write_all it's fine.
                    self.buffer.write_all(lines)?;
                    self.buffer.flush_buf()?;
                }
                self.buffer.write_all(tail)
            }
        }
    }
}

View File

@@ -1,189 +0,0 @@
//! Buffering wrappers for I/O traits
mod bufreader;
mod bufwriter;
mod linewriter;
mod linewritershim;
#[cfg(test)]
mod tests;
#[stable(feature = "bufwriter_into_parts", since = "1.56.0")]
pub use bufwriter::WriterPanicked;
use linewritershim::LineWriterShim;
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::{bufreader::BufReader, bufwriter::BufWriter, linewriter::LineWriter};
use crate::io::Error;
use crate::{error, fmt};
/// An error returned by [`BufWriter::into_inner`] which combines an error that
/// happened while writing out the buffer, and the buffered writer object
/// which may be used to recover from the condition.
///
/// # Examples
///
/// ```no_run
/// use std::io::BufWriter;
/// use std::net::TcpStream;
///
/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
///
/// // do stuff with the stream
///
/// // we want to get our `TcpStream` back, so let's try:
///
/// let stream = match stream.into_inner() {
///     Ok(s) => s,
///     Err(e) => {
///         // Here, e is an IntoInnerError
///         panic!("An error occurred");
///     }
/// };
/// ```
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
// Tuple fields: `.0` is the buffered writer (kept so the caller can recover
// it), `.1` is the I/O error produced while flushing its buffer.
pub struct IntoInnerError<W>(W, Error);
impl<W> IntoInnerError<W> {
/// Constructs a new IntoInnerError
fn new(writer: W, error: Error) -> Self {
Self(writer, error)
}
/// Helper to construct a new IntoInnerError; intended to help with
/// adapters that wrap other adapters
fn new_wrapped<W2>(self, f: impl FnOnce(W) -> W2) -> IntoInnerError<W2> {
let Self(writer, error) = self;
IntoInnerError::new(f(writer), error)
}
/// Returns the error which caused the call to [`BufWriter::into_inner()`]
/// to fail.
///
/// This error was returned when attempting to write the internal buffer.
///
/// # Examples
///
/// ```no_run
/// use std::io::BufWriter;
/// use std::net::TcpStream;
///
/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
///
/// // do stuff with the stream
///
/// // we want to get our `TcpStream` back, so let's try:
///
/// let stream = match stream.into_inner() {
/// Ok(s) => s,
/// Err(e) => {
/// // Here, e is an IntoInnerError, let's log the inner error.
/// //
/// // We'll just 'log' to stdout for this example.
/// println!("{}", e.error());
///
/// panic!("An unexpected error occurred.");
/// }
/// };
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn error(&self) -> &Error {
&self.1
}
/// Returns the buffered writer instance which generated the error.
///
/// The returned object can be used for error recovery, such as
/// re-inspecting the buffer.
///
/// # Examples
///
/// ```no_run
/// use std::io::BufWriter;
/// use std::net::TcpStream;
///
/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
///
/// // do stuff with the stream
///
/// // we want to get our `TcpStream` back, so let's try:
///
/// let stream = match stream.into_inner() {
/// Ok(s) => s,
/// Err(e) => {
/// // Here, e is an IntoInnerError, let's re-examine the buffer:
/// let buffer = e.into_inner();
///
/// // do stuff to try to recover
///
/// // afterwards, let's just return the stream
/// buffer.into_inner().unwrap()
/// }
/// };
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_inner(self) -> W {
self.0
}
/// Consumes the [`IntoInnerError`] and returns the error which caused the call to
/// [`BufWriter::into_inner()`] to fail. Unlike `error`, this can be used to
/// obtain ownership of the underlying error.
///
/// # Example
/// ```
/// use std::io::{BufWriter, ErrorKind, Write};
///
/// let mut not_enough_space = [0u8; 10];
/// let mut stream = BufWriter::new(not_enough_space.as_mut());
/// write!(stream, "this cannot be actually written").unwrap();
/// let into_inner_err = stream.into_inner().expect_err("now we discover it's too small");
/// let err = into_inner_err.into_error();
/// assert_eq!(err.kind(), ErrorKind::WriteZero);
/// ```
#[stable(feature = "io_into_inner_error_parts", since = "1.55.0")]
pub fn into_error(self) -> Error {
self.1
}
/// Consumes the [`IntoInnerError`] and returns the error which caused the call to
/// [`BufWriter::into_inner()`] to fail, and the underlying writer.
///
/// This can be used to simply obtain ownership of the underlying error; it can also be used for
/// advanced error recovery.
///
/// # Example
/// ```
/// use std::io::{BufWriter, ErrorKind, Write};
///
/// let mut not_enough_space = [0u8; 10];
/// let mut stream = BufWriter::new(not_enough_space.as_mut());
/// write!(stream, "this cannot be actually written").unwrap();
/// let into_inner_err = stream.into_inner().expect_err("now we discover it's too small");
/// let (err, recovered_writer) = into_inner_err.into_parts();
/// assert_eq!(err.kind(), ErrorKind::WriteZero);
/// assert_eq!(recovered_writer.buffer(), b"t be actually written");
/// ```
#[stable(feature = "io_into_inner_error_parts", since = "1.55.0")]
pub fn into_parts(self) -> (Error, W) {
(self.1, self.0)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<W> From<IntoInnerError<W>> for Error {
fn from(iie: IntoInnerError<W>) -> Error {
iie.1
}
}
// Marker impl: `IntoInnerError` uses the trait's defaults (no `source`);
// reporting goes through the `Display`/`Debug` impls.
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<W> fmt::Display for IntoInnerError<W> {
    /// Delegates to the `Display` of the underlying I/O error.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self.error(), f)
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,297 +0,0 @@
use super::{BorrowedBuf, BufReader, BufWriter, DEFAULT_BUF_SIZE, Read, Result, Write};
use crate::alloc::Allocator;
use crate::cmp;
use alloc_crate::collections::VecDeque;
use crate::io::IoSlice;
use crate::mem::MaybeUninit;
use crate::sys::io::{CopyState, kernel_copy};
#[cfg(test)]
mod tests;
/// Copies the entire contents of a reader into a writer.
///
/// Data is read from `reader` and written to `writer` in a streaming fashion
/// until `reader` returns EOF. On success, the total number of bytes copied
/// from `reader` to `writer` is returned.
///
/// If you want to copy the contents of one file to another and you're
/// working with filesystem paths, see the [`fs::copy`] function.
///
/// [`fs::copy`]: crate::fs::copy
///
/// # Errors
///
/// Returns immediately with the first error produced by a call to [`read`] or
/// [`write`], except that every [`ErrorKind::Interrupted`] is handled here and
/// the underlying operation retried.
///
/// [`read`]: Read::read
/// [`write`]: Write::write
/// [`ErrorKind::Interrupted`]: crate::io::ErrorKind::Interrupted
///
/// # Examples
///
/// ```
/// use std::io;
///
/// fn main() -> io::Result<()> {
///     let mut reader: &[u8] = b"hello";
///     let mut writer: Vec<u8> = vec![];
///
///     io::copy(&mut reader, &mut writer)?;
///
///     assert_eq!(&b"hello"[..], &writer[..]);
///     Ok(())
/// }
/// ```
///
/// # Platform-specific behavior
///
/// On Linux (including Android), this function uses the `copy_file_range(2)`,
/// `sendfile(2)` or `splice(2)` syscalls to move data directly between file
/// descriptors if possible.
///
/// Note that platform-specific behavior [may change in the future][changes].
///
/// [changes]: crate::io#platform-specific-behavior
#[stable(feature = "rust1", since = "1.0.0")]
pub fn copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> Result<u64>
where
    R: Read,
    W: Write,
{
    // Try the OS-assisted fast path first; whatever it could not handle is
    // finished by the generic userspace loop, and the two counts are summed.
    match kernel_copy(reader, writer)? {
        CopyState::Ended(total) => Ok(total),
        CopyState::Fallback(copied_so_far) => {
            generic_copy(reader, writer).map(|rest| copied_so_far + rest)
        }
    }
}
/// The userspace read-write-loop implementation of `io::copy`, used when
/// OS-specific copy offloading is unavailable or not applicable.
fn generic_copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> Result<u64>
where
    R: Read,
    W: Write,
{
    // Pick whichever side advertises the larger usable buffer; the reader
    // side only wins when its buffer is at least DEFAULT_BUF_SIZE.
    let reader_buf = BufferedReaderSpec::buffer_size(reader);
    let writer_buf = BufferedWriterSpec::buffer_size(writer);
    if reader_buf >= DEFAULT_BUF_SIZE && reader_buf >= writer_buf {
        BufferedReaderSpec::copy_to(reader, writer)
    } else {
        BufferedWriterSpec::copy_from(writer, reader)
    }
}
/// Specialization of the read-write loop that reuses the internal
/// buffer of a BufReader. If there's no buffer then the writer side
/// should be used instead.
trait BufferedReaderSpec {
    // Size of the reader's reusable buffer (0 when there is none).
    fn buffer_size(&self) -> usize;
    // Copies the remaining contents of `self` into `to`, returning the
    // number of bytes copied.
    fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64>;
}
// Blanket fallback for arbitrary readers: report no reusable buffer, so
// `generic_copy` selects the writer-side strategy instead.
impl<T> BufferedReaderSpec for T
where
    Self: Read,
    T: ?Sized,
{
    #[inline]
    default fn buffer_size(&self) -> usize {
        0
    }
    // With a buffer_size of 0 this default body is never selected by
    // `generic_copy`; the specialized impls override it.
    default fn copy_to(&mut self, _to: &mut (impl Write + ?Sized)) -> Result<u64> {
        unreachable!("only called from specializations")
    }
}
impl BufferedReaderSpec for &[u8] {
    /// A byte slice is the whole source already, so always prefer this side
    /// regardless of how small it is.
    fn buffer_size(&self) -> usize {
        usize::MAX
    }
    /// Writes the entire slice out in one `write_all` and leaves `self`
    /// empty on success.
    fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64> {
        let total = self.len();
        to.write_all(self)?;
        *self = &self[total..];
        Ok(total as u64)
    }
}
impl<A: Allocator> BufferedReaderSpec for VecDeque<u8, A> {
    /// Like slices, the deque itself is the whole source; always prefer it.
    fn buffer_size(&self) -> usize {
        usize::MAX
    }
    /// Writes both halves of the ring buffer with one vectored call, then
    /// empties the deque.
    fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64> {
        let total = self.len();
        let (front, back) = self.as_slices();
        to.write_all_vectored(&mut [IoSlice::new(front), IoSlice::new(back)])?;
        self.clear();
        Ok(total as u64)
    }
}
impl<I> BufferedReaderSpec for BufReader<I>
where
    Self: Read,
    I: ?Sized,
{
    // The BufReader's internal buffer is what we reuse.
    fn buffer_size(&self) -> usize {
        self.capacity()
    }
    // Repeatedly refills the BufReader's buffer and hands the filled portion
    // to the writer, returning the total number of bytes copied.
    fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64> {
        let mut len = 0;
        loop {
            // Hack: this relies on `impl Read for BufReader` always calling fill_buf
            // if the buffer is empty, even for empty slices.
            // It can't be called directly here since specialization prevents us
            // from adding I: Read
            match self.read(&mut []) {
                Ok(_) => {}
                Err(e) if e.is_interrupted() => continue,
                Err(e) => return Err(e),
            }
            let buf = self.buffer();
            // Fix: reuse the slice we just fetched instead of calling
            // `self.buffer()` a second time; an empty buffer after a fill
            // attempt means EOF.
            if buf.is_empty() {
                return Ok(len);
            }
            // In case the writer side is a BufWriter then its write_all
            // implements an optimization that passes through large
            // buffers to the underlying writer. That code path is #[cold]
            // but we're still avoiding redundant memcopies when doing
            // a copy between buffered inputs and outputs.
            to.write_all(buf)?;
            len += buf.len() as u64;
            self.discard_buffer();
        }
    }
}
/// Specialization of the read-write loop that either uses a stack buffer
/// or reuses the internal buffer of a BufWriter
trait BufferedWriterSpec: Write {
    // Usable write-side buffer size, for strategy selection in generic_copy.
    fn buffer_size(&self) -> usize;
    // Copies everything from `reader` into `self`, returning the byte count.
    fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64>;
}
// Blanket fallback: an arbitrary writer exposes no reusable buffer, so a
// fixed-size stack buffer drives the copy loop.
impl<W: Write + ?Sized> BufferedWriterSpec for W {
    #[inline]
    default fn buffer_size(&self) -> usize {
        0
    }
    default fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64> {
        stack_buffer_copy(reader, self)
    }
}
impl<I: Write + ?Sized> BufferedWriterSpec for BufWriter<I> {
    fn buffer_size(&self) -> usize {
        self.capacity()
    }
    // Reads directly into the BufWriter's spare capacity when the buffer is
    // large enough; otherwise falls back to the stack-buffer loop.
    fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64> {
        if self.capacity() < DEFAULT_BUF_SIZE {
            return stack_buffer_copy(reader, self);
        }
        let mut len = 0;
        let mut init = 0;
        loop {
            let buf = self.buffer_mut();
            let mut read_buf: BorrowedBuf<'_> = buf.spare_capacity_mut().into();
            unsafe {
                // SAFETY: init is either 0 or the init_len from the previous iteration.
                read_buf.set_init(init);
            }
            if read_buf.capacity() >= DEFAULT_BUF_SIZE {
                let mut cursor = read_buf.unfilled();
                match reader.read_buf(cursor.reborrow()) {
                    Ok(()) => {
                        let bytes_read = cursor.written();
                        // A successful zero-byte read means EOF.
                        if bytes_read == 0 {
                            return Ok(len);
                        }
                        init = read_buf.init_len() - bytes_read;
                        len += bytes_read as u64;
                        // SAFETY: BorrowedBuf guarantees all of its filled bytes are init
                        unsafe { buf.set_len(buf.len() + bytes_read) };
                        // Read again if the buffer still has enough capacity, as BufWriter itself would do
                        // This will occur if the reader returns short reads
                    }
                    Err(ref e) if e.is_interrupted() => {}
                    Err(e) => return Err(e),
                }
            } else {
                // All the bytes that were already in the buffer are initialized,
                // treat them as such when the buffer is flushed.
                init += buf.len();
                self.flush_buf()?;
            }
        }
    }
}
impl BufferedWriterSpec for Vec<u8> {
    /// Reports the Vec's spare capacity, but never less than the default
    /// buffer size.
    fn buffer_size(&self) -> usize {
        let spare = self.capacity() - self.len();
        cmp::max(DEFAULT_BUF_SIZE, spare)
    }
    /// Reading straight to the end of the Vec is the optimal strategy here.
    fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64> {
        let bytes = reader.read_to_end(self)?;
        Ok(u64::try_from(bytes).expect("usize overflowed u64"))
    }
}
/// Fallback copy loop that shuttles bytes through a fixed-size stack buffer
/// of uninitialized memory.
fn stack_buffer_copy<R: Read + ?Sized, W: Write + ?Sized>(
    reader: &mut R,
    writer: &mut W,
) -> Result<u64> {
    let storage: &mut [_] = &mut [MaybeUninit::uninit(); DEFAULT_BUF_SIZE];
    let mut buf: BorrowedBuf<'_> = storage.into();
    let mut total = 0;
    loop {
        // Retry interrupted reads; propagate every other error.
        if let Err(e) = reader.read_buf(buf.unfilled()) {
            if e.is_interrupted() {
                continue;
            }
            return Err(e);
        }
        let filled = buf.filled();
        // A read that filled nothing means EOF.
        if filled.is_empty() {
            break;
        }
        total += filled.len() as u64;
        writer.write_all(filled)?;
        buf.clear();
    }
    Ok(total)
}

View File

@@ -1,147 +0,0 @@
use crate::cmp::{max, min};
use alloc_crate::collections::VecDeque;
use crate::io;
use crate::io::*;
#[test]
fn copy_copies() {
    // Small bounded source first...
    let mut src = repeat(0).take(4);
    let mut dst = sink();
    assert_eq!(copy(&mut src, &mut dst).unwrap(), 4);
    // ...then a large one through trait objects, defeating specialization.
    let mut src = repeat(0).take(1 << 17);
    assert_eq!(copy(&mut src as &mut dyn Read, &mut dst as &mut dyn Write).unwrap(), 1 << 17);
}
struct ShortReader {
    cap: usize,
    read_size: usize,
    observed_buffer: usize,
}
/// A reader that produces at most `read_size` bytes per call until `cap`
/// bytes have been handed out, recording the largest buffer it was offered.
impl Read for ShortReader {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        let chunk = self.cap.min(self.read_size).min(buf.len());
        self.cap -= chunk;
        self.observed_buffer = self.observed_buffer.max(buf.len());
        Ok(chunk)
    }
}
struct WriteObserver {
    observed_buffer: usize,
}
/// A writer that accepts every write in full while tracking the largest
/// buffer it was handed.
impl Write for WriteObserver {
    fn write(&mut self, buf: &[u8]) -> Result<usize> {
        self.observed_buffer = self.observed_buffer.max(buf.len());
        Ok(buf.len())
    }
    fn flush(&mut self) -> Result<()> {
        Ok(())
    }
}
#[test]
fn copy_specializes_bufwriter() {
    // A reader that trickles bytes, behind a BufWriter: copy should coalesce.
    let cap = 117 * 1024;
    let buf_sz = 16 * 1024;
    let mut src = ShortReader { cap, observed_buffer: 0, read_size: 1337 };
    let mut dst = BufWriter::with_capacity(buf_sz, WriteObserver { observed_buffer: 0 });
    assert_eq!(
        copy(&mut src, &mut dst).unwrap(),
        cap as u64,
        "expected the whole capacity to be copied"
    );
    assert_eq!(src.observed_buffer, buf_sz, "expected a large buffer to be provided to the reader");
    assert!(dst.get_mut().observed_buffer > DEFAULT_BUF_SIZE, "expected coalesced writes");
}
#[test]
fn copy_specializes_bufreader() {
    // First round-trip the data intact through a large BufReader.
    let mut src = vec![0; 768 * 1024];
    src[1] = 42;
    let mut reader = BufReader::with_capacity(256 * 1024, Cursor::new(&mut src));
    let mut collected = Vec::new();
    assert_eq!(crate::io::copy(&mut reader, &mut collected).unwrap(), src.len() as u64);
    assert_eq!(src.as_slice(), collected.as_slice());
    // Then verify the writer is handed the BufReader's whole internal buffer.
    let buf_sz = 71 * 1024;
    assert!(buf_sz > DEFAULT_BUF_SIZE, "test precondition");
    let mut reader = BufReader::with_capacity(buf_sz, Cursor::new(&mut src));
    let mut observer = WriteObserver { observed_buffer: 0 };
    assert_eq!(crate::io::copy(&mut reader, &mut observer).unwrap(), src.len() as u64);
    assert_eq!(
        observer.observed_buffer, buf_sz,
        "expected a large buffer to be provided to the writer"
    );
}
#[test]
fn copy_specializes_to_vec() {
    // Copying into a Vec should go through read_to_end with growing buffers.
    let cap = DEFAULT_BUF_SIZE * 10;
    let mut reader = ShortReader { cap, observed_buffer: 0, read_size: DEFAULT_BUF_SIZE };
    let mut collected = Vec::new();
    let copied = io::copy(&mut reader, &mut collected).unwrap();
    assert_eq!(cap as u64, copied);
    assert_eq!(collected.len() as u64, copied);
    assert!(
        reader.observed_buffer > DEFAULT_BUF_SIZE,
        "expected a large buffer to be provided to the reader, got {}",
        reader.observed_buffer
    );
}
#[test]
fn copy_specializes_from_vecdeque() {
    // Fill from both ends so the ring buffer wraps and as_slices yields two
    // non-empty halves.
    let mut deque = VecDeque::with_capacity(100 * 1024);
    for _ in 0..20 * 1024 {
        deque.push_front(0);
    }
    for _ in 0..20 * 1024 {
        deque.push_back(0);
    }
    let mut observer = WriteObserver { observed_buffer: 0 };
    assert_eq!(40 * 1024u64, io::copy(&mut deque, &mut observer).unwrap());
    // The vectored write hands over one 20 KiB half at most.
    assert_eq!(20 * 1024, observer.observed_buffer);
}
#[test]
fn copy_specializes_from_slice() {
    // A slice source should be written out in a single write_all call.
    let mut src = [1; 60 * 1024].as_slice();
    let mut observer = WriteObserver { observed_buffer: 0 };
    assert_eq!(60 * 1024u64, io::copy(&mut src, &mut observer).unwrap());
    assert_eq!(60 * 1024, observer.observed_buffer);
}
// Benchmarks are Unix-only: they stream /dev/zero into /dev/null.
#[cfg(unix)]
mod io_benches {
    use test::Bencher;
    use crate::fs::{File, OpenOptions};
    use crate::io::BufReader;
    use crate::io::prelude::*;
    #[bench]
    #[cfg_attr(target_os = "emscripten", ignore)] // no /dev
    fn bench_copy_buf_reader(b: &mut Bencher) {
        let mut file_in = File::open("/dev/zero").expect("opening /dev/zero failed");
        // use dyn to avoid specializations unrelated to readbuf
        let dyn_in = &mut file_in as &mut dyn Read;
        let mut reader = BufReader::with_capacity(256 * 1024, dyn_in.take(0));
        let mut writer =
            OpenOptions::new().write(true).open("/dev/null").expect("opening /dev/null failed");
        const BYTES: u64 = 1024 * 1024;
        b.bytes = BYTES;
        b.iter(|| {
            // Re-arm the Take limit before every iteration.
            reader.get_mut().set_limit(BYTES);
            crate::io::copy(&mut reader, &mut writer).unwrap()
        });
    }
}

View File

@@ -1,757 +0,0 @@
#[cfg(test)]
mod tests;
use crate::alloc::Allocator;
use crate::cmp;
use crate::io::prelude::*;
use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, SeekFrom};
/// A `Cursor` wraps an in-memory buffer and provides it with a
/// [`Seek`] implementation.
///
/// `Cursor`s are used with in-memory buffers, anything implementing
/// <code>[AsRef]<\[u8]></code>, to allow them to implement [`Read`] and/or [`Write`],
/// allowing these buffers to be used anywhere you might use a reader or writer
/// that does actual I/O.
///
/// The standard library implements some I/O traits on various types which
/// are commonly used as a buffer, like <code>Cursor<[Vec]\<u8>></code> and
/// <code>Cursor<[&\[u8\]][bytes]></code>.
///
/// # Examples
///
/// We may want to write bytes to a [`File`] in our production
/// code, but use an in-memory buffer in our tests. We can do this with
/// `Cursor`:
///
/// [bytes]: crate::slice "slice"
/// [`File`]: crate::fs::File
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::{self, SeekFrom};
/// use std::fs::File;
///
/// // a library function we've written
/// fn write_ten_bytes_at_end<W: Write + Seek>(mut writer: W) -> io::Result<()> {
///     writer.seek(SeekFrom::End(-10))?;
///
///     for i in 0..10 {
///         writer.write(&[i])?;
///     }
///
///     // all went well
///     Ok(())
/// }
///
/// # fn foo() -> io::Result<()> {
/// // Here's some code that uses this library function.
/// //
/// // We might want to use a BufReader here for efficiency, but let's
/// // keep this example focused.
/// let mut file = File::create("foo.txt")?;
/// // First, we need to allocate 10 bytes to be able to write into.
/// file.set_len(10)?;
///
/// write_ten_bytes_at_end(&mut file)?;
/// # Ok(())
/// # }
///
/// // now let's write a test
/// #[test]
/// fn test_writes_bytes() {
///     // setting up a real File is much slower than an in-memory buffer,
///     // let's use a cursor instead
///     use std::io::Cursor;
///     let mut buff = Cursor::new(vec![0; 15]);
///
///     write_ten_bytes_at_end(&mut buff).unwrap();
///
///     assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug, Default, Eq, PartialEq)]
pub struct Cursor<T> {
    // The wrapped in-memory buffer.
    inner: T,
    // Current byte offset. `set_position` stores any value unconditionally,
    // so this may point past the end of `inner`; accessors such as `split`
    // clamp it to the buffer's length on use.
    pos: u64,
}
impl<T> Cursor<T> {
/// Creates a new cursor wrapping the provided underlying in-memory buffer.
///
/// Cursor initial position is `0` even if underlying buffer (e.g., [`Vec`])
/// is not empty. So writing to cursor starts with overwriting [`Vec`]
/// content, not with appending to it.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_io_structs", since = "1.79.0")]
pub const fn new(inner: T) -> Cursor<T> {
Cursor { pos: 0, inner }
}
/// Consumes this cursor, returning the underlying value.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let vec = buff.into_inner();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying value in this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_ref();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_io_structs", since = "1.79.0")]
pub const fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying value in this cursor.
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying value as it may corrupt this cursor's position.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_mut();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_mut_cursor", since = "1.86.0")]
pub const fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Returns the current position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
/// use std::io::prelude::*;
/// use std::io::SeekFrom;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.seek(SeekFrom::Current(2)).unwrap();
/// assert_eq!(buff.position(), 2);
///
/// buff.seek(SeekFrom::Current(-1)).unwrap();
/// assert_eq!(buff.position(), 1);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_io_structs", since = "1.79.0")]
pub const fn position(&self) -> u64 {
    self.pos
}
/// Sets the position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.set_position(2);
/// assert_eq!(buff.position(), 2);
///
/// buff.set_position(4);
/// assert_eq!(buff.position(), 4);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_mut_cursor", since = "1.86.0")]
pub const fn set_position(&mut self, pos: u64) {
    // No clamping: a position past the end is allowed (reads there return
    // 0 bytes; the resizing `Write` impls zero-pad up to it).
    self.pos = pos;
}
}
impl<T> Cursor<T>
where
    T: AsRef<[u8]>,
{
    /// Splits the underlying slice at the cursor position and returns them.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(cursor_split)]
    /// use std::io::Cursor;
    ///
    /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
    ///
    /// assert_eq!(buff.split(), ([].as_slice(), [1, 2, 3, 4, 5].as_slice()));
    ///
    /// buff.set_position(2);
    /// assert_eq!(buff.split(), ([1, 2].as_slice(), [3, 4, 5].as_slice()));
    ///
    /// buff.set_position(6);
    /// assert_eq!(buff.split(), ([1, 2, 3, 4, 5].as_slice(), [].as_slice()));
    /// ```
    #[unstable(feature = "cursor_split", issue = "86369")]
    pub fn split(&self) -> (&[u8], &[u8]) {
        let slice = self.inner.as_ref();
        // Clamp to the slice length: `pos` may have been seeked/set past the
        // end, in which case the "after" half is simply empty.
        let pos = self.pos.min(slice.len() as u64);
        slice.split_at(pos as usize)
    }
}
impl<T> Cursor<T>
where
    T: AsMut<[u8]>,
{
    /// Splits the underlying slice at the cursor position and returns them
    /// mutably.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(cursor_split)]
    /// use std::io::Cursor;
    ///
    /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
    ///
    /// assert_eq!(buff.split_mut(), ([].as_mut_slice(), [1, 2, 3, 4, 5].as_mut_slice()));
    ///
    /// buff.set_position(2);
    /// assert_eq!(buff.split_mut(), ([1, 2].as_mut_slice(), [3, 4, 5].as_mut_slice()));
    ///
    /// buff.set_position(6);
    /// assert_eq!(buff.split_mut(), ([1, 2, 3, 4, 5].as_mut_slice(), [].as_mut_slice()));
    /// ```
    #[unstable(feature = "cursor_split", issue = "86369")]
    pub fn split_mut(&mut self) -> (&mut [u8], &mut [u8]) {
        let slice = self.inner.as_mut();
        // Same out-of-range clamping as `split`, but yielding mutable halves.
        let pos = self.pos.min(slice.len() as u64);
        slice.split_at_mut(pos as usize)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Cursor<T>
where
    T: Clone,
{
    #[inline]
    fn clone(&self) -> Self {
        Cursor { inner: self.inner.clone(), pos: self.pos }
    }
    #[inline]
    fn clone_from(&mut self, other: &Self) {
        // Delegate to `T::clone_from` so an existing allocation in `inner`
        // (e.g. a `Vec`'s buffer) can be reused instead of reallocated.
        self.inner.clone_from(&other.inner);
        self.pos = other.pos;
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> io::Seek for Cursor<T>
where
    T: AsRef<[u8]>,
{
    fn seek(&mut self, style: SeekFrom) -> io::Result<u64> {
        let (base_pos, offset) = match style {
            SeekFrom::Start(n) => {
                // Absolute positions are accepted as-is, even past the end
                // of the underlying buffer (reads there just return 0).
                self.pos = n;
                return Ok(n);
            }
            SeekFrom::End(n) => (self.inner.as_ref().len() as u64, n),
            SeekFrom::Current(n) => (self.pos, n),
        };
        // Relative seeks must not produce a negative or overflowing
        // position; `checked_add_signed` rejects both cases at once.
        match base_pos.checked_add_signed(offset) {
            Some(n) => {
                self.pos = n;
                Ok(self.pos)
            }
            None => Err(io::const_error!(
                ErrorKind::InvalidInput,
                "invalid seek to a negative or overflowing position",
            )),
        }
    }
    fn stream_len(&mut self) -> io::Result<u64> {
        // Length of the underlying buffer, independent of the current position.
        Ok(self.inner.as_ref().len() as u64)
    }
    fn stream_position(&mut self) -> io::Result<u64> {
        Ok(self.pos)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Read for Cursor<T>
where
    T: AsRef<[u8]>,
{
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Read from the not-yet-consumed tail (`split(self).1`) via the
        // `Read` impl for `&[u8]`, then advance our position accordingly.
        let n = Read::read(&mut Cursor::split(self).1, buf)?;
        self.pos += n as u64;
        Ok(n)
    }
    fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
        // Sample `written()` before and after so we advance only by the
        // bytes this particular call produced.
        let prev_written = cursor.written();
        Read::read_buf(&mut Cursor::split(self).1, cursor.reborrow())?;
        self.pos += (cursor.written() - prev_written) as u64;
        Ok(())
    }
    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        let mut nread = 0;
        for buf in bufs {
            let n = self.read(buf)?;
            nread += n;
            // A short read means the cursor is exhausted; later buffers
            // could only read 0 bytes, so stop early.
            if n < buf.len() {
                break;
            }
        }
        Ok(nread)
    }
    fn is_read_vectored(&self) -> bool {
        true
    }
    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        let result = Read::read_exact(&mut Cursor::split(self).1, buf);
        match result {
            Ok(_) => self.pos += buf.len() as u64,
            // The only possible error condition is EOF, so place the cursor at "EOF"
            Err(_) => self.pos = self.inner.as_ref().len() as u64,
        }
        result
    }
    fn read_buf_exact(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
        let prev_written = cursor.written();
        let result = Read::read_buf_exact(&mut Cursor::split(self).1, cursor.reborrow());
        // Advance by whatever was written, even on error: a partial fill
        // still consumed those bytes from this cursor.
        self.pos += (cursor.written() - prev_written) as u64;
        result
    }
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        // The remaining content is known up front, so reserve once
        // (fallibly) and copy it with a single `extend_from_slice`.
        let content = Cursor::split(self).1;
        let len = content.len();
        buf.try_reserve(len)?;
        buf.extend_from_slice(content);
        self.pos += len as u64;
        Ok(len)
    }
    fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
        // The remaining bytes must be valid UTF-8 as a whole; otherwise
        // fail without consuming anything.
        let content =
            crate::str::from_utf8(Cursor::split(self).1).map_err(|_| io::Error::INVALID_UTF8)?;
        let len = content.len();
        buf.try_reserve(len)?;
        buf.push_str(content);
        self.pos += len as u64;
        Ok(len)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> BufRead for Cursor<T>
where
    T: AsRef<[u8]>,
{
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        // The whole unread tail is the "buffer"; no actual buffering needed.
        Ok(Cursor::split(self).1)
    }
    fn consume(&mut self, amt: usize) {
        // Per the `BufRead` contract, `amt` is at most the length returned
        // by `fill_buf`, so a plain unchecked add is sufficient here.
        self.pos += amt as u64;
    }
}
// Non-resizing write implementation: copies into the fixed slice, never grows it.
#[inline]
fn slice_write(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<usize> {
    // Clamp the cursor to the slice length so an out-of-range position
    // behaves as if sitting exactly at the end (nothing gets written).
    let start = (*pos_mut).min(slice.len() as u64) as usize;
    // `Write for &mut [u8]` copies as many bytes as fit and reports the count.
    let written = (&mut slice[start..]).write(buf)?;
    *pos_mut += written as u64;
    Ok(written)
}
#[inline]
fn slice_write_vectored(
    pos_mut: &mut u64,
    slice: &mut [u8],
    bufs: &[IoSlice<'_>],
) -> io::Result<usize> {
    // Write each buffer in order; a short write means the destination is
    // full, so stop there and report the total written so far.
    let mut total = 0;
    for chunk in bufs {
        let written = slice_write(pos_mut, slice, chunk)?;
        total += written;
        if written < chunk.len() {
            break;
        }
    }
    Ok(total)
}
#[inline]
fn slice_write_all(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<()> {
let n = slice_write(pos_mut, slice, buf)?;
if n < buf.len() { Err(io::Error::WRITE_ALL_EOF) } else { Ok(()) }
}
#[inline]
fn slice_write_all_vectored(
    pos_mut: &mut u64,
    slice: &mut [u8],
    bufs: &[IoSlice<'_>],
) -> io::Result<()> {
    // Every buffer must be written in full; the first short write signals
    // that the destination slice ran out of room.
    for chunk in bufs {
        let written = slice_write(pos_mut, slice, chunk)?;
        if written < chunk.len() {
            return Err(io::Error::WRITE_ALL_EOF);
        }
    }
    Ok(())
}
/// Reserves the required space, and pads the vec with 0s if necessary.
///
/// On success returns the cursor position as a `usize`; the vec then has
/// capacity for at least `pos + buf_len` bytes and a length of at least `pos`.
fn reserve_and_pad<A: Allocator>(
    pos_mut: &mut u64,
    vec: &mut Vec<u8, A>,
    buf_len: usize,
) -> io::Result<usize> {
    // A u64 position may not fit in usize (e.g. on 32-bit targets); a write
    // at such a position can never succeed, so report it as invalid input.
    let pos: usize = (*pos_mut).try_into().map_err(|_| {
        io::const_error!(
            ErrorKind::InvalidInput,
            "cursor position exceeds maximum possible vector length",
        )
    })?;
    // For safety reasons, we don't want these numbers to overflow
    // otherwise our allocation won't be enough
    let desired_cap = pos.saturating_add(buf_len);
    if desired_cap > vec.capacity() {
        // We want our vec's total capacity
        // to have room for (pos+buf_len) bytes. Reserve allocates
        // based on additional elements from the length, so we need to
        // reserve the difference
        vec.reserve(desired_cap - vec.len());
    }
    // Pad if pos is above the current len.
    if pos > vec.len() {
        let diff = pos - vec.len();
        // Unfortunately, `resize()` would suffice but the optimiser does not
        // realise the `reserve` it does can be eliminated. So we do it manually
        // to eliminate that extra branch
        let spare = vec.spare_capacity_mut();
        debug_assert!(spare.len() >= diff);
        // Safety: we have allocated enough capacity for this.
        // And we are only writing, not reading
        unsafe {
            spare.get_unchecked_mut(..diff).fill(core::mem::MaybeUninit::new(0));
            vec.set_len(pos);
        }
    }
    Ok(pos)
}
/// Writes the slice to the vec without allocating.
///
/// Returns the position one past the last byte written (`pos + buf.len()`).
/// Note this copies into the vec's buffer directly and does NOT update the
/// vec's length; the caller is responsible for `set_len` if needed.
///
/// # Safety
///
/// `vec` must have `buf.len()` spare capacity.
unsafe fn vec_write_all_unchecked<A>(pos: usize, vec: &mut Vec<u8, A>, buf: &[u8]) -> usize
where
    A: Allocator,
{
    debug_assert!(vec.capacity() >= pos + buf.len());
    // SAFETY (caller contract): `pos + buf.len() <= vec.capacity()`, so the
    // destination range lies entirely within the vec's allocation.
    unsafe { vec.as_mut_ptr().add(pos).copy_from(buf.as_ptr(), buf.len()) };
    pos + buf.len()
}
/// Resizing `write_all` implementation for [`Cursor`].
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write_all<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
where
    A: Allocator,
{
    let buf_len = buf.len();
    // After this call the vec has capacity for pos + buf_len and len >= pos.
    let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
    // Write the buf then progress the vec forward if necessary
    // Safety: we have ensured that the capacity is available
    // and that all bytes get written up to pos
    unsafe {
        pos = vec_write_all_unchecked(pos, vec, buf);
        // Only grow the length if we wrote past the current end; writes
        // inside the existing body must not truncate it.
        if pos > vec.len() {
            vec.set_len(pos);
        }
    };
    // Bump us forward
    *pos_mut += buf_len as u64;
    Ok(buf_len)
}
/// Resizing `write_all_vectored` implementation for [`Cursor`].
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write_all_vectored<A>(
    pos_mut: &mut u64,
    vec: &mut Vec<u8, A>,
    bufs: &[IoSlice<'_>],
) -> io::Result<usize>
where
    A: Allocator,
{
    // For safety reasons, we don't want this sum to overflow ever.
    // If this saturates, the reserve should panic to avoid any unsound writing.
    let buf_len = bufs.iter().fold(0usize, |a, b| a.saturating_add(b.len()));
    let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
    // Write the buf then progress the vec forward if necessary
    // Safety: we have ensured that the capacity is available
    // and that all bytes get written up to the last pos
    unsafe {
        // Each call returns the next write offset, laying the buffers
        // out back-to-back within the reserved region.
        for buf in bufs {
            pos = vec_write_all_unchecked(pos, vec, buf);
        }
        if pos > vec.len() {
            vec.set_len(pos);
        }
    }
    // Bump us forward
    *pos_mut += buf_len as u64;
    Ok(buf_len)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for Cursor<&mut [u8]> {
    // Fixed-size destination: all methods delegate to the non-resizing
    // `slice_write*` helpers, so writes past the end are short/fail rather
    // than growing the buffer.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        slice_write(&mut self.pos, self.inner, buf)
    }
    #[inline]
    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        slice_write_vectored(&mut self.pos, self.inner, bufs)
    }
    #[inline]
    fn is_write_vectored(&self) -> bool {
        true
    }
    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        slice_write_all(&mut self.pos, self.inner, buf)
    }
    #[inline]
    fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
        slice_write_all_vectored(&mut self.pos, self.inner, bufs)
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        // In-memory destination: there is nothing to flush.
        Ok(())
    }
}
#[stable(feature = "cursor_mut_vec", since = "1.25.0")]
impl<A> Write for Cursor<&mut Vec<u8, A>>
where
    A: Allocator,
{
    // Resizing destination: delegates to `vec_write_all*`, which grow the
    // vec as needed and zero-pad any gap between its length and `pos`.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        vec_write_all(&mut self.pos, self.inner, buf)
    }
    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        vec_write_all_vectored(&mut self.pos, self.inner, bufs)
    }
    #[inline]
    fn is_write_vectored(&self) -> bool {
        true
    }
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        // `vec_write_all` always writes the full buffer on success, so the
        // byte count can simply be discarded here.
        vec_write_all(&mut self.pos, self.inner, buf)?;
        Ok(())
    }
    fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
        vec_write_all_vectored(&mut self.pos, self.inner, bufs)?;
        Ok(())
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Write for Cursor<Vec<u8, A>>
where
    A: Allocator,
{
    // Same resizing behaviour as `Cursor<&mut Vec<u8, A>>`, for an owned vec.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        vec_write_all(&mut self.pos, &mut self.inner, buf)
    }
    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        vec_write_all_vectored(&mut self.pos, &mut self.inner, bufs)
    }
    #[inline]
    fn is_write_vectored(&self) -> bool {
        true
    }
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        vec_write_all(&mut self.pos, &mut self.inner, buf)?;
        Ok(())
    }
    fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
        vec_write_all_vectored(&mut self.pos, &mut self.inner, bufs)?;
        Ok(())
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
#[stable(feature = "cursor_box_slice", since = "1.5.0")]
impl<A> Write for Cursor<Box<[u8], A>>
where
    A: Allocator,
{
    // A boxed slice has a fixed length, so this uses the non-resizing
    // `slice_write*` helpers, like `Cursor<&mut [u8]>`.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        slice_write(&mut self.pos, &mut self.inner, buf)
    }
    #[inline]
    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
    }
    #[inline]
    fn is_write_vectored(&self) -> bool {
        true
    }
    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        slice_write_all(&mut self.pos, &mut self.inner, buf)
    }
    #[inline]
    fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
        slice_write_all_vectored(&mut self.pos, &mut self.inner, bufs)
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
#[stable(feature = "cursor_array", since = "1.61.0")]
impl<const N: usize> Write for Cursor<[u8; N]> {
    // An inline array also has a fixed length, so the non-resizing
    // `slice_write*` helpers apply here as well.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        slice_write(&mut self.pos, &mut self.inner, buf)
    }
    #[inline]
    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
    }
    #[inline]
    fn is_write_vectored(&self) -> bool {
        true
    }
    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        slice_write_all(&mut self.pos, &mut self.inner, buf)
    }
    #[inline]
    fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
        slice_write_all_vectored(&mut self.pos, &mut self.inner, bufs)
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

View File

@@ -1,567 +0,0 @@
use crate::io::prelude::*;
use crate::io::{Cursor, IoSlice, IoSliceMut, SeekFrom};
// A bare `Vec<u8>` writer appends; writes can never fail or be short.
#[test]
fn test_vec_writer() {
    let mut writer = Vec::new();
    assert_eq!(writer.write(&[0]).unwrap(), 1);
    assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
    assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
    assert_eq!(
        writer
            .write_vectored(&[IoSlice::new(&[]), IoSlice::new(&[8, 9]), IoSlice::new(&[10])],)
            .unwrap(),
        3
    );
    let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    assert_eq!(writer, b);
}
// Writing through `Cursor<Vec<u8>>` after `set_position(10)` zero-pads
// the first ten bytes before the written data.
#[test]
fn test_mem_writer() {
    let mut writer = Cursor::new(Vec::new());
    writer.set_position(10);
    assert_eq!(writer.write(&[0]).unwrap(), 1);
    assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
    assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
    assert_eq!(
        writer
            .write_vectored(&[IoSlice::new(&[]), IoSlice::new(&[8, 9]), IoSlice::new(&[10])],)
            .unwrap(),
        3
    );
    let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    assert_eq!(&writer.get_ref()[..10], &[0; 10]);
    assert_eq!(&writer.get_ref()[10..], b);
}
// Writing into a pre-filled vec overwrites from position 0 instead of appending.
#[test]
fn test_mem_writer_preallocated() {
    let mut writer = Cursor::new(vec![0, 0, 0, 0, 0, 0, 0, 0, 8, 9, 10]);
    assert_eq!(writer.write(&[0]).unwrap(), 1);
    assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
    assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
    let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    assert_eq!(&writer.get_ref()[..], b);
}
// `Cursor<&mut Vec<u8>>` behaves like the owned-vec cursor.
#[test]
fn test_mem_mut_writer() {
    let mut vec = Vec::new();
    let mut writer = Cursor::new(&mut vec);
    assert_eq!(writer.write(&[0]).unwrap(), 1);
    assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
    assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
    assert_eq!(
        writer
            .write_vectored(&[IoSlice::new(&[]), IoSlice::new(&[8, 9]), IoSlice::new(&[10])],)
            .unwrap(),
        3
    );
    let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    assert_eq!(&writer.get_ref()[..], b);
}
// Shared driver for fixed-size (non-resizing) cursor writers: expects short
// writes once the 9-byte destination is full.
fn test_slice_writer<T>(writer: &mut Cursor<T>)
where
    T: AsRef<[u8]>,
    Cursor<T>: Write,
{
    assert_eq!(writer.position(), 0);
    assert_eq!(writer.write(&[0]).unwrap(), 1);
    assert_eq!(writer.position(), 1);
    assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
    assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
    assert_eq!(writer.position(), 8);
    assert_eq!(writer.write(&[]).unwrap(), 0);
    assert_eq!(writer.position(), 8);
    assert_eq!(writer.write(&[8, 9]).unwrap(), 1);
    assert_eq!(writer.write(&[10]).unwrap(), 0);
    let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8];
    assert_eq!(writer.get_ref().as_ref(), b);
}
// Vectored counterpart of `test_slice_writer`.
fn test_slice_writer_vectored<T>(writer: &mut Cursor<T>)
where
    T: AsRef<[u8]>,
    Cursor<T>: Write,
{
    assert_eq!(writer.position(), 0);
    assert_eq!(writer.write_vectored(&[IoSlice::new(&[0])]).unwrap(), 1);
    assert_eq!(writer.position(), 1);
    assert_eq!(
        writer.write_vectored(&[IoSlice::new(&[1, 2, 3]), IoSlice::new(&[4, 5, 6, 7]),]).unwrap(),
        7,
    );
    assert_eq!(writer.position(), 8);
    assert_eq!(writer.write_vectored(&[]).unwrap(), 0);
    assert_eq!(writer.position(), 8);
    assert_eq!(writer.write_vectored(&[IoSlice::new(&[8, 9])]).unwrap(), 1);
    assert_eq!(writer.write_vectored(&[IoSlice::new(&[10])]).unwrap(), 0);
    let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8];
    assert_eq!(writer.get_ref().as_ref(), b);
}
// The drivers above, run against each fixed-size cursor flavour.
#[test]
fn test_box_slice_writer() {
    let mut writer = Cursor::new(vec![0u8; 9].into_boxed_slice());
    test_slice_writer(&mut writer);
}
#[test]
fn test_box_slice_writer_vectored() {
    let mut writer = Cursor::new(vec![0u8; 9].into_boxed_slice());
    test_slice_writer_vectored(&mut writer);
}
#[test]
fn test_array_writer() {
    let mut writer = Cursor::new([0u8; 9]);
    test_slice_writer(&mut writer);
}
#[test]
fn test_array_writer_vectored() {
    let mut writer = Cursor::new([0u8; 9]);
    test_slice_writer_vectored(&mut writer);
}
#[test]
fn test_buf_writer() {
    let mut buf = [0 as u8; 9];
    let mut writer = Cursor::new(&mut buf[..]);
    test_slice_writer(&mut writer);
}
#[test]
fn test_buf_writer_vectored() {
    let mut buf = [0 as u8; 9];
    let mut writer = Cursor::new(&mut buf[..]);
    test_slice_writer_vectored(&mut writer);
}
// Seeking (Start/Current/End) repositions writes within a fixed slice.
#[test]
fn test_buf_writer_seek() {
    let mut buf = [0 as u8; 8];
    {
        let mut writer = Cursor::new(&mut buf[..]);
        assert_eq!(writer.position(), 0);
        assert_eq!(writer.write(&[1]).unwrap(), 1);
        assert_eq!(writer.position(), 1);
        assert_eq!(writer.seek(SeekFrom::Start(2)).unwrap(), 2);
        assert_eq!(writer.position(), 2);
        assert_eq!(writer.write(&[2]).unwrap(), 1);
        assert_eq!(writer.position(), 3);
        assert_eq!(writer.seek(SeekFrom::Current(-2)).unwrap(), 1);
        assert_eq!(writer.position(), 1);
        assert_eq!(writer.write(&[3]).unwrap(), 1);
        assert_eq!(writer.position(), 2);
        assert_eq!(writer.seek(SeekFrom::End(-1)).unwrap(), 7);
        assert_eq!(writer.position(), 7);
        assert_eq!(writer.write(&[4]).unwrap(), 1);
        assert_eq!(writer.position(), 8);
    }
    let b: &[_] = &[1, 3, 2, 0, 0, 0, 0, 4];
    assert_eq!(buf, b);
}
// Writes to a full fixed slice are short, then return 0 (never an error).
#[test]
fn test_buf_writer_error() {
    let mut buf = [0 as u8; 2];
    let mut writer = Cursor::new(&mut buf[..]);
    assert_eq!(writer.write(&[0]).unwrap(), 1);
    assert_eq!(writer.write(&[0, 0]).unwrap(), 1);
    assert_eq!(writer.write(&[0, 0]).unwrap(), 0);
}
// Sequential reads advance the position; the final read returns 0 at EOF.
#[test]
fn test_mem_reader() {
    let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7]);
    let mut buf = [];
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
    assert_eq!(reader.position(), 0);
    let mut buf = [0];
    assert_eq!(reader.read(&mut buf).unwrap(), 1);
    assert_eq!(reader.position(), 1);
    let b: &[_] = &[0];
    assert_eq!(buf, b);
    let mut buf = [0; 4];
    assert_eq!(reader.read(&mut buf).unwrap(), 4);
    assert_eq!(reader.position(), 5);
    let b: &[_] = &[1, 2, 3, 4];
    assert_eq!(buf, b);
    assert_eq!(reader.read(&mut buf).unwrap(), 3);
    let b: &[_] = &[5, 6, 7];
    assert_eq!(&buf[..3], b);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}
// Vectored reads fill multiple buffers in order within one call.
#[test]
fn test_mem_reader_vectored() {
    let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7]);
    let mut buf = [];
    assert_eq!(reader.read_vectored(&mut [IoSliceMut::new(&mut buf)]).unwrap(), 0);
    assert_eq!(reader.position(), 0);
    let mut buf = [0];
    assert_eq!(
        reader.read_vectored(&mut [IoSliceMut::new(&mut []), IoSliceMut::new(&mut buf),]).unwrap(),
        1,
    );
    assert_eq!(reader.position(), 1);
    let b: &[_] = &[0];
    assert_eq!(buf, b);
    let mut buf1 = [0; 4];
    let mut buf2 = [0; 4];
    assert_eq!(
        reader
            .read_vectored(&mut [IoSliceMut::new(&mut buf1), IoSliceMut::new(&mut buf2),])
            .unwrap(),
        7,
    );
    let b1: &[_] = &[1, 2, 3, 4];
    let b2: &[_] = &[5, 6, 7];
    assert_eq!(buf1, b1);
    assert_eq!(&buf2[..3], b2);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}
// Same as `test_mem_reader`, but through a boxed slice.
#[test]
fn test_boxed_slice_reader() {
    let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7].into_boxed_slice());
    let mut buf = [];
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
    assert_eq!(reader.position(), 0);
    let mut buf = [0];
    assert_eq!(reader.read(&mut buf).unwrap(), 1);
    assert_eq!(reader.position(), 1);
    let b: &[_] = &[0];
    assert_eq!(buf, b);
    let mut buf = [0; 4];
    assert_eq!(reader.read(&mut buf).unwrap(), 4);
    assert_eq!(reader.position(), 5);
    let b: &[_] = &[1, 2, 3, 4];
    assert_eq!(buf, b);
    assert_eq!(reader.read(&mut buf).unwrap(), 3);
    let b: &[_] = &[5, 6, 7];
    assert_eq!(&buf[..3], b);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}
// Same as `test_mem_reader_vectored`, but through a boxed slice.
#[test]
fn test_boxed_slice_reader_vectored() {
    let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7].into_boxed_slice());
    let mut buf = [];
    assert_eq!(reader.read_vectored(&mut [IoSliceMut::new(&mut buf)]).unwrap(), 0);
    assert_eq!(reader.position(), 0);
    let mut buf = [0];
    assert_eq!(
        reader.read_vectored(&mut [IoSliceMut::new(&mut []), IoSliceMut::new(&mut buf),]).unwrap(),
        1,
    );
    assert_eq!(reader.position(), 1);
    let b: &[_] = &[0];
    assert_eq!(buf, b);
    let mut buf1 = [0; 4];
    let mut buf2 = [0; 4];
    assert_eq!(
        reader
            .read_vectored(&mut [IoSliceMut::new(&mut buf1), IoSliceMut::new(&mut buf2)],)
            .unwrap(),
        7,
    );
    let b1: &[_] = &[1, 2, 3, 4];
    let b2: &[_] = &[5, 6, 7];
    assert_eq!(buf1, b1);
    assert_eq!(&buf2[..3], b2);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}
// `read_to_end` drains the cursor's full remaining content in one call.
#[test]
fn read_to_end() {
    let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7]);
    let mut v = Vec::new();
    reader.read_to_end(&mut v).unwrap();
    assert_eq!(v, [0, 1, 2, 3, 4, 5, 6, 7]);
}
// Reading from a bare `&[u8]` shrinks the slice itself (no cursor position).
#[test]
fn test_slice_reader() {
    let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
    let reader = &mut &in_buf[..];
    let mut buf = [];
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
    let mut buf = [0];
    assert_eq!(reader.read(&mut buf).unwrap(), 1);
    assert_eq!(reader.len(), 7);
    let b: &[_] = &[0];
    assert_eq!(&buf[..], b);
    let mut buf = [0; 4];
    assert_eq!(reader.read(&mut buf).unwrap(), 4);
    assert_eq!(reader.len(), 3);
    let b: &[_] = &[1, 2, 3, 4];
    assert_eq!(&buf[..], b);
    assert_eq!(reader.read(&mut buf).unwrap(), 3);
    let b: &[_] = &[5, 6, 7];
    assert_eq!(&buf[..3], b);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}
// Vectored counterpart of `test_slice_reader`.
#[test]
fn test_slice_reader_vectored() {
    let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
    let reader = &mut &in_buf[..];
    let mut buf = [];
    assert_eq!(reader.read_vectored(&mut [IoSliceMut::new(&mut buf)]).unwrap(), 0);
    let mut buf = [0];
    assert_eq!(
        reader.read_vectored(&mut [IoSliceMut::new(&mut []), IoSliceMut::new(&mut buf),]).unwrap(),
        1,
    );
    assert_eq!(reader.len(), 7);
    let b: &[_] = &[0];
    assert_eq!(buf, b);
    let mut buf1 = [0; 4];
    let mut buf2 = [0; 4];
    assert_eq!(
        reader
            .read_vectored(&mut [IoSliceMut::new(&mut buf1), IoSliceMut::new(&mut buf2)],)
            .unwrap(),
        7,
    );
    let b1: &[_] = &[1, 2, 3, 4];
    let b2: &[_] = &[5, 6, 7];
    assert_eq!(buf1, b1);
    assert_eq!(&buf2[..3], b2);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}
// `read_exact` succeeds while enough bytes remain, then errors at EOF.
#[test]
fn test_read_exact() {
    let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
    let reader = &mut &in_buf[..];
    let mut buf = [];
    assert!(reader.read_exact(&mut buf).is_ok());
    let mut buf = [8];
    assert!(reader.read_exact(&mut buf).is_ok());
    assert_eq!(buf[0], 0);
    assert_eq!(reader.len(), 7);
    let mut buf = [0, 0, 0, 0, 0, 0, 0];
    assert!(reader.read_exact(&mut buf).is_ok());
    assert_eq!(buf, [1, 2, 3, 4, 5, 6, 7]);
    assert_eq!(reader.len(), 0);
    let mut buf = [0];
    assert!(reader.read_exact(&mut buf).is_err());
}
// `Cursor<&[u8]>` reads behave like the owned-vec cursor.
#[test]
fn test_buf_reader() {
    let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
    let mut reader = Cursor::new(&in_buf[..]);
    let mut buf = [];
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
    assert_eq!(reader.position(), 0);
    let mut buf = [0];
    assert_eq!(reader.read(&mut buf).unwrap(), 1);
    assert_eq!(reader.position(), 1);
    let b: &[_] = &[0];
    assert_eq!(buf, b);
    let mut buf = [0; 4];
    assert_eq!(reader.read(&mut buf).unwrap(), 4);
    assert_eq!(reader.position(), 5);
    let b: &[_] = &[1, 2, 3, 4];
    assert_eq!(buf, b);
    assert_eq!(reader.read(&mut buf).unwrap(), 3);
    let b: &[_] = &[5, 6, 7];
    assert_eq!(&buf[..3], b);
    assert_eq!(reader.read(&mut buf).unwrap(), 0);
}
// Seeking past the end is allowed; reads there return 0 and fixed-size
// writes there return 0.
#[test]
fn seek_past_end() {
    let buf = [0xff];
    let mut r = Cursor::new(&buf[..]);
    assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
    assert_eq!(r.read(&mut [0]).unwrap(), 0);
    let mut r = Cursor::new(vec![10]);
    assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
    assert_eq!(r.read(&mut [0]).unwrap(), 0);
    let mut buf = [0];
    let mut r = Cursor::new(&mut buf[..]);
    assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
    assert_eq!(r.write(&[3]).unwrap(), 0);
    let mut r = Cursor::new(vec![10].into_boxed_slice());
    assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
    assert_eq!(r.write(&[3]).unwrap(), 0);
}
// Positions above i64::MAX are valid u64s; only an overflowing relative
// seek is an error.
#[test]
fn seek_past_i64() {
    let buf = [0xff];
    let mut r = Cursor::new(&buf[..]);
    assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6);
    assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6);
    assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006);
    assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006);
    assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err());
    assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6);
    let mut r = Cursor::new(vec![10]);
    assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6);
    assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6);
    assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006);
    assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006);
    assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err());
    assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6);
    let mut buf = [0];
    let mut r = Cursor::new(&mut buf[..]);
    assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6);
    assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6);
    assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006);
    assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006);
    assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err());
    assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6);
    let mut r = Cursor::new(vec![10].into_boxed_slice());
    assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6);
    assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6);
    assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006);
    assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006);
    assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err());
    assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6);
}
// Seeking to a negative position is always an error.
#[test]
fn seek_before_0() {
    let buf = [0xff];
    let mut r = Cursor::new(&buf[..]);
    assert!(r.seek(SeekFrom::End(-2)).is_err());
    let mut r = Cursor::new(vec![10]);
    assert!(r.seek(SeekFrom::End(-2)).is_err());
    let mut buf = [0];
    let mut r = Cursor::new(&mut buf[..]);
    assert!(r.seek(SeekFrom::End(-2)).is_err());
    let mut r = Cursor::new(vec![10].into_boxed_slice());
    assert!(r.seek(SeekFrom::End(-2)).is_err());
}
// Interleaved seeks and writes on a vec cursor: overwrites inside the body,
// zero-pads when seeking past the end.
#[test]
fn test_seekable_mem_writer() {
    let mut writer = Cursor::new(Vec::<u8>::new());
    assert_eq!(writer.position(), 0);
    assert_eq!(writer.write(&[0]).unwrap(), 1);
    assert_eq!(writer.position(), 1);
    assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
    assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
    assert_eq!(writer.position(), 8);
    let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7];
    assert_eq!(&writer.get_ref()[..], b);
    assert_eq!(writer.seek(SeekFrom::Start(0)).unwrap(), 0);
    assert_eq!(writer.position(), 0);
    assert_eq!(writer.write(&[3, 4]).unwrap(), 2);
    let b: &[_] = &[3, 4, 2, 3, 4, 5, 6, 7];
    assert_eq!(&writer.get_ref()[..], b);
    assert_eq!(writer.seek(SeekFrom::Current(1)).unwrap(), 3);
    assert_eq!(writer.write(&[0, 1]).unwrap(), 2);
    let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 7];
    assert_eq!(&writer.get_ref()[..], b);
    assert_eq!(writer.seek(SeekFrom::End(-1)).unwrap(), 7);
    assert_eq!(writer.write(&[1, 2]).unwrap(), 2);
    let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2];
    assert_eq!(&writer.get_ref()[..], b);
    assert_eq!(writer.seek(SeekFrom::End(1)).unwrap(), 10);
    assert_eq!(writer.write(&[1]).unwrap(), 1);
    let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2, 0, 1];
    assert_eq!(&writer.get_ref()[..], b);
}
// A vec cursor grows (zero-padding the gap) when writing past the end.
#[test]
fn vec_seek_past_end() {
    let mut r = Cursor::new(Vec::new());
    assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10);
    assert_eq!(r.write(&[3]).unwrap(), 1);
}
#[test]
fn vec_seek_before_0() {
    let mut r = Cursor::new(Vec::new());
    assert!(r.seek(SeekFrom::End(-2)).is_err());
}
// On 32-bit targets a position beyond usize::MAX cannot be materialised.
#[test]
#[cfg(target_pointer_width = "32")]
fn vec_seek_and_write_past_usize_max() {
    let mut c = Cursor::new(Vec::new());
    c.set_position(usize::MAX as u64 + 1);
    assert!(c.write_all(&[1, 2, 3]).is_err());
}
#[test]
fn test_partial_eq() {
    assert_eq!(Cursor::new(Vec::<u8>::new()), Cursor::new(Vec::<u8>::new()));
}
// Compile-time check that `Cursor<Vec<u8>>` implements `Eq`.
#[test]
fn test_eq() {
    struct AssertEq<T: Eq>(pub T);
    let _: AssertEq<Cursor<Vec<u8>>> = AssertEq(Cursor::new(Vec::new()));
}
// Compile-time check that `new`/`get_ref`/`position` are usable in const.
#[allow(dead_code)]
fn const_cursor() {
    const CURSOR: Cursor<&[u8]> = Cursor::new(&[0]);
    const _: &&[u8] = CURSOR.get_ref();
    const _: u64 = CURSOR.position();
}
// Benchmark overwriting a small existing vec through `write_all`.
#[bench]
fn bench_write_vec(b: &mut test::Bencher) {
    let slice = &[1; 128];
    b.iter(|| {
        let mut buf = b"some random data to overwrite".to_vec();
        let mut cursor = Cursor::new(&mut buf);
        let _ = cursor.write_all(slice);
        test::black_box(&cursor);
    })
}
// Benchmark the vectored write path with a spread of buffer sizes.
#[bench]
fn bench_write_vec_vectored(b: &mut test::Bencher) {
    let slices = [
        IoSlice::new(&[1; 128]),
        IoSlice::new(&[2; 256]),
        IoSlice::new(&[3; 512]),
        IoSlice::new(&[4; 1024]),
        IoSlice::new(&[5; 2048]),
        IoSlice::new(&[6; 4096]),
        IoSlice::new(&[7; 8192]),
        IoSlice::new(&[8; 8192 * 2]),
    ];
    b.iter(|| {
        let mut buf = b"some random data to overwrite".to_vec();
        let mut cursor = Cursor::new(&mut buf);
        // `write_all_vectored` consumes the slices, so copy them each iteration.
        let mut slices = slices;
        let _ = cursor.write_all_vectored(&mut slices);
        test::black_box(&cursor);
    })
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,411 +0,0 @@
//! This is a densely packed error representation which is used on targets with
//! 64-bit pointers.
//!
//! (Note that `bitpacked` vs `unpacked` here has no relationship to
//! `#[repr(packed)]`, it just refers to attempting to use any available bits in
//! a more clever manner than `rustc`'s default layout algorithm would).
//!
//! Conceptually, it stores the same data as the "unpacked" equivalent we use on
//! other targets. Specifically, you can imagine it as an optimized version of
//! the following enum (which is roughly equivalent to what's stored by
//! `repr_unpacked::Repr`, e.g. `super::ErrorData<Box<Custom>>`):
//!
//! ```ignore (exposition-only)
//! enum ErrorData {
//! Os(i32),
//! Simple(ErrorKind),
//! SimpleMessage(&'static SimpleMessage),
//! Custom(Box<Custom>),
//! }
//! ```
//!
//! However, it packs this data into a 64bit non-zero value.
//!
//! This optimization not only allows `io::Error` to occupy a single pointer,
//! but improves `io::Result` as well, especially for situations like
//! `io::Result<()>` (which is now 64 bits) or `io::Result<u64>` (which is now
//! 128 bits), which are quite common.
//!
//! # Layout
//! Tagged values are 64 bits, with the 2 least significant bits used for the
//! tag. This means there are 4 "variants":
//!
//! - **Tag 0b00**: The first variant is equivalent to
//! `ErrorData::SimpleMessage`, and holds a `&'static SimpleMessage` directly.
//!
//! `SimpleMessage` has an alignment >= 4 (which is requested with
//! `#[repr(align)]` and checked statically at the bottom of this file), which
//! means every `&'static SimpleMessage` should have the both tag bits as 0,
//! meaning its tagged and untagged representation are equivalent.
//!
//! This means we can skip tagging it, which is necessary as this variant can
//! be constructed from a `const fn`, which probably cannot tag pointers (or
//! at least it would be difficult).
//!
//! - **Tag 0b01**: The other pointer variant holds the data for
//! `ErrorData::Custom` and the remaining 62 bits are used to store a
//! `Box<Custom>`. `Custom` also has alignment >= 4, so the bottom two bits
//! are free to use for the tag.
//!
//! The only important thing to note is that `ptr::wrapping_add` and
//! `ptr::wrapping_sub` are used to tag the pointer, rather than bitwise
//! operations. This should preserve the pointer's provenance, which would
//! otherwise be lost.
//!
//! - **Tag 0b10**: Holds the data for `ErrorData::Os(i32)`. We store the `i32`
//! in the pointer's most significant 32 bits, and don't use the bits `2..32`
//! for anything. Using the top 32 bits is just to let us easily recover the
//! `i32` code with the correct sign.
//!
//! - **Tag 0b11**: Holds the data for `ErrorData::Simple(ErrorKind)`. This
//! stores the `ErrorKind` in the top 32 bits as well, although it doesn't
//! occupy nearly that many. Most of the bits are unused here, but it's not
//! like we need them for anything else yet.
//!
//! # Use of `NonNull<()>`
//!
//! Everything is stored in a `NonNull<()>`, which is odd, but actually serves a
//! purpose.
//!
//! Conceptually you might think of this more like:
//!
//! ```ignore (exposition-only)
//! union Repr {
//! // holds integer (Simple/Os) variants, and
//! // provides access to the tag bits.
//! bits: NonZero<u64>,
//! // Tag is 0, so this is stored untagged.
//! msg: &'static SimpleMessage,
//! // Tagged (offset) `Box<Custom>` pointer.
//! tagged_custom: NonNull<()>,
//! }
//! ```
//!
//! But there are a few problems with this:
//!
//! 1. Union access is equivalent to a transmute, so this representation would
//! require we transmute between integers and pointers in at least one
//! direction, which may be UB (and even if not, it is likely harder for a
//! compiler to reason about than explicit ptr->int operations).
//!
//! 2. Even if all fields of a union have a niche, the union itself doesn't,
//! although this may change in the future. This would make things like
//! `io::Result<()>` and `io::Result<usize>` larger, which defeats part of
//! the motivation of this bitpacking.
//!
//! Storing everything in a `NonZero<usize>` (or some other integer) would be a
//! bit more traditional for pointer tagging, but it would lose provenance
//! information, couldn't be constructed from a `const fn`, and would probably
//! run into other issues as well.
//!
//! The `NonNull<()>` seems like the only alternative, even if it's fairly odd
//! to use a pointer type to store something that may hold an integer, some of
//! the time.
use core::marker::PhantomData;
use core::num::NonZeroUsize;
use core::ptr::NonNull;
use super::{Custom, ErrorData, ErrorKind, RawOsError, SimpleMessage};
// The 2 least-significant bits are used as tag.
const TAG_MASK: usize = 0b11;
// Untagged: a `&'static SimpleMessage` (its alignment >= 4 keeps both low bits clear).
const TAG_SIMPLE_MESSAGE: usize = 0b00;
// A `Box<Custom>` pointer, offset by 1 via `wrapping_add` (see `new_custom`).
const TAG_CUSTOM: usize = 0b01;
// A `RawOsError` code stored in the upper 32 bits.
const TAG_OS: usize = 0b10;
// A bare `ErrorKind` stored in the upper 32 bits.
const TAG_SIMPLE: usize = 0b11;
/// The internal representation.
///
/// See the module docs for more, this is just a way to hack in a check that we
/// indeed are not unwind-safe.
///
/// ```compile_fail,E0277
/// fn is_unwind_safe<T: core::panic::UnwindSafe>() {}
/// is_unwind_safe::<std::io::Error>();
/// ```
// The `NonNull<()>` holds the tagged bits described in the module docs; the
// `PhantomData` marks logical ownership of the `ErrorData<Box<Custom>>`
// payload that those bits encode.
#[repr(transparent)]
#[rustc_insignificant_dtor]
pub(super) struct Repr(NonNull<()>, PhantomData<ErrorData<Box<Custom>>>);
// All the types `Repr` stores internally are Send + Sync, and so is it.
unsafe impl Send for Repr {}
unsafe impl Sync for Repr {}
impl Repr {
    /// Packs a heap-allocated `Custom` payload into the tagged-pointer form
    /// (tag `0b01`, applied by offsetting the pointer with `wrapping_add`).
    pub(super) fn new_custom(b: Box<Custom>) -> Self {
        let p = Box::into_raw(b).cast::<u8>();
        // Should only be possible if an allocator handed out a pointer with
        // wrong alignment.
        debug_assert_eq!(p.addr() & TAG_MASK, 0);
        // Note: We know `TAG_CUSTOM <= size_of::<Custom>()` (static_assert at
        // end of file), and both the start and end of the expression must be
        // valid without address space wraparound due to `Box`'s semantics.
        //
        // This means it would be correct to implement this using `ptr::add`
        // (rather than `ptr::wrapping_add`), but it's unclear this would give
        // any benefit, so we just use `wrapping_add` instead.
        let tagged = p.wrapping_add(TAG_CUSTOM).cast::<()>();
        // Safety: `TAG_CUSTOM + p` is the same as `TAG_CUSTOM | p`,
        // because `p`'s alignment means it isn't allowed to have any of the
        // `TAG_BITS` set (you can verify that addition and bitwise-or are the
        // same when the operands have no bits in common using a truth table).
        //
        // Then, `TAG_CUSTOM | p` is not zero, as that would require
        // `TAG_CUSTOM` and `p` both be zero, and neither is (as `p` came from a
        // box, and `TAG_CUSTOM` just... isn't zero -- it's `0b01`). Therefore,
        // `TAG_CUSTOM + p` isn't zero and so `tagged` can't be, and the
        // `new_unchecked` is safe.
        let res = Self(unsafe { NonNull::new_unchecked(tagged) }, PhantomData);
        // quickly smoke-check we encoded the right thing (This generally will
        // only run in std's tests, unless the user uses -Zbuild-std)
        debug_assert!(matches!(res.data(), ErrorData::Custom(_)), "repr(custom) encoding failed");
        res
    }
    /// Packs a raw OS error code into the upper 32 bits, tagged `0b10`.
    #[inline]
    pub(super) fn new_os(code: RawOsError) -> Self {
        let utagged = ((code as usize) << 32) | TAG_OS;
        // Safety: `TAG_OS` is not zero, so the result of the `|` is not 0.
        let res = Self(
            NonNull::without_provenance(unsafe { NonZeroUsize::new_unchecked(utagged) }),
            PhantomData,
        );
        // quickly smoke-check we encoded the right thing (This generally will
        // only run in std's tests, unless the user uses -Zbuild-std)
        debug_assert!(
            matches!(res.data(), ErrorData::Os(c) if c == code),
            "repr(os) encoding failed for {code}"
        );
        res
    }
    /// Packs a bare `ErrorKind` into the upper 32 bits, tagged `0b11`.
    #[inline]
    pub(super) fn new_simple(kind: ErrorKind) -> Self {
        let utagged = ((kind as usize) << 32) | TAG_SIMPLE;
        // Safety: `TAG_SIMPLE` is not zero, so the result of the `|` is not 0.
        let res = Self(
            NonNull::without_provenance(unsafe { NonZeroUsize::new_unchecked(utagged) }),
            PhantomData,
        );
        // quickly smoke-check we encoded the right thing (This generally will
        // only run in std's tests, unless the user uses -Zbuild-std)
        debug_assert!(
            matches!(res.data(), ErrorData::Simple(k) if k == kind),
            "repr(simple) encoding failed {:?}",
            kind,
        );
        res
    }
    /// Stores a `&'static SimpleMessage` untagged (tag `0b00`). Usable in
    /// `const` contexts because no pointer arithmetic is required.
    #[inline]
    pub(super) const fn new_simple_message(m: &'static SimpleMessage) -> Self {
        // Safety: References are never null.
        Self(unsafe { NonNull::new_unchecked(m as *const _ as *mut ()) }, PhantomData)
    }
    /// Decodes the tagged pointer, borrowing any custom payload immutably.
    #[inline]
    pub(super) fn data(&self) -> ErrorData<&Custom> {
        // Safety: We're a Repr, decode_repr is fine.
        unsafe { decode_repr(self.0, |c| &*c) }
    }
    /// Decodes the tagged pointer, borrowing any custom payload mutably.
    #[inline]
    pub(super) fn data_mut(&mut self) -> ErrorData<&mut Custom> {
        // Safety: We're a Repr, decode_repr is fine.
        unsafe { decode_repr(self.0, |c| &mut *c) }
    }
    /// Decodes and takes ownership, reconstructing the `Box<Custom>` for the
    /// custom variant; `ManuallyDrop` suppresses `Repr`'s own destructor.
    #[inline]
    pub(super) fn into_data(self) -> ErrorData<Box<Custom>> {
        let this = core::mem::ManuallyDrop::new(self);
        // Safety: We're a Repr, decode_repr is fine. The `Box::from_raw` is
        // safe because we prevent double-drop using `ManuallyDrop`.
        unsafe { decode_repr(this.0, |p| Box::from_raw(p)) }
    }
}
// Only the `TAG_CUSTOM` variant owns heap memory; decoding with
// `Box::from_raw` reconstructs and drops that allocation. The other tags
// decode to plain values and need no cleanup.
impl Drop for Repr {
    #[inline]
    fn drop(&mut self) {
        // Safety: We're a Repr, decode_repr is fine. The `Box::from_raw` is
        // safe because we're being dropped.
        unsafe {
            let _ = decode_repr(self.0, |p| Box::<Custom>::from_raw(p));
        }
    }
}
// Shared helper to decode a `Repr`'s internal pointer into an ErrorData.
//
// Safety: `ptr`'s bits should be encoded as described in the document at the
// top (it should `some_repr.0`)
#[inline]
unsafe fn decode_repr<C, F>(ptr: NonNull<()>, make_custom: F) -> ErrorData<C>
where
    F: FnOnce(*mut Custom) -> C,
{
    let bits = ptr.as_ptr().addr();
    match bits & TAG_MASK {
        TAG_OS => {
            // Shift as `i64` (arithmetic shift) so the sign of the 32-bit
            // code stored in the top bits survives the round-trip.
            let code = ((bits as i64) >> 32) as RawOsError;
            ErrorData::Os(code)
        }
        TAG_SIMPLE => {
            let kind_bits = (bits >> 32) as u32;
            let kind = kind_from_prim(kind_bits).unwrap_or_else(|| {
                debug_assert!(false, "Invalid io::error::Repr bits: `Repr({:#018x})`", bits);
                // This means the `ptr` passed in was not valid, which violates
                // the unsafe contract of `decode_repr`.
                //
                // Using this rather than unwrap meaningfully improves the code
                // for callers which only care about one variant (usually
                // `Custom`)
                unsafe { core::hint::unreachable_unchecked() };
            });
            ErrorData::Simple(kind)
        }
        TAG_SIMPLE_MESSAGE => {
            // SAFETY: per tag
            unsafe { ErrorData::SimpleMessage(&*ptr.cast::<SimpleMessage>().as_ptr()) }
        }
        TAG_CUSTOM => {
            // It would be correct for us to use `ptr::byte_sub` here (see the
            // comment above the `wrapping_add` call in `new_custom` for why),
            // but it isn't clear that it makes a difference, so we don't.
            let custom = ptr.as_ptr().wrapping_byte_sub(TAG_CUSTOM).cast::<Custom>();
            ErrorData::Custom(make_custom(custom))
        }
        _ => {
            // Can't happen, and compiler can tell
            unreachable!();
        }
    }
}
// Converts the `u32` stored in the upper bits of a `TAG_SIMPLE` repr back
// into an `ErrorKind`, returning `None` for out-of-range discriminants.
//
// This compiles to the same code as the check+transmute, but doesn't require
// unsafe, or to hard-code max ErrorKind or its size in a way the compiler
// couldn't verify.
#[inline]
fn kind_from_prim(ek: u32) -> Option<ErrorKind> {
    macro_rules! from_prim {
        ($prim:expr => $Enum:ident { $($Variant:ident),* $(,)? }) => {{
            // Force a compile error if the list gets out of date.
            const _: fn(e: $Enum) = |e: $Enum| match e {
                $($Enum::$Variant => ()),*
            };
            match $prim {
                $(v if v == ($Enum::$Variant as _) => Some($Enum::$Variant),)*
                _ => None,
            }
        }}
    }
    from_prim!(ek => ErrorKind {
        NotFound,
        PermissionDenied,
        ConnectionRefused,
        ConnectionReset,
        HostUnreachable,
        NetworkUnreachable,
        ConnectionAborted,
        NotConnected,
        AddrInUse,
        AddrNotAvailable,
        NetworkDown,
        BrokenPipe,
        AlreadyExists,
        WouldBlock,
        NotADirectory,
        IsADirectory,
        DirectoryNotEmpty,
        ReadOnlyFilesystem,
        FilesystemLoop,
        StaleNetworkFileHandle,
        InvalidInput,
        InvalidData,
        TimedOut,
        WriteZero,
        StorageFull,
        NotSeekable,
        QuotaExceeded,
        FileTooLarge,
        ResourceBusy,
        ExecutableFileBusy,
        Deadlock,
        CrossesDevices,
        TooManyLinks,
        InvalidFilename,
        ArgumentListTooLong,
        Interrupted,
        Other,
        UnexpectedEof,
        Unsupported,
        OutOfMemory,
        InProgress,
        Uncategorized,
    })
}
// Some static checking to alert us if a change breaks any of the assumptions
// that our encoding relies on for correctness and soundness. (Some of these are
// a bit overly thorough/cautious, admittedly)
//
// If any of these are hit on a platform that std supports, we should likely
// just use `repr_unpacked.rs` there instead (unless the fix is easy).
macro_rules! static_assert {
    ($condition:expr) => {
        const _: () = assert!($condition);
    };
    // Equality of two `usize` constants, proven by unifying two array types
    // (`[(); $lhs]` and `[(); $rhs]` only have the same type if $lhs == $rhs).
    (@usize_eq: $lhs:expr, $rhs:expr) => {
        const _: [(); $lhs] = [(); $rhs];
    };
}
// The bitpacking we use requires pointers be exactly 64 bits.
static_assert!(@usize_eq: size_of::<NonNull<()>>(), 8);
// We also require pointers and usize be the same size.
static_assert!(@usize_eq: size_of::<NonNull<()>>(), size_of::<usize>());
// `Custom` and `SimpleMessage` need to be thin pointers.
static_assert!(@usize_eq: size_of::<&'static SimpleMessage>(), 8);
static_assert!(@usize_eq: size_of::<Box<Custom>>(), 8);
// The tag space must be a contiguous low-bit mask.
static_assert!((TAG_MASK + 1).is_power_of_two());
// And they must have sufficient alignment.
static_assert!(align_of::<SimpleMessage>() >= TAG_MASK + 1);
static_assert!(align_of::<Custom>() >= TAG_MASK + 1);
// Every tag value must fit entirely inside the mask.
static_assert!(@usize_eq: TAG_MASK & TAG_SIMPLE_MESSAGE, TAG_SIMPLE_MESSAGE);
static_assert!(@usize_eq: TAG_MASK & TAG_CUSTOM, TAG_CUSTOM);
static_assert!(@usize_eq: TAG_MASK & TAG_OS, TAG_OS);
static_assert!(@usize_eq: TAG_MASK & TAG_SIMPLE, TAG_SIMPLE);
// This is obviously true (`TAG_CUSTOM` is `0b01`), but in `Repr::new_custom` we
// offset a pointer by this value, and expect it to both be within the same
// object, and to not wrap around the address space. See the comment in that
// function for further details.
//
// Actually, at the moment we use `ptr::wrapping_add`, not `ptr::add`, so this
// check isn't needed for that one, although the assertion that we don't
// actually wrap around in that wrapping_add does simplify the safety reasoning
// elsewhere considerably.
static_assert!(size_of::<Custom>() >= TAG_CUSTOM);
// These two store a payload which is allowed to be zero, so they must be
// non-zero to preserve the `NonNull`'s range invariant.
static_assert!(TAG_OS != 0);
static_assert!(TAG_SIMPLE != 0);
// We can't tag `SimpleMessage`s, the tag must be 0.
static_assert!(@usize_eq: TAG_SIMPLE_MESSAGE, 0);
// Check that the point of all of this still holds.
//
// We'd check against `io::Error`, but *technically* it's allowed to vary,
// as it's not `#[repr(transparent)]`/`#[repr(C)]`. We could add that, but
// the `#[repr()]` would show up in rustdoc, which might be seen as a stable
// commitment.
static_assert!(@usize_eq: size_of::<Repr>(), 8);
static_assert!(@usize_eq: size_of::<Option<Repr>>(), 8);
static_assert!(@usize_eq: size_of::<Result<(), Repr>>(), 8);
static_assert!(@usize_eq: size_of::<Result<usize, Repr>>(), 16);

View File

@@ -1,50 +0,0 @@
//! This is a fairly simple unpacked error representation that's used on
//! non-64bit targets, where the packed 64 bit representation wouldn't work, and
//! would have no benefit.
use super::{Custom, ErrorData, ErrorKind, RawOsError, SimpleMessage};
// The unpacked representation stores the error-data enum directly (custom
// payload boxed); no pointer tagging is performed here.
type Inner = ErrorData<Box<Custom>>;
// Newtype wrapper exposing the same `pub(super)` API as `repr_bitpacked::Repr`.
pub(super) struct Repr(Inner);
impl Repr {
#[inline]
pub(super) fn new_custom(b: Box<Custom>) -> Self {
Self(Inner::Custom(b))
}
#[inline]
pub(super) fn new_os(code: RawOsError) -> Self {
Self(Inner::Os(code))
}
#[inline]
pub(super) fn new_simple(kind: ErrorKind) -> Self {
Self(Inner::Simple(kind))
}
#[inline]
pub(super) const fn new_simple_message(m: &'static SimpleMessage) -> Self {
Self(Inner::SimpleMessage(m))
}
#[inline]
pub(super) fn into_data(self) -> ErrorData<Box<Custom>> {
self.0
}
#[inline]
pub(super) fn data(&self) -> ErrorData<&Custom> {
match &self.0 {
Inner::Os(c) => ErrorData::Os(*c),
Inner::Simple(k) => ErrorData::Simple(*k),
Inner::SimpleMessage(m) => ErrorData::SimpleMessage(*m),
Inner::Custom(m) => ErrorData::Custom(&*m),
}
}
#[inline]
pub(super) fn data_mut(&mut self) -> ErrorData<&mut Custom> {
match &mut self.0 {
Inner::Os(c) => ErrorData::Os(*c),
Inner::Simple(k) => ErrorData::Simple(*k),
Inner::SimpleMessage(m) => ErrorData::SimpleMessage(*m),
Inner::Custom(m) => ErrorData::Custom(&mut *m),
}
}
}

View File

@@ -1,191 +0,0 @@
use super::{Custom, Error, ErrorData, ErrorKind, Repr, SimpleMessage, const_error};
use crate::sys::io::{decode_error_kind, error_string};
use crate::{assert_matches, error, fmt};
#[test]
fn test_size() {
    // `io::Error` must stay no larger than two machine words.
    let two_words = 2 * size_of::<usize>();
    assert!(size_of::<Error>() <= two_words);
}
// Checks the nested `Debug` rendering of a custom error that wraps an OS
// error: the output must include the OS code, decoded kind, and message.
#[test]
fn test_debug_error() {
    let code = 6;
    let msg = error_string(code);
    let kind = decode_error_kind(code);
    let err = Error {
        repr: Repr::new_custom(Box::new(Custom {
            kind: ErrorKind::InvalidInput,
            error: Box::new(Error { repr: super::Repr::new_os(code) }),
        })),
    };
    let expected = format!(
        "Custom {{ \
         kind: InvalidInput, \
         error: Os {{ \
         code: {:?}, \
         kind: {:?}, \
         message: {:?} \
         }} \
         }}",
        code, kind, msg
    );
    assert_eq!(format!("{err:?}"), expected);
}
// Exercises `get_ref`/`get_mut`/`into_inner` downcasting through the custom
// error variant with a locally defined error type.
#[test]
fn test_downcasting() {
    #[derive(Debug)]
    struct TestError;
    impl fmt::Display for TestError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str("asdf")
        }
    }
    impl error::Error for TestError {}
    // we have to call all of these UFCS style right now since method
    // resolution won't implicitly drop the Send+Sync bounds
    let mut err = Error::new(ErrorKind::Other, TestError);
    assert!(err.get_ref().unwrap().is::<TestError>());
    assert_eq!("asdf", err.get_ref().unwrap().to_string());
    assert!(err.get_mut().unwrap().is::<TestError>());
    let extracted = err.into_inner().unwrap();
    extracted.downcast::<TestError>().unwrap();
}
#[test]
fn test_const() {
    // `const_error!` must be usable in a `const` item and keep its kind,
    // display text, and debug rendering intact.
    const E: Error = const_error!(ErrorKind::NotFound, "hello");
    assert_eq!(E.kind(), ErrorKind::NotFound);
    assert_eq!(E.to_string(), "hello");
    let rendered = format!("{E:?}");
    assert!(rendered.contains("\"hello\""));
    assert!(rendered.contains("NotFound"));
}
#[test]
fn test_os_packing() {
    // Round-trip a window of codes around zero (including negatives) through
    // the packed representation.
    for code in -20..20 {
        let err = Error::from_raw_os_error(code);
        assert_eq!(err.raw_os_error(), Some(code));
        assert_matches!(err.repr.data(), ErrorData::Os(c) if c == code);
    }
}
#[test]
fn test_errorkind_packing() {
    // A few representative kinds must round-trip through `Error::from`.
    for kind in [ErrorKind::NotFound, ErrorKind::PermissionDenied, ErrorKind::Uncategorized] {
        assert_eq!(Error::from(kind).kind(), kind);
    }
    // Check that the innards look like what we want.
    assert_matches!(
        Error::from(ErrorKind::OutOfMemory).repr.data(),
        ErrorData::Simple(ErrorKind::OutOfMemory),
    );
}
// Verifies that simple-message errors survive packing whether they originate
// from a local value, a `const` item, or a `static` item.
#[test]
fn test_simple_message_packing() {
    use super::ErrorKind::*;
    use super::SimpleMessage;
    macro_rules! check_simple_msg {
        ($err:expr, $kind:ident, $msg:literal) => {{
            let e = &$err;
            // Check that the public api is right.
            assert_eq!(e.kind(), $kind);
            assert!(format!("{e:?}").contains($msg));
            // and we got what we expected
            assert_matches!(
                e.repr.data(),
                ErrorData::SimpleMessage(SimpleMessage { kind: $kind, message: $msg })
            );
        }};
    }
    let not_static = const_error!(Uncategorized, "not a constant!");
    check_simple_msg!(not_static, Uncategorized, "not a constant!");
    const CONST: Error = const_error!(NotFound, "definitely a constant!");
    check_simple_msg!(CONST, NotFound, "definitely a constant!");
    static STATIC: Error = const_error!(BrokenPipe, "a constant, sort of!");
    check_simple_msg!(STATIC, BrokenPipe, "a constant, sort of!");
}
// Concrete error type used by the custom-error and downcast tests below.
#[derive(Debug, PartialEq)]
struct Bojji(bool);
impl error::Error for Bojji {}
impl fmt::Display for Bojji {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "ah! {:?}", self)
    }
}
// Verifies that a boxed custom error survives packing: the kind and the
// downcastable payload must both be recoverable from `repr.data()`.
#[test]
fn test_custom_error_packing() {
    use super::Custom;
    let test = Error::new(ErrorKind::Uncategorized, Bojji(true));
    assert_matches!(
        test.repr.data(),
        ErrorData::Custom(Custom {
            kind: ErrorKind::Uncategorized,
            error,
        }) if error.downcast_ref::<Bojji>().as_deref() == Some(&Bojji(true)),
    );
}
// Zero-sized error type with an empty `Display`; used as the "wrong" target
// type in the downcast-failure cases below.
#[derive(Debug)]
struct E;
impl fmt::Display for E {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        Ok(())
    }
}
impl error::Error for E {}
// Covers `Error::downcast` across all repr variants: a failed downcast must
// return the original error unchanged (kind, OS code, or message intact).
#[test]
fn test_std_io_error_downcast() {
    // Case 1: custom error, downcast succeeds
    let io_error = Error::new(ErrorKind::Other, Bojji(true));
    let e: Bojji = io_error.downcast().unwrap();
    assert!(e.0);
    // Case 2: custom error, downcast fails
    let io_error = Error::new(ErrorKind::Other, Bojji(true));
    let io_error = io_error.downcast::<E>().unwrap_err();
    // ensures that the custom error is intact
    assert_eq!(ErrorKind::Other, io_error.kind());
    let e: Bojji = io_error.downcast().unwrap();
    assert!(e.0);
    // Case 3: os error
    let errno = 20;
    let io_error = Error::from_raw_os_error(errno);
    let io_error = io_error.downcast::<E>().unwrap_err();
    assert_eq!(errno, io_error.raw_os_error().unwrap());
    // Case 4: simple
    let kind = ErrorKind::OutOfMemory;
    let io_error: Error = kind.into();
    let io_error = io_error.downcast::<E>().unwrap_err();
    assert_eq!(kind, io_error.kind());
    // Case 5: simple message
    const SIMPLE_MESSAGE: SimpleMessage =
        SimpleMessage { kind: ErrorKind::Other, message: "simple message error test" };
    let io_error = Error::from_static_message(&SIMPLE_MESSAGE);
    let io_error = io_error.downcast::<E>().unwrap_err();
    assert_eq!(SIMPLE_MESSAGE.kind, io_error.kind());
    assert_eq!(SIMPLE_MESSAGE.message, format!("{io_error}"));
}

View File

@@ -1,717 +0,0 @@
#[cfg(test)]
mod tests;
use crate::alloc::Allocator;
use alloc_crate::collections::VecDeque;
use crate::io::{self, BorrowedCursor, BufRead, IoSlice, IoSliceMut, Read, Seek, SeekFrom, Write};
use crate::{cmp, fmt, mem, str};
// =============================================================================
// Forwarding implementations
// Forwarding impl: `&mut R` reads by delegating every `Read` method to the
// underlying reader, so any specialized overrides `R` provides are preserved.
#[stable(feature = "rust1", since = "1.0.0")]
impl<R: Read + ?Sized> Read for &mut R {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        (**self).read(buf)
    }
    #[inline]
    fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
        (**self).read_buf(cursor)
    }
    #[inline]
    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        (**self).read_vectored(bufs)
    }
    #[inline]
    fn is_read_vectored(&self) -> bool {
        (**self).is_read_vectored()
    }
    #[inline]
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        (**self).read_to_end(buf)
    }
    #[inline]
    fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
        (**self).read_to_string(buf)
    }
    #[inline]
    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        (**self).read_exact(buf)
    }
    #[inline]
    fn read_buf_exact(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
        (**self).read_buf_exact(cursor)
    }
}
// Forwarding impl: `&mut W` writes by delegating every `Write` method to the
// underlying writer, preserving any specialized overrides `W` provides.
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: Write + ?Sized> Write for &mut W {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        (**self).write(buf)
    }
    #[inline]
    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        (**self).write_vectored(bufs)
    }
    #[inline]
    fn is_write_vectored(&self) -> bool {
        (**self).is_write_vectored()
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        (**self).flush()
    }
    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        (**self).write_all(buf)
    }
    #[inline]
    fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
        (**self).write_all_vectored(bufs)
    }
    #[inline]
    fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
        (**self).write_fmt(fmt)
    }
}
// Forwarding impl: `&mut S` seeks by delegating every `Seek` method to the
// underlying seeker.
#[stable(feature = "rust1", since = "1.0.0")]
impl<S: Seek + ?Sized> Seek for &mut S {
    #[inline]
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        (**self).seek(pos)
    }
    #[inline]
    fn rewind(&mut self) -> io::Result<()> {
        (**self).rewind()
    }
    #[inline]
    fn stream_len(&mut self) -> io::Result<u64> {
        (**self).stream_len()
    }
    #[inline]
    fn stream_position(&mut self) -> io::Result<u64> {
        (**self).stream_position()
    }
    #[inline]
    fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
        (**self).seek_relative(offset)
    }
}
// Forwarding impl: `&mut B` delegates every `BufRead` method to the underlying
// buffered reader.
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: BufRead + ?Sized> BufRead for &mut B {
    #[inline]
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        (**self).fill_buf()
    }
    #[inline]
    fn consume(&mut self, amt: usize) {
        (**self).consume(amt)
    }
    #[inline]
    fn has_data_left(&mut self) -> io::Result<bool> {
        (**self).has_data_left()
    }
    #[inline]
    fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> io::Result<usize> {
        (**self).read_until(byte, buf)
    }
    #[inline]
    fn skip_until(&mut self, byte: u8) -> io::Result<usize> {
        (**self).skip_until(byte)
    }
    #[inline]
    fn read_line(&mut self, buf: &mut String) -> io::Result<usize> {
        (**self).read_line(buf)
    }
}
// Forwarding impl: `Box<R>` reads by delegating every `Read` method to the
// boxed reader.
#[stable(feature = "rust1", since = "1.0.0")]
impl<R: Read + ?Sized> Read for Box<R> {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        (**self).read(buf)
    }
    #[inline]
    fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
        (**self).read_buf(cursor)
    }
    #[inline]
    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        (**self).read_vectored(bufs)
    }
    #[inline]
    fn is_read_vectored(&self) -> bool {
        (**self).is_read_vectored()
    }
    #[inline]
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        (**self).read_to_end(buf)
    }
    #[inline]
    fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
        (**self).read_to_string(buf)
    }
    #[inline]
    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        (**self).read_exact(buf)
    }
    #[inline]
    fn read_buf_exact(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
        (**self).read_buf_exact(cursor)
    }
}
// Forwarding impl: `Box<W>` writes by delegating every `Write` method to the
// boxed writer.
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: Write + ?Sized> Write for Box<W> {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        (**self).write(buf)
    }
    #[inline]
    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        (**self).write_vectored(bufs)
    }
    #[inline]
    fn is_write_vectored(&self) -> bool {
        (**self).is_write_vectored()
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        (**self).flush()
    }
    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        (**self).write_all(buf)
    }
    #[inline]
    fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
        (**self).write_all_vectored(bufs)
    }
    #[inline]
    fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
        (**self).write_fmt(fmt)
    }
}
// Forwarding impl: `Box<S>` seeks by delegating every `Seek` method to the
// boxed seeker.
#[stable(feature = "rust1", since = "1.0.0")]
impl<S: Seek + ?Sized> Seek for Box<S> {
    #[inline]
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        (**self).seek(pos)
    }
    #[inline]
    fn rewind(&mut self) -> io::Result<()> {
        (**self).rewind()
    }
    #[inline]
    fn stream_len(&mut self) -> io::Result<u64> {
        (**self).stream_len()
    }
    #[inline]
    fn stream_position(&mut self) -> io::Result<u64> {
        (**self).stream_position()
    }
    #[inline]
    fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
        (**self).seek_relative(offset)
    }
}
// Forwarding impl: `Box<B>` delegates every `BufRead` method to the boxed
// buffered reader.
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: BufRead + ?Sized> BufRead for Box<B> {
    #[inline]
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        (**self).fill_buf()
    }
    #[inline]
    fn consume(&mut self, amt: usize) {
        (**self).consume(amt)
    }
    #[inline]
    fn has_data_left(&mut self) -> io::Result<bool> {
        (**self).has_data_left()
    }
    #[inline]
    fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> io::Result<usize> {
        (**self).read_until(byte, buf)
    }
    #[inline]
    fn skip_until(&mut self, byte: u8) -> io::Result<usize> {
        (**self).skip_until(byte)
    }
    #[inline]
    fn read_line(&mut self, buf: &mut String) -> io::Result<usize> {
        (**self).read_line(buf)
    }
}
// =============================================================================
// In-memory buffer implementations
/// Read is implemented for `&[u8]` by copying from the slice.
///
/// Note that reading updates the slice to point to the yet unread part.
/// The slice will be empty when EOF is reached.
#[stable(feature = "rust1", since = "1.0.0")]
impl Read for &[u8] {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let amt = cmp::min(buf.len(), self.len());
        let (a, b) = self.split_at(amt);
        // First check if the amount of bytes we want to read is small:
        // `copy_from_slice` will generally expand to a call to `memcpy`, and
        // for a single byte the overhead is significant.
        if amt == 1 {
            buf[0] = a[0];
        } else {
            buf[..amt].copy_from_slice(a);
        }
        *self = b;
        Ok(amt)
    }
    #[inline]
    fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
        let amt = cmp::min(cursor.capacity(), self.len());
        let (a, b) = self.split_at(amt);
        cursor.append(a);
        *self = b;
        Ok(())
    }
    // Fills each buffer in turn from the front of the slice, stopping early
    // once the slice is exhausted.
    #[inline]
    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        let mut nread = 0;
        for buf in bufs {
            nread += self.read(buf)?;
            if self.is_empty() {
                break;
            }
        }
        Ok(nread)
    }
    #[inline]
    fn is_read_vectored(&self) -> bool {
        true
    }
    #[inline]
    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        if buf.len() > self.len() {
            // `read_exact` makes no promise about the content of `buf` if it
            // fails so don't bother about that.
            *self = &self[self.len()..];
            return Err(io::Error::READ_EXACT_EOF);
        }
        let (a, b) = self.split_at(buf.len());
        // First check if the amount of bytes we want to read is small:
        // `copy_from_slice` will generally expand to a call to `memcpy`, and
        // for a single byte the overhead is significant.
        if buf.len() == 1 {
            buf[0] = a[0];
        } else {
            buf.copy_from_slice(a);
        }
        *self = b;
        Ok(())
    }
    #[inline]
    fn read_buf_exact(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
        if cursor.capacity() > self.len() {
            // Append everything we can to the cursor.
            cursor.append(*self);
            *self = &self[self.len()..];
            return Err(io::Error::READ_EXACT_EOF);
        }
        let (a, b) = self.split_at(cursor.capacity());
        cursor.append(a);
        *self = b;
        Ok(())
    }
    // The full length is known, so reserve once and copy in a single step.
    #[inline]
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        let len = self.len();
        buf.try_reserve(len)?;
        buf.extend_from_slice(*self);
        *self = &self[len..];
        Ok(len)
    }
    // Validates the whole remaining slice as UTF-8 before appending; on
    // invalid data nothing is consumed and `INVALID_UTF8` is returned.
    #[inline]
    fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
        let content = str::from_utf8(self).map_err(|_| io::Error::INVALID_UTF8)?;
        let len = self.len();
        buf.try_reserve(len)?;
        buf.push_str(content);
        *self = &self[len..];
        Ok(len)
    }
}
// The entire remaining slice acts as the internal buffer.
#[stable(feature = "rust1", since = "1.0.0")]
impl BufRead for &[u8] {
    #[inline]
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        Ok(*self)
    }
    // Advances past `amt` bytes; panics (via slice indexing) if `amt`
    // exceeds the remaining length.
    #[inline]
    fn consume(&mut self, amt: usize) {
        *self = &self[amt..];
    }
}
/// Write is implemented for `&mut [u8]` by copying into the slice, overwriting
/// its data.
///
/// Note that writing updates the slice to point to the yet unwritten part.
/// The slice will be empty when it has been completely overwritten.
///
/// If the number of bytes to be written exceeds the size of the slice, write operations will
/// return short writes: ultimately, `Ok(0)`; in this situation, `write_all` returns an error of
/// kind `ErrorKind::WriteZero`.
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for &mut [u8] {
#[inline]
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
let amt = cmp::min(data.len(), self.len());
let (a, b) = mem::take(self).split_at_mut(amt);
a.copy_from_slice(&data[..amt]);
*self = b;
Ok(amt)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let mut nwritten = 0;
for buf in bufs {
nwritten += self.write(buf)?;
if self.is_empty() {
break;
}
}
Ok(nwritten)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn write_all(&mut self, data: &[u8]) -> io::Result<()> {
if self.write(data)? < data.len() { Err(io::Error::WRITE_ALL_EOF) } else { Ok(()) }
}
#[inline]
fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
for buf in bufs {
if self.write(buf)? < buf.len() {
return Err(io::Error::WRITE_ALL_EOF);
}
}
Ok(())
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
/// Write is implemented for `Vec<u8>` by appending to the vector.
/// The vector will grow as needed.
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Allocator> Write for Vec<u8, A> {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Appending to a `Vec` never writes short: the whole buffer always
        // fits (growing the allocation as needed).
        self.extend_from_slice(buf);
        Ok(buf.len())
    }
    #[inline]
    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        // Reserve the exact total up front so at most one reallocation occurs.
        let total: usize = bufs.iter().map(|b| b.len()).sum();
        self.reserve(total);
        for buf in bufs {
            self.extend_from_slice(buf);
        }
        Ok(total)
    }
    #[inline]
    fn is_write_vectored(&self) -> bool {
        true
    }
    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        self.extend_from_slice(buf);
        Ok(())
    }
    #[inline]
    fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
        // `write_vectored` already appends every buffer in full.
        self.write_vectored(bufs).map(drop)
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        // The data already lives in memory; there is nothing to flush.
        Ok(())
    }
}
/// Read is implemented for `VecDeque<u8>` by consuming bytes from the front of the `VecDeque`.
#[stable(feature = "vecdeque_read_write", since = "1.63.0")]
impl<A: Allocator> Read for VecDeque<u8, A> {
    /// Fill `buf` with the contents of the "front" slice as returned by
    /// [`as_slices`][`VecDeque::as_slices`]. If the contained byte slices of the `VecDeque` are
    /// discontiguous, multiple calls to `read` will be needed to read the entire content.
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let (ref mut front, _) = self.as_slices();
        let n = Read::read(front, buf)?;
        self.drain(..n);
        Ok(n)
    }
    #[inline]
    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        let (front, back) = self.as_slices();
        // Use only the front buffer if it is big enough to fill `buf`, else use
        // the back buffer too.
        match buf.split_at_mut_checked(front.len()) {
            None => buf.copy_from_slice(&front[..buf.len()]),
            Some((buf_front, buf_back)) => match back.split_at_checked(buf_back.len()) {
                Some((back, _)) => {
                    buf_front.copy_from_slice(front);
                    buf_back.copy_from_slice(back);
                }
                None => {
                    // Not enough bytes in total: everything is consumed and
                    // EOF is reported, mirroring the `&[u8]` impl.
                    self.clear();
                    return Err(io::Error::READ_EXACT_EOF);
                }
            },
        }
        self.drain(..buf.len());
        Ok(())
    }
    #[inline]
    fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
        // Compute `n` before appending: it is the number of bytes the cursor
        // will take from the front slice, which must then be drained.
        let (ref mut front, _) = self.as_slices();
        let n = cmp::min(cursor.capacity(), front.len());
        Read::read_buf(front, cursor)?;
        self.drain(..n);
        Ok(())
    }
    #[inline]
    fn read_buf_exact(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
        let len = cursor.capacity();
        let (front, back) = self.as_slices();
        match front.split_at_checked(cursor.capacity()) {
            Some((front, _)) => cursor.append(front),
            None => {
                // Front slice alone is too short; spill into the back slice.
                cursor.append(front);
                match back.split_at_checked(cursor.capacity()) {
                    Some((back, _)) => cursor.append(back),
                    None => {
                        cursor.append(back);
                        self.clear();
                        return Err(io::Error::READ_EXACT_EOF);
                    }
                }
            }
        }
        self.drain(..len);
        Ok(())
    }
    #[inline]
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        // The total len is known upfront so we can reserve it in a single call.
        let len = self.len();
        buf.try_reserve(len)?;
        let (front, back) = self.as_slices();
        buf.extend_from_slice(front);
        buf.extend_from_slice(back);
        self.clear();
        Ok(len)
    }
    #[inline]
    fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
        // SAFETY: We only append to the buffer
        unsafe { io::append_to_string(buf, |buf| self.read_to_end(buf)) }
    }
}
/// BufRead is implemented for `VecDeque<u8>` by reading bytes from the front of the `VecDeque`.
#[stable(feature = "vecdeque_buf_read", since = "1.75.0")]
impl<A: Allocator> BufRead for VecDeque<u8, A> {
    /// Returns the contents of the "front" slice as returned by
    /// [`as_slices`][`VecDeque::as_slices`]. If the contained byte slices of the `VecDeque` are
    /// discontiguous, multiple calls to `fill_buf` will be needed to read the entire content.
    #[inline]
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        Ok(self.as_slices().0)
    }

    #[inline]
    fn consume(&mut self, amt: usize) {
        // Dropping the drain iterator removes the first `amt` bytes.
        self.drain(..amt);
    }
}
/// Write is implemented for `VecDeque<u8>` by appending to the `VecDeque`, growing it as needed.
#[stable(feature = "vecdeque_read_write", since = "1.63.0")]
impl<A: Allocator> Write for VecDeque<u8, A> {
    /// Appends `buf` at the back; this cannot fail, so the whole buffer is
    /// always reported as written.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.extend(buf);
        Ok(buf.len())
    }

    #[inline]
    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        // Reserve the combined length up front so the deque grows at most once.
        let total: usize = bufs.iter().map(|b| b.len()).sum();
        self.reserve(total);
        for slice in bufs {
            self.extend(&**slice);
        }
        Ok(total)
    }

    #[inline]
    fn is_write_vectored(&self) -> bool {
        // Vectored writes are handled natively above.
        true
    }

    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        // `write` never performs a short write here, so no retry loop is needed.
        self.extend(buf);
        Ok(())
    }

    #[inline]
    fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
        self.write_vectored(bufs)?;
        Ok(())
    }

    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        // Everything is already in memory; nothing to flush.
        Ok(())
    }
}
#[unstable(feature = "read_buf", issue = "78485")]
impl<'a> io::Write for core::io::BorrowedCursor<'a> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let amt = cmp::min(buf.len(), self.capacity());
self.append(&buf[..amt]);
Ok(amt)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let mut nwritten = 0;
for buf in bufs {
let n = self.write(buf)?;
nwritten += n;
if n < buf.len() {
break;
}
}
Ok(nwritten)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
if self.write(buf)? < buf.len() { Err(io::Error::WRITE_ALL_EOF) } else { Ok(()) }
}
#[inline]
fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
for buf in bufs {
if self.write(buf)? < buf.len() {
return Err(io::Error::WRITE_ALL_EOF);
}
}
Ok(())
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}

View File

@@ -1,57 +0,0 @@
use crate::io::prelude::*;
#[bench]
fn bench_read_slice(b: &mut test::Bencher) {
    // Repeatedly read 128-byte chunks out of a fixed 1 KiB source slice.
    let src = [5; 1024];
    let mut dst = [0; 128];
    b.iter(|| {
        let mut reader = &src[..];
        for _ in 0..8 {
            let _ = reader.read(&mut dst);
            test::black_box(&dst);
        }
    })
}
#[bench]
fn bench_write_slice(b: &mut test::Bencher) {
    // Repeatedly write 128-byte chunks into a fixed 1 KiB destination slice.
    let mut dst = [0; 1024];
    let src = [5; 128];
    b.iter(|| {
        let mut writer = &mut dst[..];
        for _ in 0..8 {
            let _ = writer.write_all(&src);
            test::black_box(&writer);
        }
    })
}
#[bench]
fn bench_read_vec(b: &mut test::Bencher) {
    // Same as the slice benchmark, but the source bytes live in a Vec.
    let src = vec![5; 1024];
    let mut dst = [0; 128];
    b.iter(|| {
        let mut reader = &src[..];
        for _ in 0..8 {
            let _ = reader.read(&mut dst);
            test::black_box(&dst);
        }
    })
}
#[bench]
fn bench_write_vec(b: &mut test::Bencher) {
    // Same as the slice benchmark, but the destination is a slice of a Vec.
    let mut dst = Vec::with_capacity(1024);
    let src = [5; 128];
    b.iter(|| {
        let mut writer = &mut dst[..];
        for _ in 0..8 {
            let _ = writer.write_all(&src);
            test::black_box(&writer);
        }
    })
}

Some files were not shown because too many files have changed in this diff Show More