chore: checkpoint before Python removal

2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

vendor/allocator-api2/src/lib.rs (vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
//! The `allocator-api2` crate: a mirror of Rust's unstable `allocator_api`
//! that can be used on stable Rust.
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(feature = "alloc")]
extern crate alloc as alloc_crate;
#[cfg(not(feature = "nightly"))]
#[macro_use]
mod stable;
#[cfg(feature = "nightly")]
mod nightly;
#[cfg(not(feature = "nightly"))]
pub use self::stable::*;
#[cfg(feature = "nightly")]
pub use self::nightly::*;

vendor/allocator-api2/src/nightly.rs (vendored, new file, 5 lines)

@@ -0,0 +1,5 @@
#[cfg(not(feature = "alloc"))]
pub use core::alloc;
#[cfg(feature = "alloc")]
pub use alloc_crate::{alloc, boxed, vec, collections};


@@ -0,0 +1,187 @@
use core::ptr::NonNull;
use alloc_crate::alloc::{alloc, alloc_zeroed, dealloc, realloc};
use crate::stable::{assume, invalid_mut};
use super::{AllocError, Allocator, Layout};
/// The global memory allocator.
///
/// This type implements the [`Allocator`] trait by forwarding calls
/// to the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// Note: in `std` the corresponding type is unstable; the functionality it
/// provides can also be accessed through the [free functions in `alloc`](crate#functions).
#[derive(Copy, Clone, Default, Debug)]
pub struct Global;
impl Global {
#[inline(always)]
fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
match layout.size() {
0 => Ok(unsafe {
NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
invalid_mut(layout.align()),
0,
))
}),
// SAFETY: `layout` is non-zero in size.
size => unsafe {
let raw_ptr = if zeroed {
alloc_zeroed(layout)
} else {
alloc(layout)
};
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
size,
)))
},
}
}
// SAFETY: Same as `Allocator::grow`
#[inline(always)]
unsafe fn grow_impl(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
zeroed: bool,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() >= old_layout.size(),
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
);
match old_layout.size() {
0 => self.alloc_impl(new_layout, zeroed),
// SAFETY: `new_size` is non-zero, since it is at least `old_size` and
// `old_size` is non-zero in this arm. Other conditions must be upheld by the caller
old_size if old_layout.align() == new_layout.align() => unsafe {
let new_size = new_layout.size();
// Assert the invariant `new_size >= old_layout.size()` (checked in debug builds, assumed in release).
assume(new_size >= old_layout.size());
let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
if zeroed {
raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
}
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
new_size,
)))
},
// SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
// both the old and new memory allocation are valid for reads and writes for `old_size`
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
// for `dealloc` must be upheld by the caller.
old_size => unsafe {
let new_ptr = self.alloc_impl(new_layout, zeroed)?;
core::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_size);
self.deallocate(ptr, old_layout);
Ok(new_ptr)
},
}
}
}
unsafe impl Allocator for Global {
#[inline(always)]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.alloc_impl(layout, false)
}
#[inline(always)]
fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.alloc_impl(layout, true)
}
#[inline(always)]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
if layout.size() != 0 {
// SAFETY: `layout` is non-zero in size,
// other conditions must be upheld by the caller
unsafe { dealloc(ptr.as_ptr(), layout) }
}
}
#[inline(always)]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: all conditions must be upheld by the caller
unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
}
#[inline(always)]
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: all conditions must be upheld by the caller
unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
}
#[inline(always)]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() <= old_layout.size(),
"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
);
match new_layout.size() {
// SAFETY: conditions must be upheld by the caller
0 => unsafe {
self.deallocate(ptr, old_layout);
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
invalid_mut(new_layout.align()),
0,
)))
},
// SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
new_size if old_layout.align() == new_layout.align() => unsafe {
// Assert the invariant `new_size <= old_layout.size()` (checked in debug builds, assumed in release).
assume(new_size <= old_layout.size());
let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
new_size,
)))
},
// SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
// both the old and new memory allocation are valid for reads and writes for `new_size`
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
// for `dealloc` must be upheld by the caller.
new_size => unsafe {
let new_ptr = self.allocate(new_layout)?;
core::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), new_size);
self.deallocate(ptr, old_layout);
Ok(new_ptr)
},
}
}
}
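
As a quick orientation for reviewers, here is a minimal, hedged usage sketch (not part of the vendored file): it drives `Global` through the `Allocator` trait defined in this crate, assuming the crate's default features.

use allocator_api2::alloc::{Allocator, Global, Layout};

fn main() {
    let layout = Layout::array::<u64>(4).expect("layout overflow");
    // `allocate` returns a `NonNull<[u8]>` spanning at least `layout.size()` bytes.
    let block = Global.allocate(layout).expect("allocation failed");
    assert!(block.len() >= layout.size());
    // SAFETY: `block` was just allocated by `Global` with this exact `layout`.
    unsafe { Global.deallocate(block.cast(), layout) };
}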


@@ -0,0 +1,416 @@
//! Memory allocation APIs
use core::{
fmt,
ptr::{self, NonNull},
};
#[cfg(feature = "alloc")]
mod global;
#[cfg(feature = "std")]
mod system;
pub use core::alloc::{GlobalAlloc, Layout, LayoutError};
#[cfg(feature = "alloc")]
pub use self::global::Global;
#[cfg(feature = "std")]
pub use self::system::System;
#[cfg(feature = "alloc")]
pub use alloc_crate::alloc::{alloc, alloc_zeroed, dealloc, realloc};
#[cfg(all(feature = "alloc", not(no_global_oom_handling)))]
pub use alloc_crate::alloc::handle_alloc_error;
/// The `AllocError` error indicates an allocation failure
/// that may be due to resource exhaustion or to
/// something wrong when combining the given input arguments with this
/// allocator.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocError;
#[cfg(feature = "std")]
impl std::error::Error for AllocError {}
// (we need this for downstream impl of trait Error)
impl fmt::Display for AllocError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("memory allocation failed")
}
}
/// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of
/// data described via [`Layout`][].
///
/// `Allocator` is designed to be implemented on ZSTs, references, or smart pointers because an
/// allocator like `MyAlloc([u8; N])` cannot be moved without updating the pointers to its
/// allocated memory.
///
/// Unlike [`GlobalAlloc`][], `Allocator` allows zero-sized allocations. If an underlying
/// allocator does not support them (like jemalloc) or returns a null pointer for them (as
/// `libc::malloc` may), the implementation must handle this itself.
///
/// ### Currently allocated memory
///
/// Some of the methods require that a memory block be *currently allocated* via an allocator. This
/// means that:
///
/// * the starting address for that memory block was previously returned by [`allocate`], [`grow`], or
/// [`shrink`], and
///
/// * the memory block has not been subsequently deallocated, where blocks are either deallocated
/// directly by being passed to [`deallocate`] or were changed by being passed to [`grow`] or
/// [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer
/// remains valid.
///
/// [`allocate`]: Allocator::allocate
/// [`grow`]: Allocator::grow
/// [`shrink`]: Allocator::shrink
/// [`deallocate`]: Allocator::deallocate
///
/// ### Memory fitting
///
/// Some of the methods require that a layout *fit* a memory block. What it means for a layout to
/// "fit" a memory block (or equivalently, for a memory block to "fit" a layout) is that the
/// following conditions must hold:
///
/// * The block must be allocated with the same alignment as [`layout.align()`], and
///
/// * The provided [`layout.size()`] must fall in the range `min ..= max`, where:
/// - `min` is the size of the layout most recently used to allocate the block, and
/// - `max` is the latest actual size returned from [`allocate`], [`grow`], or [`shrink`].
///
/// [`layout.align()`]: Layout::align
/// [`layout.size()`]: Layout::size
///
/// # Safety
///
/// * Memory blocks returned from an allocator must point to valid memory and retain their validity
/// until the instance and all of its clones are dropped,
///
/// * cloning or moving the allocator must not invalidate memory blocks returned from this
/// allocator. A cloned allocator must behave like the same allocator, and
///
/// * any pointer to a memory block which is [*currently allocated*] may be passed to any other
/// method of the allocator.
///
/// [*currently allocated*]: #currently-allocated-memory
pub unsafe trait Allocator {
/// Attempts to allocate a block of memory.
///
/// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees of `layout`.
///
/// The returned block may have a larger size than specified by `layout.size()`, and may or may
/// not have its contents initialized.
///
/// # Errors
///
/// Returning `Err` indicates that either memory is exhausted or `layout` does not meet
/// the allocator's size or alignment constraints.
///
/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an allocation error are encouraged to
/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError>;
/// Behaves like `allocate`, but also ensures that the returned memory is zero-initialized.
///
/// # Errors
///
/// Returning `Err` indicates that either memory is exhausted or `layout` does not meet
/// the allocator's size or alignment constraints.
///
/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an allocation error are encouraged to
/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
#[inline(always)]
fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let ptr = self.allocate(layout)?;
// SAFETY: `alloc` returns a valid memory block
unsafe { ptr.cast::<u8>().as_ptr().write_bytes(0, ptr.len()) }
Ok(ptr)
}
/// Deallocates the memory referenced by `ptr`.
///
/// # Safety
///
/// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and
/// * `layout` must [*fit*] that block of memory.
///
/// [*currently allocated*]: #currently-allocated-memory
/// [*fit*]: #memory-fitting
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout);
/// Attempts to extend the memory block.
///
/// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
/// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
/// this, the allocator may extend the allocation referenced by `ptr` to fit the new layout.
///
/// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
/// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the
/// allocation was grown in-place. The newly returned pointer is the only valid pointer
/// for accessing this memory now.
///
/// If this method returns `Err`, then ownership of the memory block has not been transferred to
/// this allocator, and the contents of the memory block are unaltered.
///
/// # Safety
///
/// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
/// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
/// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
///
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
///
/// [*currently allocated*]: #currently-allocated-memory
/// [*fit*]: #memory-fitting
///
/// # Errors
///
/// Returns `Err` if the new layout does not meet the allocator's size and alignment
/// constraints, or if growing otherwise fails.
///
/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an allocation error are encouraged to
/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
#[inline(always)]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() >= old_layout.size(),
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
);
let new_ptr = self.allocate(new_layout)?;
// SAFETY: because `new_layout.size()` must be greater than or equal to
// `old_layout.size()`, both the old and new memory allocation are valid for reads and
// writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
// deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
// safe. The safety contract for `dealloc` must be upheld by the caller.
unsafe {
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_layout.size());
self.deallocate(ptr, old_layout);
}
Ok(new_ptr)
}
/// Behaves like `grow`, but also ensures that the new contents are set to zero before being
/// returned.
///
/// The memory block will contain the following contents after a successful call to
/// `grow_zeroed`:
/// * Bytes `0..old_layout.size()` are preserved from the original allocation.
/// * Bytes `old_layout.size()..old_size` will either be preserved or zeroed, depending on
/// the allocator implementation. `old_size` refers to the size of the memory block prior
/// to the `grow_zeroed` call, which may be larger than the size that was originally
/// requested when it was allocated.
/// * Bytes `old_size..new_size` are zeroed. `new_size` refers to the size of the memory
/// block returned by the `grow_zeroed` call.
///
/// # Safety
///
/// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
/// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
/// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
///
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
///
/// [*currently allocated*]: #currently-allocated-memory
/// [*fit*]: #memory-fitting
///
/// # Errors
///
/// Returns `Err` if the new layout does not meet the allocator's size and alignment
/// constraints, or if growing otherwise fails.
///
/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an allocation error are encouraged to
/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
#[inline(always)]
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() >= old_layout.size(),
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
);
let new_ptr = self.allocate_zeroed(new_layout)?;
// SAFETY: because `new_layout.size()` must be greater than or equal to
// `old_layout.size()`, both the old and new memory allocation are valid for reads and
// writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
// deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
// safe. The safety contract for `dealloc` must be upheld by the caller.
unsafe {
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_layout.size());
self.deallocate(ptr, old_layout);
}
Ok(new_ptr)
}
/// Attempts to shrink the memory block.
///
/// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
/// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
/// this, the allocator may shrink the allocation referenced by `ptr` to fit the new layout.
///
/// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
/// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the
/// allocation was shrunk in-place. The newly returned pointer is the only valid pointer
/// for accessing this memory now.
///
/// If this method returns `Err`, then ownership of the memory block has not been transferred to
/// this allocator, and the contents of the memory block are unaltered.
///
/// # Safety
///
/// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
/// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
/// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
///
/// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
///
/// [*currently allocated*]: #currently-allocated-memory
/// [*fit*]: #memory-fitting
///
/// # Errors
///
/// Returns `Err` if the new layout does not meet the allocator's size and alignment
/// constraints, or if shrinking otherwise fails.
///
/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an allocation error are encouraged to
/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
#[inline(always)]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() <= old_layout.size(),
"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
);
let new_ptr = self.allocate(new_layout)?;
// SAFETY: because `new_layout.size()` must be lower than or equal to
// `old_layout.size()`, both the old and new memory allocation are valid for reads and
// writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet
// deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
// safe. The safety contract for `dealloc` must be upheld by the caller.
unsafe {
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), new_layout.size());
self.deallocate(ptr, old_layout);
}
Ok(new_ptr)
}
/// Creates a "by reference" adapter for this instance of `Allocator`.
///
/// The returned adapter also implements `Allocator` and will simply borrow this.
#[inline(always)]
fn by_ref(&self) -> &Self
where
Self: Sized,
{
self
}
}
unsafe impl<A> Allocator for &A
where
A: Allocator + ?Sized,
{
#[inline(always)]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
(**self).allocate(layout)
}
#[inline(always)]
fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
(**self).allocate_zeroed(layout)
}
#[inline(always)]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
// SAFETY: the safety contract must be upheld by the caller
unsafe { (**self).deallocate(ptr, layout) }
}
#[inline(always)]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: the safety contract must be upheld by the caller
unsafe { (**self).grow(ptr, old_layout, new_layout) }
}
#[inline(always)]
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: the safety contract must be upheld by the caller
unsafe { (**self).grow_zeroed(ptr, old_layout, new_layout) }
}
#[inline(always)]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: the safety contract must be upheld by the caller
unsafe { (**self).shrink(ptr, old_layout, new_layout) }
}
}
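
To make the trait contract above concrete, a hedged sketch follows (illustrative, not part of the crate): a wrapper allocator that forwards to `Global` while counting live allocations. Only `allocate` and `deallocate` are required; the remaining methods keep their default implementations. The name `CountingAlloc` is invented here.

use core::ptr::NonNull;
use core::sync::atomic::{AtomicUsize, Ordering};
use allocator_api2::alloc::{AllocError, Allocator, Global, Layout};

#[derive(Default)]
struct CountingAlloc {
    live: AtomicUsize,
}

unsafe impl Allocator for CountingAlloc {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        let ptr = Global.allocate(layout)?;
        self.live.fetch_add(1, Ordering::Relaxed);
        Ok(ptr)
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        self.live.fetch_sub(1, Ordering::Relaxed);
        // SAFETY: the caller upholds the trait contract, and `ptr` came from `Global`.
        unsafe { Global.deallocate(ptr, layout) }
    }
}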


@@ -0,0 +1,172 @@
use core::ptr::NonNull;
pub use std::alloc::System;
use crate::stable::{assume, invalid_mut};
use super::{AllocError, Allocator, GlobalAlloc as _, Layout};
unsafe impl Allocator for System {
#[inline(always)]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
alloc_impl(layout, false)
}
#[inline(always)]
fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
alloc_impl(layout, true)
}
#[inline(always)]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
if layout.size() != 0 {
// SAFETY: `layout` is non-zero in size,
// other conditions must be upheld by the caller
unsafe { System.dealloc(ptr.as_ptr(), layout) }
}
}
#[inline(always)]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: all conditions must be upheld by the caller
unsafe { grow_impl(ptr, old_layout, new_layout, false) }
}
#[inline(always)]
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: all conditions must be upheld by the caller
unsafe { grow_impl(ptr, old_layout, new_layout, true) }
}
#[inline(always)]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() <= old_layout.size(),
"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
);
match new_layout.size() {
// SAFETY: conditions must be upheld by the caller
0 => unsafe {
self.deallocate(ptr, old_layout);
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
invalid_mut(new_layout.align()),
0,
)))
},
// SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
new_size if old_layout.align() == new_layout.align() => unsafe {
// Assert the invariant `new_size <= old_layout.size()` (checked in debug builds, assumed in release).
assume(new_size <= old_layout.size());
let raw_ptr = System.realloc(ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
new_size,
)))
},
// SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
// both the old and new memory allocation are valid for reads and writes for `new_size`
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
// for `dealloc` must be upheld by the caller.
new_size => unsafe {
let new_ptr = self.allocate(new_layout)?;
core::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), new_size);
self.deallocate(ptr, old_layout);
Ok(new_ptr)
},
}
}
}
#[inline(always)]
fn alloc_impl(layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
match layout.size() {
0 => Ok(unsafe {
NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
invalid_mut(layout.align()),
0,
))
}),
// SAFETY: `layout` is non-zero in size.
size => unsafe {
let raw_ptr = if zeroed {
System.alloc_zeroed(layout)
} else {
System.alloc(layout)
};
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
size,
)))
},
}
}
// SAFETY: Same as `Allocator::grow`
#[inline(always)]
unsafe fn grow_impl(
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
zeroed: bool,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() >= old_layout.size(),
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
);
match old_layout.size() {
0 => alloc_impl(new_layout, zeroed),
// SAFETY: `new_size` is non-zero, since it is at least `old_size` and
// `old_size` is non-zero in this arm. Other conditions must be upheld by the caller
old_size if old_layout.align() == new_layout.align() => unsafe {
let new_size = new_layout.size();
// Assert the invariant `new_size >= old_layout.size()` (checked in debug builds, assumed in release).
assume(new_size >= old_layout.size());
let raw_ptr = System.realloc(ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
if zeroed {
raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
}
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
ptr.as_ptr(),
new_size,
)))
},
// SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
// both the old and new memory allocation are valid for reads and writes for `old_size`
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
// for `dealloc` must be upheld by the caller.
old_size => unsafe {
let new_ptr = alloc_impl(new_layout, zeroed)?;
core::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_size);
System.deallocate(ptr, old_layout);
Ok(new_ptr)
},
}
}
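
For comparison with `Global`, a hedged usage sketch (not part of the vendored file): parking this crate's `Vec` on the `System` allocator, assuming the `std` feature (for `System`) and the `alloc` feature (for `vec`).

use allocator_api2::alloc::System;
use allocator_api2::vec::Vec;

fn main() {
    let mut v: Vec<u32, System> = Vec::new_in(System);
    v.push(1);
    v.push(2);
    assert_eq!(v.as_slice(), &[1, 2]);
}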

vendor/allocator-api2/src/stable/boxed.rs (vendored, new file, 2272 lines; diff suppressed because it is too large)


@@ -0,0 +1,83 @@
/// Creates a [`Vec`] containing the arguments.
///
/// `vec!` allows `Vec`s to be defined with the same syntax as array expressions.
/// There are two forms of this macro:
///
/// - Create a [`Vec`] containing a given list of elements:
///
/// ```
/// use allocator_api2::vec;
/// let v = vec![1, 2, 3];
/// assert_eq!(v[0], 1);
/// assert_eq!(v[1], 2);
/// assert_eq!(v[2], 3);
/// ```
///
///
/// ```
/// use allocator_api2::{vec, alloc::Global};
/// let v = vec![in Global; 1, 2, 3];
/// assert_eq!(v[0], 1);
/// assert_eq!(v[1], 2);
/// assert_eq!(v[2], 3);
/// ```
///
/// - Create a [`Vec`] from a given element and size:
///
/// ```
/// use allocator_api2::vec;
/// let v = vec![1; 3];
/// assert_eq!(v, [1, 1, 1]);
/// ```
///
/// ```
/// use allocator_api2::{vec, alloc::Global};
/// let v = vec![in Global; 1; 3];
/// assert_eq!(v, [1, 1, 1]);
/// ```
///
/// Note that unlike array expressions this syntax supports all elements
/// which implement [`Clone`] and the number of elements doesn't have to be
/// a constant.
///
/// This will use `clone` to duplicate an expression, so one should be careful
/// using this with types having a nonstandard `Clone` implementation. For
/// example, `vec![Rc::new(1); 5]` will create a vector of five references
/// to the same boxed integer value, not five references pointing to independently
/// boxed integers.
///
/// Also, note that `vec![expr; 0]` is allowed, and produces an empty vector.
/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
/// be mindful of side effects.
///
/// [`Vec`]: crate::vec::Vec
#[cfg(not(no_global_oom_handling))]
#[macro_export]
macro_rules! vec {
(in $alloc:expr $(;)?) => (
$crate::vec::Vec::new_in($alloc)
);
(in $alloc:expr; $elem:expr; $n:expr) => (
$crate::vec::from_elem_in($elem, $n, $alloc)
);
(in $alloc:expr; $($x:expr),+ $(,)?) => (
$crate::boxed::Box::<[_]>::into_vec(
$crate::boxed::Box::slice(
$crate::boxed::Box::new_in([$($x),+], $alloc)
)
)
);
() => (
$crate::vec::Vec::new()
);
($elem:expr; $n:expr) => (
$crate::vec::from_elem($elem, $n)
);
($($x:expr),+ $(,)?) => (
$crate::boxed::Box::<[_]>::into_vec(
$crate::boxed::Box::slice(
$crate::boxed::Box::new([$($x),+])
)
)
);
}

vendor/allocator-api2/src/stable/mod.rs (vendored, new file, 105 lines)

@@ -0,0 +1,105 @@
#![deny(unsafe_op_in_unsafe_fn)]
#![allow(clippy::needless_doctest_main, clippy::partialeq_ne_impl)]
#[cfg(feature = "alloc")]
pub use self::slice::SliceExt;
pub mod alloc;
#[cfg(feature = "alloc")]
pub mod boxed;
#[cfg(feature = "alloc")]
mod raw_vec;
#[cfg(feature = "alloc")]
pub mod vec;
#[cfg(feature = "alloc")]
mod macros;
#[cfg(feature = "alloc")]
mod slice;
#[cfg(feature = "alloc")]
mod unique;
/// Allows turning a [`Box<T: Sized, A>`][boxed::Box] into a [`Box<U: ?Sized, A>`][boxed::Box] where `T` can be unsizing-coerced into a `U`.
///
/// This is the only way to create an `allocator_api2::boxed::Box` of an unsized type on stable.
///
/// With the standard library's `alloc::boxed::Box`, this is done automatically using the unstable unsize traits, but this crate's Box
/// can't take advantage of that machinery on stable. So, we need to use type inference and the fact that you *can*
/// still coerce the inner pointer of a box to get the compiler to help us unsize it using this macro.
///
/// # Example
///
/// ```
/// use allocator_api2::unsize_box;
/// use allocator_api2::boxed::Box;
/// use core::any::Any;
///
/// let sized_box: Box<u64> = Box::new(0);
/// let unsized_box: Box<dyn Any> = unsize_box!(sized_box);
/// ```
#[macro_export]
#[cfg(feature = "alloc")]
macro_rules! unsize_box {( $boxed:expr $(,)? ) => ({
let (ptr, allocator) = ::allocator_api2::boxed::Box::into_raw_with_allocator($boxed);
// We don't want to allow casting to an arbitrary type `U`, but we do want to
// allow unsizing coercions to happen. That's exactly what happens here: this is
// *not* a pointer cast (`ptr as *mut _`), but the compiler *will* perform an
// unsizing coercion into the `ptr` place if one is available. The `_` lets the
// user pick the unsized target type by annotating the variable that receives
// the macro's result.
let ptr: *mut _ = ptr;
// SAFETY: see above for why ptr's type can only be something that can be safely coerced.
// also, ptr just came from a properly allocated box in the same allocator.
unsafe {
::allocator_api2::boxed::Box::from_raw_in(ptr, allocator)
}
})}
#[cfg(feature = "alloc")]
pub mod collections {
pub use super::raw_vec::{TryReserveError, TryReserveErrorKind};
}
#[cfg(feature = "alloc")]
#[track_caller]
#[inline(always)]
#[cfg(debug_assertions)]
unsafe fn assume(v: bool) {
if !v {
core::unreachable!()
}
}
#[cfg(feature = "alloc")]
#[track_caller]
#[inline(always)]
#[cfg(not(debug_assertions))]
unsafe fn assume(v: bool) {
if !v {
unsafe {
core::hint::unreachable_unchecked();
}
}
}
#[cfg(feature = "alloc")]
#[inline(always)]
fn addr<T>(x: *const T) -> usize {
#[allow(clippy::useless_transmute, clippy::transmutes_expressible_as_ptr_casts)]
unsafe {
core::mem::transmute(x)
}
}
#[cfg(feature = "alloc")]
#[inline(always)]
fn invalid_mut<T>(addr: usize) -> *mut T {
#[allow(clippy::useless_transmute, clippy::transmutes_expressible_as_ptr_casts)]
unsafe {
core::mem::transmute(addr)
}
}
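
The `assume` and `invalid_mut` helpers above back the zero-size fast paths seen in the `Global` and `System` allocators. A standalone sketch of the idea follows (names invented here; the crate's own helper goes through `transmute` rather than an `as` cast):

// For a zero-sized request, no memory is touched: the allocator fabricates a
// dangling pointer whose address equals the alignment, which is always a
// valid (never-dereferenced) address for a zero-length slice.
fn dangling_for_align(align: usize) -> *mut u8 {
    debug_assert!(align.is_power_of_two());
    align as *mut u8
}

fn main() {
    let p = dangling_for_align(8);
    assert_eq!(p as usize, 8);
    // A zero-length slice at this address is sound, mirroring the `0 =>` arms above.
    let empty: &[u8] = unsafe { core::slice::from_raw_parts(p, 0) };
    assert!(empty.is_empty());
}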


@@ -0,0 +1,642 @@
use core::alloc::LayoutError;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ops::Drop;
use core::ptr::{self, NonNull};
use core::slice;
use core::{cmp, fmt};
use super::{
alloc::{Allocator, Global, Layout},
assume,
boxed::Box,
};
#[cfg(not(no_global_oom_handling))]
use super::alloc::handle_alloc_error;
/// The error type for `try_reserve` methods.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct TryReserveError {
kind: TryReserveErrorKind,
}
impl TryReserveError {
/// Details about the allocation that caused the error
pub fn kind(&self) -> TryReserveErrorKind {
self.kind.clone()
}
}
/// Details of the allocation that caused a `TryReserveError`
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum TryReserveErrorKind {
/// Error due to the computed capacity exceeding the collection's maximum
/// (usually `isize::MAX` bytes).
CapacityOverflow,
/// The memory allocator returned an error
AllocError {
/// The layout of allocation request that failed
layout: Layout,
#[doc(hidden)]
non_exhaustive: (),
},
}
use TryReserveErrorKind::*;
impl From<TryReserveErrorKind> for TryReserveError {
#[inline(always)]
fn from(kind: TryReserveErrorKind) -> Self {
Self { kind }
}
}
impl From<LayoutError> for TryReserveErrorKind {
/// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
#[inline(always)]
fn from(_: LayoutError) -> Self {
TryReserveErrorKind::CapacityOverflow
}
}
impl fmt::Display for TryReserveError {
fn fmt(
&self,
fmt: &mut core::fmt::Formatter<'_>,
) -> core::result::Result<(), core::fmt::Error> {
fmt.write_str("memory allocation failed")?;
let reason = match self.kind {
TryReserveErrorKind::CapacityOverflow => {
" because the computed capacity exceeded the collection's maximum"
}
TryReserveErrorKind::AllocError { .. } => {
" because the memory allocator returned an error"
}
};
fmt.write_str(reason)
}
}
#[cfg(feature = "std")]
impl std::error::Error for TryReserveError {}
#[cfg(not(no_global_oom_handling))]
enum AllocInit {
/// The contents of the new memory are uninitialized.
Uninitialized,
/// The new memory is guaranteed to be zeroed.
Zeroed,
}
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `NonNull::dangling()` on zero-sized types.
/// * Produces `NonNull::dangling()` on zero-length allocations.
/// * Avoids freeing `NonNull::dangling()`.
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
/// * Guards against overflowing your length.
/// * Calls `handle_alloc_error` for fallible allocations.
/// * Contains a `ptr::NonNull` and thus endows the user with all related benefits.
/// * Uses any excess capacity returned by the allocator, exposing the largest available capacity.
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
/// to handle the actual things *stored* inside of a `RawVec`.
///
/// Note that the excess of zero-sized types is always infinite, so `capacity()` always returns
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
ptr: NonNull<T>,
cap: usize,
alloc: A,
}
// Safety: RawVec owns both T and A, so sending is safe if
// sending is safe for T and A.
unsafe impl<T, A: Allocator> Send for RawVec<T, A>
where
T: Send,
A: Send,
{
}
// Safety: RawVec owns both T and A, so sharing is safe if
// sharing is safe for T and A.
unsafe impl<T, A: Allocator> Sync for RawVec<T, A>
where
T: Sync,
A: Sync,
{
}
impl<T> RawVec<T, Global> {
/// Creates the biggest possible `RawVec` (on the system heap)
/// without allocating. If `T` has positive size, then this makes a
/// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
/// `RawVec` with capacity `usize::MAX`. Useful for implementing
/// delayed allocation.
#[must_use]
pub const fn new() -> Self {
Self::new_in(Global)
}
/// Creates a `RawVec` (on the system heap) with exactly the
/// capacity and alignment requirements for a `[T; capacity]`. This is
/// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
/// zero-sized. Note that if `T` is zero-sized this means you will
/// *not* get a `RawVec` with the requested capacity.
///
/// # Panics
///
/// Panics if the requested capacity exceeds `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
#[must_use]
#[inline(always)]
pub fn with_capacity(capacity: usize) -> Self {
Self::with_capacity_in(capacity, Global)
}
/// Like `with_capacity`, but guarantees the buffer is zeroed.
#[cfg(not(no_global_oom_handling))]
#[must_use]
#[inline(always)]
pub fn with_capacity_zeroed(capacity: usize) -> Self {
Self::with_capacity_zeroed_in(capacity, Global)
}
}
impl<T, A: Allocator> RawVec<T, A> {
// Tiny Vecs are dumb. Skip to:
// - 8 if the element size is 1, because any heap allocator is likely
// to round up a request of less than 8 bytes to at least 8 bytes.
// - 4 if elements are moderate-sized (<= 1 KiB).
// - 1 otherwise, to avoid wasting too much space for very short Vecs.
pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
8
} else if mem::size_of::<T>() <= 1024 {
4
} else {
1
};
/// Like `new`, but parameterized over the choice of allocator for
/// the returned `RawVec`.
#[inline(always)]
pub const fn new_in(alloc: A) -> Self {
// `cap: 0` means "unallocated". Zero-sized types are ignored.
Self {
ptr: NonNull::dangling(),
cap: 0,
alloc,
}
}
/// Like `with_capacity`, but parameterized over the choice of
/// allocator for the returned `RawVec`.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
}
/// Like `with_capacity_zeroed`, but parameterized over the choice
/// of allocator for the returned `RawVec`.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
}
/// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
///
/// Note that this will correctly reconstitute any `cap` changes
/// that may have been performed. (See description of type for details.)
///
/// # Safety
///
/// * `len` must be greater than or equal to the most recently requested capacity, and
/// * `len` must be less than or equal to `self.capacity()`.
///
/// Note that the requested capacity and `self.capacity()` could differ, as
/// an allocator could overallocate and return a greater memory block than requested.
#[inline(always)]
pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
// Sanity-check one half of the safety requirement (we cannot check the other half).
debug_assert!(
len <= self.capacity(),
"`len` must be smaller than or equal to `self.capacity()`"
);
let me = ManuallyDrop::new(self);
unsafe {
let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
Box::<[MaybeUninit<T>], A>::from_raw_in(slice, ptr::read(&me.alloc))
}
}
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
// Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
if mem::size_of::<T>() == 0 || capacity == 0 {
Self::new_in(alloc)
} else {
// We avoid `unwrap_or_else` here because it bloats the amount of
// LLVM IR generated.
let layout = match Layout::array::<T>(capacity) {
Ok(layout) => layout,
Err(_) => capacity_overflow(),
};
match alloc_guard(layout.size()) {
Ok(_) => {}
Err(_) => capacity_overflow(),
}
let result = match init {
AllocInit::Uninitialized => alloc.allocate(layout),
AllocInit::Zeroed => alloc.allocate_zeroed(layout),
};
let ptr = match result {
Ok(ptr) => ptr,
Err(_) => handle_alloc_error(layout),
};
// Allocators currently return a `NonNull<[u8]>` whose length
// matches the size requested. If that ever changes, the capacity
// here should change to `ptr.len() / mem::size_of::<T>()`.
Self {
ptr: unsafe { NonNull::new_unchecked(ptr.cast().as_ptr()) },
cap: capacity,
alloc,
}
}
}
/// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
///
/// # Safety
///
/// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
/// `capacity`.
/// The `capacity` cannot exceed `isize::MAX` for sized types (this is only a concern on
/// 32-bit systems). ZST vectors may have a capacity of up to `usize::MAX`.
/// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
/// guaranteed.
#[inline(always)]
pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
Self {
ptr: unsafe { NonNull::new_unchecked(ptr) },
cap: capacity,
alloc,
}
}
/// Gets a raw pointer to the start of the allocation. Note that this is
/// `NonNull::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
/// be careful.
#[inline(always)]
pub fn ptr(&self) -> *mut T {
self.ptr.as_ptr()
}
/// Gets the capacity of the allocation.
///
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn capacity(&self) -> usize {
if mem::size_of::<T>() == 0 {
usize::MAX
} else {
self.cap
}
}
/// Returns a shared reference to the allocator backing this `RawVec`.
#[inline(always)]
pub fn allocator(&self) -> &A {
&self.alloc
}
#[inline(always)]
fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
if mem::size_of::<T>() == 0 || self.cap == 0 {
None
} else {
// We have an allocated chunk of memory, so we can bypass runtime
// checks to get our current layout.
unsafe {
let layout = Layout::array::<T>(self.cap).unwrap_unchecked();
Some((self.ptr.cast(), layout))
}
}
}
/// Ensures that the buffer contains at least enough space to hold `len +
/// additional` elements. If it doesn't already have enough capacity, will
/// reallocate enough space plus comfortable slack space to get amortized
/// *O*(1) behavior. Will limit this behavior if it would needlessly cause
/// itself to panic.
///
/// If `len` exceeds `self.capacity()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behavior of this function may break.
///
/// This is ideal for implementing a bulk-push operation like `extend`.
///
/// # Panics
///
/// Panics if the new capacity exceeds `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
pub fn reserve(&mut self, len: usize, additional: usize) {
// Callers expect this function to be very cheap when there is already sufficient capacity.
// Therefore, we move all the resizing and error-handling logic from grow_amortized and
// handle_reserve behind a call, while making sure that this function is likely to be
// inlined as just a comparison and a call if the comparison fails.
#[cold]
#[inline(always)]
fn do_reserve_and_handle<T, A: Allocator>(
slf: &mut RawVec<T, A>,
len: usize,
additional: usize,
) {
handle_reserve(slf.grow_amortized(len, additional));
}
if self.needs_to_grow(len, additional) {
do_reserve_and_handle(self, len, additional);
}
}
/// A specialized version of `reserve()` used only by the hot and
/// oft-instantiated `Vec::push()`, which does its own capacity check.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
pub fn reserve_for_push(&mut self, len: usize) {
handle_reserve(self.grow_amortized(len, 1));
}
/// The same as `reserve`, but returns on errors instead of panicking or aborting.
#[inline(always)]
pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional) {
self.grow_amortized(len, additional)
} else {
Ok(())
}
}
/// Ensures that the buffer contains at least enough space to hold `len +
/// additional` elements. If it doesn't already, will reallocate the
/// minimum possible amount of memory necessary. Generally this will be
/// exactly the amount of memory necessary, but in principle the allocator
/// is free to give back more than we asked for.
///
/// If `len` exceeds `self.capacity()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe code
/// *you* write that relies on the behavior of this function may break.
///
/// # Panics
///
/// Panics if the new capacity exceeds `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
pub fn reserve_exact(&mut self, len: usize, additional: usize) {
handle_reserve(self.try_reserve_exact(len, additional));
}
/// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
#[inline(always)]
pub fn try_reserve_exact(
&mut self,
len: usize,
additional: usize,
) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional) {
self.grow_exact(len, additional)
} else {
Ok(())
}
}
/// Shrinks the buffer down to the specified capacity. If the given amount
/// is 0, actually completely deallocates.
///
/// # Panics
///
/// Panics if the given amount is *larger* than the current capacity.
///
/// # Aborts
///
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
pub fn shrink_to_fit(&mut self, cap: usize) {
handle_reserve(self.shrink(cap));
}
}
impl<T, A: Allocator> RawVec<T, A> {
/// Returns if the buffer needs to grow to fulfill the needed extra capacity.
/// Mainly used to make inlining reserve-calls possible without inlining `grow`.
#[inline(always)]
fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
additional > self.capacity().wrapping_sub(len)
}
#[inline(always)]
fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
// Allocators currently return a `NonNull<[u8]>` whose length matches
// the size requested. If that ever changes, the capacity here should
// change to `ptr.len() / mem::size_of::<T>()`.
self.ptr = unsafe { NonNull::new_unchecked(ptr.cast().as_ptr()) };
self.cap = cap;
}
// This method is usually instantiated many times. So we want it to be as
// small as possible, to improve compile times. But we also want as much of
// its contents to be statically computable as possible, to make the
// generated code run faster. Therefore, this method is carefully written
// so that all of the code that depends on `T` is within it, while as much
// of the code that doesn't depend on `T` as possible is in functions that
// are non-generic over `T`.
#[inline(always)]
fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
// This is ensured by the calling contexts.
debug_assert!(additional > 0);
if mem::size_of::<T>() == 0 {
// Since we return a capacity of `usize::MAX` when `elem_size` is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
}
// Nothing we can really do about these checks, sadly.
let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
// This guarantees exponential growth. The doubling cannot overflow
// because `cap <= isize::MAX` and the type of `cap` is `usize`.
let cap = cmp::max(self.cap * 2, required_cap);
let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
let new_layout = Layout::array::<T>(cap);
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
self.set_ptr_and_cap(ptr, cap);
Ok(())
}
// The constraints on this method are much the same as those on
// `grow_amortized`, but this method is usually instantiated less often so
// it's less critical.
#[inline(always)]
fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if mem::size_of::<T>() == 0 {
// Since we return a capacity of `usize::MAX` when the type size is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
}
let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
let new_layout = Layout::array::<T>(cap);
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
self.set_ptr_and_cap(ptr, cap);
Ok(())
}
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
assert!(
cap <= self.capacity(),
"Tried to shrink to a larger capacity"
);
let (ptr, layout) = if let Some(mem) = self.current_memory() {
mem
} else {
return Ok(());
};
let ptr = unsafe {
// `Layout::array` cannot overflow here because it would have
// overflowed earlier when capacity was larger.
let new_layout = Layout::array::<T>(cap).unwrap_unchecked();
self.alloc
.shrink(ptr, layout, new_layout)
.map_err(|_| AllocError {
layout: new_layout,
non_exhaustive: (),
})?
};
self.set_ptr_and_cap(ptr, cap);
Ok(())
}
}
// This function is outside `RawVec` to minimize compile times. See the comment
// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
// significant, because the number of different `A` types seen in practice is
// much smaller than the number of `T` types.)
#[inline(always)]
fn finish_grow<A>(
new_layout: Result<Layout, LayoutError>,
current_memory: Option<(NonNull<u8>, Layout)>,
alloc: &mut A,
) -> Result<NonNull<[u8]>, TryReserveError>
where
A: Allocator,
{
// Check for the error here to minimize the size of `RawVec::grow_*`.
let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
alloc_guard(new_layout.size())?;
let memory = if let Some((ptr, old_layout)) = current_memory {
debug_assert_eq!(old_layout.align(), new_layout.align());
unsafe {
// The allocator checks for alignment equality
assume(old_layout.align() == new_layout.align());
alloc.grow(ptr, old_layout, new_layout)
}
} else {
alloc.allocate(new_layout)
};
memory.map_err(|_| {
AllocError {
layout: new_layout,
non_exhaustive: (),
}
.into()
})
}
impl<T, A: Allocator> Drop for RawVec<T, A> {
/// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
#[inline(always)]
fn drop(&mut self) {
if let Some((ptr, layout)) = self.current_memory() {
unsafe { self.alloc.deallocate(ptr, layout) }
}
}
}
// Central function for reserve error handling.
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
fn handle_reserve(result: Result<(), TryReserveError>) {
match result.map_err(|e| e.kind()) {
Err(CapacityOverflow) => capacity_overflow(),
Err(AllocError { layout, .. }) => handle_alloc_error(layout),
Ok(()) => { /* yay */ }
}
}
// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects.
// * We don't overflow `usize::MAX` and actually allocate too little.
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space, e.g., PAE or x32.
#[inline(always)]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
if usize::BITS < 64 && alloc_size > isize::MAX as usize {
Err(CapacityOverflow.into())
} else {
Ok(())
}
}
// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
fn capacity_overflow() -> ! {
panic!("capacity overflow");
}
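
To make the amortized growth policy in `grow_amortized` easier to audit, here is a standalone sketch of just the capacity arithmetic (illustrative only; `amortized_cap` is an invented name): the new capacity is the larger of double the current capacity and the exact requirement, floored at `MIN_NON_ZERO_CAP`.

fn amortized_cap(cap: usize, len: usize, additional: usize, min_non_zero_cap: usize) -> Option<usize> {
    // Mirrors the checked_add / max chain in `grow_amortized`; `None` stands in
    // for the `CapacityOverflow` error.
    let required = len.checked_add(additional)?;
    Some(required.max(cap * 2).max(min_non_zero_cap))
}

fn main() {
    // Byte-sized elements use MIN_NON_ZERO_CAP = 8, so the first push allocates 8.
    assert_eq!(amortized_cap(0, 0, 1, 8), Some(8));
    // Once warm, doubling dominates: 8 -> 16.
    assert_eq!(amortized_cap(8, 8, 1, 8), Some(16));
    // A large explicit requirement wins over doubling.
    assert_eq!(amortized_cap(8, 8, 100, 8), Some(108));
}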


@@ -0,0 +1,171 @@
use crate::{
alloc::{Allocator, Global},
vec::Vec,
};
/// Slice methods that use `Box` and `Vec` from this crate.
pub trait SliceExt<T> {
/// Copies `self` into a new `Vec`.
///
/// # Examples
///
/// ```
/// let s = [10, 40, 30];
/// let x = s.to_vec();
/// // Here, `s` and `x` can be modified independently.
/// ```
#[cfg(not(no_global_oom_handling))]
#[inline(always)]
fn to_vec(&self) -> Vec<T, Global>
where
T: Clone,
{
self.to_vec_in(Global)
}
/// Copies `self` into a new `Vec` with an allocator.
///
/// # Examples
///
/// ```
/// #![feature(allocator_api)]
///
/// use std::alloc::System;
///
/// let s = [10, 40, 30];
/// let x = s.to_vec_in(System);
/// // Here, `s` and `x` can be modified independently.
/// ```
#[cfg(not(no_global_oom_handling))]
fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
where
T: Clone;
/// Creates a vector by copying a slice `n` times.
///
/// # Panics
///
/// This function will panic if the capacity would overflow.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
/// ```
///
/// A panic upon overflow:
///
/// ```should_panic
/// // this will panic at runtime
/// b"0123456789abcdef".repeat(usize::MAX);
/// ```
fn repeat(&self, n: usize) -> Vec<T, Global>
where
T: Copy;
}
impl<T> SliceExt<T> for [T] {
#[cfg(not(no_global_oom_handling))]
#[inline]
fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
where
T: Clone,
{
struct DropGuard<'a, T, A: Allocator> {
vec: &'a mut Vec<T, A>,
num_init: usize,
}
impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
#[inline]
fn drop(&mut self) {
// SAFETY:
// items were marked initialized in the loop below
unsafe {
self.vec.set_len(self.num_init);
}
}
}
let mut vec = Vec::with_capacity_in(self.len(), alloc);
let mut guard = DropGuard {
vec: &mut vec,
num_init: 0,
};
let slots = guard.vec.spare_capacity_mut();
// .take(slots.len()) is necessary for LLVM to remove bounds checks
// and has better codegen than zip.
for (i, b) in self.iter().enumerate().take(slots.len()) {
guard.num_init = i;
slots[i].write(b.clone());
}
core::mem::forget(guard);
// SAFETY:
// the vec was allocated and initialized above to at least this length.
unsafe {
vec.set_len(self.len());
}
vec
}
#[cfg(not(no_global_oom_handling))]
#[inline]
fn repeat(&self, n: usize) -> Vec<T, Global>
where
T: Copy,
{
if n == 0 {
return Vec::new();
}
// If `n` is larger than zero, it can be split as
// `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
// `2^expn` is the number represented by the leftmost '1' bit of `n`,
// and `rem` is the remaining part of `n`.
// Using `Vec` to access `set_len()`.
let capacity = self.len().checked_mul(n).expect("capacity overflow");
let mut buf = Vec::with_capacity(capacity);
// `2^expn` repetition is done by doubling `buf` `expn`-times.
buf.extend(self);
{
let mut m = n >> 1;
// If `m > 0`, there are remaining bits up to the leftmost '1'.
while m > 0 {
// `buf.extend(buf)`:
unsafe {
core::ptr::copy_nonoverlapping(
buf.as_ptr(),
(buf.as_mut_ptr() as *mut T).add(buf.len()),
buf.len(),
);
// `buf` has capacity of `self.len() * n`.
let buf_len = buf.len();
buf.set_len(buf_len * 2);
}
m >>= 1;
}
}
// `rem` (`= n - 2^expn`) repetition is done by copying
// first `rem` repetitions from `buf` itself.
let rem_len = capacity - buf.len(); // `self.len() * rem`
if rem_len > 0 {
// `buf.extend(buf[0 .. rem_len])`:
unsafe {
// This is non-overlapping since `2^expn > rem`.
core::ptr::copy_nonoverlapping(
buf.as_ptr(),
(buf.as_mut_ptr() as *mut T).add(buf.len()),
rem_len,
);
// `buf.len() + rem_len` is equal to `buf.capacity()` (`= self.len() * n`).
buf.set_len(capacity);
}
}
buf
}
}
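
The unsafe doubling loop in `repeat` above can be summarized with a safe, standalone equivalent (illustrative; it uses `extend_from_within` instead of raw copies): build one copy, double the buffer while another full copy still fits, then append the remaining prefix.

fn repeat_slice(s: &[u32], n: usize) -> Vec<u32> {
    if s.is_empty() || n == 0 {
        return Vec::new();
    }
    let capacity = s.len().checked_mul(n).expect("capacity overflow");
    let mut buf = Vec::with_capacity(capacity);
    buf.extend_from_slice(s);
    // Double `buf` while a full copy of it still fits (overflow-safe form of
    // `buf.len() * 2 <= capacity`); this performs the `2^expn` repetitions.
    while buf.len() <= capacity - buf.len() {
        let len = buf.len();
        buf.extend_from_within(..len);
    }
    // Copy the remaining `rem` repetitions from the front of `buf`.
    let rem_len = capacity - buf.len();
    buf.extend_from_within(..rem_len);
    buf
}

fn main() {
    assert_eq!(repeat_slice(&[1, 2], 3), vec![1, 2, 1, 2, 1, 2]);
}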


@@ -0,0 +1,106 @@
/// A wrapper around a raw non-null `*mut T` that indicates that the possessor
/// of this wrapper owns the referent. Useful for building abstractions like
/// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
///
/// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
/// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
/// the kind of strong aliasing guarantees an instance of `T` can expect:
/// the referent of the pointer should not be modified without a unique path to
/// its owning Unique.
///
/// If you're uncertain of whether it's correct to use `Unique` for your purposes,
/// consider using `NonNull`, which has weaker semantics.
///
/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
/// is never dereferenced. This is so that enums may use this forbidden value
/// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
/// However the pointer may still dangle if it isn't dereferenced.
///
/// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
/// for any type which upholds Unique's aliasing requirements.
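///
/// A minimal sketch of the intended pattern (illustrative only; `MyBox` is a
/// hypothetical owning container, not part of this crate):
///
/// ```ignore
/// struct MyBox<T> {
///     ptr: Unique<T>, // `MyBox` logically owns the pointee and must free it
/// }
/// ```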
#[repr(transparent)]
pub(crate) struct Unique<T: ?Sized> {
pointer: NonNull<T>,
_marker: PhantomData<T>,
}
/// `Unique` pointers are `Send` if `T` is `Send` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
unsafe impl<T: Send + ?Sized> Send for Unique<T> {}
/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
unsafe impl<T: Sync + ?Sized> Sync for Unique<T> {}
impl<T: ?Sized> Unique<T> {
/// Creates a new `Unique`.
///
/// # Safety
///
/// `ptr` must be non-null.
#[inline]
pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
// SAFETY: the caller must guarantee that `ptr` is non-null.
unsafe {
Unique {
pointer: NonNull::new_unchecked(ptr),
_marker: PhantomData,
}
}
}
/// Acquires the underlying `*mut` pointer.
#[must_use = "`self` will be dropped if the result is not used"]
#[inline]
pub const fn as_ptr(self) -> *mut T {
self.pointer.as_ptr()
}
/// Acquires the underlying `*mut` pointer.
#[must_use = "`self` will be dropped if the result is not used"]
#[inline]
pub const fn as_non_null_ptr(self) -> NonNull<T> {
self.pointer
}
/// Dereferences the content.
///
/// The resulting lifetime is bound to self so this behaves "as if"
/// it were actually an instance of T that is getting borrowed. If a longer
/// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
#[must_use]
#[inline]
pub const unsafe fn as_ref(&self) -> &T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a reference.
unsafe { &*(self.as_ptr() as *const T) }
}
/// Mutably dereferences the content.
///
/// The resulting lifetime is bound to self so this behaves "as if"
/// it were actually an instance of T that is getting borrowed. If a longer
/// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
#[must_use]
#[inline]
pub unsafe fn as_mut(&mut self) -> &mut T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a mutable reference.
unsafe { self.pointer.as_mut() }
}
}
impl<T: ?Sized> Clone for Unique<T> {
#[inline]
fn clone(&self) -> Self {
*self
}
}
impl<T: ?Sized> Copy for Unique<T> {}

View File

@@ -0,0 +1,242 @@
use core::fmt;
use core::iter::FusedIterator;
use core::mem::{self, size_of, ManuallyDrop};
use core::ptr::{self, NonNull};
use core::slice::{self};
use crate::stable::alloc::{Allocator, Global};
use super::Vec;
/// A draining iterator for `Vec<T>`.
///
/// This `struct` is created by [`Vec::drain`].
/// See its documentation for more.
///
/// # Example
///
/// ```
/// let mut v = vec![0, 1, 2];
/// let iter: std::vec::Drain<_> = v.drain(..);
/// ```
pub struct Drain<'a, T: 'a, A: Allocator + 'a = Global> {
/// Index of tail to preserve
pub(super) tail_start: usize,
/// Length of tail
pub(super) tail_len: usize,
/// Current remaining range to remove
pub(super) iter: slice::Iter<'a, T>,
pub(super) vec: NonNull<Vec<T, A>>,
}
impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Drain").field(&self.iter.as_slice()).finish()
}
}
impl<'a, T, A: Allocator> Drain<'a, T, A> {
/// Returns the remaining items of this iterator as a slice.
///
/// # Examples
///
/// ```
/// let mut vec = vec!['a', 'b', 'c'];
/// let mut drain = vec.drain(..);
/// assert_eq!(drain.as_slice(), &['a', 'b', 'c']);
/// let _ = drain.next().unwrap();
/// assert_eq!(drain.as_slice(), &['b', 'c']);
/// ```
#[must_use]
#[inline(always)]
pub fn as_slice(&self) -> &[T] {
self.iter.as_slice()
}
/// Returns a reference to the underlying allocator.
#[must_use]
#[inline(always)]
pub fn allocator(&self) -> &A {
unsafe { self.vec.as_ref().allocator() }
}
/// Keep unyielded elements in the source `Vec`.
///
/// # Examples
///
/// ```
/// #![feature(drain_keep_rest)]
///
/// let mut vec = vec!['a', 'b', 'c'];
/// let mut drain = vec.drain(..);
///
/// assert_eq!(drain.next().unwrap(), 'a');
///
/// // This call keeps 'b' and 'c' in the vec.
/// drain.keep_rest();
///
    /// // Without the call to `keep_rest()` above,
    /// // `vec` would be empty.
/// assert_eq!(vec, ['b', 'c']);
/// ```
#[inline(always)]
pub fn keep_rest(self) {
// At this moment layout looks like this:
//
// [head] [yielded by next] [unyielded] [yielded by next_back] [tail]
        //        ^-- start         \_________/-- unyielded_len        \____/-- self.tail_len
        //                          ^-- unyielded_ptr                  ^-- tail
//
// Normally `Drop` impl would drop [unyielded] and then move [tail] to the `start`.
// Here we want to
// 1. Move [unyielded] to `start`
// 2. Move [tail] to a new start at `start + len(unyielded)`
// 3. Update length of the original vec to `len(head) + len(unyielded) + len(tail)`
// a. In case of ZST, this is the only thing we want to do
// 4. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
let mut this = ManuallyDrop::new(self);
unsafe {
let source_vec = this.vec.as_mut();
let start = source_vec.len();
let tail = this.tail_start;
let unyielded_len = this.iter.len();
let unyielded_ptr = this.iter.as_slice().as_ptr();
// ZSTs have no identity, so we don't need to move them around.
let needs_move = mem::size_of::<T>() != 0;
if needs_move {
let start_ptr = source_vec.as_mut_ptr().add(start);
// memmove back unyielded elements
if unyielded_ptr != start_ptr {
let src = unyielded_ptr;
let dst = start_ptr;
ptr::copy(src, dst, unyielded_len);
}
// memmove back untouched tail
if tail != (start + unyielded_len) {
let src = source_vec.as_ptr().add(tail);
let dst = start_ptr.add(unyielded_len);
ptr::copy(src, dst, this.tail_len);
}
}
source_vec.set_len(start + unyielded_len + this.tail_len);
}
}
}
impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> {
#[inline(always)]
fn as_ref(&self) -> &[T] {
self.as_slice()
}
}
unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {}
unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {}
impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
type Item = T;
#[inline(always)]
fn next(&mut self) -> Option<T> {
self.iter
.next()
.map(|elt| unsafe { ptr::read(elt as *const _) })
}
#[inline(always)]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
#[inline(always)]
fn next_back(&mut self) -> Option<T> {
self.iter
.next_back()
.map(|elt| unsafe { ptr::read(elt as *const _) })
}
}
impl<T, A: Allocator> Drop for Drain<'_, T, A> {
#[inline]
fn drop(&mut self) {
/// Moves back the un-`Drain`ed elements to restore the original `Vec`.
struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
fn drop(&mut self) {
if self.0.tail_len > 0 {
unsafe {
let source_vec = self.0.vec.as_mut();
// memmove back untouched tail, update to new length
let start = source_vec.len();
let tail = self.0.tail_start;
if tail != start {
let src = source_vec.as_ptr().add(tail);
let dst = source_vec.as_mut_ptr().add(start);
ptr::copy(src, dst, self.0.tail_len);
}
source_vec.set_len(start + self.0.tail_len);
}
}
}
}
let iter = mem::replace(&mut self.iter, [].iter());
let drop_len = iter.len();
let mut vec = self.vec;
if size_of::<T>() == 0 {
// ZSTs have no identity, so we don't need to move them around, we only need to drop the correct amount.
// this can be achieved by manipulating the Vec length instead of moving values out from `iter`.
unsafe {
let vec = vec.as_mut();
let old_len = vec.len();
vec.set_len(old_len + drop_len + self.tail_len);
vec.truncate(old_len + self.tail_len);
}
return;
}
// ensure elements are moved back into their appropriate places, even when drop_in_place panics
let _guard = DropGuard(self);
if drop_len == 0 {
return;
}
// as_slice() must only be called when iter.len() is > 0 because
// vec::Splice modifies vec::Drain fields and may grow the vec which would invalidate
// the iterator's internal pointers. Creating a reference to deallocated memory
// is invalid even when it is zero-length
let drop_ptr = iter.as_slice().as_ptr();
unsafe {
// drop_ptr comes from a slice::Iter which only gives us a &[T] but for drop_in_place
// a pointer with mutable provenance is necessary. Therefore we must reconstruct
// it from the original vec but also avoid creating a &mut to the front since that could
// invalidate raw pointers to it which some unsafe code might rely on.
let vec_ptr = vec.as_mut().as_mut_ptr();
let drop_offset = drop_ptr.offset_from(vec_ptr) as usize;
let to_drop = ptr::slice_from_raw_parts_mut(vec_ptr.add(drop_offset), drop_len);
ptr::drop_in_place(to_drop);
}
}
}
impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {}
impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}

View File

@@ -0,0 +1,191 @@
use core::fmt;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem::{self, size_of, ManuallyDrop};
use core::ptr::{self, NonNull};
use core::slice::{self};
use crate::stable::addr;
use super::{Allocator, Global, RawVec};
#[cfg(not(no_global_oom_handling))]
use super::Vec;
/// An iterator that moves out of a vector.
///
/// This `struct` is created by the `into_iter` method on [`Vec`](super::Vec)
/// (provided by the [`IntoIterator`] trait).
///
/// # Example
///
/// ```
/// let v = vec![0, 1, 2];
/// let iter: std::vec::IntoIter<_> = v.into_iter();
/// ```
pub struct IntoIter<T, A: Allocator = Global> {
pub(super) buf: NonNull<T>,
pub(super) phantom: PhantomData<T>,
pub(super) cap: usize,
    // The drop impl reconstructs a `RawVec` from `buf`, `cap` and `alloc`;
    // to avoid dropping the allocator twice we wrap it in `ManuallyDrop`.
pub(super) alloc: ManuallyDrop<A>,
pub(super) ptr: *const T,
pub(super) end: *const T,
}
impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
}
}
impl<T, A: Allocator> IntoIter<T, A> {
/// Returns the remaining items of this iterator as a slice.
///
/// # Examples
///
/// ```
/// let vec = vec!['a', 'b', 'c'];
/// let mut into_iter = vec.into_iter();
/// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
/// let _ = into_iter.next().unwrap();
/// assert_eq!(into_iter.as_slice(), &['b', 'c']);
/// ```
pub fn as_slice(&self) -> &[T] {
unsafe { slice::from_raw_parts(self.ptr, self.len()) }
}
/// Returns the remaining items of this iterator as a mutable slice.
///
/// # Examples
///
/// ```
/// let vec = vec!['a', 'b', 'c'];
/// let mut into_iter = vec.into_iter();
/// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
/// into_iter.as_mut_slice()[2] = 'z';
/// assert_eq!(into_iter.next().unwrap(), 'a');
/// assert_eq!(into_iter.next().unwrap(), 'b');
/// assert_eq!(into_iter.next().unwrap(), 'z');
/// ```
pub fn as_mut_slice(&mut self) -> &mut [T] {
unsafe { &mut *self.as_raw_mut_slice() }
}
/// Returns a reference to the underlying allocator.
#[inline(always)]
pub fn allocator(&self) -> &A {
&self.alloc
}
fn as_raw_mut_slice(&mut self) -> *mut [T] {
ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len())
}
}
impl<T, A: Allocator> AsRef<[T]> for IntoIter<T, A> {
fn as_ref(&self) -> &[T] {
self.as_slice()
}
}
unsafe impl<T: Send, A: Allocator + Send> Send for IntoIter<T, A> {}
unsafe impl<T: Sync, A: Allocator + Sync> Sync for IntoIter<T, A> {}
impl<T, A: Allocator> Iterator for IntoIter<T, A> {
type Item = T;
#[inline(always)]
fn next(&mut self) -> Option<T> {
if self.ptr == self.end {
None
} else if size_of::<T>() == 0 {
// purposefully don't use 'ptr.offset' because for
// vectors with 0-size elements this would return the
// same pointer.
self.ptr = self.ptr.cast::<u8>().wrapping_add(1).cast();
// Make up a value of this ZST.
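            // (An inhabited ZST has exactly one value, and the vec really
            // did store `len` of them, so conjuring one here is sound.)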
Some(unsafe { mem::zeroed() })
} else {
let old = self.ptr;
self.ptr = unsafe { self.ptr.add(1) };
Some(unsafe { ptr::read(old) })
}
}
#[inline(always)]
fn size_hint(&self) -> (usize, Option<usize>) {
let exact = if size_of::<T>() == 0 {
addr(self.end).wrapping_sub(addr(self.ptr))
} else {
unsafe { self.end.offset_from(self.ptr) as usize }
};
(exact, Some(exact))
}
#[inline(always)]
fn count(self) -> usize {
self.len()
}
}
impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
#[inline(always)]
fn next_back(&mut self) -> Option<T> {
if self.end == self.ptr {
None
} else if size_of::<T>() == 0 {
// See above for why 'ptr.offset' isn't used
self.end = self.end.cast::<u8>().wrapping_add(1).cast();
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
self.end = unsafe { self.end.sub(1) };
Some(unsafe { ptr::read(self.end) })
}
}
}
impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {}
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
#[cfg(not(no_global_oom_handling))]
impl<T: Clone, A: Allocator + Clone> Clone for IntoIter<T, A> {
fn clone(&self) -> Self {
let mut vec = Vec::<T, A>::with_capacity_in(self.len(), (*self.alloc).clone());
vec.extend(self.as_slice().iter().cloned());
vec.into_iter()
}
}
impl<T, A: Allocator> Drop for IntoIter<T, A> {
fn drop(&mut self) {
struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter<T, A>);
impl<T, A: Allocator> Drop for DropGuard<'_, T, A> {
fn drop(&mut self) {
unsafe {
// `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec
let alloc = ManuallyDrop::take(&mut self.0.alloc);
// RawVec handles deallocation
let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc);
}
}
}
let guard = DropGuard(self);
// destroy the remaining elements
unsafe {
ptr::drop_in_place(guard.0.as_raw_mut_slice());
}
// now `guard` will be dropped and do the rest
}
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,43 @@
#[cfg(not(no_global_oom_handling))]
use alloc_crate::borrow::Cow;
use crate::stable::alloc::Allocator;
use super::Vec;
macro_rules! __impl_slice_eq1 {
([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?) => {
impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs
where
T: PartialEq<U>,
$($ty: $bound)?
{
#[inline(always)]
fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
#[inline(always)]
fn ne(&self, other: &$rhs) -> bool { self[..] != other[..] }
}
}
}
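// As a rough sketch, `__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U] }`
// expands to approximately:
//
//     impl<T, U, A: Allocator> PartialEq<&[U]> for Vec<T, A>
//     where
//         T: PartialEq<U>,
//     {
//         #[inline(always)]
//         fn eq(&self, other: &&[U]) -> bool { self[..] == other[..] }
//         #[inline(always)]
//         fn ne(&self, other: &&[U]) -> bool { self[..] != other[..] }
//     }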
__impl_slice_eq1! { [A1: Allocator, A2: Allocator] Vec<T, A1>, Vec<U, A2> }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U] }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U] }
__impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A> }
__impl_slice_eq1! { [A: Allocator] &mut [T], Vec<U, A> }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, [U] }
__impl_slice_eq1! { [A: Allocator] [T], Vec<U, A> }
#[cfg(not(no_global_oom_handling))]
__impl_slice_eq1! { [A: Allocator] Cow<'_, [T]>, Vec<U, A> where T: Clone }
__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, [U; N] }
__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, &[U; N] }
// NOTE: some less important impls are omitted to reduce code bloat
// FIXME(Centril): Reconsider this?
//__impl_slice_eq1! { [const N: usize] Vec<A>, &mut [B; N], }
//__impl_slice_eq1! { [const N: usize] [A; N], Vec<B>, }
//__impl_slice_eq1! { [const N: usize] &[A; N], Vec<B>, }
//__impl_slice_eq1! { [const N: usize] &mut [A; N], Vec<B>, }
//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], }
//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], }
//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], }

View File

@@ -0,0 +1,31 @@
// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
//
// The idea is that the length is tracked in a local variable (`local_len`)
// that the optimizer can see does not alias with any stores through the
// Vec's data pointer; the real length field is written only once, on drop.
// This is a workaround for alias analysis issue #32155.
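//
// A minimal usage sketch (a hypothetical caller; the real call sites live in
// `Vec`'s extend internals):
//
//     let mut local_len = SetLenOnDrop::new(&mut self.len);
//     for element in iterator {
//         ptr::write(ptr, element);
//         ptr = ptr.add(1);
//         // Written elements are counted locally; the vec's `len` field is
//         // only stored once, when `local_len` drops (even on panic).
//         local_len.increment_len(1);
//     }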
pub(super) struct SetLenOnDrop<'a> {
len: &'a mut usize,
local_len: usize,
}
impl<'a> SetLenOnDrop<'a> {
#[inline(always)]
pub(super) fn new(len: &'a mut usize) -> Self {
SetLenOnDrop {
local_len: *len,
len,
}
}
#[inline(always)]
pub(super) fn increment_len(&mut self, increment: usize) {
self.local_len += increment;
}
}
impl Drop for SetLenOnDrop<'_> {
#[inline(always)]
fn drop(&mut self) {
*self.len = self.local_len;
}
}

View File

@@ -0,0 +1,135 @@
use core::ptr::{self};
use core::slice::{self};
use crate::stable::alloc::{Allocator, Global};
use super::{Drain, Vec};
/// A splicing iterator for `Vec`.
///
/// This struct is created by [`Vec::splice()`].
/// See its documentation for more.
///
/// # Example
///
/// ```
/// let mut v = vec![0, 1, 2];
/// let new = [7, 8];
/// let iter: std::vec::Splice<_> = v.splice(1.., new);
/// ```
#[derive(Debug)]
pub struct Splice<'a, I: Iterator + 'a, A: Allocator + 'a = Global> {
pub(super) drain: Drain<'a, I::Item, A>,
pub(super) replace_with: I,
}
impl<I: Iterator, A: Allocator> Iterator for Splice<'_, I, A> {
type Item = I::Item;
#[inline(always)]
fn next(&mut self) -> Option<Self::Item> {
self.drain.next()
}
#[inline(always)]
fn size_hint(&self) -> (usize, Option<usize>) {
self.drain.size_hint()
}
}
impl<I: Iterator, A: Allocator> DoubleEndedIterator for Splice<'_, I, A> {
#[inline(always)]
fn next_back(&mut self) -> Option<Self::Item> {
self.drain.next_back()
}
}
impl<I: Iterator, A: Allocator> ExactSizeIterator for Splice<'_, I, A> {}
impl<I: Iterator, A: Allocator> Drop for Splice<'_, I, A> {
#[inline]
fn drop(&mut self) {
self.drain.by_ref().for_each(drop);
unsafe {
if self.drain.tail_len == 0 {
self.drain.vec.as_mut().extend(self.replace_with.by_ref());
return;
}
// First fill the range left by drain().
if !self.drain.fill(&mut self.replace_with) {
return;
}
// There may be more elements. Use the lower bound as an estimate.
// FIXME: Is the upper bound a better guess? Or something else?
let (lower_bound, _upper_bound) = self.replace_with.size_hint();
if lower_bound > 0 {
self.drain.move_tail(lower_bound);
if !self.drain.fill(&mut self.replace_with) {
return;
}
}
// Collect any remaining elements.
// This is a zero-length vector which does not allocate if `lower_bound` was exact.
let mut collected = self
.replace_with
.by_ref()
.collect::<Vec<I::Item>>()
.into_iter();
// Now we have an exact count.
if collected.len() > 0 {
self.drain.move_tail(collected.len());
let filled = self.drain.fill(&mut collected);
debug_assert!(filled);
debug_assert_eq!(collected.len(), 0);
}
}
// Let `Drain::drop` move the tail back if necessary and restore `vec.len`.
}
}
/// Private helper methods for `Splice::drop`
impl<T, A: Allocator> Drain<'_, T, A> {
/// The range from `self.vec.len` to `self.tail_start` contains elements
/// that have been moved out.
/// Fill that range as much as possible with new elements from the `replace_with` iterator.
    /// Returns `true` if we filled the entire range. (`replace_with.next()` didn't return `None`.)
#[inline(always)]
unsafe fn fill<I: Iterator<Item = T>>(&mut self, replace_with: &mut I) -> bool {
let vec = unsafe { self.vec.as_mut() };
let range_start = vec.len;
let range_end = self.tail_start;
let range_slice = unsafe {
slice::from_raw_parts_mut(vec.as_mut_ptr().add(range_start), range_end - range_start)
};
for place in range_slice {
if let Some(new_item) = replace_with.next() {
unsafe { ptr::write(place, new_item) };
vec.len += 1;
} else {
return false;
}
}
true
}
/// Makes room for inserting more elements before the tail.
#[inline(always)]
unsafe fn move_tail(&mut self, additional: usize) {
let vec = unsafe { self.vec.as_mut() };
let len = self.tail_start + self.tail_len;
vec.buf.reserve(len, additional);
let new_tail_start = self.tail_start + additional;
unsafe {
let src = vec.as_ptr().add(self.tail_start);
let dst = vec.as_mut_ptr().add(new_tail_start);
ptr::copy(src, dst, self.tail_len);
}
self.tail_start = new_tail_start;
}
}