chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

38
vendor/tokio/src/util/as_ref.rs vendored Normal file
View File

@@ -0,0 +1,38 @@
use super::typeid;
/// An owned byte buffer recovered by `upgrade` from a generic
/// `B: AsRef<[u8]>` value, without copying when the concrete type allows it.
#[derive(Debug)]
pub(crate) enum OwnedBuf {
    /// Plain heap buffer (also produced from `String` via `into_bytes`).
    Vec(Vec<u8>),
    /// Reference-counted buffer; only present with the `io-util` feature.
    #[cfg(feature = "io-util")]
    Bytes(bytes::Bytes),
}
impl AsRef<[u8]> for OwnedBuf {
fn as_ref(&self) -> &[u8] {
match self {
Self::Vec(vec) => vec,
#[cfg(feature = "io-util")]
Self::Bytes(bytes) => bytes,
}
}
}
/// Converts a generic buffer into an `OwnedBuf`, avoiding a copy when the
/// concrete type of `B` is `Vec<u8>`, `String`, or (with `io-util`) `Bytes`.
///
/// Any other type falls back to copying the borrowed bytes into a new `Vec`.
pub(crate) fn upgrade<B: AsRef<[u8]>>(buf: B) -> OwnedBuf {
    // Safety: `typeid::try_transmute` is expected to succeed only when `B`
    // is exactly the target type, making the transmute an identity
    // conversion; on failure the original value is handed back untouched.
    // (See `super::typeid` for the contract.)
    let buf = match unsafe { typeid::try_transmute::<B, Vec<u8>>(buf) } {
        Ok(vec) => return OwnedBuf::Vec(vec),
        Err(original_buf) => original_buf,
    };
    let buf = match unsafe { typeid::try_transmute::<B, String>(buf) } {
        // A `String`'s buffer is reused directly as its UTF-8 bytes.
        Ok(string) => return OwnedBuf::Vec(string.into_bytes()),
        Err(original_buf) => original_buf,
    };
    #[cfg(feature = "io-util")]
    let buf = match unsafe { typeid::try_transmute::<B, bytes::Bytes>(buf) } {
        Ok(bytes) => return OwnedBuf::Bytes(bytes),
        Err(original_buf) => original_buf,
    };
    // Fallback: copy the borrowed bytes into an owned `Vec`.
    OwnedBuf::Vec(buf.as_ref().to_owned())
}

51
vendor/tokio/src/util/atomic_cell.rs vendored Normal file
View File

@@ -0,0 +1,51 @@
use crate::loom::sync::atomic::AtomicPtr;
use std::ptr;
use std::sync::atomic::Ordering::AcqRel;
/// A cell holding an optional boxed value that can be swapped atomically.
///
/// The contents are stored as a raw pointer; null encodes `None`.
pub(crate) struct AtomicCell<T> {
    data: AtomicPtr<T>,
}

// Safety: the cell owns the boxed `T` and only hands it out by value, so
// `T: Send` is sufficient for the cell to be sent to or shared between
// threads.
unsafe impl<T: Send> Send for AtomicCell<T> {}
unsafe impl<T: Send> Sync for AtomicCell<T> {}
impl<T> AtomicCell<T> {
    /// Creates a cell initialized with `data` (`None` is stored as null).
    pub(crate) fn new(data: Option<Box<T>>) -> AtomicCell<T> {
        let initial = to_raw(data);
        AtomicCell {
            data: AtomicPtr::new(initial),
        }
    }

    /// Atomically replaces the stored value with `val`, returning the
    /// previous contents.
    pub(crate) fn swap(&self, val: Option<Box<T>>) -> Option<Box<T>> {
        let prev = self.data.swap(to_raw(val), AcqRel);
        from_raw(prev)
    }

    /// Stores `val`, dropping whatever the cell previously held.
    pub(crate) fn set(&self, val: Box<T>) {
        drop(self.swap(Some(val)));
    }

    /// Removes and returns the current value, leaving the cell empty.
    pub(crate) fn take(&self) -> Option<Box<T>> {
        self.swap(None)
    }
}
/// Converts an optional box into a raw pointer; `None` becomes null.
fn to_raw<T>(data: Option<Box<T>>) -> *mut T {
    match data {
        Some(boxed) => Box::into_raw(boxed),
        None => ptr::null_mut(),
    }
}
/// Reconstructs an optional box from a raw pointer; null becomes `None`.
///
/// Non-null pointers must have originated from `Box::into_raw`.
fn from_raw<T>(val: *mut T) -> Option<Box<T>> {
    if val.is_null() {
        return None;
    }
    // Safety: every non-null pointer stored in the cell came from
    // `Box::into_raw`, so reconstructing the box is sound.
    Some(unsafe { Box::from_raw(val) })
}
impl<T> Drop for AtomicCell<T> {
    fn drop(&mut self) {
        // Free any data still held by the cell: `take` converts the stored
        // raw pointer back into a `Box`, which is dropped here.
        let _ = self.take();
    }
}

64
vendor/tokio/src/util/bit.rs vendored Normal file
View File

@@ -0,0 +1,64 @@
use std::fmt;
/// Describes how a value is packed into a bit range of a `usize`: the value
/// occupies the bits selected by `mask` after being shifted left by `shift`.
#[derive(Clone, Copy, PartialEq)]
pub(crate) struct Pack {
    /// Bits of the `usize` occupied by this value (already shifted into place).
    mask: usize,
    /// Number of low-order bits below this value's range.
    shift: u32,
}
impl Pack {
    /// Value is packed in the `width` least-significant bits.
    pub(crate) const fn least_significant(width: u32) -> Pack {
        let mask = mask_for(width);
        Pack { mask, shift: 0 }
    }

    /// Value is packed in the `width` more-significant bits.
    ///
    /// The new range starts immediately above the highest bit of `self.mask`.
    pub(crate) const fn then(&self, width: u32) -> Pack {
        // Index of the first bit above `self`'s range.
        let shift = usize::BITS - self.mask.leading_zeros();
        let mask = mask_for(width) << shift;
        Pack { mask, shift }
    }

    /// Width, in bits, dedicated to storing the value.
    pub(crate) const fn width(&self) -> u32 {
        usize::BITS - (self.mask >> self.shift).leading_zeros()
    }

    /// Max representable value.
    // NOTE(review): assumes `width() < usize::BITS`; a full-width mask would
    // overflow the shift here — confirm callers never construct one.
    pub(crate) const fn max_value(&self) -> usize {
        (1 << self.width()) - 1
    }

    /// Packs `value` into `base`, replacing the bits covered by `self.mask`.
    ///
    /// # Panics
    ///
    /// Panics if `value` does not fit in the packed range.
    pub(crate) fn pack(&self, value: usize, base: usize) -> usize {
        assert!(value <= self.max_value());
        (base & !self.mask) | (value << self.shift)
    }

    /// Extracts this field's packed value from `src`.
    pub(crate) fn unpack(&self, src: usize) -> usize {
        unpack(src, self.mask, self.shift)
    }
}
impl fmt::Debug for Pack {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
fmt,
"Pack {{ mask: {:b}, shift: {} }}",
self.mask, self.shift
)
}
}
/// Returns a `usize` with the right-most `n` bits set.
///
/// `mask_for(0)` returns `0` and `mask_for(usize::BITS)` returns
/// `usize::MAX`, matching the documented contract at both extremes.
pub(crate) const fn mask_for(n: u32) -> usize {
    // Bug fix: for `n == 0` the original `wrapping_shl(n - 1)` wrapped the
    // shift amount and produced `usize::MAX` ("all bits set") instead of the
    // documented "no bits set".
    if n == 0 {
        return 0;
    }
    // Set the highest bit of the range, then fill everything below it. This
    // avoids computing `(1 << n) - 1`, which would overflow for
    // `n == usize::BITS`.
    let top = 1usize.wrapping_shl(n - 1);
    top | (top - 1)
}
/// Unpacks a value using a mask & shift.
///
/// Selects the bits of `src` covered by `mask`, then shifts them down so the
/// field's least-significant bit lands at bit zero.
pub(crate) const fn unpack(src: usize, mask: usize, shift: u32) -> usize {
    let masked = src & mask;
    masked >> shift
}

29
vendor/tokio/src/util/blocking_check.rs vendored Normal file
View File

@@ -0,0 +1,29 @@
#[cfg(unix)]
use std::os::fd::AsFd;
/// Debug-asserts that `s` is already in non-blocking mode before it is
/// registered with the runtime.
///
/// The check is compiled out entirely when `--cfg tokio_allow_from_blocking_fd`
/// is set, in which case the function always succeeds.
///
/// # Errors
///
/// Returns an error if querying the socket's non-blocking flag fails.
#[cfg(unix)]
#[allow(unused_variables)]
#[track_caller]
pub(crate) fn check_socket_for_blocking<S: AsFd>(s: &S) -> crate::io::Result<()> {
    #[cfg(not(tokio_allow_from_blocking_fd))]
    {
        // Borrow the fd through socket2 to query the non-blocking flag; the
        // query itself is fallible and propagated with `?`.
        let sock = socket2::SockRef::from(s);
        debug_assert!(
            sock.nonblocking()?,
            "Registering a blocking socket with the tokio runtime is unsupported. \
            If you wish to do anyways, please add `--cfg tokio_allow_from_blocking_fd` to your \
            RUSTFLAGS. See github.com/tokio-rs/tokio/issues/7172 for details."
        );
    }
    Ok(())
}
/// Non-Unix fallback: performs no check and always succeeds.
///
/// The non-blocking status of a socket cannot be retrieved on Windows, and
/// WASI support for this check has not been implemented yet.
#[cfg(not(unix))]
#[allow(unused_variables)]
pub(crate) fn check_socket_for_blocking<S>(s: &S) -> crate::io::Result<()> {
    Ok(())
}

89
vendor/tokio/src/util/cacheline.rs vendored Normal file
View File

@@ -0,0 +1,89 @@
#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))]
use std::ops::{Deref, DerefMut};
/// Pads and aligns a value to the length of a cache line.
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache
// lines at a time, so we have to align to 128 bytes rather than 64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
//
// ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size.
//
// Sources:
// - https://www.mono-project.com/news/2016/09/12/arm64-icache/
//
// powerpc64 has 128-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9
#[cfg_attr(
    any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
    ),
    repr(align(128))
)]
// arm, mips and mips64 have 32-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9
#[cfg_attr(
    any(target_arch = "arm", target_arch = "mips", target_arch = "mips64",),
    repr(align(32))
)]
// s390x has 256-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7
#[cfg_attr(target_arch = "s390x", repr(align(256)))]
// x86, riscv and wasm have 64-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7
// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/riscv/include/asm/cache.h#L10
//
// All others are assumed to have 64-byte cache line size.
#[cfg_attr(
    not(any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips64",
        target_arch = "s390x",
    )),
    repr(align(64))
)]
pub(crate) struct CachePadded<T> {
    /// The wrapped value; the `repr(align(..))` above pads it out to a full
    /// cache line to avoid false sharing between adjacent values.
    value: T,
}
impl<T> CachePadded<T> {
/// Pads and aligns a value to the length of a cache line.
pub(crate) fn new(value: T) -> CachePadded<T> {
CachePadded::<T> { value }
}
}
impl<T> Deref for CachePadded<T> {
    type Target = T;

    /// Transparent read access to the padded value.
    fn deref(&self) -> &T {
        &self.value
    }
}

impl<T> DerefMut for CachePadded<T> {
    /// Transparent mutable access to the padded value.
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}

16
vendor/tokio/src/util/error.rs vendored Normal file
View File

@@ -0,0 +1,16 @@
// Some combinations of features may not use these constants.
#![cfg_attr(not(feature = "full"), allow(dead_code))]

// NOTE(review): these strings appear verbatim in user-facing errors and
// panic messages, so any change to the wording is a visible behavior change.

/// Error string explaining that the Tokio context hasn't been instantiated.
pub(crate) const CONTEXT_MISSING_ERROR: &str =
    "there is no reactor running, must be called from the context of a Tokio 1.x runtime";

/// Error string explaining that the Tokio context is shutting down and cannot drive timers.
pub(crate) const RUNTIME_SHUTTING_DOWN_ERROR: &str =
    "A Tokio 1.x context was found, but it is being shutdown.";

/// Error string explaining that the Tokio context is not available because the
/// thread-local storing it has been destroyed. This usually only happens during
/// destructors of other thread-locals.
pub(crate) const THREAD_LOCAL_DESTROYED_ERROR: &str =
    "The Tokio context thread-local variable has been destroyed.";

View File

@@ -0,0 +1,512 @@
//! This module defines an `IdleNotifiedSet`, which is a collection of elements.
//! Each element is intended to correspond to a task, and the collection will
//! keep track of which tasks have had their waker notified, and which have not.
//!
//! Each entry in the set holds some user-specified value. The value's type is
//! specified using the `T` parameter. It will usually be a `JoinHandle` or
//! similar.
use std::marker::PhantomPinned;
use std::mem::ManuallyDrop;
use std::ptr::NonNull;
use std::task::{Context, Waker};
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::{Arc, Mutex};
use crate::util::linked_list::{self, Link};
use crate::util::{waker_ref, Wake};
/// Shorthand for the intrusive list type shared by the idle and notified lists.
type LinkedList<T> =
    linked_list::LinkedList<ListEntry<T>, <ListEntry<T> as linked_list::Link>::Target>;
/// This is the main handle to the collection.
pub(crate) struct IdleNotifiedSet<T> {
    /// The two linked lists plus the stored waker, shared with the entries.
    lists: Arc<Lists<T>>,
    /// Total number of entries across both lists.
    length: usize,
}
/// A handle to an entry that is guaranteed to be stored in the idle or notified
/// list of its `IdleNotifiedSet`. This value borrows the `IdleNotifiedSet`
/// mutably to prevent the entry from being moved to the `Neither` list, which
/// only the `IdleNotifiedSet` may do.
///
/// The main consequence of being stored in one of the lists is that the `value`
/// field has not yet been consumed.
///
/// Note: This entry can be moved from the idle to the notified list while this
/// object exists by waking its waker.
pub(crate) struct EntryInOneOfTheLists<'a, T> {
    /// The entry itself; guaranteed to be in the idle or notified list.
    entry: Arc<ListEntry<T>>,
    /// Exclusive borrow of the owning set, keeping the guarantee alive.
    set: &'a mut IdleNotifiedSet<T>,
}
/// The shared, mutex-protected state behind each `IdleNotifiedSet`.
type Lists<T> = Mutex<ListsInner<T>>;

/// The linked lists hold strong references to the `ListEntry` items, and the
/// `ListEntry` items also hold a strong reference back to the Lists object, but
/// the destructor of the `IdleNotifiedSet` will clear the two lists, so once
/// that object is destroyed, no ref-cycles will remain.
struct ListsInner<T> {
    /// Entries whose waker has fired since they were last polled.
    notified: LinkedList<T>,
    /// Entries waiting to be notified.
    idle: LinkedList<T>,
    /// Whenever an element in the `notified` list is woken, this waker will be
    /// notified and consumed, if it exists.
    waker: Option<Waker>,
}
/// Which of the two lists in the shared Lists object is this entry stored in?
///
/// If the value is `Idle`, then an entry's waker may move it to the notified
/// list. Otherwise, only the `IdleNotifiedSet` may move it.
///
/// If the value is `Neither`, then it is still possible that the entry is in
/// some third external list (this happens in `drain`).
#[derive(Copy, Clone, Eq, PartialEq)]
enum List {
    /// In the `notified` list: the entry's waker has fired.
    Notified,
    /// In the `idle` list: waiting to be notified.
    Idle,
    /// In neither internal list. This state is terminal (see the `ListEntry`
    /// safety docs) though the entry may sit in a temporary drain list.
    Neither,
}
/// An entry in the list.
///
/// # Safety
///
/// The `my_list` field must only be accessed while holding the mutex in
/// `parent`. It is an invariant that the value of `my_list` corresponds to
/// which linked list in the `parent` holds this entry. Once this field takes
/// the value `Neither`, then it may never be modified again.
///
/// If the value of `my_list` is `Notified` or `Idle`, then the `pointers` field
/// must only be accessed while holding the mutex. If the value of `my_list` is
/// `Neither`, then the `pointers` field may be accessed by the
/// `IdleNotifiedSet` (this happens inside `drain`).
///
/// The `value` field is owned by the `IdleNotifiedSet` and may only be accessed
/// by the `IdleNotifiedSet`. The operation that sets the value of `my_list` to
/// `Neither` assumes ownership of the `value`, and it must either drop it or
/// move it out from this entry to prevent it from getting leaked. (Since the
/// two linked lists are emptied in the destructor of `IdleNotifiedSet`, the
/// value should not be leaked.)
struct ListEntry<T> {
    /// The linked list pointers of the list this entry is in.
    pointers: linked_list::Pointers<ListEntry<T>>,
    /// Pointer to the shared `Lists` struct. (This forms a ref-cycle with the
    /// lists themselves; it is broken by `IdleNotifiedSet`'s destructor,
    /// which empties both lists.)
    parent: Arc<Lists<T>>,
    /// The value stored in this entry.
    value: UnsafeCell<ManuallyDrop<T>>,
    /// Used to remember which list this entry is in.
    my_list: UnsafeCell<List>,
    /// Required by the `linked_list::Pointers` field.
    _pin: PhantomPinned,
}
// Generates `ListEntry::addr_of_pointers`, which derives a pointer to the
// `pointers` field from a `NonNull<Self>` without materializing an
// intermediate `&self` reference — required by the linked list's
// stacked-borrows discipline (see `Link::pointers`).
generate_addr_of_methods! {
    impl<T> ListEntry<T> {
        unsafe fn addr_of_pointers(self: NonNull<Self>) -> NonNull<linked_list::Pointers<ListEntry<T>>> {
            &self.pointers
        }
    }
}
// Safety justifications for the auto-trait opt-ins below:
//
// With mutable access to the `IdleNotifiedSet`, you can get mutable access to
// the values.
unsafe impl<T: Send> Send for IdleNotifiedSet<T> {}
// With the current API we strictly speaking don't even need `T: Sync`, but we
// require it anyway to support adding &self APIs that access the values in the
// future.
unsafe impl<T: Sync> Sync for IdleNotifiedSet<T> {}
// These impls control when it is safe to create a Waker. Since the waker does
// not allow access to the value in any way (including its destructor), it is
// not necessary for `T` to be Send or Sync.
unsafe impl<T> Send for ListEntry<T> {}
unsafe impl<T> Sync for ListEntry<T> {}
impl<T> IdleNotifiedSet<T> {
    /// Create a new `IdleNotifiedSet`.
    pub(crate) fn new() -> Self {
        let lists = Mutex::new(ListsInner {
            notified: LinkedList::new(),
            idle: LinkedList::new(),
            waker: None,
        });

        IdleNotifiedSet {
            lists: Arc::new(lists),
            length: 0,
        }
    }

    /// Returns the total number of entries (idle + notified).
    pub(crate) fn len(&self) -> usize {
        self.length
    }

    /// Returns `true` if the set contains no entries.
    pub(crate) fn is_empty(&self) -> bool {
        self.length == 0
    }

    /// Insert the given value into the `idle` list, returning a handle that
    /// proves the entry is in one of the two lists.
    pub(crate) fn insert_idle(&mut self, value: T) -> EntryInOneOfTheLists<'_, T> {
        self.length += 1;

        let entry = Arc::new(ListEntry {
            parent: self.lists.clone(),
            value: UnsafeCell::new(ManuallyDrop::new(value)),
            my_list: UnsafeCell::new(List::Idle),
            pointers: linked_list::Pointers::new(),
            _pin: PhantomPinned,
        });

        {
            let mut lock = self.lists.lock();
            lock.idle.push_front(entry.clone());
        }

        // Safety: We just put the entry in the idle list, so it is in one of the lists.
        EntryInOneOfTheLists { entry, set: self }
    }

    /// Pop an entry from the notified list to poll it. The entry is moved to
    /// the idle list atomically.
    ///
    /// Also registers `waker` (replacing any previously stored waker that
    /// would not wake the same task) so that future notifications reach the
    /// caller.
    pub(crate) fn pop_notified(&mut self, waker: &Waker) -> Option<EntryInOneOfTheLists<'_, T>> {
        // We don't decrement the length because this call moves the entry to
        // the idle list rather than removing it.
        if self.length == 0 {
            // Fast path.
            return None;
        }

        let mut lock = self.lists.lock();

        // Avoid cloning the waker when the stored one would already wake the
        // same task.
        let should_update_waker = match lock.waker.as_mut() {
            Some(cur_waker) => !waker.will_wake(cur_waker),
            None => true,
        };
        if should_update_waker {
            lock.waker = Some(waker.clone());
        }

        // Pop the entry, returning None if empty.
        let entry = lock.notified.pop_back()?;

        lock.idle.push_front(entry.clone());

        // Safety: We are holding the lock.
        entry.my_list.with_mut(|ptr| unsafe {
            *ptr = List::Idle;
        });

        drop(lock);

        // Safety: We just put the entry in the idle list, so it is in one of the lists.
        Some(EntryInOneOfTheLists { entry, set: self })
    }

    /// Tries to pop an entry from the notified list to poll it. The entry is moved to
    /// the idle list atomically.
    ///
    /// Identical to `pop_notified` except that no waker is registered.
    pub(crate) fn try_pop_notified(&mut self) -> Option<EntryInOneOfTheLists<'_, T>> {
        // We don't decrement the length because this call moves the entry to
        // the idle list rather than removing it.
        if self.length == 0 {
            // Fast path.
            return None;
        }

        let mut lock = self.lists.lock();

        // Pop the entry, returning None if empty.
        let entry = lock.notified.pop_back()?;

        lock.idle.push_front(entry.clone());

        // Safety: We are holding the lock.
        entry.my_list.with_mut(|ptr| unsafe {
            *ptr = List::Idle;
        });

        drop(lock);

        // Safety: We just put the entry in the idle list, so it is in one of the lists.
        Some(EntryInOneOfTheLists { entry, set: self })
    }

    /// Call a function on every element in this list.
    pub(crate) fn for_each<F: FnMut(&mut T)>(&mut self, mut func: F) {
        // Collects a raw pointer to each entry's value, walking `list` from
        // its back towards its front.
        fn get_ptrs<T>(list: &mut LinkedList<T>, ptrs: &mut Vec<*mut T>) {
            let mut node = list.last();

            while let Some(entry) = node {
                ptrs.push(entry.value.with_mut(|ptr| {
                    let ptr: *mut ManuallyDrop<T> = ptr;
                    let ptr: *mut T = ptr.cast();
                    ptr
                }));

                let prev = entry.pointers.get_prev();
                node = prev.map(|prev| unsafe { &*prev.as_ptr() });
            }
        }

        // Atomically get a raw pointer to the value of every entry.
        //
        // Since this only locks the mutex once, it is not possible for a value
        // to get moved from the idle list to the notified list during the
        // operation, which would otherwise result in some value being listed
        // twice.
        let mut ptrs = Vec::with_capacity(self.len());
        {
            let mut lock = self.lists.lock();

            get_ptrs(&mut lock.idle, &mut ptrs);
            get_ptrs(&mut lock.notified, &mut ptrs);
        }
        // Every entry counted in `length` must have been visited exactly once.
        debug_assert_eq!(ptrs.len(), ptrs.capacity());

        for ptr in ptrs {
            // Safety: When we grabbed the pointers, the entries were in one of
            // the two lists. This means that their value was valid at the time,
            // and it must still be valid because we are the IdleNotifiedSet,
            // and only we can remove an entry from the two lists. (It's
            // possible that an entry is moved from one list to the other during
            // this loop, but that is ok.)
            func(unsafe { &mut *ptr });
        }
    }

    /// Remove all entries in both lists, applying some function to each element.
    ///
    /// The closure is called on all elements even if it panics. Having it panic
    /// twice is a double-panic, and will abort the application.
    pub(crate) fn drain<F: FnMut(T)>(&mut self, func: F) {
        if self.length == 0 {
            // Fast path.
            return;
        }
        self.length = 0;

        // The LinkedList is not cleared on panic, so we use a bomb to clear it.
        //
        // This value has the invariant that any entry in its `all_entries` list
        // has `my_list` set to `Neither` and that the value has not yet been
        // dropped.
        struct AllEntries<T, F: FnMut(T)> {
            all_entries: LinkedList<T>,
            func: F,
        }

        impl<T, F: FnMut(T)> AllEntries<T, F> {
            // Consumes one entry's value, returning whether there was one.
            fn pop_next(&mut self) -> bool {
                if let Some(entry) = self.all_entries.pop_back() {
                    // Safety: We just took this value from the list, so we can
                    // destroy the value in the entry.
                    entry
                        .value
                        .with_mut(|ptr| unsafe { (self.func)(ManuallyDrop::take(&mut *ptr)) });
                    true
                } else {
                    false
                }
            }
        }

        impl<T, F: FnMut(T)> Drop for AllEntries<T, F> {
            fn drop(&mut self) {
                while self.pop_next() {}
            }
        }

        let mut all_entries = AllEntries {
            all_entries: LinkedList::new(),
            func,
        };

        // Atomically move all entries to the new linked list in the AllEntries
        // object.
        {
            let mut lock = self.lists.lock();
            unsafe {
                // Safety: We are holding the lock and `all_entries` is a new
                // LinkedList.
                move_to_new_list(&mut lock.idle, &mut all_entries.all_entries);
                move_to_new_list(&mut lock.notified, &mut all_entries.all_entries);
            }
        }

        // Keep destroying entries in the list until it is empty.
        //
        // If the closure panics, then the destructor of the `AllEntries` bomb
        // ensures that we keep running the destructor on the remaining values.
        // A second panic will abort the program.
        while all_entries.pop_next() {}
    }
}
/// Moves every entry of `from` into `to`, marking each as `Neither`.
///
/// # Safety
///
/// The mutex for the entries must be held, and the target list must be such
/// that setting `my_list` to `Neither` is ok.
unsafe fn move_to_new_list<T>(from: &mut LinkedList<T>, to: &mut LinkedList<T>) {
    while let Some(entry) = from.pop_back() {
        entry.my_list.with_mut(|ptr| {
            // Safety: pointer is accessed while holding the mutex.
            unsafe {
                *ptr = List::Neither;
            }
        });
        to.push_front(entry);
    }
}
impl<'a, T> EntryInOneOfTheLists<'a, T> {
    /// Remove this entry from the list it is in, returning the value associated
    /// with the entry.
    ///
    /// This consumes the value, since it is no longer guaranteed to be in a
    /// list.
    pub(crate) fn remove(self) -> T {
        self.set.length -= 1;

        {
            let mut lock = self.set.lists.lock();

            // Safety: We are holding the lock so there is no race, and we will
            // remove the entry afterwards to uphold invariants.
            let old_my_list = self.entry.my_list.with_mut(|ptr| unsafe {
                let old_my_list = *ptr;
                *ptr = List::Neither;
                old_my_list
            });

            let list = match old_my_list {
                List::Idle => &mut lock.idle,
                List::Notified => &mut lock.notified,
                // An entry in one of the lists is in one of the lists.
                List::Neither => unreachable!(),
            };

            unsafe {
                // Safety: We just checked that the entry is in this particular
                // list.
                list.remove(ListEntry::as_raw(&self.entry)).unwrap();
            }
        }

        // By setting `my_list` to `Neither`, we have taken ownership of the
        // value. We return it to the caller.
        //
        // Safety: We have a mutable reference to the `IdleNotifiedSet` that
        // owns this entry, so we can use its permission to access the value.
        self.entry
            .value
            .with_mut(|ptr| unsafe { ManuallyDrop::take(&mut *ptr) })
    }

    /// Access the value in this entry together with a context for its waker.
    ///
    /// The waker passed to `func` is the entry's own waker: waking it moves
    /// the entry from the idle list to the notified list.
    pub(crate) fn with_value_and_context<F, U>(&mut self, func: F) -> U
    where
        F: FnOnce(&mut T, &mut Context<'_>) -> U,
        T: 'static,
    {
        let waker = waker_ref(&self.entry);

        let mut context = Context::from_waker(&waker);

        // Safety: We have a mutable reference to the `IdleNotifiedSet` that
        // owns this entry, so we can use its permission to access the value.
        self.entry
            .value
            .with_mut(|ptr| unsafe { func(&mut *ptr, &mut context) })
    }
}
impl<T> Drop for IdleNotifiedSet<T> {
    fn drop(&mut self) {
        // Clear both lists. This also breaks the entry -> parent ref-cycle,
        // allowing the shared `Lists` allocation to be freed.
        self.drain(drop);

        #[cfg(debug_assertions)]
        if !std::thread::panicking() {
            let lock = self.lists.lock();
            assert!(lock.idle.is_empty());
            assert!(lock.notified.is_empty());
        }
    }
}
impl<T: 'static> Wake for ListEntry<T> {
    /// Moves this entry from the idle list to the notified list (if it is
    /// idle) and fires the set's stored waker, if any.
    fn wake_by_ref(me: &Arc<Self>) {
        let mut lock = me.parent.lock();

        // Safety: We are holding the lock and we will update the lists to
        // maintain invariants.
        let old_my_list = me.my_list.with_mut(|ptr| unsafe {
            let old_my_list = *ptr;
            if old_my_list == List::Idle {
                *ptr = List::Notified;
            }
            old_my_list
        });

        if old_my_list == List::Idle {
            // We move ourself to the notified list.
            let me = unsafe {
                // Safety: We just checked that we are in this particular list.
                lock.idle.remove(ListEntry::as_raw(me)).unwrap()
            };
            lock.notified.push_front(me);

            if let Some(waker) = lock.waker.take() {
                // Release the mutex before invoking the waker, so arbitrary
                // waker code never runs while the lock is held.
                drop(lock);

                waker.wake();
            }
        }
    }

    fn wake(me: Arc<Self>) {
        Self::wake_by_ref(&me);
    }
}
/// # Safety
///
/// `ListEntry` is forced to be !Unpin.
unsafe impl<T> linked_list::Link for ListEntry<T> {
    type Handle = Arc<ListEntry<T>>;
    type Target = ListEntry<T>;

    /// Borrows the `Arc`'s pointee without touching the reference count.
    fn as_raw(handle: &Self::Handle) -> NonNull<ListEntry<T>> {
        let ptr: *const ListEntry<T> = Arc::as_ptr(handle);
        // Safety: We can't get a null pointer from `Arc::as_ptr`.
        unsafe { NonNull::new_unchecked(ptr as *mut ListEntry<T>) }
    }

    /// Reconstructs the owning `Arc` from a pointer produced by `as_raw`;
    /// this assumes one strong reference.
    unsafe fn from_raw(ptr: NonNull<ListEntry<T>>) -> Arc<ListEntry<T>> {
        unsafe { Arc::from_raw(ptr.as_ptr()) }
    }

    unsafe fn pointers(
        target: NonNull<ListEntry<T>>,
    ) -> NonNull<linked_list::Pointers<ListEntry<T>>> {
        // Uses the generated field-address helper to avoid creating an
        // intermediate reference (stacked-borrows requirement).
        unsafe { ListEntry::addr_of_pointers(target) }
    }
}
#[cfg(all(test, not(loom)))]
mod tests {
    use crate::runtime::Builder;
    use crate::task::JoinSet;

    // A test that runs under miri.
    //
    // https://github.com/tokio-rs/tokio/pull/5693
    //
    // Exercises the set indirectly through `JoinSet`: spawn one ready task
    // and join it to completion.
    #[test]
    fn join_set_test() {
        let rt = Builder::new_current_thread().build().unwrap();
        let mut set = JoinSet::new();
        set.spawn_on(futures::future::ready(()), rt.handle());

        rt.block_on(set.join_next()).unwrap().unwrap();
    }
}

808
vendor/tokio/src/util/linked_list.rs vendored Normal file
View File

@@ -0,0 +1,808 @@
#![cfg_attr(not(feature = "full"), allow(dead_code))]
// It doesn't make sense to enforce `unsafe_op_in_unsafe_fn` for this module because
//
// * The intrusive linked list naturally relies on unsafe operations.
// * Excessive `unsafe {}` blocks hurt readability significantly.
// TODO: replace with `#[expect(unsafe_op_in_unsafe_fn)]` after bumpping
// the MSRV to 1.81.0.
#![allow(unsafe_op_in_unsafe_fn)]
//! An intrusive double linked list of data.
//!
//! The data structure supports tracking pinned nodes. Most of the data
//! structure's APIs are `unsafe` as they require the caller to ensure the
//! specified node is actually contained by the list.
use core::cell::UnsafeCell;
use core::fmt;
use core::marker::{PhantomData, PhantomPinned};
use core::mem::ManuallyDrop;
use core::ptr::{self, NonNull};
/// An intrusive linked list.
///
/// Currently, the list is not emptied on drop. It is the caller's
/// responsibility to ensure the list is empty before dropping it.
pub(crate) struct LinkedList<L, T> {
    /// Linked list head
    head: Option<NonNull<T>>,

    /// Linked list tail
    tail: Option<NonNull<T>>,

    /// Node type marker.
    _marker: PhantomData<*const L>,
}

// Safety: the list stores nodes only as raw pointers, so it may be sent or
// shared across threads exactly when the node type itself may be.
unsafe impl<L: Link> Send for LinkedList<L, L::Target> where L::Target: Send {}
unsafe impl<L: Link> Sync for LinkedList<L, L::Target> where L::Target: Sync {}
/// Defines how a type is tracked within a linked list.
///
/// In order to support storing a single type within multiple lists, accessing
/// the list pointers is decoupled from the entry type.
///
/// # Safety
///
/// Implementations must guarantee that `Target` types are pinned in memory. In
/// other words, when a node is inserted, the value will not be moved as long as
/// it is stored in the list.
pub(crate) unsafe trait Link {
    /// Handle to the list entry.
    ///
    /// This is usually a pointer-ish type.
    type Handle;

    /// Node type.
    type Target;

    /// Convert the handle to a raw pointer without consuming the handle.
    // `as_` normally implies `self`-to-borrow; here it is a free conversion
    // on a borrowed handle, hence the lint allowance.
    #[allow(clippy::wrong_self_convention)]
    fn as_raw(handle: &Self::Handle) -> NonNull<Self::Target>;

    /// Convert the raw pointer to a handle
    unsafe fn from_raw(ptr: NonNull<Self::Target>) -> Self::Handle;

    /// Return the pointers for a node
    ///
    /// # Safety
    ///
    /// The resulting pointer should have the same tag in the stacked-borrows
    /// stack as the argument. In particular, the method may not create an
    /// intermediate reference in the process of creating the resulting raw
    /// pointer.
    ///
    /// The `target` pointer must be valid.
    unsafe fn pointers(target: NonNull<Self::Target>) -> NonNull<Pointers<Self::Target>>;
}
/// Previous / next pointers.
pub(crate) struct Pointers<T> {
    inner: UnsafeCell<PointersInner<T>>,
}

/// We do not want the compiler to put the `noalias` attribute on mutable
/// references to this type, so the type has been made `!Unpin` with a
/// `PhantomPinned` field.
///
/// Additionally, we never access the `prev` or `next` fields directly, as any
/// such access would implicitly involve the creation of a reference to the
/// field, which we want to avoid since the fields are not `!Unpin`, and would
/// hence be given the `noalias` attribute if we were to do such an access. As
/// an alternative to accessing the fields directly, the `Pointers` type
/// provides getters and setters for the two fields, and those are implemented
/// using `ptr`-specific methods which avoids the creation of intermediate
/// references.
///
/// See this link for more information:
/// <https://github.com/rust-lang/rust/pull/82834>
struct PointersInner<T> {
    /// The previous node in the list. null if there is no previous node.
    prev: Option<NonNull<T>>,

    /// The next node in the list. null if there is no next node.
    next: Option<NonNull<T>>,

    /// This type is !Unpin due to the heuristic from:
    /// <https://github.com/rust-lang/rust/pull/82834>
    _pin: PhantomPinned,
}

// Safety: `Pointers` only stores raw pointers to `T`, so it inherits `T`'s
// thread-safety properties.
unsafe impl<T: Send> Send for Pointers<T> {}
unsafe impl<T: Sync> Sync for Pointers<T> {}
// ===== impl LinkedList =====
impl<L, T> LinkedList<L, T> {
/// Creates an empty linked list.
pub(crate) const fn new() -> LinkedList<L, T> {
LinkedList {
head: None,
tail: None,
_marker: PhantomData,
}
}
}
impl<L: Link> LinkedList<L, L::Target> {
    /// Adds an element first in the list.
    pub(crate) fn push_front(&mut self, val: L::Handle) {
        // The value should not be dropped, it is being inserted into the list
        let val = ManuallyDrop::new(val);
        let ptr = L::as_raw(&val);
        // Defensive check: pushing the current head again would link the node
        // to itself and corrupt the list.
        assert_ne!(self.head, Some(ptr));
        unsafe {
            L::pointers(ptr).as_mut().set_next(self.head);
            L::pointers(ptr).as_mut().set_prev(None);

            if let Some(head) = self.head {
                L::pointers(head).as_mut().set_prev(Some(ptr));
            }

            self.head = Some(ptr);

            if self.tail.is_none() {
                self.tail = Some(ptr);
            }
        }
    }

    /// Removes the first element from a list and returns it, or None if it is
    /// empty.
    pub(crate) fn pop_front(&mut self) -> Option<L::Handle> {
        unsafe {
            let head = self.head?;
            self.head = L::pointers(head).as_ref().get_next();

            if let Some(new_head) = L::pointers(head).as_ref().get_next() {
                L::pointers(new_head).as_mut().set_prev(None);
            } else {
                // The list had a single node, so it is now empty.
                self.tail = None;
            }

            // Unlink the removed node completely before handing it back.
            L::pointers(head).as_mut().set_prev(None);
            L::pointers(head).as_mut().set_next(None);

            Some(L::from_raw(head))
        }
    }

    /// Removes the last element from a list and returns it, or None if it is
    /// empty.
    pub(crate) fn pop_back(&mut self) -> Option<L::Handle> {
        unsafe {
            let last = self.tail?;
            self.tail = L::pointers(last).as_ref().get_prev();

            if let Some(prev) = L::pointers(last).as_ref().get_prev() {
                L::pointers(prev).as_mut().set_next(None);
            } else {
                // The list had a single node, so it is now empty.
                self.head = None;
            }

            // Unlink the removed node completely before handing it back.
            L::pointers(last).as_mut().set_prev(None);
            L::pointers(last).as_mut().set_next(None);
            Some(L::from_raw(last))
        }
    }

    /// Returns whether the linked list does not contain any node
    pub(crate) fn is_empty(&self) -> bool {
        if self.head.is_some() {
            return false;
        }

        // Invariant: head and tail are either both set or both unset.
        assert!(self.tail.is_none());
        true
    }

    /// Removes the specified node from the list
    ///
    /// Returns `None` (without modifying anything) when the node can be
    /// proven not to belong to `self`.
    ///
    /// # Safety
    ///
    /// The caller **must** ensure that exactly one of the following is true:
    /// - `node` is currently contained by `self`,
    /// - `node` is not contained by any list,
    /// - `node` is currently contained by some other `GuardedLinkedList` **and**
    ///   the caller has an exclusive access to that list. This condition is
    ///   used by the linked list in `sync::Notify`.
    pub(crate) unsafe fn remove(&mut self, node: NonNull<L::Target>) -> Option<L::Handle> {
        if let Some(prev) = L::pointers(node).as_ref().get_prev() {
            debug_assert_eq!(L::pointers(prev).as_ref().get_next(), Some(node));
            L::pointers(prev)
                .as_mut()
                .set_next(L::pointers(node).as_ref().get_next());
        } else {
            // No prev pointer: if the node were in this list it would have to
            // be the head.
            if self.head != Some(node) {
                return None;
            }

            self.head = L::pointers(node).as_ref().get_next();
        }

        if let Some(next) = L::pointers(node).as_ref().get_next() {
            debug_assert_eq!(L::pointers(next).as_ref().get_prev(), Some(node));
            L::pointers(next)
                .as_mut()
                .set_prev(L::pointers(node).as_ref().get_prev());
        } else {
            // This might be the last item in the list
            if self.tail != Some(node) {
                return None;
            }

            self.tail = L::pointers(node).as_ref().get_prev();
        }

        L::pointers(node).as_mut().set_next(None);
        L::pointers(node).as_mut().set_prev(None);

        Some(L::from_raw(node))
    }
}
impl<L: Link> fmt::Debug for LinkedList<L, L::Target> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Only the raw head/tail pointers are printed; the nodes themselves
        // are not walked.
        f.debug_struct("LinkedList")
            .field("head", &self.head)
            .field("tail", &self.tail)
            .finish()
    }
}
#[cfg(any(
    feature = "fs",
    feature = "rt",
    all(unix, feature = "process"),
    feature = "signal",
    feature = "sync",
))]
impl<L: Link> LinkedList<L, L::Target> {
    /// Returns a shared reference to the last node in the list, if any.
    pub(crate) fn last(&self) -> Option<&L::Target> {
        let tail = self.tail.as_ref()?;

        // Safety: the tail pointer refers to a node currently stored in the
        // list, which `Link` requires to stay pinned while it is linked.
        unsafe { Some(&*tail.as_ptr()) }
    }
}
impl<L: Link> Default for LinkedList<L, L::Target> {
    /// Equivalent to [`LinkedList::new`]: an empty list.
    fn default() -> Self {
        Self::new()
    }
}
// ===== impl DrainFilter =====

cfg_io_driver_impl! {
    /// An iterator that removes (and yields) the nodes matching a predicate
    /// while walking the list front-to-back.
    pub(crate) struct DrainFilter<'a, T: Link, F> {
        list: &'a mut LinkedList<T, T::Target>,
        filter: F,
        /// Cursor: the next node to examine, or `None` when exhausted.
        curr: Option<NonNull<T::Target>>,
    }

    impl<T: Link> LinkedList<T, T::Target> {
        /// Returns an iterator that removes and yields every node for which
        /// `filter` returns `true`.
        pub(crate) fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F>
        where
            F: FnMut(&T::Target) -> bool,
        {
            let curr = self.head;
            DrainFilter {
                curr,
                filter,
                list: self,
            }
        }
    }

    impl<'a, T, F> Iterator for DrainFilter<'a, T, F>
    where
        T: Link,
        F: FnMut(&T::Target) -> bool,
    {
        type Item = T::Handle;

        fn next(&mut self) -> Option<Self::Item> {
            while let Some(curr) = self.curr {
                // safety: the pointer references data contained by the list
                // Advance the cursor before a potential removal unlinks `curr`.
                self.curr = unsafe { T::pointers(curr).as_ref() }.get_next();

                // safety: the value is still owned by the linked list.
                if (self.filter)(unsafe { &mut *curr.as_ptr() }) {
                    return unsafe { self.list.remove(curr) };
                }
            }

            None
        }
    }
}
cfg_taskdump! {
    impl<T: Link> LinkedList<T, T::Target> {
        /// Calls `f` on a handle to every node in the list, front to back,
        /// without removing any node.
        pub(crate) fn for_each<F>(&mut self, mut f: F)
        where
            F: FnMut(&T::Handle),
        {
            let mut next = self.head;
            while let Some(curr) = next {
                unsafe {
                    // `from_raw` conceptually takes ownership of the node, but
                    // the list still owns it — wrap in `ManuallyDrop` so the
                    // handle is never dropped here.
                    let handle = ManuallyDrop::new(T::from_raw(curr));
                    f(&handle);
                    next = T::pointers(curr).as_ref().get_next();
                }
            }
        }
    }
}
// ===== impl GuardedLinkedList =====
feature! {
    #![any(
        feature = "process",
        feature = "sync",
        feature = "rt",
        feature = "signal",
    )]
    /// An intrusive linked list, but instead of keeping pointers to the head
    /// and tail nodes, it uses a special guard node linked with those nodes.
    /// It means that the list is circular and every pointer of a node from
    /// the list is not `None`, including pointers from the guard node.
    ///
    /// If a list is empty, then both pointers of the guard node are pointing
    /// at the guard node itself.
    pub(crate) struct GuardedLinkedList<L, T> {
        /// Pointer to the guard node.
        guard: NonNull<T>,
        /// Node type marker.
        _marker: PhantomData<*const L>,
    }
    impl<L: Link> LinkedList<L, L::Target> {
        /// Turns a linked list into the guarded version by linking the guard node
        /// with the head and tail nodes. Like with other nodes, you should guarantee
        /// that the guard node is pinned in memory.
        pub(crate) fn into_guarded(self, guard_handle: L::Handle) -> GuardedLinkedList<L, L::Target> {
            // `guard_handle` is a NonNull pointer, we don't have to care about dropping it.
            let guard = L::as_raw(&guard_handle);
            unsafe {
                if let Some(head) = self.head {
                    debug_assert!(L::pointers(head).as_ref().get_prev().is_none());
                    L::pointers(head).as_mut().set_prev(Some(guard));
                    L::pointers(guard).as_mut().set_next(Some(head));
                    // The list is not empty, so the tail cannot be `None`.
                    let tail = self.tail.unwrap();
                    debug_assert!(L::pointers(tail).as_ref().get_next().is_none());
                    L::pointers(tail).as_mut().set_next(Some(guard));
                    L::pointers(guard).as_mut().set_prev(Some(tail));
                } else {
                    // The list is empty: the guard points at itself in both
                    // directions, which is the circular-list encoding of "empty".
                    L::pointers(guard).as_mut().set_prev(Some(guard));
                    L::pointers(guard).as_mut().set_next(Some(guard));
                }
            }
            GuardedLinkedList { guard, _marker: PhantomData }
        }
    }
    impl<L: Link> GuardedLinkedList<L, L::Target> {
        /// Returns the current tail node, or `None` if the list is empty.
        fn tail(&self) -> Option<NonNull<L::Target>> {
            let tail_ptr = unsafe {
                // In the circular encoding, the guard's `prev` is the tail and
                // is never `None`.
                L::pointers(self.guard).as_ref().get_prev().unwrap()
            };
            // Compare the tail pointer with the address of the guard node itself.
            // If the guard points at itself, then there are no other nodes and
            // the list is considered empty.
            if tail_ptr != self.guard {
                Some(tail_ptr)
            } else {
                None
            }
        }
        /// Removes the last element from a list and returns it, or None if it is
        /// empty.
        pub(crate) fn pop_back(&mut self) -> Option<L::Handle> {
            unsafe {
                let last = self.tail()?;
                let before_last = L::pointers(last).as_ref().get_prev().unwrap();
                // Splice `last` out of the ring, then fully clear its pointers
                // so the returned handle looks unlinked.
                L::pointers(self.guard).as_mut().set_prev(Some(before_last));
                L::pointers(before_last).as_mut().set_next(Some(self.guard));
                L::pointers(last).as_mut().set_prev(None);
                L::pointers(last).as_mut().set_next(None);
                Some(L::from_raw(last))
            }
        }
    }
}
// ===== impl Pointers =====
impl<T> Pointers<T> {
    /// Create a new set of empty pointers
    pub(crate) fn new() -> Pointers<T> {
        Pointers {
            inner: UnsafeCell::new(PointersInner {
                prev: None,
                next: None,
                _pin: PhantomPinned,
            }),
        }
    }
    /// Returns the `prev` pointer of this node.
    // NOTE(review): fields are read through raw pointers (`addr_of!` + `read`)
    // rather than `&`-references — presumably to satisfy aliasing rules for
    // pinned, intrusively-linked nodes (e.g. under Miri); confirm before
    // converting these to plain field accesses.
    pub(crate) fn get_prev(&self) -> Option<NonNull<T>> {
        // SAFETY: Field is accessed immutably through a reference.
        unsafe { ptr::addr_of!((*self.inner.get()).prev).read() }
    }
    /// Returns the `next` pointer of this node.
    pub(crate) fn get_next(&self) -> Option<NonNull<T>> {
        // SAFETY: Field is accessed immutably through a reference.
        unsafe { ptr::addr_of!((*self.inner.get()).next).read() }
    }
    /// Overwrites the `prev` pointer of this node.
    fn set_prev(&mut self, value: Option<NonNull<T>>) {
        // SAFETY: Field is accessed mutably through a mutable reference.
        unsafe {
            ptr::addr_of_mut!((*self.inner.get()).prev).write(value);
        }
    }
    /// Overwrites the `next` pointer of this node.
    fn set_next(&mut self, value: Option<NonNull<T>>) {
        // SAFETY: Field is accessed mutably through a mutable reference.
        unsafe {
            ptr::addr_of_mut!((*self.inner.get()).next).write(value);
        }
    }
}
impl<T> fmt::Debug for Pointers<T> {
    /// Debug-formats this node's `prev`/`next` links.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Pointers")
            .field("prev", &self.get_prev())
            .field("next", &self.get_next())
            .finish()
    }
}
// Unit tests and the fuzz harness for the intrusive `LinkedList`.
#[cfg(any(test, fuzzing))]
#[cfg(not(loom))]
pub(crate) mod tests {
    use super::*;
    use std::pin::Pin;
    #[derive(Debug)]
    #[repr(C)]
    struct Entry {
        // Must be the first field: `Link::pointers` below uses `target.cast()`,
        // which is only valid with `#[repr(C)]` and `Pointers` at offset zero.
        pointers: Pointers<Entry>,
        val: i32,
    }
    unsafe impl<'a> Link for &'a Entry {
        type Handle = Pin<&'a Entry>;
        type Target = Entry;
        fn as_raw(handle: &Pin<&'_ Entry>) -> NonNull<Entry> {
            NonNull::from(handle.get_ref())
        }
        unsafe fn from_raw(ptr: NonNull<Entry>) -> Pin<&'a Entry> {
            Pin::new_unchecked(&*ptr.as_ptr())
        }
        unsafe fn pointers(target: NonNull<Entry>) -> NonNull<Pointers<Entry>> {
            target.cast()
        }
    }
    fn entry(val: i32) -> Pin<Box<Entry>> {
        Box::pin(Entry {
            pointers: Pointers::new(),
            val,
        })
    }
    fn ptr(r: &Pin<Box<Entry>>) -> NonNull<Entry> {
        r.as_ref().get_ref().into()
    }
    // Drains the list from the back, returning values in pop order.
    fn collect_list(list: &mut LinkedList<&'_ Entry, <&'_ Entry as Link>::Target>) -> Vec<i32> {
        let mut ret = vec![];
        while let Some(entry) = list.pop_back() {
            ret.push(entry.val);
        }
        ret
    }
    fn push_all<'a>(
        list: &mut LinkedList<&'a Entry, <&'_ Entry as Link>::Target>,
        entries: &[Pin<&'a Entry>],
    ) {
        for entry in entries.iter() {
            list.push_front(*entry);
        }
    }
    // Asserts that an entry has been fully unlinked (both pointers cleared).
    #[cfg(test)]
    macro_rules! assert_clean {
        ($e:ident) => {{
            assert!($e.pointers.get_next().is_none());
            assert!($e.pointers.get_prev().is_none());
        }};
    }
    #[cfg(test)]
    macro_rules! assert_ptr_eq {
        ($a:expr, $b:expr) => {{
            // Deal with mapping a Pin<&mut T> -> Option<NonNull<T>>
            assert_eq!(Some($a.as_ref().get_ref().into()), $b)
        }};
    }
    #[test]
    fn const_new() {
        const _: LinkedList<&Entry, <&Entry as Link>::Target> = LinkedList::new();
    }
    #[test]
    fn push_and_drain() {
        let a = entry(5);
        let b = entry(7);
        let c = entry(31);
        let mut list = LinkedList::new();
        assert!(list.is_empty());
        list.push_front(a.as_ref());
        assert!(!list.is_empty());
        list.push_front(b.as_ref());
        list.push_front(c.as_ref());
        let items: Vec<i32> = collect_list(&mut list);
        assert_eq!([5, 7, 31].to_vec(), items);
        assert!(list.is_empty());
    }
    #[test]
    fn push_pop_push_pop() {
        let a = entry(5);
        let b = entry(7);
        let mut list = LinkedList::<&Entry, <&Entry as Link>::Target>::new();
        list.push_front(a.as_ref());
        let entry = list.pop_back().unwrap();
        assert_eq!(5, entry.val);
        assert!(list.is_empty());
        list.push_front(b.as_ref());
        let entry = list.pop_back().unwrap();
        assert_eq!(7, entry.val);
        assert!(list.is_empty());
        assert!(list.pop_back().is_none());
    }
    #[test]
    fn remove_by_address() {
        let a = entry(5);
        let b = entry(7);
        let c = entry(31);
        // Note: `push_all(&[c, b, a])` pushes to the front, so the resulting
        // list order is head -> a, b, c <- tail in every case below.
        unsafe {
            // Remove all three, one at a time, starting from the head
            let mut list = LinkedList::new();
            push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);
            assert!(list.remove(ptr(&a)).is_some());
            assert_clean!(a);
            // `a` should be no longer there and can't be removed twice
            assert!(list.remove(ptr(&a)).is_none());
            assert!(!list.is_empty());
            assert!(list.remove(ptr(&b)).is_some());
            assert_clean!(b);
            // `b` should be no longer there and can't be removed twice
            assert!(list.remove(ptr(&b)).is_none());
            assert!(!list.is_empty());
            assert!(list.remove(ptr(&c)).is_some());
            assert_clean!(c);
            // `c` should be no longer there and can't be removed twice
            assert!(list.remove(ptr(&c)).is_none());
            assert!(list.is_empty());
        }
        unsafe {
            // Remove first (head) of three
            let mut list = LinkedList::new();
            push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);
            assert!(list.remove(ptr(&a)).is_some());
            assert_clean!(a);
            assert_ptr_eq!(b, list.head);
            assert_ptr_eq!(c, b.pointers.get_next());
            assert_ptr_eq!(b, c.pointers.get_prev());
            let items = collect_list(&mut list);
            assert_eq!([31, 7].to_vec(), items);
        }
        unsafe {
            // Remove middle
            let mut list = LinkedList::new();
            push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);
            assert!(list.remove(ptr(&b)).is_some());
            assert_clean!(b);
            assert_ptr_eq!(c, a.pointers.get_next());
            assert_ptr_eq!(a, c.pointers.get_prev());
            let items = collect_list(&mut list);
            assert_eq!([31, 5].to_vec(), items);
        }
        unsafe {
            // Remove last (tail) of three
            let mut list = LinkedList::new();
            push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);
            assert!(list.remove(ptr(&c)).is_some());
            assert_clean!(c);
            assert!(b.pointers.get_next().is_none());
            assert_ptr_eq!(b, list.tail);
            let items = collect_list(&mut list);
            assert_eq!([7, 5].to_vec(), items);
        }
        unsafe {
            // Remove first of two
            let mut list = LinkedList::new();
            push_all(&mut list, &[b.as_ref(), a.as_ref()]);
            assert!(list.remove(ptr(&a)).is_some());
            assert_clean!(a);
            // a should be no longer there and can't be removed twice
            assert!(list.remove(ptr(&a)).is_none());
            assert_ptr_eq!(b, list.head);
            assert_ptr_eq!(b, list.tail);
            assert!(b.pointers.get_next().is_none());
            assert!(b.pointers.get_prev().is_none());
            let items = collect_list(&mut list);
            assert_eq!([7].to_vec(), items);
        }
        unsafe {
            // Remove last of two
            let mut list = LinkedList::new();
            push_all(&mut list, &[b.as_ref(), a.as_ref()]);
            assert!(list.remove(ptr(&b)).is_some());
            assert_clean!(b);
            assert_ptr_eq!(a, list.head);
            assert_ptr_eq!(a, list.tail);
            assert!(a.pointers.get_next().is_none());
            assert!(a.pointers.get_prev().is_none());
            let items = collect_list(&mut list);
            assert_eq!([5].to_vec(), items);
        }
        unsafe {
            // Remove last item
            let mut list = LinkedList::new();
            push_all(&mut list, &[a.as_ref()]);
            assert!(list.remove(ptr(&a)).is_some());
            assert_clean!(a);
            assert!(list.head.is_none());
            assert!(list.tail.is_none());
            let items = collect_list(&mut list);
            assert!(items.is_empty());
        }
        unsafe {
            // Remove missing
            let mut list = LinkedList::<&Entry, <&Entry as Link>::Target>::new();
            list.push_front(b.as_ref());
            list.push_front(a.as_ref());
            assert!(list.remove(ptr(&c)).is_none());
        }
    }
    /// This is a fuzz test. Run it with `cargo fuzz run fuzz_linked_list`
    /// from the `/tokio/` directory.
    #[cfg(fuzzing)]
    pub fn fuzz_linked_list(ops: &[u8]) {
        enum Op {
            Push,
            Pop,
            Remove(usize),
        }
        use std::collections::VecDeque;
        // Decode each input byte into an operation; a `VecDeque` serves as the
        // reference model that the intrusive list is checked against.
        let ops = ops
            .iter()
            .map(|i| match i % 3u8 {
                0 => Op::Push,
                1 => Op::Pop,
                2 => Op::Remove((i / 3u8) as usize),
                _ => unreachable!(),
            })
            .collect::<Vec<_>>();
        let mut ll = LinkedList::<&Entry, <&Entry as Link>::Target>::new();
        let mut reference = VecDeque::new();
        let entries: Vec<_> = (0..ops.len()).map(|i| entry(i as i32)).collect();
        for (i, op) in ops.iter().enumerate() {
            match op {
                Op::Push => {
                    reference.push_front(i as i32);
                    assert_eq!(entries[i].val, i as i32);
                    ll.push_front(entries[i].as_ref());
                }
                Op::Pop => {
                    if reference.is_empty() {
                        assert!(ll.is_empty());
                        continue;
                    }
                    let v = reference.pop_back();
                    assert_eq!(v, ll.pop_back().map(|v| v.val));
                }
                Op::Remove(n) => {
                    if reference.is_empty() {
                        assert!(ll.is_empty());
                        continue;
                    }
                    let idx = n % reference.len();
                    let expect = reference.remove(idx).unwrap();
                    unsafe {
                        let entry = ll.remove(ptr(&entries[expect as usize])).unwrap();
                        assert_eq!(expect, entry.val);
                    }
                }
            }
        }
    }
}

9
vendor/tokio/src/util/markers.rs vendored Normal file
View File

@@ -0,0 +1,9 @@
/// Marker for types that are `Sync` but not `Send`
#[allow(dead_code)]
pub(crate) struct SyncNotSend(#[allow(dead_code)] *mut ());
// SAFETY: the `*mut ()` field makes this type automatically `!Send` and
// `!Sync`; this impl re-adds only `Sync`. The type is never instantiated
// with a live pointer — it exists purely as a marker.
unsafe impl Sync for SyncNotSend {}
cfg_rt! {
    // Marker for types that are neither `Send` nor `Sync` (the raw pointer
    // field suppresses both auto traits).
    pub(crate) struct NotSendOrSync(#[allow(dead_code)] *mut ());
}

102
vendor/tokio/src/util/memchr.rs vendored Normal file
View File

@@ -0,0 +1,102 @@
//! Search for a byte in a byte array using libc.
//!
//! When nothing pulls in libc, then just use a trivial implementation. Note
//! that we only depend on libc on unix.
#[cfg(not(all(unix, feature = "libc")))]
/// Portable fallback: linear scan for the first byte equal to `needle`.
fn memchr_inner(needle: u8, haystack: &[u8]) -> Option<usize> {
    for (index, &byte) in haystack.iter().enumerate() {
        if byte == needle {
            return Some(index);
        }
    }
    None
}
#[cfg(all(unix, feature = "libc"))]
/// libc-backed implementation: delegates to `memchr(3)` and converts the
/// returned pointer back into an index into `haystack`.
fn memchr_inner(needle: u8, haystack: &[u8]) -> Option<usize> {
    let start = haystack.as_ptr();
    // SAFETY: `start` is valid for `haystack.len()` bytes.
    let ptr = (unsafe { libc::memchr(start.cast(), needle as _, haystack.len()) })
        .cast::<u8>()
        .cast_const();
    if ptr.is_null() {
        None
    } else {
        // SAFETY: `ptr` will always be in bounds, since libc guarantees that the ptr will either
        // be to an element inside the array or the ptr will be null
        // since the ptr is in bounds the offset must also always be non null
        // and there can't be more than isize::MAX elements inside an array
        // as rust guarantees that the maximum number of bytes a allocation
        // may occupy is isize::MAX
        unsafe {
            // TODO(MSRV 1.87): When bumping MSRV, switch to `ptr.byte_offset_from_unsigned(start)`.
            Some(usize::try_from(ptr.offset_from(start)).unwrap_unchecked())
        }
    }
}
/// Returns the index of the first occurrence of `needle` in `haystack`,
/// or `None` if it is absent.
pub(crate) fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
    let index = memchr_inner(needle, haystack)?;
    // SAFETY: `memchr_inner` returns Some(index) and in that case index must point to an element in haystack
    // or `memchr_inner` None which is guarded by the `?` operator above
    // therefore the index must **always** point to an element in the array
    // and so this indexing operation is safe
    // TODO(MSRV 1.81): When bumping MSRV, switch to `std::hint::assert_unchecked(haystack.get(..=index).is_some());`
    unsafe {
        // This is an optimizer hint (equivalent of `assert_unchecked`):
        // it lets callers' bounds checks on `..=index` be elided.
        if haystack.get(..=index).is_none() {
            std::hint::unreachable_unchecked()
        }
    }
    Some(index)
}
#[cfg(test)]
mod tests {
    use super::memchr;
    // Spot-checks hits, misses, the NUL byte, 0xff, and newline handling.
    #[test]
    fn memchr_test() {
        let haystack = b"123abc456\0\xffabc\n";
        assert_eq!(memchr(b'1', haystack), Some(0));
        assert_eq!(memchr(b'2', haystack), Some(1));
        assert_eq!(memchr(b'3', haystack), Some(2));
        assert_eq!(memchr(b'4', haystack), Some(6));
        assert_eq!(memchr(b'5', haystack), Some(7));
        assert_eq!(memchr(b'6', haystack), Some(8));
        assert_eq!(memchr(b'7', haystack), None);
        assert_eq!(memchr(b'a', haystack), Some(3));
        assert_eq!(memchr(b'b', haystack), Some(4));
        assert_eq!(memchr(b'c', haystack), Some(5));
        assert_eq!(memchr(b'd', haystack), None);
        assert_eq!(memchr(b'A', haystack), None);
        assert_eq!(memchr(0, haystack), Some(9));
        assert_eq!(memchr(0xff, haystack), Some(10));
        assert_eq!(memchr(0xfe, haystack), None);
        assert_eq!(memchr(1, haystack), None);
        assert_eq!(memchr(b'\n', haystack), Some(14));
        assert_eq!(memchr(b'\r', haystack), None);
    }
    // Exhaustively checks every byte value in ascending and descending arrays.
    #[test]
    fn memchr_all() {
        let mut arr = Vec::new();
        for b in 0..=255 {
            arr.push(b);
        }
        for b in 0..=255 {
            assert_eq!(memchr(b, &arr), Some(b as usize));
        }
        arr.reverse();
        for b in 0..=255 {
            assert_eq!(memchr(b, &arr), Some(255 - b as usize));
        }
    }
    // An empty haystack never matches.
    #[test]
    fn memchr_empty() {
        for b in 0..=255 {
            assert_eq!(memchr(b, b""), None);
        }
    }
}

81
vendor/tokio/src/util/metric_atomics.rs vendored Normal file
View File

@@ -0,0 +1,81 @@
use std::sync::atomic::{AtomicUsize, Ordering};
cfg_64bit_metrics! {
use std::sync::atomic::AtomicU64;
}
/// `AtomicU64` that is a no-op on platforms without 64-bit atomics
///
/// When used on platforms without 64-bit atomics, writes to this are no-ops.
/// The `load` method is only defined when 64-bit atomics are available.
#[derive(Debug, Default)]
pub(crate) struct MetricAtomicU64 {
    // Present only when the target supports 64-bit atomics; on other targets
    // this struct is zero-sized and all writes are discarded.
    #[cfg(target_has_atomic = "64")]
    value: AtomicU64,
}
// some of these are currently only used behind cfg_unstable
#[allow(dead_code)]
impl MetricAtomicU64 {
    // Load is only defined when supported
    cfg_64bit_metrics! {
        /// Reads the counter; only available when 64-bit atomics exist.
        pub(crate) fn load(&self, ordering: Ordering) -> u64 {
            self.value.load(ordering)
        }
    }
    cfg_64bit_metrics! {
        /// Overwrites the counter value.
        pub(crate) fn store(&self, val: u64, ordering: Ordering) {
            self.value.store(val, ordering)
        }
        /// Creates a counter initialized to `value`.
        pub(crate) fn new(value: u64) -> Self {
            Self { value: AtomicU64::new(value) }
        }
        /// Adds `value` to the counter.
        pub(crate) fn add(&self, value: u64, ordering: Ordering) {
            self.value.fetch_add(value, ordering);
        }
    }
    cfg_no_64bit_metrics! {
        // On targets without 64-bit atomics every write silently does nothing,
        // matching the struct-level documentation above.
        pub(crate) fn store(&self, _val: u64, _ordering: Ordering) { }
        // on platforms without 64-bit atomics, fetch-add returns unit
        pub(crate) fn add(&self, _value: u64, _ordering: Ordering) { }
        pub(crate) fn new(_value: u64) -> Self { Self { } }
    }
}
#[cfg_attr(not(all(tokio_unstable, feature = "rt")), allow(dead_code))]
/// `AtomicUsize` for use in metrics.
///
/// This exposes simplified APIs for use in metrics & uses `std::sync` instead of Loom to avoid polluting loom logs with metric information.
#[derive(Debug, Default)]
pub(crate) struct MetricAtomicUsize {
    inner: AtomicUsize,
}

#[cfg_attr(not(all(tokio_unstable, feature = "rt")), allow(dead_code))]
impl MetricAtomicUsize {
    /// Creates a counter initialized to `value`.
    pub(crate) fn new(value: usize) -> Self {
        Self {
            inner: AtomicUsize::new(value),
        }
    }

    /// Reads the current value with the given ordering.
    pub(crate) fn load(&self, ordering: Ordering) -> usize {
        self.inner.load(ordering)
    }

    /// Overwrites the current value with the given ordering.
    pub(crate) fn store(&self, val: usize, ordering: Ordering) {
        self.inner.store(val, ordering)
    }

    /// Adds one (relaxed) and returns the *previous* value.
    pub(crate) fn increment(&self) -> usize {
        self.inner.fetch_add(1, Ordering::Relaxed)
    }

    /// Subtracts one (relaxed) and returns the *previous* value.
    pub(crate) fn decrement(&self) -> usize {
        self.inner.fetch_sub(1, Ordering::Relaxed)
    }
}

110
vendor/tokio/src/util/mod.rs vendored Normal file
View File

@@ -0,0 +1,110 @@
cfg_io_driver! {
pub(crate) mod bit;
}
#[cfg(feature = "fs")]
pub(crate) mod as_ref;
#[cfg(feature = "rt")]
pub(crate) mod atomic_cell;
#[cfg(feature = "net")]
mod blocking_check;
#[cfg(feature = "net")]
#[allow(unused_imports)]
pub(crate) use blocking_check::check_socket_for_blocking;
pub(crate) mod metric_atomics;
#[cfg(any(
// io driver uses `WakeList` directly
feature = "net",
feature = "process",
// `sync` enables `Notify` and `batch_semaphore`, which require `WakeList`.
feature = "sync",
// `fs` uses `batch_semaphore`, which requires `WakeList`.
feature = "fs",
// rt and signal use `Notify`, which requires `WakeList`.
feature = "rt",
feature = "signal",
// time driver uses `WakeList` in `Handle::process_at_time`.
feature = "time",
))]
mod wake_list;
#[cfg(any(
feature = "net",
feature = "process",
feature = "sync",
feature = "fs",
feature = "rt",
feature = "signal",
feature = "time",
))]
pub(crate) use wake_list::WakeList;
#[cfg(any(
feature = "fs",
feature = "net",
feature = "process",
feature = "rt",
feature = "sync",
feature = "signal",
feature = "time",
fuzzing,
))]
pub(crate) mod linked_list;
cfg_rt! {
pub(crate) mod sharded_list;
}
#[cfg(any(feature = "rt", feature = "macros"))]
pub(crate) mod rand;
cfg_rt! {
mod idle_notified_set;
pub(crate) use idle_notified_set::IdleNotifiedSet;
pub(crate) use self::rand::RngSeedGenerator;
mod wake;
pub(crate) use wake::WakerRef;
pub(crate) use wake::{waker_ref, Wake};
mod sync_wrapper;
pub(crate) use sync_wrapper::SyncWrapper;
mod rc_cell;
pub(crate) use rc_cell::RcCell;
}
cfg_rt_multi_thread! {
mod try_lock;
pub(crate) use try_lock::TryLock;
}
pub(crate) mod trace;
#[cfg(feature = "fs")]
pub(crate) mod typeid;
pub(crate) mod error;
#[cfg(feature = "io-util")]
pub(crate) mod memchr;
pub(crate) mod markers;
pub(crate) mod cacheline;
cfg_io_driver_impl! {
pub(crate) mod ptr_expose;
}
use std::{ops::DerefMut, pin::Pin};
/// Copy of [`std::pin::Pin::as_deref_mut`].
// TODO: Remove this once we bump the MSRV to 1.84.
pub(crate) fn pin_as_deref_mut<P: DerefMut>(ptr: Pin<&mut Pin<P>>) -> Pin<&mut P::Target> {
    // Safety: we never move out of the inner `Pin<P>`; the target is
    // immediately re-pinned by `as_mut`.
    let inner: &mut Pin<P> = unsafe { Pin::get_unchecked_mut(ptr) };
    inner.as_mut()
}

74
vendor/tokio/src/util/ptr_expose.rs vendored Normal file
View File

@@ -0,0 +1,74 @@
//! Utility for helping miri understand our exposed pointers.
//!
//! During normal execution, this module is equivalent to pointer casts. However, when running
//! under miri, pointer casts are replaced with lookups in a hash map. This makes Tokio compatible
//! with strict provenance when running under miri (which comes with a performance cost).
use std::marker::PhantomData;
#[cfg(miri)]
use {crate::loom::sync::Mutex, std::collections::BTreeMap};
/// A domain of exposed pointers.
///
/// Outside of miri this is a zero-cost wrapper around address<->pointer casts;
/// under miri every exposed pointer is recorded in a map so that the reverse
/// conversion can recover the original provenance.
pub(crate) struct PtrExposeDomain<T> {
    #[cfg(miri)]
    map: Mutex<BTreeMap<usize, *const T>>,
    _phantom: PhantomData<T>,
}

// SAFETY: Actually using the pointers is unsafe, so it's sound to transfer them across threads.
unsafe impl<T> Sync for PtrExposeDomain<T> {}

impl<T> PtrExposeDomain<T> {
    /// Creates an empty domain; usable in `const`/`static` contexts.
    pub(crate) const fn new() -> Self {
        Self {
            #[cfg(miri)]
            map: Mutex::const_new(BTreeMap::new()),
            _phantom: PhantomData,
        }
    }

    /// Exposes `ptr` and returns its address.
    #[inline]
    pub(crate) fn expose_provenance(&self, ptr: *const T) -> usize {
        #[cfg(miri)]
        {
            let address: usize = ptr.addr();
            self.map.lock().insert(address, ptr);
            address
        }

        #[cfg(not(miri))]
        {
            ptr as usize
        }
    }

    /// Converts a previously exposed address back into a pointer.
    #[inline]
    #[allow(clippy::wrong_self_convention)] // mirrors std name
    pub(crate) fn from_exposed_addr(&self, addr: usize) -> *const T {
        #[cfg(miri)]
        {
            let recorded = self.map.lock().get(&addr).copied();
            // SAFETY: Intentionally trigger a miri failure if the provenance we want is not
            // exposed.
            unsafe { recorded.unwrap_unchecked() }
        }

        #[cfg(not(miri))]
        {
            addr as *const T
        }
    }

    /// Forgets an exposed pointer; a no-op outside of miri.
    #[inline]
    pub(crate) fn unexpose_provenance(&self, _ptr: *const T) {
        #[cfg(miri)]
        {
            let address: usize = _ptr.addr();
            let recorded = self.map.lock().remove(&address);
            // SAFETY: Intentionally trigger a miri failure if the provenance we want is not
            // exposed.
            unsafe { recorded.unwrap_unchecked() };
        }
    }
}

95
vendor/tokio/src/util/rand.rs vendored Normal file
View File

@@ -0,0 +1,95 @@
cfg_rt! {
mod rt;
pub(crate) use rt::RngSeedGenerator;
cfg_unstable! {
mod rt_unstable;
}
}
/// A seed for random number generation.
///
/// In order to make certain functions within a runtime deterministic, a seed
/// can be specified at the time of creation.
#[allow(unreachable_pub)]
#[derive(Clone, Debug)]
pub struct RngSeed {
    // First 32-bit state word handed to `FastRand` (its `one`).
    s: u32,
    // Second 32-bit state word (its `two`); kept non-zero by `from_u64`.
    r: u32,
}
/// Fast random number generate.
///
/// Implement `xorshift64+`: 2 32-bit `xorshift` sequences added together.
/// Shift triplet `[17,7,16]` was calculated as indicated in Marsaglia's
/// `Xorshift` paper: <https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf>
/// This generator passes the SmallCrush suite, part of TestU01 framework:
/// <http://simul.iro.umontreal.ca/testu01/tu01.html>
#[derive(Clone, Copy, Debug)]
pub(crate) struct FastRand {
    // The two 32-bit xorshift state words.
    one: u32,
    two: u32,
}
impl RngSeed {
    /// Creates a random seed using loom internally.
    pub(crate) fn new() -> Self {
        Self::from_u64(crate::loom::rand::seed())
    }

    /// Splits a 64-bit value into the two 32-bit state words.
    fn from_u64(seed: u64) -> Self {
        let one = (seed >> 32) as u32;
        // The second state word must never be zero, or the xorshift
        // generator would get stuck; substitute 1 in that case.
        let two = match seed as u32 {
            0 => 1,
            nonzero => nonzero,
        };
        Self::from_pair(one, two)
    }

    /// Builds a seed directly from the two state words.
    fn from_pair(s: u32, r: u32) -> Self {
        Self { s, r }
    }
}
impl FastRand {
    /// Initialize a new fast random number generator using the default source of entropy.
    pub(crate) fn new() -> FastRand {
        FastRand::from_seed(RngSeed::new())
    }
    /// Initializes a new, thread-local, fast random number generator.
    pub(crate) fn from_seed(seed: RngSeed) -> FastRand {
        FastRand {
            one: seed.s,
            two: seed.r,
        }
    }
    /// Returns a uniformly-ish distributed value in `0..n`.
    #[cfg(any(
        feature = "macros",
        feature = "rt-multi-thread",
        all(feature = "sync", feature = "rt")
    ))]
    pub(crate) fn fastrand_n(&mut self, n: u32) -> u32 {
        // This is similar to fastrand() % n, but faster.
        // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
        let mul = (self.fastrand() as u64).wrapping_mul(n as u64);
        (mul >> 32) as u32
    }
    /// One step of the `xorshift64+` generator described on the struct.
    fn fastrand(&mut self) -> u32 {
        let mut s1 = self.one;
        let s0 = self.two;
        // Shift triplet [17, 7, 16]; see the struct-level references.
        s1 ^= s1 << 17;
        s1 = s1 ^ s0 ^ s1 >> 7 ^ s0 >> 16;
        self.one = s0;
        self.two = s1;
        s0.wrapping_add(s1)
    }
}

61
vendor/tokio/src/util/rand/rt.rs vendored Normal file
View File

@@ -0,0 +1,61 @@
use super::{FastRand, RngSeed};
use std::sync::Mutex;
/// A deterministic generator for seeds (and other generators).
///
/// Given the same initial seed, the generator will output the same sequence of seeds.
///
/// Since the seed generator will be kept in a runtime handle, we need to wrap `FastRand`
/// in a Mutex to make it thread safe. Different to the `FastRand` that we keep in a
/// thread local store, the expectation is that seed generation will not need to happen
/// very frequently, so the cost of the mutex should be minimal.
#[derive(Debug)]
pub(crate) struct RngSeedGenerator {
    /// Internal state for the seed generator. We keep it in a Mutex so that we can safely
    /// use it across multiple threads.
    state: Mutex<FastRand>,
}
impl RngSeedGenerator {
    /// Returns a new generator from the provided seed.
    pub(crate) fn new(seed: RngSeed) -> Self {
        let state = Mutex::new(FastRand::from_seed(seed));
        Self { state }
    }

    /// Returns the next seed in the sequence.
    pub(crate) fn next_seed(&self) -> RngSeed {
        let mut rng = self
            .state
            .lock()
            .expect("RNG seed generator is internally corrupt");
        // Draw the two state words in a fixed order so the sequence is
        // reproducible.
        let s = rng.fastrand();
        let r = rng.fastrand();
        RngSeed::from_pair(s, r)
    }

    /// Directly creates a generator using the next seed.
    pub(crate) fn next_generator(&self) -> Self {
        Self::new(self.next_seed())
    }
}
impl FastRand {
    /// Replaces the state of the random number generator with the provided seed, returning
    /// the seed that represents the previous state of the random number generator.
    ///
    /// The random number generator will become equivalent to one created with
    /// the same seed.
    pub(crate) fn replace_seed(&mut self, seed: RngSeed) -> RngSeed {
        // Snapshot the outgoing state before overwriting it.
        let previous = RngSeed::from_pair(self.one, self.two);
        self.one = seed.s;
        self.two = seed.r;
        previous
    }
}

View File

@@ -0,0 +1,20 @@
use super::RngSeed;
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
impl RngSeed {
    /// Generates a seed from the provided byte slice.
    ///
    /// # Example
    ///
    /// ```
    /// # use tokio::runtime::RngSeed;
    /// let seed = RngSeed::from_bytes(b"make me a seed");
    /// ```
    pub fn from_bytes(bytes: &[u8]) -> Self {
        // Fold the bytes down to 64 bits with the std hasher, then reuse the
        // u64 -> seed conversion.
        let mut hasher = DefaultHasher::new();
        hasher.write(bytes);
        Self::from_u64(hasher.finish())
    }
}

57
vendor/tokio/src/util/rc_cell.rs vendored Normal file
View File

@@ -0,0 +1,57 @@
use crate::loom::cell::UnsafeCell;
use std::rc::Rc;
/// This is exactly like `Cell<Option<Rc<T>>>`, except that it provides a `get`
/// method even though `Rc` is not `Copy`.
pub(crate) struct RcCell<T> {
    inner: UnsafeCell<Option<Rc<T>>>,
}
impl<T> RcCell<T> {
    /// Creates an empty cell.
    #[cfg(not(all(loom, test)))]
    pub(crate) const fn new() -> Self {
        Self {
            inner: UnsafeCell::new(None),
        }
    }
    // The UnsafeCell in loom does not have a const `new` fn.
    #[cfg(all(loom, test))]
    pub(crate) fn new() -> Self {
        Self {
            inner: UnsafeCell::new(None),
        }
    }
    /// Safety: This method may not be called recursively.
    #[inline]
    unsafe fn with_inner<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&mut Option<Rc<T>>) -> R,
    {
        // safety: This type is not Sync, so concurrent calls of this method
        // cannot happen. Furthermore, the caller guarantees that the method is
        // not called recursively. Finally, this is the only place that can
        // create mutable references to the inner Rc. This ensures that any
        // mutable references created here are exclusive.
        self.inner.with_mut(|ptr| f(unsafe { &mut *ptr }))
    }
    /// Returns a clone of the stored `Rc`, if any.
    pub(crate) fn get(&self) -> Option<Rc<T>> {
        // safety: The `Rc::clone` method will not call any unknown user-code,
        // so it will not result in a recursive call to `with_inner`.
        unsafe { self.with_inner(|rc| rc.clone()) }
    }
    /// Stores `val` and returns the previously stored value.
    pub(crate) fn replace(&self, val: Option<Rc<T>>) -> Option<Rc<T>> {
        // safety: No destructors or other unknown user-code will run inside the
        // `with_inner` call, so no recursive call to `with_inner` can happen.
        unsafe { self.with_inner(|rc| std::mem::replace(rc, val)) }
    }
    /// Stores `val`, dropping any previously stored value.
    pub(crate) fn set(&self, val: Option<Rc<T>>) {
        // Drop the old value *outside* `with_inner`: a destructor could
        // otherwise re-enter this cell recursively.
        let old = self.replace(val);
        drop(old);
    }
}

164
vendor/tokio/src/util/sharded_list.rs vendored Normal file
View File

@@ -0,0 +1,164 @@
use std::ptr::NonNull;
use std::sync::atomic::Ordering;
use crate::loom::sync::{Mutex, MutexGuard};
use crate::util::metric_atomics::{MetricAtomicU64, MetricAtomicUsize};
use super::linked_list::{Link, LinkedList};
/// An intrusive linked list supporting highly concurrent updates.
///
/// It currently relies on `LinkedList`, so it is the caller's
/// responsibility to ensure the list is empty before dropping it.
///
/// Note: Due to its inner sharded design, the order of nodes cannot be guaranteed.
pub(crate) struct ShardedList<L, T> {
    // One independently-locked list per shard.
    lists: Box<[Mutex<LinkedList<L, T>>]>,
    // Monotonic count of all insertions ever made (metrics only).
    added: MetricAtomicU64,
    // Current number of elements across all shards.
    count: MetricAtomicUsize,
    // Number of shards minus one; shard ids are reduced with `id & shard_mask`.
    shard_mask: usize,
}
/// Determines which linked list an item should be stored in.
///
/// # Safety
///
/// Implementations must guarantee that the id of an item does not change from
/// call to call.
pub(crate) unsafe trait ShardedListItem: Link {
    /// # Safety
    ///
    /// The provided pointer must point at a valid list item.
    // The returned id may exceed the shard count; it is masked down by the
    // list's `shard_mask` before use.
    unsafe fn get_shard_id(target: NonNull<Self::Target>) -> usize;
}
impl<L, T> ShardedList<L, T> {
    /// Creates a new and empty sharded linked list with the specified size.
    pub(crate) fn new(sharded_size: usize) -> Self {
        // A power-of-two shard count lets `shard_inner` reduce ids with a
        // bitmask instead of a modulo.
        assert!(sharded_size.is_power_of_two());

        let lists: Vec<_> = (0..sharded_size)
            .map(|_| Mutex::new(LinkedList::<L, T>::new()))
            .collect();

        Self {
            lists: lists.into_boxed_slice(),
            added: MetricAtomicU64::new(0),
            count: MetricAtomicUsize::new(0),
            shard_mask: sharded_size - 1,
        }
    }
}
/// Used to get the lock of shard.
pub(crate) struct ShardGuard<'a, L, T> {
    // Held lock on the shard's inner list.
    lock: MutexGuard<'a, LinkedList<L, T>>,
    // Metrics of the owning `ShardedList`, updated on push.
    added: &'a MetricAtomicU64,
    count: &'a MetricAtomicUsize,
    // Shard id this guard was created for; pushes are asserted against it.
    id: usize,
}
impl<L: ShardedListItem> ShardedList<L, L::Target> {
    /// Removes the last element from a list specified by `shard_id` and returns it, or None if it is
    /// empty.
    pub(crate) fn pop_back(&self, shard_id: usize) -> Option<L::Handle> {
        let mut lock = self.shard_inner(shard_id);
        let node = lock.pop_back();
        if node.is_some() {
            // `count` tracks the logical length across all shards.
            self.count.decrement();
        }
        node
    }
    /// Removes the specified node from the list.
    ///
    /// # Safety
    ///
    /// The caller **must** ensure that exactly one of the following is true:
    /// - `node` is currently contained by `self`,
    /// - `node` is not contained by any list,
    /// - `node` is currently contained by some other `GuardedLinkedList`.
    pub(crate) unsafe fn remove(&self, node: NonNull<L::Target>) -> Option<L::Handle> {
        let id = unsafe { L::get_shard_id(node) };
        let mut lock = self.shard_inner(id);
        // SAFETY: Since the shard id cannot change, it's not possible for this node
        // to be in any other list of the same sharded list.
        let node = unsafe { lock.remove(node) };
        if node.is_some() {
            self.count.decrement();
        }
        node
    }
    /// Gets the lock of `ShardedList`, makes us have the write permission.
    pub(crate) fn lock_shard(&self, val: &L::Handle) -> ShardGuard<'_, L, L::Target> {
        let id = unsafe { L::get_shard_id(L::as_raw(val)) };
        ShardGuard {
            lock: self.shard_inner(id),
            added: &self.added,
            count: &self.count,
            id,
        }
    }
    /// Gets the count of elements in this list.
    pub(crate) fn len(&self) -> usize {
        self.count.load(Ordering::Relaxed)
    }
    cfg_unstable_metrics! {
        cfg_64bit_metrics! {
            /// Gets the total number of elements added to this list.
            pub(crate) fn added(&self) -> u64 {
                self.added.load(Ordering::Relaxed)
            }
        }
    }
    /// Returns whether the linked list does not contain any node.
    pub(crate) fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Gets the shard size of this `SharedList`.
    ///
    /// Used to help us to decide the parameter `shard_id` of the `pop_back` method.
    pub(crate) fn shard_size(&self) -> usize {
        self.shard_mask + 1
    }
    #[inline]
    fn shard_inner(&self, id: usize) -> MutexGuard<'_, LinkedList<L, <L as Link>::Target>> {
        // Safety: This modulo operation ensures that the index is not out of bounds.
        unsafe { self.lists.get_unchecked(id & self.shard_mask).lock() }
    }
}
impl<'a, L: ShardedListItem> ShardGuard<'a, L, L::Target> {
    /// Push a value to this shard.
    pub(crate) fn push(mut self, val: L::Handle) {
        // A value may only be pushed through the guard of its own shard.
        let shard_id = unsafe { L::get_shard_id(L::as_raw(&val)) };
        assert_eq!(shard_id, self.id);
        self.lock.push_front(val);
        self.added.add(1, Ordering::Relaxed);
        self.count.increment();
    }
}
cfg_taskdump! {
    impl<L: ShardedListItem> ShardedList<L, L::Target> {
        /// Calls `f` on every element of every shard.
        ///
        /// All shard locks are acquired up front, so the callback observes a
        /// consistent snapshot of the whole sharded list.
        pub(crate) fn for_each<F>(&self, mut f: F)
        where
            F: FnMut(&L::Handle),
        {
            let mut guards = Vec::with_capacity(self.lists.len());
            for list in self.lists.iter() {
                guards.push(list.lock());
            }
            for g in &mut guards {
                g.for_each(&mut f);
            }
        }
    }
}

37
vendor/tokio/src/util/sync_wrapper.rs vendored Normal file
View File

@@ -0,0 +1,37 @@
//! This module contains a type that can make `Send + !Sync` types `Sync` by
//! disallowing all immutable access to the value.
//!
//! A similar primitive is provided in the `sync_wrapper` crate.
use std::any::Any;
/// Makes a `Send + !Sync` value `Sync` by forbidding all shared access to it.
pub(crate) struct SyncWrapper<T> {
    inner: T,
}

// safety: sending the wrapper across threads moves the inner value with it,
// which is exactly what `T: Send` permits.
unsafe impl<T: Send> Send for SyncWrapper<T> {}

// safety: an immutable reference to a SyncWrapper is useless (no method
// exposes `&T` for arbitrary `T`), so sharing it across threads is safe.
unsafe impl<T> Sync for SyncWrapper<T> {}

impl<T> SyncWrapper<T> {
    /// Wraps a value.
    pub(crate) fn new(inner: T) -> Self {
        Self { inner }
    }

    /// Consumes the wrapper, returning the inner value.
    pub(crate) fn into_inner(self) -> T {
        self.inner
    }
}

impl SyncWrapper<Box<dyn Any + Send>> {
    /// Attempt to downcast using `Any::downcast_ref()` to a type that is known to be `Sync`.
    pub(crate) fn downcast_ref_sync<T: Any + Sync>(&self) -> Option<&T> {
        // SAFETY: if the downcast fails, the inner value is not touched,
        // so no thread-safety violation can occur.
        self.inner.downcast_ref()
    }
}

191
vendor/tokio/src/util/trace.rs vendored Normal file
View File

@@ -0,0 +1,191 @@
cfg_rt! {
use std::marker::PhantomData;
/// Metadata recorded at a task's spawn call site (name, pre-boxing size,
/// source location). The tracing-only fields are compiled out unless both
/// `tokio_unstable` and the `tracing` feature are enabled.
#[derive(Copy, Clone)]
pub(crate) struct SpawnMeta<'a> {
    /// The name of the task
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    pub(crate) name: Option<&'a str>,
    /// The original size of the future or function being spawned
    /// (i.e. before any auto-boxing applied by the runtime).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    pub(crate) original_size: usize,
    /// The source code location where the task was spawned.
    ///
    /// This is wrapped in a type that may be empty when `tokio_unstable` is
    /// not enabled.
    pub(crate) spawned_at: crate::runtime::task::SpawnLocation,
    // Keeps the `'a` lifetime parameter used even when the `name` field
    // (the only real `&'a` user) is cfg'd away.
    _pd: PhantomData<&'a ()>,
}
impl<'a> SpawnMeta<'a> {
    /// Create new spawn meta with a name and original size (before possible auto-boxing)
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    #[track_caller]
    pub(crate) fn new(name: Option<&'a str>, original_size: usize) -> Self {
        Self {
            name,
            original_size,
            // `#[track_caller]` makes this capture the *caller's* location.
            spawned_at: crate::runtime::task::SpawnLocation::capture(),
            _pd: PhantomData,
        }
    }
    /// Create a new unnamed spawn meta with the original size (before possible auto-boxing)
    #[track_caller]
    pub(crate) fn new_unnamed(original_size: usize) -> Self {
        // Rebind the parameter when the tracing fields are compiled out, to
        // suppress the unused-variable warning.
        #[cfg(not(all(tokio_unstable, feature = "tracing")))]
        let _original_size = original_size;
        Self {
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            name: None,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            original_size,
            spawned_at: crate::runtime::task::SpawnLocation::capture(),
            _pd: PhantomData,
        }
    }
}
cfg_trace! {
use core::{
pin::Pin,
task::{Context, Poll},
};
use pin_project_lite::pin_project;
use std::mem;
use std::future::Future;
use tracing::instrument::Instrument;
pub(crate) use tracing::instrument::Instrumented;
/// Instruments a spawned future with a `runtime.spawn` tracing span carrying
/// the task's kind, name, id, sizes, and spawn location.
#[inline]
pub(crate) fn task<F>(task: F, kind: &'static str, meta: SpawnMeta<'_>, id: u64) -> Instrumented<F> {
    // Non-generic inner fn: the span construction is not duplicated per
    // future type `F` by monomorphization.
    fn get_span(kind: &'static str, spawn_meta: SpawnMeta<'_>, id: u64, task_size: usize) -> tracing::Span {
        // Only report the original size when auto-boxing changed it, so the
        // field is absent in the common (unboxed) case.
        let original_size = if spawn_meta.original_size != task_size {
            Some(spawn_meta.original_size)
        } else {
            None
        };
        tracing::trace_span!(
            target: "tokio::task",
            parent: None,
            "runtime.spawn",
            %kind,
            task.name = %spawn_meta.name.unwrap_or_default(),
            task.id = id,
            original_size.bytes = original_size,
            size.bytes = task_size,
            loc.file = spawn_meta.spawned_at.0.file(),
            loc.line = spawn_meta.spawned_at.0.line(),
            loc.col = spawn_meta.spawned_at.0.column(),
        )
    }
    // NOTE: `tracing::instrument::Instrument` is already imported at the top
    // of the enclosing `cfg_trace!` block; the redundant function-local
    // `use` that used to live here has been removed.
    let span = get_span(kind, meta, id, mem::size_of::<F>());
    task.instrument(span)
}
/// Instruments a blocking task's future with a `runtime.spawn` span of kind
/// `"blocking"`. `Fn` is the original closure type — used only for its type
/// name and size in the span fields — while `Fut` is the already-built
/// future being instrumented.
#[inline]
pub(crate) fn blocking_task<Fn, Fut>(task: Fut, spawn_meta: SpawnMeta<'_>, id: u64) -> Instrumented<Fut> {
    let fn_size = mem::size_of::<Fn>();
    // Only report the original size when auto-boxing changed it.
    let original_size = if spawn_meta.original_size != fn_size {
        Some(spawn_meta.original_size)
    } else {
        None
    };
    let span = tracing::trace_span!(
        target: "tokio::task::blocking",
        "runtime.spawn",
        kind = %"blocking",
        task.name = %spawn_meta.name.unwrap_or_default(),
        task.id = id,
        "fn" = %std::any::type_name::<Fn>(),
        original_size.bytes = original_size,
        size.bytes = fn_size,
        loc.file = spawn_meta.spawned_at.0.file(),
        loc.line = spawn_meta.spawned_at.0.line(),
        loc.col = spawn_meta.spawned_at.0.column(),
    );
    task.instrument(span)
}
/// Builds the value produced by `inner` together with the tracing context
/// for an instrumented async operation. The `runtime.resource.async_op`
/// span (and its nested `.poll` span) are created inside `resource_span`
/// so they are parented to the owning resource.
pub(crate) fn async_op<P,F>(inner: P, resource_span: tracing::Span, source: &str, poll_op_name: &'static str, inherits_child_attrs: bool) -> InstrumentedAsyncOp<F>
where P: FnOnce() -> F {
    resource_span.in_scope(|| {
        let async_op_span = tracing::trace_span!("runtime.resource.async_op", source = source, inherits_child_attrs = inherits_child_attrs);
        // Enter the async_op span while creating the poll span and the inner
        // value, so both are constructed in its context.
        let enter = async_op_span.enter();
        let async_op_poll_span = tracing::trace_span!("runtime.resource.async_op.poll");
        let inner = inner();
        drop(enter);
        let tracing_ctx = AsyncOpTracingCtx {
            async_op_span,
            async_op_poll_span,
            resource_span: resource_span.clone(),
        };
        InstrumentedAsyncOp {
            inner,
            tracing_ctx,
            poll_op_name,
        }
    })
}
/// The three spans an instrumented async op enters on every poll: the
/// owning resource's span, the op's own span, and the per-poll span.
#[derive(Debug, Clone)]
pub(crate) struct AsyncOpTracingCtx {
    pub(crate) async_op_span: tracing::Span,
    pub(crate) async_op_poll_span: tracing::Span,
    pub(crate) resource_span: tracing::Span,
}
pin_project! {
    /// A future paired with the tracing context needed to attribute each of
    /// its polls to the owning resource/async-op spans.
    #[derive(Debug, Clone)]
    pub(crate) struct InstrumentedAsyncOp<F> {
        #[pin]
        pub(crate) inner: F,
        pub(crate) tracing_ctx: AsyncOpTracingCtx,
        pub(crate) poll_op_name: &'static str
    }
}
impl<F: Future> Future for InstrumentedAsyncOp<F> {
    type Output = F::Output;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        let poll_op_name = &*this.poll_op_name;
        // Enter resource, async_op, then poll spans (outermost first) so the
        // inner poll is attributed to all three; the guards drop in reverse
        // order when this scope ends.
        let _res_enter = this.tracing_ctx.resource_span.enter();
        let _async_op_enter = this.tracing_ctx.async_op_span.enter();
        let _async_op_poll_enter = this.tracing_ctx.async_op_poll_span.enter();
        trace_poll_op!(poll_op_name, this.inner.poll(cx))
    }
}
}
cfg_not_trace! {
    /// No-op stand-in for the tracing-enabled `task`: returns the future
    /// unchanged so call sites are identical across cfgs.
    #[inline]
    pub(crate) fn task<F>(task: F, _kind: &'static str, _meta: SpawnMeta<'_>, _id: u64) -> F {
        // nop
        task
    }
    /// No-op stand-in for `blocking_task`; the `Fn` type parameter is kept
    /// so the signature matches the tracing-enabled version.
    #[inline]
    pub(crate) fn blocking_task<Fn, Fut>(task: Fut, _spawn_meta: SpawnMeta<'_>, _id: u64) -> Fut {
        // Mark `Fn` as used to avoid an unused-type-parameter error.
        let _ = PhantomData::<&Fn>;
        // nop
        task
    }
}
}
cfg_time! {
    /// Captures the caller's source location for time-related diagnostics.
    ///
    /// Returns `Some` only when `tokio_unstable` and the `tracing` feature
    /// are both enabled; otherwise returns `None` so callers pay no cost.
    #[track_caller]
    pub(crate) fn caller_location() -> Option<&'static std::panic::Location<'static>> {
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        return Some(std::panic::Location::caller());
        #[cfg(not(all(tokio_unstable, feature = "tracing")))]
        None
    }
}

80
vendor/tokio/src/util/try_lock.rs vendored Normal file
View File

@@ -0,0 +1,80 @@
use crate::loom::sync::atomic::AtomicBool;
use std::cell::UnsafeCell;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::Ordering::SeqCst;
/// A minimal non-blocking lock: `try_lock` either acquires immediately or
/// fails — there is no waiting or spinning.
pub(crate) struct TryLock<T> {
    locked: AtomicBool,
    data: UnsafeCell<T>,
}

/// RAII guard; releases the lock when dropped.
pub(crate) struct LockGuard<'a, T> {
    lock: &'a TryLock<T>,
    // The `Rc` marker strips the auto `Send` impl: the guard must be
    // released on the thread that acquired it.
    _p: PhantomData<std::rc::Rc<()>>,
}

unsafe impl<T: Send> Send for TryLock<T> {}
unsafe impl<T: Send> Sync for TryLock<T> {}
unsafe impl<T: Sync> Sync for LockGuard<'_, T> {}

impl<T> TryLock<T> {
    /// Create a new `TryLock`
    #[cfg(not(loom))]
    pub(crate) const fn new(data: T) -> TryLock<T> {
        TryLock {
            locked: AtomicBool::new(false),
            data: UnsafeCell::new(data),
        }
    }

    /// Create a new `TryLock`
    ///
    /// Separate non-`const` constructor for loom builds, whose
    /// `AtomicBool::new` is not `const`.
    #[cfg(loom)]
    pub(crate) fn new(data: T) -> TryLock<T> {
        TryLock {
            locked: AtomicBool::new(false),
            data: UnsafeCell::new(data),
        }
    }

    /// Attempt to acquire lock
    pub(crate) fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        // Winning the false -> true transition acquires the lock; a failed
        // CAS means another guard currently holds it.
        let acquired = self
            .locked
            .compare_exchange(false, true, SeqCst, SeqCst)
            .is_ok();
        if acquired {
            Some(LockGuard {
                lock: self,
                _p: PhantomData,
            })
        } else {
            None
        }
    }
}

impl<T> Deref for LockGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // Safety: existence of this guard means the lock is held, so access
        // to `data` is exclusive to it.
        unsafe { &*self.lock.data.get() }
    }
}

impl<T> DerefMut for LockGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        // Safety: as in `deref`; `&mut self` additionally guarantees unique
        // access through this guard.
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T> Drop for LockGuard<'_, T> {
    fn drop(&mut self) {
        // Release: the next `try_lock` CAS can succeed again.
        self.lock.locked.store(false, SeqCst);
    }
}

45
vendor/tokio/src/util/typeid.rs vendored Normal file
View File

@@ -0,0 +1,45 @@
use std::{
any::TypeId,
marker::PhantomData,
mem::{self, ManuallyDrop},
};
// SAFETY: this function does not compare lifetimes. Values returned as `Ok`
// may have their lifetimes extended.
pub(super) unsafe fn try_transmute<Src, Target: 'static>(x: Src) -> Result<Target, Src> {
    if nonstatic_typeid::<Src>() != TypeId::of::<Target>() {
        return Err(x);
    }
    // Suppress `x`'s destructor: its bits are about to be duplicated into
    // the return value.
    let src = ManuallyDrop::new(x);
    // SAFETY: the TypeId comparison above proved that `Src` and `Target`
    // are the same type, so this bit-copy is an identity conversion.
    Ok(unsafe { mem::transmute_copy::<Src, Target>(&src) })
}

// Computes the `TypeId` of `T` without requiring `T: 'static`.
//
// https://github.com/dtolnay/typeid/blob/b06a3c08a0eaccc7df6091ade1ae4e3fb53609d5/src/lib.rs#L197-L222
#[inline(always)]
fn nonstatic_typeid<T>() -> TypeId
where
    T: ?Sized,
{
    // The `'static` requirement of `TypeId::of` is moved onto the *method*
    // of a helper trait (`where Self: 'static`); the transmute below erases
    // the non-static lifetime of the trait-object reference.
    trait NonStaticAny {
        fn get_type_id(&self) -> TypeId
        where
            Self: 'static;
    }

    impl<T: ?Sized> NonStaticAny for PhantomData<T> {
        #[inline(always)]
        fn get_type_id(&self) -> TypeId
        where
            Self: 'static,
        {
            TypeId::of::<T>()
        }
    }

    let marker = PhantomData::<T>;
    // Lifetime extension only; per dtolnay/typeid, lifetimes do not factor
    // into the computed TypeId.
    NonStaticAny::get_type_id(unsafe {
        mem::transmute::<&dyn NonStaticAny, &(dyn NonStaticAny + 'static)>(&marker)
    })
}

78
vendor/tokio/src/util/wake.rs vendored Normal file
View File

@@ -0,0 +1,78 @@
use crate::loom::sync::Arc;
use std::marker::PhantomData;
use std::mem::ManuallyDrop;
use std::ops::Deref;
use std::task::{RawWaker, RawWakerVTable, Waker};
/// Simplified waking interface based on Arcs.
pub(crate) trait Wake: Send + Sync + Sized + 'static {
    /// Wake by value.
    fn wake(arc_self: Arc<Self>);
    /// Wake by reference.
    fn wake_by_ref(arc_self: &Arc<Self>);
}

/// A `Waker` that is only valid for a given lifetime.
///
/// The inner `Waker` borrows the `Arc`'s allocation without owning a
/// reference count, so it lives in `ManuallyDrop` (its vtable `drop` must
/// never run) and is tied to the `Arc`'s borrow via `PhantomData`.
#[derive(Debug)]
pub(crate) struct WakerRef<'a> {
    waker: ManuallyDrop<Waker>,
    _p: PhantomData<&'a ()>,
}

impl Deref for WakerRef<'_> {
    type Target = Waker;

    fn deref(&self) -> &Waker {
        &self.waker
    }
}

/// Creates a reference to a `Waker` from a reference to `Arc<impl Wake>`.
///
/// No reference count is taken here: the raw data pointer merely aliases
/// the `Arc`'s allocation for as long as `wake` stays borrowed.
pub(crate) fn waker_ref<W: Wake>(wake: &Arc<W>) -> WakerRef<'_> {
    let data = Arc::as_ptr(wake).cast::<()>();
    let raw = RawWaker::new(data, waker_vtable::<W>());
    // The vtable functions below reinterpret `data` as an `Arc<W>` pointer
    // and manage the strong count accordingly.
    let waker = unsafe { Waker::from_raw(raw) };
    WakerRef {
        waker: ManuallyDrop::new(waker),
        _p: PhantomData,
    }
}

/// Vtable shared by every waker produced through `waker_ref`.
fn waker_vtable<W: Wake>() -> &'static RawWakerVTable {
    &RawWakerVTable::new(
        clone_arc_raw::<W>,
        wake_arc_raw::<W>,
        wake_by_ref_arc_raw::<W>,
        drop_arc_raw::<W>,
    )
}

/// Clone: bumps the strong count so the new `RawWaker` owns one reference.
unsafe fn clone_arc_raw<T: Wake>(data: *const ()) -> RawWaker {
    // Safety: `data` was created from an `Arc::as_ptr` in function `waker_ref`.
    unsafe {
        Arc::<T>::increment_strong_count(data.cast::<T>());
    }
    RawWaker::new(data, waker_vtable::<T>())
}

/// Wake by value: consumes the reference count owned by this waker.
unsafe fn wake_arc_raw<T: Wake>(data: *const ()) {
    // Safety: `data` was created from an `Arc::as_ptr` in function `waker_ref`.
    let arc = unsafe { Arc::<T>::from_raw(data.cast::<T>()) };
    Wake::wake(arc);
}

// used by `waker_ref`
/// Wake by reference: must leave the reference count untouched.
unsafe fn wake_by_ref_arc_raw<T: Wake>(data: *const ()) {
    // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop.
    // Safety: `data` was created from an `Arc::as_ptr` in function `waker_ref`.
    let arc = ManuallyDrop::new(unsafe { Arc::<T>::from_raw(data.cast()) });
    Wake::wake_by_ref(&arc);
}

/// Drop: releases the reference count owned by this waker.
unsafe fn drop_arc_raw<T: Wake>(data: *const ()) {
    // Safety: `data` was created from an `Arc::as_ptr` in function `waker_ref`.
    drop(unsafe { Arc::<T>::from_raw(data.cast::<T>()) });
}

83
vendor/tokio/src/util/wake_list.rs vendored Normal file
View File

@@ -0,0 +1,83 @@
use core::mem::MaybeUninit;
use core::ptr;
use std::task::Waker;
const NUM_WAKERS: usize = 32;

/// A list of wakers to be woken.
///
/// # Invariants
///
/// The first `curr` elements of `inner` are initialized.
pub(crate) struct WakeList {
    inner: [MaybeUninit<Waker>; NUM_WAKERS],
    curr: usize,
}

impl WakeList {
    /// Creates an empty list with inline (uninitialized) storage for up to
    /// `NUM_WAKERS` wakers.
    pub(crate) fn new() -> Self {
        // A `const` item is needed so the non-`Copy` `MaybeUninit<Waker>`
        // can seed the `[EMPTY; N]` array-repeat expression.
        const EMPTY: MaybeUninit<Waker> = MaybeUninit::uninit();
        Self {
            inner: [EMPTY; NUM_WAKERS],
            curr: 0,
        }
    }

    /// Whether there is room for another waker.
    #[inline]
    pub(crate) fn can_push(&self) -> bool {
        self.curr < NUM_WAKERS
    }

    /// Appends a waker. The caller must check `can_push()` first.
    pub(crate) fn push(&mut self, val: Waker) {
        debug_assert!(self.can_push());
        self.inner[self.curr] = MaybeUninit::new(val);
        self.curr += 1;
    }

    /// Wakes every stored waker, leaving the list empty and reusable.
    ///
    /// Panic-safe: if a `Waker::wake` call panics, the not-yet-woken wakers
    /// are still dropped (without being woken) by the guard below.
    pub(crate) fn wake_all(&mut self) {
        /// Owns the pending range `[head, tail)`; dropping it drops (but
        /// does not wake) whatever has not been consumed yet.
        struct PendingWakers {
            head: *mut Waker,
            tail: *mut Waker,
        }

        impl Drop for PendingWakers {
            fn drop(&mut self) {
                // SAFETY: both pointers lie within the same array and
                // `head <= tail`, so the distance is a valid length.
                let remaining = unsafe { self.tail.offset_from(self.head) } as usize;
                let slice = ptr::slice_from_raw_parts_mut(self.head, remaining);
                // SAFETY: every element of `[head, tail)` is initialized and
                // owned by this guard, so it may be dropped in place.
                unsafe { ptr::drop_in_place(slice) };
            }
        }

        debug_assert!(self.curr <= NUM_WAKERS);

        let mut pending = {
            let head = self.inner.as_mut_ptr().cast::<Waker>();
            // SAFETY: `curr <= NUM_WAKERS`, so `head + curr` is in bounds or
            // one past the end of the same array.
            let tail = unsafe { head.add(self.curr) };
            // Ownership of the initialized wakers moves to the guard; reset
            // `curr` now so `self` no longer claims them.
            self.curr = 0;
            PendingWakers { head, tail }
        };

        while !ptr::eq(pending.head, pending.tail) {
            // SAFETY: `head != tail` implies `head` points at an initialized
            // waker still owned by the guard.
            let waker = unsafe { ptr::read(pending.head) };
            // SAFETY: the result stays in bounds or one past the end.
            pending.head = unsafe { pending.head.add(1) };
            // If this panics, the guard's Drop handles the remaining range.
            waker.wake();
        }
    }
}

impl Drop for WakeList {
    fn drop(&mut self) {
        let initialized =
            ptr::slice_from_raw_parts_mut(self.inner.as_mut_ptr().cast::<Waker>(), self.curr);
        // SAFETY: per the struct invariant, the first `curr` slots hold
        // initialized wakers.
        unsafe { ptr::drop_in_place(initialized) };
    }
}