chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

1
vendor/seize/.cargo-checksum.json vendored Normal file
View File

@@ -0,0 +1 @@
{"files":{".cargo_vcs_info.json":"f4f8bed0fd573417e0525a449276df2d6e40231b01c762e9ddf6fbc646d1653a",".github/workflows/ci.yml":"8fd137cbdfa44c034aadd294e91d2c8acc8c65434135bb25e7b5761acc31ed16","Cargo.toml":"c52ec4cdeb997b1457a8be68901b0b3d1578ea870eac732f92bdc9fe7a4aa8d2","Cargo.toml.orig":"4dfed13161f36f925ff4a27585a8c6afc3f9aa05615e9d3c087541fe05475c03","LICENSE":"de701d0618d694feb1af90f02181a1763d9b0bdeb70a3a592781e529077dba65","README.md":"b7056ec86e6b65432a94a9015ddd7db7cbba37889919a76827eea7685649903a","benches/single_thread.rs":"abd7ccaf20e1f320c60623a68a91d1a7202a92f11c9ee4020c76426ed909957b","benches/stack.rs":"15441ab61c0abb002b1d8ea6bd634591e1bd7094edbaa09e815be28a61bc7121","src/collector.rs":"ce21ee1cf71ad221f856baf3a17e8389a4f24a32d172bc63e2757a5b418be7cc","src/lib.rs":"24db1de50a0d9b336b3c3b07f268463b6355539fd468c566e043d8bd2236036f","src/raw.rs":"4d7c66b70246740dd12d220dd3a291f4ee95790d94d97f9d238e33f889fae1e3","src/reclaim.rs":"c26bbf8bf26425261d94c4a940b188c1ee555d1815338dd9bf359703d3883d1f","src/tls/mod.rs":"05096a97ac0ba0345c1c7f4f7fcae81a4fb3ec51b11eb2ebd3711835f44c5113","src/tls/thread_id.rs":"f53d48f28e74f2748882d08aec7059fe4619babd23022238423ee80dc8268afb","src/utils.rs":"258ddce0666d28ec5fab89d7297f4ae36e5f2c30aa6e3e0aa2024c31de212d7b","tests/lib.rs":"3fc2e1e28c043b064a593d29a48eebc9eaf0de10ca88f893847d9d87b8c4aef6"},"package":"689224d06523904ebcc9b482c6a3f4f7fb396096645c4cd10c0d2ff7371a34d3"}

6
vendor/seize/.cargo_vcs_info.json vendored Normal file
View File

@@ -0,0 +1,6 @@
{
"git": {
"sha1": "ae7f70611d8285d5c51d801c8fe9b108e41f3156"
},
"path_in_vcs": ""
}

95
vendor/seize/.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,95 @@
on: [push, pull_request]
name: CI
jobs:
check:
name: Cargo Check
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: actions-rs/cargo@v1
with:
command: check
test:
name: Test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: actions-rs/cargo@v1
with:
command: test
miri:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- run: |
echo "NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)" >> $GITHUB_ENV
- name: Install ${{ env.NIGHTLY }}
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.NIGHTLY }}
components: miri
- name: cargo miri test
run: cargo miri test
env:
MIRIFLAGS: "-Zmiri-disable-isolation"
doc:
name: Docs
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: actions-rs/cargo@v1
with:
command: test
args: --doc
fmt:
name: Format
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- run: rustup component add rustfmt
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- run: rustup component add clippy
- uses: actions-rs/cargo@v1
with:
command: clippy
args: --all-targets -- -Dclippy::all

45
vendor/seize/Cargo.toml vendored Normal file
View File

@@ -0,0 +1,45 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
name = "seize"
version = "0.3.3"
authors = ["Ibraheem Ahmed <ibraheem@ibraheem.ca>"]
description = "Fast, efficient, and robust memory reclamation for concurrent data structures."
readme = "README.md"
keywords = [
"lock-free",
"atomic",
"garbage",
]
categories = [
"concurrency",
"memory-management",
]
license = "MIT"
repository = "https://github.com/ibraheemdev/seize"
[[bench]]
name = "stack"
harness = false
[[bench]]
name = "single_thread"
harness = false
[dependencies]
[dev-dependencies.criterion]
version = "0.3.5"
[dev-dependencies.crossbeam-epoch]
version = "0.9.8"

21
vendor/seize/LICENSE vendored Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2022 Ibraheem Ahmed
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

289
vendor/seize/README.md vendored Normal file
View File

@@ -0,0 +1,289 @@
# Seize
[![Crate](https://img.shields.io/crates/v/seize?style=for-the-badge)](https://crates.io/crates/seize)
[![Github](https://img.shields.io/badge/github-seize-success?style=for-the-badge)](https://github.com/ibraheemdev/seize)
[![Docs](https://img.shields.io/badge/docs.rs-0.3.3-4d76ae?style=for-the-badge)](https://docs.rs/seize)
Fast, efficient, and robust memory reclamation for concurrent data structures.
# Introduction
Concurrent data structures are faced with the problem of deciding when it is
safe to free memory. Although an object might have been logically removed, other
threads that previously loaded it may still be accessing it, and thus it is
not safe to free immediately. Over the years, many algorithms have been devised
to solve this problem. However, most traditional memory reclamation schemes make
the tradeoff between performance, efficiency, and robustness. For example,
[epoch based reclamation] is fast and lightweight but lacks robustness in that a
stalled thread can prevent the reclamation of _all_ retired objects. [Hazard
pointers], another popular scheme, tracks individual pointers, making it efficient
and robust but generally much slower.
Another problem that is often not considered is workload balancing. In most
reclamation schemes, the thread that retires an object is the one that reclaims
it. This leads to unbalanced reclamation in read-dominated workloads;
parallelism is degraded when only a fraction of threads are writing. This is
especially prevalent with the use of M:N threading models as provided by
asynchronous runtimes like [Tokio].
# Details
Seize is based on the [hyaline reclamation scheme], which uses reference counting
to determine when it is safe to free memory. However, reference counters are only
used for objects that have been retired, allowing it to avoid the high overhead
incurred by traditional reference counting schemes where every memory access requires
modifying shared memory. Performance is competitive with that of epoch based schemes,
while memory efficiency is similar to hazard pointers. Reclamation is naturally
balanced as the thread with the last reference to an object is the one that frees it.
Epochs can also be optionally tracked to protect against stalled threads, making reclamation
truly lock-free.
Seize is compatible with all modern hardware that supports single-word atomic
operations such as FAA and CAS.
# Guide
Seize tries to stay out of your way as much as possible. It works with raw
pointers directly instead of creating safe wrapper types that end up being a
hassle to work with in practice. Below is a step-by-step guide on how to get
started.
### Collectors
Seize avoids the use of global state and encourages creating a designated
_collector_ per data structure. Collectors allow you to allocate, protect, and
retire objects:
```rust,ignore
use seize::Collector;
struct Stack<T> {
collector: Collector,
// ...
}
impl<T> Stack<T> {
pub fn new() -> Self {
Self {
collector: Collector::new(),
}
}
}
```
### Allocating Objects
Seize requires storing some metadata about the global epoch for each object that
is allocated. It also needs to reserve a couple words for retirement lists.
Because of this, objects in a concurrent data structure that may be reclaimed must
embed the `Link` type or use the `Linked<T>` wrapper provided for convenience. See
[DST Support](#dst-support) for more details.
You can create a `Link` with the `link` method, or allocate and link a value with
the `link_boxed` helper:
```rust
use seize::{reclaim, Collector, Linked};
use std::mem::ManuallyDrop;
use std::sync::atomic::{AtomicPtr, Ordering};
pub struct Stack<T> {
head: AtomicPtr<Linked<Node<T>>>, // <===
collector: Collector,
}
struct Node<T> {
next: *mut Linked<Node<T>>, // <===
value: ManuallyDrop<T>,
}
impl<T> Stack<T> {
pub fn push(&self, value: T) {
let node = self.collector.link_boxed(Node { // <===
next: std::ptr::null_mut(),
value: ManuallyDrop::new(value),
});
// ...
}
}
```
### Starting Operations
Before starting an operation that involves loading atomic pointers, you must
mark the thread as _active_ by calling the `enter` method.
```rust,ignore
impl Stack {
pub fn push(&self, value: T) {
// ...
let guard = self.collector.enter(); // <===
// ...
}
}
```
### Protecting Pointers
`enter` returns a guard that allows you to safely load atomic pointers. Any
valid pointer loaded through a guard is guaranteed to stay valid until the
guard is dropped, or is retired by the current thread. Importantly, if another
thread retires an object that you protected, the collector knows not to reclaim
the object until your guard is dropped.
```rust,ignore
impl Stack {
pub fn push(&self, value: T) {
// ...
let guard = self.collector.enter();
loop {
let head = guard.protect(&self.head, Ordering::Acquire); // <===
unsafe { (*node).next = head; }
if self
.head
.compare_exchange(head, node, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
break;
}
}
// drop(guard);
}
}
```
Note that the lifetime of a guarded pointer is logically tied to that of the
guard -- when the guard is dropped the pointer is invalidated -- but a raw
pointer is returned for convenience. Data structures that return shared references
to values should ensure that the lifetime of the reference is tied to the lifetime
of a guard.
### Retiring Objects
Objects that have been removed from a data structure can be safely _retired_
through the collector. It will be _reclaimed_ when no thread holds a reference
to it:
```rust,ignore
impl<T> Stack<T> {
pub fn pop(&self) -> Option<T> {
let guard = self.collector.enter(); // <=== mark the thread as active
loop {
let head = guard.protect(&self.head, Ordering::Acquire); // <=== safely load the head
if head.is_null() {
return None;
}
let next = unsafe { (*head).next };
if self
.head
.compare_exchange(head, next, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
unsafe {
let data = ptr::read(&(*head).value);
self.collector.retire(head, reclaim::boxed::<Linked<Node<T>>>); // <===
return Some(ManuallyDrop::into_inner(data));
}
}
}
}
}
```
There are a couple important things to note about retiring an object:
#### Retired objects must be logically removed
An object can only be retired if it is _no longer accessible_ to any thread that
comes after. In the above code example this was ensured by swapping out the node
before retiring it. Threads that loaded a value _before_ it was retired are
safe, but threads that come after are not.
#### Retired objects cannot be accessed by the current thread
Unlike in schemes like EBR, a guard does not protect objects retired by the
current thread. If no other thread holds a reference to an object it may be
reclaimed _immediately_. This makes the following code unsound:
```rust,ignore
let ptr = guard.protect(&node, Ordering::Acquire);
collector.retire(ptr, |_| {});
println!("{}", (*ptr).value); // <===== unsound!
```
Retirement can be delayed until the guard is dropped by calling `defer_retire` on
the guard, instead of on the collector directly:
```rust,ignore
let ptr = guard.protect(&node, Ordering::Acquire);
guard.defer_retire(ptr, |_| {});
println!("{}", (*ptr).value); // <===== ok!
drop(guard); // <===== ptr is invalidated
```
#### Custom Reclaimers
You probably noticed that `retire` takes a function as a second parameter. This
function is known as a _reclaimer_, and is run when the collector decides it is
safe to free the retired object. Typically you will pass in a function from the
[`seize::reclaim`](https://docs.rs/seize/latest/seize/reclaim/index.html) module.
For example, values allocated with `Box` can use `reclaim::boxed`:
```rust,ignore
use seize::reclaim;
impl<T> Stack<T> {
pub fn pop(&self) -> Option<T> {
// ...
self.collector.retire(head, reclaim::boxed::<Linked<Node<T>>>); // <===
// ...
}
}
```
The type annotation there is important. It is **unsound** to pass a reclaimer of
a different type than the object being retired.
If you need to run custom reclamation code, you can write a custom reclaimer.
Functions passed to `retire` are called with a type-erased `Link` pointer. This is
because retired values are connected to thread-local batches via linked lists,
losing any type information. To extract the underlying value from a link, you can
call the `cast` method:
```rust,ignore
collector.retire(value, |link: *mut Link| unsafe {
// SAFETY: the value retired was of type *mut Linked<T>
let ptr: *mut Linked<T> = Link::cast(link);
// SAFETY: the value was allocated with `link_boxed`
let value = Box::from_raw(ptr);
println!("dropping {}", value);
drop(value);
});
```
### DST Support
Most reclamation use cases can work with `Linked<T>` and avoid working with
links directly. However, advanced use cases such as dynamically sized types
may require more control over type layout. To support this, seize allows embedding
a `Link` directly in your type. See the [`AsLink`](https://docs.rs/seize/latest/seize/trait.AsLink.html)
trait for more details.
[hazard pointers]:
https://www.cs.otago.ac.nz/cosc440/readings/hazard-pointers.pdf
[hyaline reclamation scheme]: https://arxiv.org/pdf/1905.07903.pdf
[epoch based reclamation]:
https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-579.pdf
[tokio]: https://github.com/tokio-rs/tokio

171
vendor/seize/benches/single_thread.rs vendored Normal file
View File

@@ -0,0 +1,171 @@
use criterion::{criterion_group, criterion_main, Criterion};
// Measures uncontended guard enter/leave overhead: a single thread does
// balanced push/pop pairs against a trivial stack, once with a seize
// collector and once with crossbeam-epoch pinning.
fn single_thread(c: &mut Criterion) {
    c.bench_function("single_thread-seize", |b| {
        let mut stack = seize_stack::Stack::new();
        b.iter(|| {
            // Two rounds of 1000 push/pop pairs (identical workloads).
            for _ in 0..2 {
                for i in 0..1000 {
                    stack.push(i);
                    assert!(stack.pop().is_some());
                }
            }
            // Every push was matched by a pop.
            assert!(stack.pop().is_none());
            assert!(stack.is_empty());
        });
        drop(stack);
    });
    c.bench_function("single_thread-crossbeam", |b| {
        let mut stack = crossbeam_stack::Stack::new();
        b.iter(|| {
            for _ in 0..2 {
                for i in 0..1000 {
                    stack.push(i);
                    assert!(stack.pop().is_some());
                }
            }
            assert!(stack.pop().is_none());
            assert!(stack.is_empty());
        })
    });
}
criterion_group!(benches, single_thread);
criterion_main!(benches);
mod seize_stack {
use criterion::black_box;
use seize::{Collector, Link, Linked};
use std::ptr;
pub struct Stack {
head: *mut Linked<Node>,
collector: Collector,
alloc: *mut Linked<Node>,
}
struct Node {
data: Option<usize>,
next: *mut Linked<Node>,
}
impl Stack {
pub fn new() -> Stack {
let collector = Collector::new().epoch_frequency(None);
Stack {
head: ptr::null_mut(),
alloc: collector.link_boxed(Node {
data: Some(1),
next: ptr::null_mut(),
}),
collector,
}
}
pub fn push(&mut self, _value: usize) {
let guard = black_box(self.collector.enter());
self.head = self.alloc;
drop(guard);
}
pub fn pop(&mut self) -> Option<usize> {
let guard = black_box(self.collector.enter());
unsafe {
let head = self.head;
if head.is_null() {
return None;
}
self.head = (*head).next;
guard.defer_retire(head, |link| {
let head: *mut Linked<Node> = Link::cast(link);
assert!(!head.is_null());
assert!((*head).data == Some(1));
});
(*head).data
}
}
pub fn is_empty(&self) -> bool {
let _guard = black_box(crossbeam_epoch::pin());
self.head.is_null()
}
}
}
mod crossbeam_stack {
    use criterion::black_box;
    use std::ptr;

    /// Crossbeam-epoch counterpart of `seize_stack::Stack`: a trivial
    /// single-threaded "stack" that reuses one preallocated node, so the
    /// benchmark measures pin/unpin overhead rather than allocation.
    pub struct Stack {
        head: *mut Node,
        // Reusable node installed by every `push`; never actually freed
        // (benchmark-only code, leaked on drop).
        alloc: *mut Node,
    }

    struct Node {
        data: Option<usize>,
        next: *mut Node,
    }

    impl Stack {
        pub fn new() -> Stack {
            Stack {
                head: ptr::null_mut(),
                alloc: Box::into_raw(Box::new(Node {
                    data: Some(1),
                    next: ptr::null_mut(),
                })),
            }
        }

        pub fn push(&mut self, _value: usize) {
            // `black_box` keeps the pin from being optimized away.
            let guard = black_box(crossbeam_epoch::pin());
            self.head = self.alloc;
            drop(guard);
        }

        pub fn pop(&mut self) -> Option<usize> {
            let guard = black_box(crossbeam_epoch::pin());
            unsafe {
                let head = self.head;
                if head.is_null() {
                    return None;
                }
                self.head = (*head).next;
                // The deferred closure only inspects the node; it does not
                // free it, so `alloc` can be handed out again by `push`.
                guard.defer_unchecked(move || {
                    assert!(!head.is_null());
                    assert!((*head).data == Some(1));
                });
                (*head).data
            }
        }

        pub fn is_empty(&self) -> bool {
            let _guard = black_box(crossbeam_epoch::pin());
            self.head.is_null()
        }
    }
}

244
vendor/seize/benches/stack.rs vendored Normal file
View File

@@ -0,0 +1,244 @@
use std::sync::Arc;
use std::thread;
use criterion::{criterion_group, criterion_main, Criterion};
fn treiber_stack(c: &mut Criterion) {
c.bench_function("trieber_stack-haphazard", |b| {
b.iter(|| {
let stack = Arc::new(haphazard_stack::TreiberStack::new());
let handles = (0..66)
.map(|_| {
let stack = stack.clone();
thread::spawn(move || {
for i in 0..1000 {
stack.push(i);
assert!(stack.pop().is_some());
}
})
})
.collect::<Vec<_>>();
for i in 0..1000 {
stack.push(i);
assert!(stack.pop().is_some());
}
for handle in handles {
handle.join().unwrap();
}
assert!(stack.pop().is_none());
assert!(stack.is_empty());
})
});
c.bench_function("trieber_stack-seize", |b| {
b.iter(|| {
let stack = Arc::new(seize_stack::TreiberStack::new());
let handles = (0..66)
.map(|_| {
let stack = stack.clone();
thread::spawn(move || {
for i in 0..1000 {
stack.push(i);
assert!(stack.pop().is_some());
}
})
})
.collect::<Vec<_>>();
for i in 0..1000 {
stack.push(i);
assert!(stack.pop().is_some());
}
for handle in handles {
handle.join().unwrap();
}
assert!(stack.pop().is_none());
assert!(stack.is_empty());
})
});
}
criterion_group!(benches, treiber_stack);
criterion_main!(benches);
mod seize_stack {
    use seize::{reclaim, Collector, Linked};
    use std::mem::ManuallyDrop;
    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering};

    /// Classic Treiber (lock-free LIFO) stack whose nodes are reclaimed
    /// through a dedicated seize `Collector`.
    #[derive(Debug)]
    pub struct TreiberStack<T> {
        head: AtomicPtr<Linked<Node<T>>>,
        collector: Collector,
    }

    #[derive(Debug)]
    struct Node<T> {
        // `ManuallyDrop` lets `pop` move the value out before the node
        // itself is retired and later freed by the reclaimer.
        data: ManuallyDrop<T>,
        next: *mut Linked<Node<T>>,
    }

    impl<T> TreiberStack<T> {
        pub fn new() -> TreiberStack<T> {
            TreiberStack {
                head: AtomicPtr::new(ptr::null_mut()),
                // Epoch tracking disabled for this benchmark.
                collector: Collector::new().epoch_frequency(None),
            }
        }

        pub fn push(&self, value: T) {
            let node = self.collector.link_boxed(Node {
                data: ManuallyDrop::new(value),
                next: ptr::null_mut(),
            });
            let guard = self.collector.enter();
            loop {
                let head = guard.protect(&self.head, Ordering::Acquire);
                // `node` is still owned by this thread until the CAS below
                // publishes it, so writing through it is safe.
                unsafe { (*node).next = head }
                if self
                    .head
                    .compare_exchange(head, node, Ordering::AcqRel, Ordering::Relaxed)
                    .is_ok()
                {
                    break;
                }
            }
        }

        pub fn pop(&self) -> Option<T> {
            let guard = self.collector.enter();
            loop {
                let head = guard.protect(&self.head, Ordering::Acquire);
                if head.is_null() {
                    return None;
                }
                let next = unsafe { (*head).next };
                if self
                    .head
                    .compare_exchange(head, next, Ordering::Release, Ordering::Relaxed)
                    .is_ok()
                {
                    unsafe {
                        // Move the value out first, then retire the node;
                        // `reclaim::boxed` frees the allocation once no
                        // thread still holds a reference to it.
                        let data = ptr::read(&(*head).data);
                        self.collector
                            .retire(head, reclaim::boxed::<Linked<Node<T>>>);
                        return Some(ManuallyDrop::into_inner(data));
                    }
                }
            }
        }

        pub fn is_empty(&self) -> bool {
            let guard = self.collector.enter();
            guard.protect(&self.head, Ordering::Relaxed).is_null()
        }
    }

    impl<T> Drop for TreiberStack<T> {
        // Pop (and thereby retire) any nodes that remain.
        fn drop(&mut self) {
            while self.pop().is_some() {}
        }
    }
}
mod haphazard_stack {
    use haphazard::{Domain, HazardPointer};
    use std::mem::ManuallyDrop;
    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering};

    /// Treiber stack protected by hazard pointers (`haphazard` crate),
    /// retiring nodes through the global domain.
    #[derive(Debug)]
    pub struct TreiberStack<T: 'static> {
        head: AtomicPtr<Node<T>>,
    }

    #[derive(Debug)]
    struct Node<T> {
        // `ManuallyDrop` lets `pop` move the value out before the node is
        // retired to the global domain.
        data: ManuallyDrop<T>,
        next: *mut Node<T>,
    }

    // SAFETY(review): required by `haphazard`'s retire API. Presumed sound
    // here because this benchmark only stores `usize` values; blanket
    // Send/Sync for arbitrary `T` would be unsound in library code — confirm.
    unsafe impl<T> Send for Node<T> {}
    unsafe impl<T> Sync for Node<T> {}

    impl<T> TreiberStack<T> {
        pub fn new() -> TreiberStack<T> {
            TreiberStack {
                head: AtomicPtr::default(),
            }
        }

        pub fn push(&self, value: T) {
            let node = Box::into_raw(Box::new(Node {
                data: ManuallyDrop::new(value),
                next: ptr::null_mut(),
            }));
            let mut h = HazardPointer::new();
            loop {
                // `protect_ptr` returns None when head is null.
                let head = match h.protect_ptr(&self.head) {
                    Some((ptr, _)) => ptr.as_ptr(),
                    None => ptr::null_mut(),
                };
                // `node` is unpublished until the CAS succeeds.
                unsafe { (*node).next = head }
                if self
                    .head
                    .compare_exchange(head, node, Ordering::Release, Ordering::Relaxed)
                    .is_ok()
                {
                    break;
                }
            }
        }

        pub fn pop(&self) -> Option<T> {
            let mut h = HazardPointer::new();
            loop {
                // `?` returns None on an empty stack.
                let (head, _) = h.protect_ptr(&self.head)?;
                let next = unsafe { head.as_ref().next };
                if self
                    .head
                    .compare_exchange(head.as_ptr(), next, Ordering::Release, Ordering::Relaxed)
                    .is_ok()
                {
                    unsafe {
                        // Move the value out, then hand the node to the
                        // global domain for deferred deallocation.
                        let data = ptr::read(&head.as_ref().data);
                        Domain::global().retire_ptr::<_, Box<Node<T>>>(head.as_ptr());
                        return Some(ManuallyDrop::into_inner(data));
                    }
                }
            }
        }

        pub fn is_empty(&self) -> bool {
            let mut h = HazardPointer::new();
            unsafe { h.protect(&self.head) }.is_none()
        }
    }

    impl<T> Drop for TreiberStack<T> {
        // Pop (and thereby retire) any nodes that remain.
        fn drop(&mut self) {
            while self.pop().is_some() {}
        }
    }
}

496
vendor/seize/src/collector.rs vendored Normal file
View File

@@ -0,0 +1,496 @@
use crate::raw;
use crate::tls::Thread;
use std::cell::UnsafeCell;
use std::marker::PhantomData;
use std::num::NonZeroU64;
use std::sync::atomic::{AtomicPtr, Ordering};
use std::{fmt, ptr};
/// Fast, efficient, and robust memory reclamation.
///
/// See the [crate documentation](crate) for details.
pub struct Collector {
    raw: raw::Collector,
    // Heap allocation whose *address* identifies this collector; it is only
    // compared in `Collector::ptr_eq` and freed in `Drop`, never dereferenced.
    unique: *mut u8,
}

// SAFETY(review): presumes `raw::Collector` is internally synchronized and
// that `unique` is used purely as an identity address (see `ptr_eq`) —
// confirm against src/raw.rs.
unsafe impl Send for Collector {}
unsafe impl Sync for Collector {}
impl Collector {
    // Default number of retired values per thread-local batch before
    // reclamation is attempted (see `batch_size`).
    const DEFAULT_RETIRE_TICK: usize = 120;
    // Default number of linked values between epoch advancements
    // (see `epoch_frequency`).
    // SAFETY: 110 is non-zero, so `new_unchecked` is sound.
    const DEFAULT_EPOCH_TICK: NonZeroU64 = unsafe { NonZeroU64::new_unchecked(110) };

    /// Creates a new collector.
    pub fn new() -> Self {
        // Size the raw collector by hardware parallelism; fall back to 1
        // when it cannot be determined.
        let cpus = std::thread::available_parallelism()
            .map(Into::into)
            .unwrap_or(1);
        Self {
            raw: raw::Collector::new(cpus, Self::DEFAULT_EPOCH_TICK, Self::DEFAULT_RETIRE_TICK),
            // Identity allocation for `ptr_eq`; freed in `Drop`.
            unique: Box::into_raw(Box::new(0)),
        }
    }

    /// Sets the frequency of epoch advancement.
    ///
    /// Seize uses epochs to protect against stalled threads.
    /// The more frequently the epoch is advanced, the faster
    /// stalled threads can be detected. However, it also means
    /// that threads will have to do work to catch up to the
    /// current epoch more often.
    ///
    /// The default epoch frequency is `110`, meaning that
    /// the epoch will advance after every 110 values are
    /// linked to the collector. Benchmarking has shown that
    /// this is a good tradeoff between throughput and memory
    /// efficiency.
    ///
    /// If `None` is passed epoch tracking, and protection
    /// against stalled threads, will be disabled completely.
    pub fn epoch_frequency(mut self, n: Option<NonZeroU64>) -> Self {
        self.raw.epoch_frequency = n;
        self
    }

    /// Sets the number of values that must be in a batch
    /// before reclamation is attempted.
    ///
    /// Retired values are added to thread-local *batches*
    /// before starting the reclamation process. After
    /// `batch_size` is hit, values are moved to separate
    /// *retirement lists*, where reference counting kicks
    /// in and batches are eventually reclaimed.
    ///
    /// A larger batch size means that deallocation is done
    /// less frequently, but reclamation also becomes more
    /// expensive due to longer retirement lists needing
    /// to be traversed and freed.
    ///
    /// Note that batch sizes should generally be larger
    /// than the number of threads accessing objects.
    ///
    /// The default batch size is `120`. Tests have shown that
    /// this makes a good tradeoff between throughput and memory
    /// efficiency.
    pub fn batch_size(mut self, n: usize) -> Self {
        self.raw.batch_size = n;
        self
    }

    /// Marks the current thread as active, returning a guard
    /// that allows protecting loads of atomic pointers. The thread
    /// will be marked as inactive when the guard is dropped.
    ///
    /// See [the guide](crate#starting-operations) for an introduction
    /// to using guards.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::sync::atomic::{AtomicPtr, Ordering};
    /// # let collector = seize::Collector::new();
    /// use seize::{reclaim, Linked};
    ///
    /// let ptr = AtomicPtr::new(collector.link_boxed(1_usize));
    ///
    /// let guard = collector.enter();
    /// let value = guard.protect(&ptr, Ordering::Acquire);
    /// unsafe { assert_eq!(**value, 1) }
    /// # unsafe { guard.defer_retire(value, reclaim::boxed::<Linked<usize>>) };
    /// ```
    ///
    /// Note that `enter` is reentrant, and it is legal to create
    /// multiple guards on the same thread. The thread will stay
    /// marked as active until the last guard is dropped:
    ///
    /// ```rust
    /// # use std::sync::atomic::{AtomicPtr, Ordering};
    /// # let collector = seize::Collector::new();
    /// use seize::{reclaim, Linked};
    ///
    /// let ptr = AtomicPtr::new(collector.link_boxed(1_usize));
    ///
    /// let guard1 = collector.enter();
    /// let guard2 = collector.enter();
    ///
    /// let value = guard2.protect(&ptr, Ordering::Acquire);
    /// drop(guard1);
    /// // the first guard is dropped, but `value`
    /// // is still safe to access as a guard still
    /// // exists
    /// unsafe { assert_eq!(**value, 1) }
    /// # unsafe { guard2.defer_retire(value, reclaim::boxed::<Linked<usize>>) };
    /// drop(guard2) // _now_, the thread is marked as inactive
    /// ```
    pub fn enter(&self) -> Guard<'_> {
        let thread = Thread::current();
        unsafe { self.raw.enter(thread) };
        Guard {
            thread,
            collector: Some(self),
            _unsend: PhantomData,
        }
    }

    /// Link a value to the collector.
    ///
    /// See [the guide](crate#allocating-objects) for details.
    pub fn link(&self) -> Link {
        Link {
            node: UnsafeCell::new(self.raw.node()),
        }
    }

    /// Creates a new `Linked` object with the given value.
    ///
    /// This is equivalent to:
    ///
    /// ```ignore
    /// Linked {
    ///     value,
    ///     link: collector.link()
    /// }
    /// ```
    pub fn link_value<T>(&self, value: T) -> Linked<T> {
        Linked {
            link: self.link(),
            value,
        }
    }

    /// Links a value to the collector and allocates it with `Box`.
    ///
    /// This is equivalent to:
    ///
    /// ```ignore
    /// Box::into_raw(Box::new(Linked {
    ///     value,
    ///     link: collector.link()
    /// }))
    /// ```
    pub fn link_boxed<T>(&self, value: T) -> *mut Linked<T> {
        Box::into_raw(Box::new(Linked {
            link: self.link(),
            value,
        }))
    }

    /// Retires a value, running `reclaim` when no threads hold a reference to it.
    ///
    /// Note that this method is disconnected from any guards on the current thread,
    /// so the pointer may be reclaimed immediately. See [`Guard::defer_retire`] if
    /// the pointer may still be accessed by the current thread.
    ///
    /// See [the guide](crate#retiring-objects) for details.
    ///
    /// # Safety
    ///
    /// The retired object must no longer be accessible to any thread that enters
    /// after it is removed. It also cannot be accessed by the current thread
    /// after `retire` is called.
    ///
    /// Additionally, the reclaimer passed to `retire` must correctly deallocate values of type `T`.
    #[allow(clippy::missing_safety_doc)] // in guide
    pub unsafe fn retire<T: AsLink>(&self, ptr: *mut T, reclaim: unsafe fn(*mut Link)) {
        debug_assert!(!ptr.is_null(), "attempted to retire null pointer");
        // note that `add` doesn't actually reclaim the pointer immediately if the
        // current thread is active, it instead adds it to its reclamation list,
        // but we don't guarantee that publicly.
        unsafe { self.raw.add(ptr, reclaim, Thread::current()) }
    }

    /// Returns true if both references point to the same collector.
    pub fn ptr_eq(this: &Collector, other: &Collector) -> bool {
        // Compares the identity allocations, not the references themselves.
        ptr::eq(this.unique, other.unique)
    }
}
impl Drop for Collector {
    fn drop(&mut self) {
        // Free the identity allocation created in `Collector::new`.
        unsafe {
            let _ = Box::from_raw(self.unique);
        }
    }
}
impl Clone for Collector {
    // Cloning builds a brand-new collector with the same configuration;
    // the clone shares no state with `self` and is NOT `ptr_eq` to it.
    fn clone(&self) -> Self {
        Collector::new()
            .batch_size(self.raw.batch_size)
            .epoch_frequency(self.raw.epoch_frequency)
    }
}
impl Default for Collector {
    /// Equivalent to [`Collector::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl fmt::Debug for Collector {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut strukt = f.debug_struct("Collector");
        // The epoch counter is only meaningful when epoch tracking is enabled.
        if self.raw.epoch_frequency.is_some() {
            strukt.field("epoch", &self.raw.epoch.load(Ordering::Acquire));
        }
        strukt
            .field("batch_size", &self.raw.batch_size)
            .field("epoch_frequency", &self.raw.epoch_frequency)
            .finish()
    }
}
/// A guard that keeps the current thread marked as active,
/// enabling protected loads of atomic pointers.
///
/// See [`Collector::enter`] for details.
pub struct Guard<'a> {
    // `None` marks the dummy guard created by `Guard::unprotected`.
    collector: Option<&'a Collector>,
    thread: Thread,
    // must not be Send or Sync as we are tied to the current threads state in
    // the collector
    _unsend: PhantomData<*mut ()>,
}
impl Guard<'_> {
/// Returns a dummy guard.
///
/// Calling [`protect`](Guard::protect) on an unprotected guard will
/// load the pointer directly, and [`retire`](Guard::defer_retire) will
/// reclaim objects immediately.
///
/// Unprotected guards are useful when calling guarded functions
/// on a data structure that has just been created or is about
/// to be destroyed, because you know that no other thread holds
/// a reference to it.
///
/// # Safety
///
/// You must ensure that code used with this guard is sound with
/// the unprotected behavior described above.
pub const unsafe fn unprotected() -> Guard<'static> {
Guard {
thread: Thread::EMPTY,
collector: None,
_unsend: PhantomData,
}
}
/// Protects the load of an atomic pointer.
///
/// See [the guide](crate#protecting-pointers) for details.
#[inline]
pub fn protect<T: AsLink>(&self, ptr: &AtomicPtr<T>, ordering: Ordering) -> *mut T {
match self.collector {
Some(collector) => unsafe { collector.raw.protect(ptr, ordering, self.thread) },
// unprotected guard
None => ptr.load(ordering),
}
}
/// Retires a value, running `reclaim` when no threads hold a reference to it.
///
/// This method delays reclamation until the guard is dropped as opposed to
/// [`Collector::retire`], which may reclaim objects immediately.
///
/// See [the guide](crate#retiring-objects) for details.
#[allow(clippy::missing_safety_doc)] // in guide
pub unsafe fn defer_retire<T: AsLink>(&self, ptr: *mut T, reclaim: unsafe fn(*mut Link)) {
debug_assert!(!ptr.is_null(), "attempted to retire null pointer");
match self.collector {
Some(collector) => unsafe { collector.raw.add(ptr, reclaim, self.thread) },
// unprotected guard
None => unsafe { (reclaim)(ptr.cast::<Link>()) },
}
}
/// Get a reference to the collector this guard we created from.
///
/// This method is useful when you need to ensure that all guards
/// used with a data structure come from the same collector.
///
/// If this is an [`unprotected`](Guard::unprotected) guard
/// this method will return `None`.
pub fn collector(&self) -> Option<&Collector> {
self.collector
}
/// Refreshes the guard.
///
/// Refreshing a guard is similar to dropping and immediately
/// creating a new guard. The current thread remains active, but any
/// pointers that were previously protected may be reclaimed.
///
/// # Safety
///
/// This method is not marked as `unsafe`, but will affect
/// the validity of pointers returned by [`protect`](Guard::protect),
/// similar to dropping a guard. It is intended to be used safely
/// by users of concurrent data structures, as references will
/// be tied to the guard and this method takes `&mut self`.
///
/// If this is an [`unprotected`](Guard::unprotected) guard
/// this method will be a no-op.
pub fn refresh(&mut self) {
    match self.collector {
        // unprotected guard: nothing to refresh
        None => {}
        Some(collector) => unsafe { collector.raw.refresh(self.thread) },
    }
}
/// Flush any retired values in the local batch.
///
/// Flushing starts the reclamation process for any values in the
/// current thread's local batch. Note that no memory can be reclaimed
/// while this guard is active, but calling `flush` may allow memory
/// to be reclaimed more quickly after the guard is dropped.
///
/// See [`Collector::batch_size`] for details about batching.
pub fn flush(&self) {
    match self.collector {
        Some(collector) => unsafe { collector.raw.try_retire_batch(self.thread) },
        // unprotected guards have no local batch to flush
        None => {}
    }
}
/// Returns a numeric identifier for the current thread.
///
/// Guards rely on thread-local state, including thread IDs. If you already
/// have a guard you can use this method to get a cheap identifier for the
/// current thread, avoiding TLS overhead. Note that thread IDs may be reused,
/// so the value returned is only unique for the lifetime of this thread.
pub fn thread_id(&self) -> usize {
    // the id is stored in the guard itself, so no TLS lookup is needed
    self.thread.id
}
}
impl Drop for Guard<'_> {
    fn drop(&mut self) {
        // mark the thread as inactive; unprotected guards have no
        // collector and nothing to undo
        match self.collector {
            Some(collector) => unsafe { collector.raw.leave(self.thread) },
            None => {}
        }
    }
}
impl fmt::Debug for Guard<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // a guard carries no user-visible state, so print just the name;
        // identical output to `f.debug_tuple("Guard").finish()`
        f.write_str("Guard")
    }
}
/// A link to the collector.
///
/// See [the guide](crate#custom-reclaimers) for details.
#[repr(transparent)]
pub struct Link {
    // The type-erased node header the collector writes through during
    // retirement; never accessed directly by users.
    #[allow(dead_code)]
    pub(crate) node: UnsafeCell<raw::Node>,
}
impl Link {
    /// Cast this `link` to its underlying type.
    ///
    /// Note that while this function is safe, using the returned
    /// pointer is only sound if the link is in fact a type-erased `T`.
    /// This means that when casting a link in a reclaimer, the value
    /// that was retired must be of type `T`.
    pub fn cast<T: AsLink>(link: *mut Link) -> *mut T {
        link.cast()
    }
}
/// A type that can be pointer-cast to and from a [`Link`].
///
/// Most reclamation use cases can avoid this trait and work instead
/// with the [`Linked`] wrapper type. However, if you want more control
/// over the layout of your type (i.e. are working with a DST),
/// you may need to implement this trait directly.
///
/// # Safety
///
/// Types implementing this trait must be marked `#[repr(C)]`
/// and have a [`Link`] as their **first** field.
///
/// # Examples
///
/// ```rust
/// use seize::{AsLink, Collector, Link};
///
/// #[repr(C)]
/// struct Bytes {
///     // safety invariant: Link must be the first field
///     link: Link,
///     values: [*mut u8; 0],
/// }
///
/// // Safety: Bytes is repr(C) and has Link as its first field
/// unsafe impl AsLink for Bytes {}
///
/// // Deallocate a `Bytes`.
/// unsafe fn dealloc(ptr: *mut Bytes, collector: &Collector) {
///     collector.retire(ptr, |link| {
///         // safety: `ptr` is of type *mut Bytes
///         let link: *mut Bytes = Link::cast(link);
///         // ..
///     });
/// }
/// ```
pub unsafe trait AsLink {}
/// A value [linked](Collector::link) to a collector.
///
/// This type implements `Deref` and `DerefMut` to the
/// inner value, so you can access methods and fields
/// on it as normal. An extra `*` may be needed when
/// `T` needs to be accessed directly.
///
/// See [the guide](crate#allocating-objects) for details.
#[repr(C)]
pub struct Linked<T> {
    pub link: Link, // Safety Invariant: this field must come first
    pub value: T,
}

// safety: `Linked` is `repr(C)` and `link` is its first field
unsafe impl<T> AsLink for Linked<T> {}
impl<T: PartialEq> PartialEq for Linked<T> {
    // Equality compares only the inner values; the link is ignored.
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(&self.value, &other.value)
    }
}

impl<T: Eq> Eq for Linked<T> {}
impl<T: fmt::Debug> fmt::Debug for Linked<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // format only the inner value; `write_fmt(format_args!(..))` is
        // exactly what `write!(f, "{:?}", self.value)` expands to
        f.write_fmt(format_args!("{:?}", self.value))
    }
}
impl<T: fmt::Display> fmt::Display for Linked<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // display only the inner value; equivalent expansion of
        // `write!(f, "{}", self.value)`
        f.write_fmt(format_args!("{}", self.value))
    }
}
impl<T> std::ops::Deref for Linked<T> {
    type Target = T;

    // Transparently expose the wrapped value, skipping over the link.
    fn deref(&self) -> &Self::Target {
        &self.value
    }
}
impl<T> std::ops::DerefMut for Linked<T> {
    // Transparently expose the wrapped value mutably.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.value
    }
}

11
vendor/seize/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,11 @@
#![deny(unsafe_op_in_unsafe_fn)]
#![doc = include_str!("../README.md")]
mod collector;
mod raw;
mod tls;
mod utils;
pub mod reclaim;
pub use collector::{AsLink, Collector, Guard, Link, Linked};

571
vendor/seize/src/raw.rs vendored Normal file
View File

@@ -0,0 +1,571 @@
use crate::tls::{Thread, ThreadLocal};
use crate::utils::CachePadded;
use crate::{AsLink, Link};
use std::cell::{Cell, UnsafeCell};
use std::mem::ManuallyDrop;
use std::num::NonZeroU64;
use std::ptr::{self, NonNull};
use std::sync::atomic::{self, AtomicPtr, AtomicU64, AtomicUsize, Ordering};
// Fast, lock-free, robust concurrent memory reclamation.
//
// The core algorithm is described [in this paper](https://arxiv.org/pdf/2108.02763.pdf).
pub struct Collector {
    // Per-thread reservations lists
    reservations: ThreadLocal<CachePadded<Reservation>>,
    // Per-thread batches of retired nodes
    batches: ThreadLocal<CachePadded<UnsafeCell<LocalBatch>>>,
    // The number of nodes allocated per-thread
    node_count: ThreadLocal<UnsafeCell<u64>>,
    // The global epoch value
    pub(crate) epoch: AtomicU64,
    // The number of node allocations before advancing the global epoch.
    // `None` disables epoch tracking entirely (see `protect`).
    pub(crate) epoch_frequency: Option<NonZeroU64>,
    // The number of nodes in a batch before we free
    pub(crate) batch_size: usize,
}
impl Collector {
    // Create a collector with the provided configuration.
    pub fn new(threads: usize, epoch_frequency: NonZeroU64, batch_size: usize) -> Self {
        Self {
            epoch: AtomicU64::new(1),
            reservations: ThreadLocal::with_capacity(threads),
            batches: ThreadLocal::with_capacity(threads),
            node_count: ThreadLocal::with_capacity(threads),
            epoch_frequency: Some(epoch_frequency),
            batch_size,
        }
    }

    // Create a new node, recording the epoch it was born in.
    pub fn node(&self) -> Node {
        // safety: node counts are only accessed by the current thread
        let count = unsafe { &mut *self.node_count.load(Thread::current()).get() };
        *count += 1;
        // record the current epoch value
        //
        // note that it's fine if we see older epoch values here, which just means more
        // threads will be counted as active than might actually be
        let birth_epoch = match self.epoch_frequency {
            // advance the global epoch
            Some(ref freq) if *count % freq.get() == 0 => {
                self.epoch.fetch_add(1, Ordering::Relaxed) + 1
            }
            Some(_) => self.epoch.load(Ordering::Relaxed),
            // we aren't tracking epochs
            None => 0,
        };
        Node { birth_epoch }
    }

    // Mark the current thread as active.
    //
    // # Safety
    //
    // `thread` must be the current thread.
    pub unsafe fn enter(&self, thread: Thread) {
        let reservation = self.reservations.load(thread);
        // calls to `enter` may be reentrant, so we need to keep track of the number
        // of active guards for the current thread
        let guards = reservation.guards.get();
        reservation.guards.set(guards + 1);
        // avoid clearing already active reservation lists
        if guards == 0 {
            // mark the thread as active
            //
            // seqcst: establish a total order between this store and the fence in `retire`
            // - if our store comes first, the thread retiring will see that we are active
            // - if the fence comes first, we will see the new values of any objects being
            //   retired by that thread (all pointer loads are also seqcst and thus participate
            //   in the total order)
            reservation.head.store(ptr::null_mut(), Ordering::SeqCst);
        }
    }

    // Load an atomic pointer.
    //
    // # Safety
    //
    // `thread` must be the current thread.
    #[inline]
    pub unsafe fn protect<T>(
        &self,
        ptr: &AtomicPtr<T>,
        _ordering: Ordering,
        thread: Thread,
    ) -> *mut T {
        if self.epoch_frequency.is_none() {
            // epoch tracking is disabled, but pointer loads still need to be seqcst to participate
            // in the total order. see `enter` for details
            return ptr.load(Ordering::SeqCst);
        }
        let reservation = self.reservations.load(thread);
        // load the last epoch we recorded
        //
        // relaxed: the reservation epoch is only modified by the current thread
        let mut prev_epoch = reservation.epoch.load(Ordering::Relaxed);
        loop {
            // seqcst:
            // - ensure that this load participates in the total order. see the store
            //   to reservation.head and reservation.epoch for details
            // - acquire the birth epoch of the pointer. we need to record at least
            //   that epoch below to let other threads know we have access to this pointer
            //   (TODO: this requires objects to be stored with release ordering, which is
            //   not documented)
            let ptr = ptr.load(Ordering::SeqCst);
            // relaxed: we acquired at least the pointer's birth epoch above
            let current_epoch = self.epoch.load(Ordering::Relaxed);
            // we were already marked as active in the birth epoch, so we are safe
            if prev_epoch == current_epoch {
                return ptr;
            }
            // our epoch is out of date, record the new one and try again
            //
            // seqcst: establish a total order between this store and the fence in `retire`
            // - if our store comes first, the thread retiring will see that we are active in
            //   the current epoch
            // - if the fence comes first, we will see the new values of any objects being
            //   retired by that thread (all pointer loads are also seqcst and thus participate
            //   in the total order)
            reservation.epoch.store(current_epoch, Ordering::SeqCst);
            prev_epoch = current_epoch;
        }
    }

    // Mark the current thread as inactive.
    //
    // # Safety
    //
    // `thread` must be the current thread.
    pub unsafe fn leave(&self, thread: Thread) {
        let reservation = self.reservations.load(thread);
        // decrement the active guard count
        let guards = reservation.guards.get();
        reservation.guards.set(guards - 1);
        // we can only decrement reference counts after all guards for the current thread
        // are dropped
        if guards == 1 {
            // release: exit the critical section
            // acquire: acquire any new entries
            let head = reservation.head.swap(Entry::INACTIVE, Ordering::AcqRel);
            if head != Entry::INACTIVE {
                // decrement the reference counts of any entries that were added
                unsafe { Collector::traverse(head) }
            }
        }
    }

    // Decrement any reference counts, keeping the thread marked as active.
    //
    // # Safety
    //
    // `thread` must be the current thread.
    pub unsafe fn refresh(&self, thread: Thread) {
        let reservation = self.reservations.load(thread);
        let guards = reservation.guards.get();
        // we can only decrement reference counts after all guards for the current
        // thread are dropped
        if guards == 1 {
            // release: exit the critical section
            // acquire: acquire any new entries and the values of any objects
            // that were retired
            //
            // note: unlike `leave`, the head is reset to null (active), not INACTIVE
            let head = reservation.head.swap(ptr::null_mut(), Ordering::AcqRel);
            if head != Entry::INACTIVE {
                // decrement the reference counts of any entries that were added
                unsafe { Collector::traverse(head) }
            }
        }
    }

    // Add a node to the retirement batch, retiring the batch if `batch_size` is reached.
    //
    // # Safety
    //
    // `ptr` is a valid pointer, and `thread` must be the current thread.
    pub unsafe fn add<T>(&self, ptr: *mut T, reclaim: unsafe fn(*mut Link), thread: Thread)
    where
        T: AsLink,
    {
        // safety: local batches are only accessed by the current thread until retirement
        let local_batch = unsafe {
            &mut *self
                .batches
                .load_or(|| LocalBatch::new(self.batch_size), thread)
                .get()
        };
        // safety: local batch pointers are always valid until reclamation
        let batch = unsafe { local_batch.0.as_mut() };
        // `ptr` is guaranteed to be a valid pointer that can be cast to a node (`T: AsLink`)
        //
        // any other thread with a reference to the pointer only has a shared
        // reference to the UnsafeCell<Node>, which is allowed to alias. the caller
        // guarantees that the same pointer is not retired twice, so we can safely write
        // to the node through this pointer.
        let node = UnsafeCell::raw_get(ptr.cast::<UnsafeCell<Node>>());
        // if a thread is active in the minimum birth era, it has access to at least one
        // of the nodes in the batch and must be tracked.
        //
        // if epoch tracking is disabled this will always be false (0 > 0).
        let birth_epoch = unsafe { (*node).birth_epoch };
        if batch.min_epoch > birth_epoch {
            batch.min_epoch = birth_epoch;
        }
        // create an entry for this node
        batch.entries.push(Entry {
            node,
            reclaim,
            batch: local_batch.0.as_ptr(),
        });
        // attempt to retire the batch if we have enough entries
        if batch.entries.len() % self.batch_size == 0 {
            unsafe { self.try_retire(local_batch, thread) }
        }
    }

    // Attempt to retire nodes in the current thread's batch.
    //
    // # Safety
    //
    // `thread` must be the current thread.
    pub unsafe fn try_retire_batch(&self, thread: Thread) {
        let local_batch = self
            .batches
            .load_or(|| LocalBatch::new(self.batch_size), thread);
        // safety: batches are only accessed by the current thread
        unsafe { self.try_retire(&mut *local_batch.get(), thread) }
    }

    // Attempt to retire nodes in this batch.
    //
    // Note that if a guard on the current thread is active, the batch will also be added to its
    // reservation list for deferred reclamation.
    //
    // # Safety
    //
    // `thread` must be the current thread.
    pub unsafe fn try_retire(&self, local_batch: &mut LocalBatch, thread: Thread) {
        // establish a total order between the retirement of nodes in this batch and stores
        // marking a thread as active (or active in an epoch):
        // - if the store comes first, we will see that the thread is active
        // - if this fence comes first, the thread will see the new values of any objects
        //   in this batch.
        //
        // this fence also synchronizes with the fence run when a thread is created:
        // - if our fence comes first, they will see the new values of any objects in this batch
        // - if their fence comes first, we will see the new thread
        atomic::fence(Ordering::SeqCst);
        // safety: local batch pointers are always valid until reclamation.
        // if the batch ends up being retired then this pointer is stable
        let batch_entries = unsafe { local_batch.0.as_mut().entries.as_mut_ptr() };
        let batch = unsafe { local_batch.0.as_ref() };
        // if there are not enough entries in this batch for active threads, we have to try again later
        //
        // relaxed: the fence above already ensures that we see any threads that might
        // have access to any objects in this batch. any other threads that were created
        // after it will see their new values.
        if batch.entries.len() <= self.reservations.threads.load(Ordering::Relaxed) {
            return;
        }
        let current_reservation = self.reservations.load(thread);
        let mut marked = 0;
        // record all active threads
        //
        // we need to do this in a separate step before actually retiring to
        // make sure we have enough entries, as the number of threads can grow
        for reservation in self.reservations.iter() {
            // if we don't have enough entries to insert into the reservation lists
            // of all active threads, try again later
            let Some(entry) = batch.entries.get(marked) else {
                return;
            };
            // if this thread is inactive, we can skip it
            //
            // relaxed: see the acquire fence below
            if reservation.head.load(Ordering::Relaxed) == Entry::INACTIVE {
                continue;
            }
            // if this thread's epoch is behind the earliest birth epoch in this batch
            // we can skip it, as there is no way it could have accessed any of the objects
            // in this batch. we make sure never to skip the current thread even if its epoch
            // is behind because it may still have access to the pointer (because it's the
            // thread that allocated it). the current thread is only skipped if there is no
            // active guard.
            //
            // relaxed: if the epoch is behind there is nothing to synchronize with, and
            // we already ensured we will see its relevant epoch with the seqcst fence
            // above
            //
            // if epoch tracking is disabled this is always false (0 < 0)
            if !ptr::eq(reservation, current_reservation)
                && reservation.epoch.load(Ordering::Relaxed) < batch.min_epoch
            {
                continue;
            }
            // temporarily store this thread's list in a node in our batch
            //
            // safety: all nodes in a batch are valid, and this batch has not been
            // shared yet to other threads
            unsafe { (*entry.node).head = &reservation.head }
            marked += 1;
        }
        // for any inactive threads we skipped above, synchronize with `leave` to ensure
        // any accesses happen-before we retire. we ensured with the seqcst fence above
        // that the next time the thread becomes active it will see the new values of any
        // objects in this batch.
        atomic::fence(Ordering::Acquire);
        // add the batch to all active thread's reservation lists
        let mut active = 0;
        for i in 0..marked {
            let curr = &batch.entries[i];
            let curr_ptr = unsafe { batch_entries.add(i) };
            // safety: all nodes in the batch are valid, and we just initialized `head`
            // for all `marked` nodes in the loop above
            let head = unsafe { &*(*curr.node).head };
            // acquire:
            // - if the thread became inactive, synchronize with `leave` to ensure any accesses
            //   happen-before we retire
            // - if the thread is active, acquire any entries added by a concurrent call
            //   to `retire`
            let mut prev = head.load(Ordering::Acquire);
            loop {
                // the thread became inactive, skip it
                //
                // as long as the thread became inactive at some point after we verified it was
                // active, it can no longer access any objects in this batch. the next time it
                // becomes active it will load the new object values due to the seqcst fence above
                if prev == Entry::INACTIVE {
                    break;
                }
                // link this node to the reservation list
                unsafe { *(*curr.node).next = AtomicPtr::new(prev) }
                // release: release the entries in this batch
                match head.compare_exchange_weak(
                    prev,
                    curr_ptr,
                    Ordering::Release,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => {
                        active += 1;
                        break;
                    }
                    // lost the race to another thread, retry
                    Err(found) => {
                        // acquire the new entries
                        atomic::fence(Ordering::Acquire);
                        prev = found;
                        continue;
                    }
                }
            }
        }
        // release: if we don't free the list, release the batch to the thread that will
        if batch
            .active
            .fetch_add(active, Ordering::Release)
            .wrapping_add(active)
            == 0
        {
            // ensure any access of the data in the list happens-before we free the list
            atomic::fence(Ordering::Acquire);
            // safety: the reference count is 0, meaning that either no threads were active,
            // or they have all already decremented the count
            unsafe { Collector::free_batch(local_batch.0.as_ptr()) }
        }
        // reset the batch
        *local_batch = LocalBatch::new(self.batch_size).value.into_inner();
    }

    // Traverse the reservation list, decrementing the reference count of each batch.
    //
    // # Safety
    //
    // `list` must be a valid reservation list
    unsafe fn traverse(mut list: *mut Entry) {
        loop {
            let curr = list;
            if curr.is_null() {
                break;
            }
            // safety: `curr` is a valid link in the list
            //
            // relaxed: any entries were acquired when we loaded `head`
            list = unsafe { (*(*curr).node).next.load(Ordering::Relaxed) };
            let batch = unsafe { (*curr).batch };
            // safety: batch pointers are valid for reads until they are freed
            unsafe {
                // release: if we don't free the list, release any access of the batch to the thread
                // that will
                if (*batch).active.fetch_sub(1, Ordering::Release) == 1 {
                    // ensure any access of the data in the list happens-before we free the list
                    atomic::fence(Ordering::Acquire);
                    // safety: we have the last reference to the batch
                    Collector::free_batch(batch)
                }
            }
        }
    }

    // Free a reservation list, reclaiming every node in the batch.
    //
    // # Safety
    //
    // The batch reference count must be zero.
    unsafe fn free_batch(batch: *mut Batch) {
        // safety: we are the last reference to the batch
        for entry in unsafe { (*batch).entries.iter_mut() } {
            unsafe { (entry.reclaim)(entry.node.cast::<Link>()) };
        }
        unsafe { LocalBatch::free(batch) };
    }
}
impl Drop for Collector {
    fn drop(&mut self) {
        // reclaim every thread's local batch, running the reclaimer for
        // all nodes that were still awaiting retirement
        for batch in self.batches.iter() {
            // safety: we have &mut self
            let batch = unsafe { &mut *batch.get() };
            // safety: we have &mut self
            unsafe { Collector::free_batch(batch.0.as_ptr()) }
        }
    }
}
// A node attached to every allocated object.
//
// Nodes keep track of their birth epoch, as well as thread-local
// reservation lists.
//
// Only one field is meaningful at a time, determined by where the
// object is in its lifecycle (see the per-field comments below).
#[repr(C)]
pub union Node {
    // Before retiring: the epoch this node was created in
    birth_epoch: u64,
    // While retiring: temporary location for an active reservation list.
    head: *const AtomicPtr<Entry>,
    // After retiring: next node in the thread's reservation list
    next: ManuallyDrop<AtomicPtr<Entry>>,
}
// A per-thread reservation list.
//
// Reservation lists are lists of retired entries, where
// each entry represents a batch.
#[repr(C)]
struct Reservation {
    // The head of the list (`Entry::INACTIVE` when the thread holds no guard)
    head: AtomicPtr<Entry>,
    // The epoch this thread last accessed a pointer in
    epoch: AtomicU64,
    // The number of active guards for this thread
    guards: Cell<u64>,
}
impl Default for Reservation {
    // A fresh reservation: marked inactive, epoch zero, no guards held.
    fn default() -> Self {
        Self {
            guards: Cell::new(0),
            epoch: AtomicU64::new(0),
            head: AtomicPtr::new(Entry::INACTIVE),
        }
    }
}
// A batch of nodes waiting to be retired.
//
// The batch is freed (and its entries reclaimed) once `active` drops
// back to zero.
struct Batch {
    // Nodes in this batch.
    //
    // TODO: this allocation can be flattened
    entries: Vec<Entry>,
    // The minimum epoch of all nodes in this batch.
    min_epoch: u64,
    // The reference count for active threads.
    active: AtomicUsize,
}
// A retired node.
struct Entry {
    // The node being retired.
    node: *mut Node,
    // The function used to reclaim the node's memory.
    reclaim: unsafe fn(*mut Link),
    // The batch this node is a part of.
    batch: *mut Batch,
}
impl Entry {
    // Sentinel pointer (all bits set) representing an inactive thread.
    //
    // While null indicates an empty list, INACTIVE indicates the thread has no active
    // guards and is not accessing any objects.
    pub const INACTIVE: *mut Entry = usize::MAX as *mut Entry;
}
// An owning pointer to a thread-local batch of retired nodes.
pub struct LocalBatch(NonNull<Batch>);

impl LocalBatch {
    // Create a new batch with an initial capacity.
    fn new(capacity: usize) -> CachePadded<UnsafeCell<LocalBatch>> {
        // safety: `Box::into_raw` never returns null
        let ptr = unsafe {
            NonNull::new_unchecked(Box::into_raw(Box::new(Batch {
                entries: Vec::with_capacity(capacity),
                min_epoch: 0,
                active: AtomicUsize::new(0),
            })))
        };
        CachePadded::new(UnsafeCell::new(LocalBatch(ptr)))
    }

    // Free the batch.
    //
    // `ptr` must have been allocated by `LocalBatch::new` and must not be
    // used afterwards.
    unsafe fn free(ptr: *mut Batch) {
        unsafe { drop(Box::from_raw(ptr)) }
    }
}

// Local batches are only accessed by the current thread.
unsafe impl Send for LocalBatch {}
unsafe impl Sync for LocalBatch {}

41
vendor/seize/src/reclaim.rs vendored Normal file
View File

@@ -0,0 +1,41 @@
//! Common memory reclaimers.
//!
//! Functions in this module can be passed to [`retire`](crate::Collector::retire)
//! to free allocated memory or run drop glue. See [the guide](crate#custom-reclaimers)
//! for details about memory reclamation, and writing custom reclaimers.
use std::ptr;
use crate::{AsLink, Link};
/// Reclaims memory allocated with [`Box`].
///
/// This function reconstructs the [`Box`] from the linked pointer via
/// [`Box::from_raw`] and drops it.
///
/// # Safety
///
/// Ensure that the correct type annotations are used when
/// passing this function to [`retire`](crate::Collector::retire):
/// the value retired must have been of type `T` to be reclaimed
/// through `boxed::<T>`.
pub unsafe fn boxed<T: AsLink>(link: *mut Link) {
    // safety: the caller guarantees the link is a type-erased `T`
    // that was originally allocated with `Box`
    unsafe {
        drop(Box::<T>::from_raw(Link::cast(link)));
    }
}
/// Reclaims memory by dropping the value in place.
///
/// This function runs the drop glue of `T` via [`ptr::drop_in_place`],
/// without freeing the underlying allocation.
///
/// # Safety
///
/// Ensure that the correct type annotations are used when
/// passing this function to [`retire`](crate::Collector::retire):
/// the value retired must have been of type `T` to be reclaimed
/// through `in_place::<T>`.
pub unsafe fn in_place<T: AsLink>(link: *mut Link) {
    // safety: the caller guarantees the link is a type-erased `T`
    let value: *mut T = Link::cast(link);
    unsafe {
        ptr::drop_in_place(value);
    }
}

359
vendor/seize/src/tls/mod.rs vendored Normal file
View File

@@ -0,0 +1,359 @@
// Copyright 2017 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
mod thread_id;
use std::cell::UnsafeCell;
use std::mem::{self, MaybeUninit};
use std::ptr;
use std::sync::atomic::{self, AtomicBool, AtomicPtr, AtomicUsize, Ordering};
pub use thread_id::Thread;
const BUCKETS: usize = (usize::BITS + 1) as usize;
// A collection of per-thread values, indexed by `Thread`.
pub struct ThreadLocal<T: Send> {
    // Lazily allocated buckets of entries; bucket sizes grow as thread
    // ids grow (see `Thread::new` for the bucket layout).
    buckets: [AtomicPtr<Entry<T>>; BUCKETS],
    // The number of values that have been initialized so far.
    pub threads: AtomicUsize,
}
// A single slot within a bucket.
struct Entry<T> {
    // Whether `value` has been initialized.
    present: AtomicBool,
    // The stored value; only written by its owning thread.
    value: UnsafeCell<MaybeUninit<T>>,
}
impl<T> Drop for Entry<T> {
    fn drop(&mut self) {
        unsafe {
            // only run the value's destructor if it was actually initialized
            if *self.present.get_mut() {
                ptr::drop_in_place((*self.value.get()).as_mut_ptr());
            }
        }
    }
}
// safety: each entry is written only by its owning thread (`load_or`);
// other threads only observe `&T` after an acquire load of `present`.
// NOTE(review): this mirrors the reasoning of the `thread_local` crate
// this module is adapted from — confirm `T: Send` (not `Sync`) suffices.
unsafe impl<T: Send> Sync for ThreadLocal<T> {}
impl<T> ThreadLocal<T>
where
    T: Send,
{
    // Create a `ThreadLocal` with buckets preallocated for `capacity` threads.
    pub fn with_capacity(capacity: usize) -> ThreadLocal<T> {
        // the number of buckets needed to hold `capacity` entries;
        // bucket sizes double starting from the second bucket
        let allocated_buckets = capacity
            .checked_sub(1)
            .map(|c| (usize::BITS as usize) - (c.leading_zeros() as usize) + 1)
            .unwrap_or(0);
        let mut buckets = [ptr::null_mut(); BUCKETS];
        let mut bucket_size = 1;
        for (i, bucket) in buckets[..allocated_buckets].iter_mut().enumerate() {
            *bucket = allocate_bucket::<T>(bucket_size);
            if i != 0 {
                bucket_size <<= 1;
            }
        }
        ThreadLocal {
            // safety: `AtomicPtr` has the same representation as a pointer
            buckets: unsafe { mem::transmute(buckets) },
            threads: AtomicUsize::new(0),
        }
    }

    // Load the entry for `thread`, initializing it with `T::default()` if absent.
    pub fn load(&self, thread: Thread) -> &T
    where
        T: Default,
    {
        self.load_or(T::default, thread)
    }

    // Load the entry for `thread`, initializing it with `create` if absent.
    pub fn load_or(&self, create: impl Fn() -> T, thread: Thread) -> &T {
        // safety: `thread.bucket` is always in bounds (see `Thread::new`)
        let bucket = unsafe { self.buckets.get_unchecked(thread.bucket) };
        let mut bucket_ptr = bucket.load(Ordering::Acquire);
        if bucket_ptr.is_null() {
            // the bucket has not been allocated yet; race to install one
            let new_bucket = allocate_bucket(thread.bucket_size);
            match bucket.compare_exchange(
                ptr::null_mut(),
                new_bucket,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => bucket_ptr = new_bucket,
                // if the bucket value changed (from null), that means
                // another thread stored a new bucket before we could,
                // and we can free our bucket and use that one instead
                Err(other) => unsafe {
                    let _ = Box::from_raw(ptr::slice_from_raw_parts_mut(
                        new_bucket,
                        thread.bucket_size,
                    ));
                    bucket_ptr = other;
                },
            }
        }
        unsafe {
            let entry = &*bucket_ptr.add(thread.index);
            // relaxed: only this thread can set the value
            if entry.present.load(Ordering::Relaxed) {
                (*entry.value.get()).assume_init_ref()
            } else {
                // insert the new element into the bucket
                entry.value.get().write(MaybeUninit::new(create()));
                // release: necessary for iterator
                entry.present.store(true, Ordering::Release);
                self.threads.fetch_add(1, Ordering::Relaxed);
                // seqcst: synchronize with the fence in `retire`:
                // - if this fence comes first, the thread retiring will see the new thread count
                //   and our entry
                // - if their fence comes first, we will see the new values of any pointers being
                //   retired by that thread
                atomic::fence(Ordering::SeqCst);
                (*entry.value.get()).assume_init_ref()
            }
        }
    }

    // Load the current thread's entry, if it was already initialized.
    #[cfg(test)]
    fn try_load(&self) -> Option<&T> {
        let thread = Thread::current();
        let bucket_ptr =
            unsafe { self.buckets.get_unchecked(thread.bucket) }.load(Ordering::Acquire);
        if bucket_ptr.is_null() {
            return None;
        }
        unsafe {
            let entry = &*bucket_ptr.add(thread.index);
            // read without atomic operations as only this thread can set the value.
            if entry.present.load(Ordering::Relaxed) {
                Some((*entry.value.get()).assume_init_ref())
            } else {
                None
            }
        }
    }

    // Iterate over all initialized entries.
    pub fn iter(&self) -> Iter<'_, T> {
        Iter {
            bucket: 0,
            bucket_size: 1,
            index: 0,
            thread_local: self,
        }
    }
}
impl<T> Drop for ThreadLocal<T>
where
    T: Send,
{
    // Frees every allocated bucket; initialized values are dropped by
    // `Entry`'s own `Drop` impl when the boxed slice is dropped.
    fn drop(&mut self) {
        let mut bucket_size = 1;
        for (i, bucket) in self.buckets.iter_mut().enumerate() {
            let bucket_ptr = *bucket.get_mut();
            let this_bucket_size = bucket_size;
            // bucket sizes double starting from the second bucket
            if i != 0 {
                bucket_size <<= 1;
            }
            if bucket_ptr.is_null() {
                continue;
            }
            // reconstruct and drop the boxed slice created in `allocate_bucket`
            let _ = unsafe {
                Box::from_raw(std::slice::from_raw_parts_mut(bucket_ptr, this_bucket_size))
            };
        }
    }
}
// An iterator over all initialized entries of a `ThreadLocal`.
pub struct Iter<'a, T>
where
    T: Send,
{
    // The collection being iterated.
    thread_local: &'a ThreadLocal<T>,
    // Current bucket index.
    bucket: usize,
    // Number of entries in the current bucket.
    bucket_size: usize,
    // Current index within the bucket.
    index: usize,
}
impl<'a, T> Iterator for Iter<'a, T>
where
    T: Send,
{
    type Item = &'a T;

    fn next(&mut self) -> Option<Self::Item> {
        // because we reuse thread IDs, a new thread could join and be inserted into the middle of the list,
        // so we have to check all the buckets here. yielding extra values is fine, but not yielding all originally
        // active threads is not
        while self.bucket < BUCKETS {
            let bucket = unsafe {
                self.thread_local
                    .buckets
                    .get_unchecked(self.bucket)
                    .load(Ordering::Acquire)
            };
            if !bucket.is_null() {
                while self.index < self.bucket_size {
                    let entry = unsafe { &*bucket.add(self.index) };
                    self.index += 1;
                    // acquire: pairs with the release store of `present` in
                    // `load_or`, making the initialized value visible
                    if entry.present.load(Ordering::Acquire) {
                        return Some(unsafe { (*entry.value.get()).assume_init_ref() });
                    }
                }
            }
            // advance to the next (larger) bucket
            if self.bucket != 0 {
                self.bucket_size <<= 1;
            }
            self.bucket += 1;
            self.index = 0;
        }
        None
    }
}
fn allocate_bucket<T>(size: usize) -> *mut Entry<T> {
Box::into_raw(
(0..size)
.map(|_| Entry::<T> {
present: AtomicBool::new(false),
value: UnsafeCell::new(MaybeUninit::uninit()),
})
.collect(),
) as *mut _
}
#[cfg(test)]
#[allow(clippy::redundant_closure)]
mod tests {
    // Unit tests covering initialization, per-thread isolation,
    // iteration, and drop behavior of `ThreadLocal`.
    use super::*;
    use std::cell::RefCell;
    use std::sync::atomic::Ordering::Relaxed;
    use std::sync::Arc;
    use std::thread;

    // Returns a counter closure yielding 0, 1, 2, ... on successive calls,
    // used to observe how many distinct entries were initialized.
    fn make_create() -> Arc<dyn Fn() -> usize + Send + Sync> {
        let count = AtomicUsize::new(0);
        Arc::new(move || count.fetch_add(1, Relaxed))
    }

    // Repeated loads on the same thread must return the same entry.
    #[test]
    fn same_thread() {
        let create = make_create();
        let tls = ThreadLocal::with_capacity(1);
        assert_eq!(None, tls.try_load());
        assert_eq!(0, *tls.load_or(|| create(), Thread::current()));
        assert_eq!(Some(&0), tls.try_load());
        assert_eq!(0, *tls.load_or(|| create(), Thread::current()));
        assert_eq!(Some(&0), tls.try_load());
        assert_eq!(0, *tls.load_or(|| create(), Thread::current()));
        assert_eq!(Some(&0), tls.try_load());
    }

    // Each thread gets its own independent entry.
    #[test]
    fn different_thread() {
        let create = make_create();
        let tls = Arc::new(ThreadLocal::with_capacity(1));
        assert_eq!(None, tls.try_load());
        assert_eq!(0, *tls.load_or(|| create(), Thread::current()));
        assert_eq!(Some(&0), tls.try_load());
        let tls2 = tls.clone();
        let create2 = create.clone();
        thread::spawn(move || {
            assert_eq!(None, tls2.try_load());
            assert_eq!(1, *tls2.load_or(|| create2(), Thread::current()));
            assert_eq!(Some(&1), tls2.try_load());
        })
        .join()
        .unwrap();
        assert_eq!(Some(&0), tls.try_load());
        assert_eq!(0, *tls.load_or(|| create(), Thread::current()));
    }

    // `iter` yields the entries of every thread, including exited ones.
    #[test]
    fn iter() {
        let tls = Arc::new(ThreadLocal::with_capacity(1));
        tls.load_or(|| Box::new(1), Thread::current());
        let tls2 = tls.clone();
        thread::spawn(move || {
            tls2.load_or(|| Box::new(2), Thread::current());
            let tls3 = tls2.clone();
            thread::spawn(move || {
                tls3.load_or(|| Box::new(3), Thread::current());
            })
            .join()
            .unwrap();
            drop(tls2);
        })
        .join()
        .unwrap();
        let tls = Arc::try_unwrap(tls).unwrap_or_else(|_| panic!("."));
        let mut v = tls.iter().map(|x| **x).collect::<Vec<i32>>();
        v.sort_unstable();
        assert_eq!(vec![1, 2, 3], v);
    }

    // A second load on the same thread does not create a new entry.
    #[test]
    fn iter_snapshot() {
        let tls = Arc::new(ThreadLocal::with_capacity(1));
        tls.load_or(|| Box::new(1), Thread::current());
        let iterator = tls.iter();
        tls.load_or(|| Box::new(2), Thread::current());
        let v = iterator.map(|x| **x).collect::<Vec<i32>>();
        assert_eq!(vec![1], v);
    }

    // Dropping the collection runs destructors of initialized values.
    #[test]
    fn test_drop() {
        let local = ThreadLocal::with_capacity(1);
        struct Dropped(Arc<AtomicUsize>);
        impl Drop for Dropped {
            fn drop(&mut self) {
                self.0.fetch_add(1, Relaxed);
            }
        }
        let dropped = Arc::new(AtomicUsize::new(0));
        local.load_or(|| Dropped(dropped.clone()), Thread::current());
        assert_eq!(dropped.load(Relaxed), 0);
        drop(local);
        assert_eq!(dropped.load(Relaxed), 1);
    }

    // `ThreadLocal` must be `Sync` even for non-`Sync` payloads.
    #[test]
    fn is_sync() {
        fn foo<T: Sync>() {}
        foo::<ThreadLocal<String>>();
        foo::<ThreadLocal<RefCell<String>>>();
    }
}

131
vendor/seize/src/tls/thread_id.rs vendored Normal file
View File

@@ -0,0 +1,131 @@
// Copyright 2017 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::sync::{Mutex, OnceLock};
use std::usize;
/// Allocates unique thread IDs, aggressively reusing the IDs of exited
/// threads so that a `ThreadLocal` used by many short-lived threads does
/// not grow without bound.
#[derive(Default)]
struct ThreadIdManager {
    /// The next never-before-handed-out ID.
    free_from: usize,
    /// Min-heap of freed IDs, so the smallest available ID is reused first.
    free_list: BinaryHeap<Reverse<usize>>,
}
impl ThreadIdManager {
    /// Returns the smallest available thread ID.
    fn alloc(&mut self) -> usize {
        match self.free_list.pop() {
            Some(Reverse(id)) => id,
            None => {
                // No freed ID to recycle; hand out a fresh one.
                let id = self.free_from;
                self.free_from = self
                    .free_from
                    .checked_add(1)
                    .expect("Ran out of thread IDs");
                id
            }
        }
    }
    /// Returns a thread ID to the pool for reuse.
    fn free(&mut self, id: usize) {
        self.free_list.push(Reverse(id));
    }
}
fn thread_id_manager() -> &'static Mutex<ThreadIdManager> {
static THREAD_ID_MANAGER: OnceLock<Mutex<ThreadIdManager>> = OnceLock::new();
THREAD_ID_MANAGER.get_or_init(Default::default)
}
/// Data which is unique to the current thread while it is running.
/// A thread ID may be reused after a thread exits.
#[derive(Clone, Copy)]
pub struct Thread {
    /// The unique thread ID, as allocated by the global `ThreadIdManager`.
    pub(crate) id: usize,
    /// Index of the bucket this thread's entry lives in. Buckets grow in
    /// powers of two; see `Thread::new` for the mapping from `id`.
    pub(crate) bucket: usize,
    /// Number of entries in that bucket: `1 << (bucket - 1)` (saturating),
    /// except 0 for the `EMPTY` sentinel.
    pub(crate) bucket_size: usize,
    /// This thread's slot within the bucket.
    pub(crate) index: usize,
}
impl Thread {
    /// A sentinel `Thread` value (note `bucket_size` 0, unlike
    /// `Thread::new(0)` which produces 1).
    pub const EMPTY: Thread = Thread {
        id: 0,
        bucket: 0,
        bucket_size: 0,
        index: 0,
    };

    /// Computes the bucket coordinates for the given thread ID.
    ///
    /// Bucket `b` (for `b > 0`) holds the `1 << (b - 1)` IDs whose highest
    /// set bit is bit `b - 1`; ID 0 occupies bucket 0 alone.
    fn new(id: usize) -> Thread {
        // One past the position of the highest set bit (0 when `id == 0`).
        let bucket = usize::BITS as usize - id.leading_zeros() as usize;
        let bucket_size = 1 << bucket.saturating_sub(1);
        // Clearing the highest set bit yields the slot within the bucket.
        let index = if id == 0 { 0 } else { id ^ bucket_size };
        Thread {
            id,
            bucket,
            bucket_size,
            index,
        }
    }

    /// Get the `Thread` for the calling OS thread, allocating an ID on
    /// first use.
    pub fn current() -> Thread {
        THREAD_HOLDER.with(|holder| holder.0)
    }
}
/// Per-thread guard owning a `Thread` value: the ID is allocated when the
/// holder is created and returned to the manager when it is dropped.
struct ThreadHolder(Thread);
impl ThreadHolder {
    fn new() -> ThreadHolder {
        // Take the smallest free ID from the global manager.
        let id = thread_id_manager().lock().unwrap().alloc();
        ThreadHolder(Thread::new(id))
    }
}
impl Drop for ThreadHolder {
    fn drop(&mut self) {
        // Recycle the ID for future threads.
        thread_id_manager().lock().unwrap().free(self.0.id);
    }
}
// Per-thread holder: allocates this thread's ID on first access and, via
// `ThreadHolder`'s `Drop`, frees it when the thread exits.
thread_local!(static THREAD_HOLDER: ThreadHolder = ThreadHolder::new());
#[test]
fn test_thread() {
    // (id, expected bucket, expected bucket size, expected index)
    let cases = [
        (0, 0, 1, 0),
        (1, 1, 1, 0),
        (2, 2, 2, 0),
        (3, 2, 2, 1),
        (19, 5, 16, 3),
    ];
    for (id, bucket, bucket_size, index) in cases {
        let thread = Thread::new(id);
        assert_eq!(thread.id, id);
        assert_eq!(thread.bucket, bucket);
        assert_eq!(thread.bucket_size, bucket_size);
        assert_eq!(thread.index, index);
    }
}

56
vendor/seize/src/utils.rs vendored Normal file
View File

@@ -0,0 +1,56 @@
/// Pads and aligns a value to the length of a cache line.
// NOTE(review): per-arch alignment values look like they mirror
// crossbeam-utils' `CachePadded`; 128 on x86_64/aarch64/powerpc64 is
// presumably to cover prefetchers pulling pairs of cache lines — confirm
// against upstream rationale.
#[cfg_attr(
    any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
    ),
    repr(align(128))
)]
// Presumably 32-byte cache lines on these architectures — TODO confirm.
#[cfg_attr(
    any(
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips64",
        target_arch = "riscv64",
    ),
    repr(align(32))
)]
// s390x uses 256-byte alignment here.
#[cfg_attr(target_arch = "s390x", repr(align(256)))]
// Fallback for every other architecture: 64-byte alignment.
#[cfg_attr(
    not(any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips64",
        target_arch = "riscv64",
        target_arch = "s390x",
    )),
    repr(align(64))
)]
#[derive(Default)]
pub struct CachePadded<T> {
    /// The wrapped value; also reachable through `Deref`/`DerefMut`.
    pub value: T,
}
impl<T> CachePadded<T> {
pub fn new(value: T) -> Self {
Self { value }
}
}
impl<T> std::ops::Deref for CachePadded<T> {
type Target = T;
fn deref(&self) -> &T {
&self.value
}
}
impl<T> std::ops::DerefMut for CachePadded<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
}

Binary file not shown.

View File

View File

@@ -0,0 +1 @@
{"name":"seize","vers":"0.3.3","deps":[{"name":"criterion","req":"^0.3.5","features":[],"optional":false,"default_features":true,"target":null,"kind":"dev","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"crossbeam-epoch","req":"^0.9.8","features":[],"optional":false,"default_features":true,"target":null,"kind":"dev","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false}],"features":{},"features2":null,"cksum":"7a283aa4ae87b4d1c74a2ab1333c1f4a93aaef7822a23ea989f91e468f55312c","yanked":null,"links":null,"rust_version":null,"v":2}

Binary file not shown.

410
vendor/seize/tests/lib.rs vendored Normal file
View File

@@ -0,0 +1,410 @@
use seize::{reclaim, Collector, Guard, Linked};
use std::mem::ManuallyDrop;
use std::ptr;
use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
#[cfg(miri)]
mod cfg {
pub const THREADS: usize = 4;
pub const ITEMS: usize = 100;
pub const ITER: usize = 1;
}
#[cfg(not(miri))]
mod cfg {
pub const THREADS: usize = 32;
pub const ITEMS: usize = 10_000;
pub const ITER: usize = 100;
}
#[test]
fn stress() {
    // A classic Treiber lock-free stack, used here to stress the collector:
    // every popped node is retired through seize instead of freed directly.
    #[derive(Debug)]
    pub struct TreiberStack<T> {
        // Top of the stack; null when empty.
        head: AtomicPtr<Linked<Node<T>>>,
        collector: Collector,
    }
    #[derive(Debug)]
    struct Node<T> {
        // `ManuallyDrop` lets `pop` move the value out with `ptr::read`
        // while leaving reclamation of the node itself to the collector.
        data: ManuallyDrop<T>,
        next: *mut Linked<Node<T>>,
    }
    impl<T> TreiberStack<T> {
        pub fn new(batch_size: usize) -> TreiberStack<T> {
            TreiberStack {
                head: AtomicPtr::new(ptr::null_mut()),
                collector: Collector::new().batch_size(batch_size),
            }
        }
        // Pushes `t` via the usual CAS loop on `head`.
        pub fn push(&self, t: T) {
            let new = self.collector.link_boxed(Node {
                data: ManuallyDrop::new(t),
                next: ptr::null_mut(),
            });
            let guard = self.collector.enter();
            loop {
                let head = guard.protect(&self.head, Ordering::Acquire);
                // `new` is not yet shared with other threads, so this plain
                // write through the raw pointer is fine.
                unsafe { (*new).next = head }
                if self
                    .head
                    .compare_exchange(head, new, Ordering::Release, Ordering::Relaxed)
                    .is_ok()
                {
                    break;
                }
            }
        }
        // Pops the top node: moves its value out and retires the node.
        pub fn pop(&self) -> Option<T> {
            let guard = self.collector.enter();
            loop {
                let head = guard.protect(&self.head, Ordering::Acquire);
                if head.is_null() {
                    return None;
                }
                let next = unsafe { (*head).next };
                if self
                    .head
                    .compare_exchange(head, next, Ordering::AcqRel, Ordering::Relaxed)
                    .is_ok()
                {
                    unsafe {
                        // Move the value out first; the node memory itself is
                        // reclaimed later, once no guard can still observe it.
                        let data = ptr::read(&(*head).data);
                        self.collector
                            .retire(head, reclaim::boxed::<Linked<Node<T>>>);
                        return Some(ManuallyDrop::into_inner(data));
                    }
                }
            }
        }
        pub fn is_empty(&self) -> bool {
            let guard = self.collector.enter();
            guard.protect(&self.head, Ordering::Relaxed).is_null()
        }
    }
    impl<T> Drop for TreiberStack<T> {
        fn drop(&mut self) {
            // Drain remaining nodes so their values are dropped too.
            while self.pop().is_some() {}
        }
    }
    // Hammer the stack from THREADS spawned threads plus this one, several
    // iterations; batch size 33 is presumably chosen to leave retirement
    // batches partially filled — TODO confirm intent.
    for _ in 0..cfg::ITER {
        let stack = Arc::new(TreiberStack::new(33));
        let handles = (0..cfg::THREADS)
            .map(|_| {
                let stack = stack.clone();
                thread::spawn(move || {
                    for i in 0..cfg::ITEMS {
                        stack.push(i);
                        stack.pop();
                    }
                })
            })
            .collect::<Vec<_>>();
        for i in 0..cfg::ITEMS {
            stack.push(i);
            stack.pop();
        }
        for handle in handles {
            handle.join().unwrap();
        }
        // Pushes and pops are paired, so the stack must end up empty.
        assert!(stack.pop().is_none());
        assert!(stack.is_empty());
    }
}
#[test]
fn single_thread() {
    // Every retired value must be dropped exactly once, even with a small
    // retirement batch size and repeated guard enter/exit cycles.
    struct Foo(Arc<AtomicUsize>);
    impl Drop for Foo {
        fn drop(&mut self) {
            self.0.fetch_add(1, Ordering::Release);
        }
    }
    let dropped = Arc::new(AtomicUsize::new(0));
    let collector = Arc::new(Collector::new().batch_size(2));
    for _ in 0..22 {
        let slot = AtomicPtr::new(collector.link_boxed(Foo(dropped.clone())));
        {
            // Read the value under a guard, then release the guard.
            let guard = collector.enter();
            let _ = guard.protect(&slot, Ordering::Acquire);
        }
        {
            // Read it again under a fresh guard and retire it.
            let guard = collector.enter();
            let value = guard.protect(&slot, Ordering::Acquire);
            unsafe { collector.retire(value, reclaim::boxed::<Linked<Foo>>) }
        }
    }
    assert_eq!(dropped.load(Ordering::Acquire), 22);
}
#[test]
fn two_threads() {
    // A value must not be reclaimed while another thread still holds a guard
    // that may have observed it; the channel handshake below pins the other
    // thread's guard lifetime around this thread's retirement.
    struct Foo(Arc<AtomicBool>);
    impl Drop for Foo {
        fn drop(&mut self) {
            self.0.store(true, Ordering::Release);
        }
    }
    let collector = Arc::new(Collector::new().batch_size(3));
    let one_dropped = Arc::new(AtomicBool::new(false));
    let zero_dropped = Arc::new(AtomicBool::new(false));
    let (tx, rx) = std::sync::mpsc::channel();
    let one = Arc::new(AtomicPtr::new(
        collector.link_boxed(Foo(one_dropped.clone())),
    ));
    let h = std::thread::spawn({
        let one = one.clone();
        let collector = collector.clone();
        move || {
            // Access `one` under a guard and keep the guard alive until the
            // main thread has retired the value.
            let guard = collector.enter();
            let _value = guard.protect(&one, Ordering::Acquire);
            tx.send(()).unwrap();
            drop(guard);
            tx.send(()).unwrap();
        }
    });
    // Retire two unrelated values first; with batch_size(3) the retirement
    // of `one` below presumably completes a batch while the spawned
    // thread's guard is still active — TODO confirm batching semantics.
    for _ in 0..2 {
        let zero = AtomicPtr::new(collector.link_boxed(Foo(zero_dropped.clone())));
        let guard = collector.enter();
        let value = guard.protect(&zero, Ordering::Acquire);
        unsafe { collector.retire(value, reclaim::boxed::<Linked<Foo>>) }
    }
    rx.recv().unwrap(); // wait for thread to access value
    let guard = collector.enter();
    let value = guard.protect(&one, Ordering::Acquire);
    unsafe { collector.retire(value, reclaim::boxed::<Linked<Foo>>) }
    rx.recv().unwrap(); // wait for thread to drop guard
    h.join().unwrap();
    drop(guard);
    // With every guard released, both values must have been reclaimed.
    assert_eq!(
        (
            zero_dropped.load(Ordering::Acquire),
            one_dropped.load(Ordering::Acquire)
        ),
        (true, true)
    );
}
#[test]
fn refresh() {
    // Reader threads repeatedly protect each slot while the main thread
    // swaps in new values and retires the old ones; `Guard::refresh` lets a
    // long-lived guard move forward without being dropped and re-created.
    let collector = Arc::new(Collector::new().batch_size(3));
    let nums = (0..cfg::ITEMS)
        .map(|i| AtomicPtr::new(collector.link_boxed(i)))
        .collect::<Arc<[_]>>();
    let handles = (0..cfg::THREADS)
        .map(|_| {
            std::thread::spawn({
                let nums = nums.clone();
                let collector = collector.clone();
                move || {
                    let mut guard = collector.enter();
                    for _ in 0..cfg::ITER {
                        for n in nums.iter() {
                            let n = guard.protect(n, Ordering::Acquire);
                            // The protected value must still be live; any
                            // use-after-free here would be caught under Miri.
                            unsafe { assert!(**n < 10_000) }
                        }
                        guard.refresh();
                    }
                }
            })
        })
        .collect::<Vec<_>>();
    // Writer: replace every slot's value once per iteration, retiring the
    // value that was swapped out.
    for i in 0..cfg::ITER {
        for n in nums.iter() {
            let old = n.swap(collector.link_boxed(i), Ordering::AcqRel);
            unsafe { collector.retire(old, reclaim::boxed::<Linked<usize>>) }
        }
    }
    for handle in handles {
        handle.join().unwrap()
    }
    // cleanup
    for n in nums.iter() {
        let old = n.swap(ptr::null_mut(), Ordering::Acquire);
        unsafe { collector.retire(old, reclaim::boxed::<Linked<usize>>) }
    }
}
#[test]
fn delayed_retire() {
    // Values passed to `defer_retire` while a guard is active must not be
    // dropped until that guard is released.
    struct DropTrack(Arc<AtomicUsize>);
    impl Drop for DropTrack {
        fn drop(&mut self) {
            self.0.fetch_add(1, Ordering::Relaxed);
        }
    }
    let dropped = Arc::new(AtomicUsize::new(0));
    let collector = Collector::new().batch_size(5);
    let objects = (0..30)
        .map(|_| collector.link_boxed(DropTrack(dropped.clone())))
        .collect::<Vec<_>>();
    let guard = collector.enter();
    for object in objects {
        unsafe { guard.defer_retire(object, reclaim::boxed::<Linked<DropTrack>>) }
    }
    // Nothing may be reclaimed while the guard is live...
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    drop(guard);
    // ...and everything must be reclaimed once it is dropped.
    assert_eq!(dropped.load(Ordering::Relaxed), 30);
}
#[test]
fn reentrant() {
    // Nested guards on the same thread must all be released before deferred
    // retirements from another thread can be reclaimed.
    struct UnsafeSend<T>(T);
    // SAFETY (test-only): lets the raw pointers move into the spawned
    // thread; the values are only touched from that thread.
    unsafe impl<T> Send for UnsafeSend<T> {}
    struct DropTrack(Arc<AtomicUsize>);
    impl Drop for DropTrack {
        fn drop(&mut self) {
            self.0.fetch_add(1, Ordering::Relaxed);
        }
    }
    let collector = Arc::new(Collector::new().batch_size(5).epoch_frequency(None));
    let dropped = Arc::new(AtomicUsize::new(0));
    let objects: UnsafeSend<Vec<_>> = UnsafeSend(
        (0..5)
            .map(|_| collector.link_boxed(DropTrack(dropped.clone())))
            .collect(),
    );
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    // Three nested guards on this thread.
    let guard1 = collector.enter();
    let guard2 = collector.enter();
    let guard3 = collector.enter();
    std::thread::spawn({
        let collector = collector.clone();
        move || {
            // Defer-retire everything from a different thread while the main
            // thread's guards are still active.
            let guard = collector.enter();
            for object in { objects }.0 {
                unsafe { guard.defer_retire(object, reclaim::boxed::<Linked<DropTrack>>) }
            }
        }
    })
    .join()
    .unwrap();
    // Reclamation must wait for *all* nested guards to be dropped.
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    drop(guard1);
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    drop(guard2);
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    drop(guard3);
    assert_eq!(dropped.load(Ordering::Relaxed), 5);
    // Same scenario again, but interleaving `refresh` calls: only the
    // `refresh` on the last still-active guard permits reclamation.
    let dropped = Arc::new(AtomicUsize::new(0));
    let objects: UnsafeSend<Vec<_>> = UnsafeSend(
        (0..5)
            .map(|_| collector.link_boxed(DropTrack(dropped.clone())))
            .collect(),
    );
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    let mut guard1 = collector.enter();
    let mut guard2 = collector.enter();
    let mut guard3 = collector.enter();
    std::thread::spawn({
        let collector = collector.clone();
        move || {
            let guard = collector.enter();
            for object in { objects }.0 {
                unsafe { guard.defer_retire(object, reclaim::boxed::<Linked<DropTrack>>) }
            }
        }
    })
    .join()
    .unwrap();
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    guard1.refresh();
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    drop(guard1);
    guard2.refresh();
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    drop(guard2);
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    guard3.refresh();
    assert_eq!(dropped.load(Ordering::Relaxed), 5);
}
#[test]
fn collector_eq() {
    // `ptr_eq` must identify guards from the same collector and distinguish
    // guards from different collectors; an unprotected guard has no owner.
    let a = Collector::new();
    let b = Collector::new();
    let unprotected = unsafe { Guard::unprotected() };
    let same_collector = |x: &Collector, y: &Collector| {
        Collector::ptr_eq(
            x.enter().collector().unwrap(),
            y.enter().collector().unwrap(),
        )
    };
    assert!(same_collector(&a, &a));
    assert!(same_collector(&a, &a));
    assert!(!same_collector(&a, &b));
    assert!(unprotected.collector().is_none());
}