chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

126
vendor/tungstenite/benches/buffer.rs vendored Normal file
View File

@@ -0,0 +1,126 @@
use std::io::{Cursor, Read, Result as IoResult};
use bytes::Buf;
use criterion::*;
use input_buffer::InputBuffer;
use tungstenite::buffer::ReadBuffer;
// Read granularity (bytes) used by all three benchmarked buffer drivers below.
const CHUNK_SIZE: usize = 4096;
/// A FIFO buffer for reading packets from the network.
///
/// Stack variant: the scratch `chunk` array lives inline in the struct, sized by
/// the `CHUNK_SIZE` const generic (which shadows the file-level `CHUNK_SIZE`
/// const inside this type).
#[derive(Debug)]
pub struct StackReadBuffer<const CHUNK_SIZE: usize> {
    // Accumulated bytes; the cursor position marks how much has been consumed.
    storage: Cursor<Vec<u8>>,
    // Inline scratch space refilled by each `read_from` call.
    chunk: [u8; CHUNK_SIZE],
}
impl<const CHUNK_SIZE: usize> StackReadBuffer<CHUNK_SIZE> {
    /// Creates an empty input buffer with `CHUNK_SIZE` bytes of initial capacity.
    pub fn new() -> Self {
        Self::with_capacity(CHUNK_SIZE)
    }

    /// Creates an empty input buffer whose backing vector preallocates `capacity` bytes.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::from_partially_read(Vec::with_capacity(capacity))
    }

    /// Wraps previously read bytes `part` in a buffer; consumption restarts at offset 0.
    pub fn from_partially_read(part: Vec<u8>) -> Self {
        Self { chunk: [0; CHUNK_SIZE], storage: Cursor::new(part) }
    }

    /// Immutable view of the underlying cursor.
    pub fn as_cursor(&self) -> &Cursor<Vec<u8>> {
        &self.storage
    }

    /// Mutable view of the underlying cursor.
    pub fn as_cursor_mut(&mut self) -> &mut Cursor<Vec<u8>> {
        &mut self.storage
    }

    /// Consumes the buffer and returns the internal storage.
    ///
    /// `tungstenite-rs` expects `into_vec()` to have drained whatever the cursor
    /// already consumed, so the storage is compacted before being handed out.
    pub fn into_vec(mut self) -> Vec<u8> {
        self.clean_up();
        self.storage.into_inner()
    }

    /// Reads the next portion of data from `stream`, appending it to the storage.
    ///
    /// Returns the number of bytes read (0 at EOF). Already-consumed bytes are
    /// compacted away first so the vector does not grow without bound.
    pub fn read_from<S: Read>(&mut self, stream: &mut S) -> IoResult<usize> {
        self.clean_up();
        let n = stream.read(&mut self.chunk)?;
        self.storage.get_mut().extend_from_slice(&self.chunk[..n]);
        Ok(n)
    }

    /// Drops the prefix the cursor has already walked past and rewinds the
    /// cursor to the start of the (now shorter) vector.
    fn clean_up(&mut self) {
        let consumed = self.storage.position() as usize;
        drop(self.storage.get_mut().drain(..consumed));
        self.storage.set_position(0);
    }
}
/// Delegates `Buf` to the internal cursor, so bytes are consumed FIFO.
impl<const CHUNK_SIZE: usize> Buf for StackReadBuffer<CHUNK_SIZE> {
    fn remaining(&self) -> usize {
        self.as_cursor().remaining()
    }

    fn chunk(&self) -> &[u8] {
        self.as_cursor().chunk()
    }

    fn advance(&mut self, cnt: usize) {
        self.as_cursor_mut().advance(cnt);
    }
}
impl<const CHUNK_SIZE: usize> Default for StackReadBuffer<CHUNK_SIZE> {
fn default() -> Self {
Self::new()
}
}
/// Drains `stream` to EOF through an `input_buffer::InputBuffer`.
#[inline]
fn input_buffer(mut stream: impl Read) {
    let mut buffer = InputBuffer::with_capacity(CHUNK_SIZE);
    loop {
        if buffer.read_from(&mut stream).unwrap() == 0 {
            break;
        }
    }
}
/// Drains `stream` to EOF through the stack-allocated `StackReadBuffer`.
#[inline]
fn stack_read_buffer(mut stream: impl Read) {
    let mut buffer = StackReadBuffer::<CHUNK_SIZE>::new();
    loop {
        if buffer.read_from(&mut stream).unwrap() == 0 {
            break;
        }
    }
}
/// Drains `stream` to EOF through tungstenite's heap-backed `ReadBuffer`.
#[inline]
fn heap_read_buffer(mut stream: impl Read) {
    let mut buffer = ReadBuffer::<CHUNK_SIZE>::new();
    loop {
        if buffer.read_from(&mut stream).unwrap() == 0 {
            break;
        }
    }
}
/// Compares the throughput of the three buffer implementations while draining
/// the same 4 MiB stream of random bytes.
fn benchmark(c: &mut Criterion) {
    const STREAM_SIZE: usize = 1024 * 1024 * 4;
    let data: Vec<u8> = (0..STREAM_SIZE).map(|_| rand::random()).collect();
    let stream = Cursor::new(data);

    let mut group = c.benchmark_group("buffers");
    group.throughput(Throughput::Bytes(STREAM_SIZE as u64));
    group.bench_function("InputBuffer", |b| {
        b.iter(|| input_buffer(black_box(stream.clone())));
    });
    group.bench_function("ReadBuffer (stack)", |b| {
        b.iter(|| stack_read_buffer(black_box(stream.clone())));
    });
    group.bench_function("ReadBuffer (heap)", |b| {
        b.iter(|| heap_read_buffer(black_box(stream.clone())));
    });
    group.finish();
}
// Register the benchmark group and generate the binary's `main` entry point.
criterion_group!(benches, benchmark);
criterion_main!(benches);

92
vendor/tungstenite/benches/read.rs vendored Normal file
View File

@@ -0,0 +1,92 @@
//! Benchmarks for read performance.
use criterion::{BatchSize, Criterion};
use std::{
io::{self, Read, Write},
sync::{Arc, Mutex},
};
use tungstenite::{protocol::Role, Message, WebSocket};
/// In-memory mock stream with no artificial delays.
///
/// Clones share one `Arc`'d byte queue: one handle can write frames that
/// another handle later reads, FIFO.
#[derive(Default, Clone)]
struct MockIo(Arc<Mutex<Vec<u8>>>);

impl Read for MockIo {
    fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
        let mut pending = self.0.lock().unwrap();
        if pending.is_empty() {
            // Nothing buffered yet — behave like a non-blocking socket.
            return Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready"));
        }
        let n = pending.len().min(out.len());
        out[..n].copy_from_slice(&pending[..n]);
        pending.drain(..n);
        Ok(n)
    }
}

impl Write for MockIo {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // `Write for Vec<u8>` appends the whole slice and reports its length.
        let mut pending = self.0.lock().unwrap();
        pending.write(buf)
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
/// Builds a writer/reader WebSocket pair over a shared `MockIo` queue and
/// benchmarks the reading side.
fn benchmark(c: &mut Criterion) {
    /// Benchmark reading 100k mix of binary & text messages.
    fn read_100k(role: Role, b: &mut criterion::Bencher<'_>) {
        let io = MockIo::default();
        // The writer takes the opposite role so that its frames are valid
        // input for the socket under test (a Server reads client frames).
        let mut writer = WebSocket::from_raw_socket(
            io.clone(),
            match role {
                Role::Client => Role::Server,
                Role::Server => Role::Client,
            },
            None,
        );
        let mut ws = WebSocket::from_raw_socket(io, role, None);
        b.iter_batched(
            || {
                // Setup (untimed): enqueue 100k messages into the shared
                // in-memory stream — every third binary, the rest JSON text —
                // and return the sum of their ids as the termination target.
                let mut sum = 0;
                for i in 0_u64..100_000 {
                    writer
                        .send(match i {
                            _ if i % 3 == 0 => Message::binary(i.to_le_bytes().to_vec()),
                            _ => Message::text(format!("{{\"id\":{i}}}")),
                        })
                        .unwrap();
                    sum += i;
                }
                sum
            },
            |expected_sum| {
                // Timed: read messages back, recovering each id, until the
                // running total matches what was enqueued.
                let mut sum = 0;
                while sum != expected_sum {
                    match ws.read().unwrap() {
                        Message::Binary(v) => {
                            let a: &[u8; 8] = v.as_ref().try_into().unwrap();
                            sum += u64::from_le_bytes(*a);
                        }
                        Message::Text(msg) => {
                            // Payload is `{"id":N}` — skip the 6-char prefix
                            // and the trailing `}` to parse N.
                            let i: u64 = msg.as_str()[6..msg.len() - 1].parse().unwrap();
                            sum += i;
                        }
                        m => panic!("Unexpected {m}"),
                    }
                }
            },
            BatchSize::SmallInput,
        );
    }
    c.bench_function("read+unmask 100k small messages (server)", |b| {
        read_100k(Role::Server, b);
    });
    c.bench_function("read 100k small messages (client)", |b| {
        read_100k(Role::Client, b);
    });
}
// Register the read benchmarks and generate the binary's `main` entry point.
criterion::criterion_group!(read_benches, benchmark);
criterion::criterion_main!(read_benches);

78
vendor/tungstenite/benches/write.rs vendored Normal file
View File

@@ -0,0 +1,78 @@
//! Benchmarks for write performance.
use criterion::Criterion;
use std::{
hint, io,
time::{Duration, Instant},
};
use tungstenite::{protocol::Role, Message, WebSocket};
// Buffer limit: a write that would push past this triggers a flush first.
const MOCK_WRITE_LEN: usize = 8 * 1024 * 1024;

/// `Write` impl that simulates slowish writes and slow flushes.
///
/// Each `write` can buffer up to 8 MiB before flushing but takes an additional **~80ns**
/// to simulate stuff going on in the underlying stream.
/// Each `flush` takes **~8µs** to simulate flush io.
struct MockWrite(Vec<u8>);

impl io::Read for MockWrite {
    fn read(&mut self, _: &mut [u8]) -> io::Result<usize> {
        // Write-only mock: reading always reports "not ready".
        Err(io::Error::new(io::ErrorKind::WouldBlock, "reads not supported"))
    }
}

impl io::Write for MockWrite {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        let would_overflow = self.0.len() + data.len() > MOCK_WRITE_LEN;
        if would_overflow {
            self.flush()?;
        }
        // simulate io
        spin(Duration::from_nanos(80));
        self.0.extend_from_slice(data);
        Ok(data.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        if self.0.is_empty() {
            // Nothing buffered: flushing is free.
            return Ok(());
        }
        // simulate io
        spin(Duration::from_micros(8));
        self.0.clear();
        Ok(())
    }
}

/// Busy-waits for roughly `duration` without yielding the thread.
fn spin(duration: Duration) {
    let started = Instant::now();
    loop {
        if started.elapsed() >= duration {
            break;
        }
        hint::spin_loop();
    }
}
/// Benchmarks writing through a WebSocket backed by the delay-simulating
/// `MockWrite` sink, for both server (unmasked) and client (masked) roles.
fn benchmark(c: &mut Criterion) {
    /// Writes 100k small messages (1/3 binary, 2/3 JSON text), then one flush.
    fn write_100k_then_flush(role: Role, b: &mut criterion::Bencher<'_>) {
        let sink = MockWrite(Vec::with_capacity(MOCK_WRITE_LEN));
        let mut ws = WebSocket::from_raw_socket(sink, role, None);
        b.iter(|| {
            for i in 0_u64..100_000 {
                let msg = if i % 3 == 0 {
                    Message::binary(i.to_le_bytes().to_vec())
                } else {
                    Message::text(format!("{{\"id\":{i}}}"))
                };
                ws.write(msg).unwrap();
            }
            ws.flush().unwrap();
        });
    }
    c.bench_function("write 100k small messages then flush (server)", |b| {
        write_100k_then_flush(Role::Server, b);
    });
    c.bench_function("write+mask 100k small messages then flush (client)", |b| {
        write_100k_then_flush(Role::Client, b);
    });
}
// Register the write benchmarks and generate the binary's `main` entry point.
criterion::criterion_group!(write_benches, benchmark);
criterion::criterion_main!(write_benches);