chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

View File

@@ -0,0 +1,2 @@
// These integration tests are only meaningful with the `full` feature enabled.
// When built without it, fail immediately with an actionable message instead of
// producing confusing downstream compile/link errors.
#![cfg(not(feature = "full"))]
compile_error!("run tokio-util tests with `--features full`");

View File

@@ -0,0 +1,39 @@
use tokio::{sync::oneshot, task::yield_now};
use tokio_util::task::AbortOnDropHandle;
// Dropping an `AbortOnDropHandle` must abort the wrapped task. The task owns
// `rx`, so aborting it drops the receiver and the sender observes the close.
#[tokio::test]
async fn aborts_task_on_drop() {
let (mut tx, rx) = oneshot::channel::<bool>();
let handle = tokio::spawn(async move {
let _ = rx.await;
});
let handle = AbortOnDropHandle::new(handle);
drop(handle);
// `closed()` resolves only once the receiver is gone, i.e. the task died.
tx.closed().await;
assert!(tx.is_closed());
}
// Calling `abort()` on an `AbortOnDropHandle` (without dropping it) also kills
// the task, and the handle then reports the task as finished.
#[tokio::test]
async fn aborts_task_directly() {
let (mut tx, rx) = oneshot::channel::<bool>();
let handle = tokio::spawn(async move {
let _ = rx.await;
});
let handle = AbortOnDropHandle::new(handle);
handle.abort();
// Wait until the aborted task has dropped `rx` before asserting.
tx.closed().await;
assert!(tx.is_closed());
assert!(handle.is_finished());
}
// `detach()` must disarm the abort-on-drop behavior: after detaching, the
// spawned task keeps running even though both handles are gone.
#[tokio::test]
async fn does_not_abort_after_detach() {
let (tx, rx) = oneshot::channel::<bool>();
let handle = tokio::spawn(async move {
let _ = rx.await;
});
let handle = AbortOnDropHandle::new(handle);
handle.detach(); // returns and drops the original join handle
// Give the scheduler a chance to run (and potentially abort) the task.
yield_now().await;
assert!(!tx.is_closed()); // task is still live
}

443
vendor/tokio-util/tests/codecs.rs vendored Normal file
View File

@@ -0,0 +1,443 @@
#![warn(rust_2018_idioms)]
use tokio_util::codec::{AnyDelimiterCodec, BytesCodec, Decoder, Encoder, LinesCodec};
use bytes::{BufMut, Bytes, BytesMut};
// `BytesCodec::decode` hands back whatever bytes are buffered (as one frame)
// and returns `None` when the buffer is empty.
#[test]
fn bytes_decoder() {
let mut codec = BytesCodec::new();
let buf = &mut BytesMut::new();
buf.put_slice(b"abc");
assert_eq!("abc", codec.decode(buf).unwrap().unwrap());
// Buffer drained: repeated decodes yield nothing until more data arrives.
assert_eq!(None, codec.decode(buf).unwrap());
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"a");
assert_eq!("a", codec.decode(buf).unwrap().unwrap());
}
// `BytesCodec::encode` must succeed regardless of whether the payload exceeds
// the destination's inline/initial capacity (forcing the buffer to grow).
#[test]
fn bytes_encoder() {
let mut codec = BytesCodec::new();
// Default capacity of BytesMut
#[cfg(target_pointer_width = "64")]
const INLINE_CAP: usize = 4 * 8 - 1;
#[cfg(target_pointer_width = "32")]
const INLINE_CAP: usize = 4 * 4 - 1;
let mut buf = BytesMut::new();
// One byte larger than the inline capacity: forces a heap allocation.
codec
.encode(Bytes::from_static(&[0; INLINE_CAP + 1]), &mut buf)
.unwrap();
// Default capacity of Framed Read
const INITIAL_CAPACITY: usize = 8 * 1024;
let mut buf = BytesMut::with_capacity(INITIAL_CAPACITY);
// One byte larger than the preallocated capacity: forces a grow.
codec
.encode(Bytes::from_static(&[0; INITIAL_CAPACITY + 1]), &mut buf)
.unwrap();
// Encoder is also implemented for BytesMut payloads.
codec
.encode(BytesMut::from(&b"hello"[..]), &mut buf)
.unwrap();
}
// `LinesCodec::decode` splits on `\n`, strips a trailing `\r`, and buffers a
// trailing fragment with no newline until `decode_eof` flushes it.
#[test]
fn lines_decoder() {
let mut codec = LinesCodec::new();
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"line 1\nline 2\r\nline 3\n\r\n\r");
assert_eq!("line 1", codec.decode(buf).unwrap().unwrap());
assert_eq!("line 2", codec.decode(buf).unwrap().unwrap());
assert_eq!("line 3", codec.decode(buf).unwrap().unwrap());
// "\r\n" alone is an empty line.
assert_eq!("", codec.decode(buf).unwrap().unwrap());
// The final lone "\r" is not a complete line yet.
assert_eq!(None, codec.decode(buf).unwrap());
assert_eq!(None, codec.decode_eof(buf).unwrap());
buf.put_slice(b"k");
assert_eq!(None, codec.decode(buf).unwrap());
// At EOF the pending "\rk" fragment is emitted as-is (the \r is kept because
// it is not followed by \n).
assert_eq!("\rk", codec.decode_eof(buf).unwrap().unwrap());
assert_eq!(None, codec.decode(buf).unwrap());
assert_eq!(None, codec.decode_eof(buf).unwrap());
}
// Invalid UTF-8 in a pending fragment surfaces as an error from `decode_eof`,
// after which the codec recovers and keeps decoding subsequent valid lines.
#[test]
fn lines_decoder_invalid_utf8() {
let mut codec = LinesCodec::new();
let buf = &mut BytesMut::new();
buf.reserve(200);
// \xc3\x28 is an invalid UTF-8 sequence.
buf.put_slice(b"line 1\xc3\x28");
assert_eq!(None, codec.decode(buf).unwrap());
assert!(codec.decode_eof(buf).is_err());
// The bad bytes were discarded with the error; next decode_eof is clean.
assert_eq!(None, codec.decode_eof(buf).unwrap());
buf.put_slice(b"line 22222222222222\n");
assert_eq!("line 22222222222222", codec.decode(buf).unwrap().unwrap());
}
// With `new_with_max_length`, over-long lines produce an error and are
// discarded, while lines within the limit keep decoding normally afterwards.
#[test]
fn lines_decoder_max_length() {
const MAX_LENGTH: usize = 6;
let mut codec = LinesCodec::new_with_max_length(MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"line 1 is too long\nline 2\nline 3\r\nline 4\n\r\n\r");
// First line exceeds MAX_LENGTH -> error, line discarded.
assert!(codec.decode(buf).is_err());
let line = codec.decode(buf).unwrap().unwrap();
assert!(line.len() <= MAX_LENGTH, "{line:?}.len() <= {MAX_LENGTH:?}");
assert_eq!("line 2", line);
// "line 3\r\n" is 6 chars + \r\n; the \r counts toward the limit -> error.
assert!(codec.decode(buf).is_err());
let line = codec.decode(buf).unwrap().unwrap();
assert!(line.len() <= MAX_LENGTH, "{line:?}.len() <= {MAX_LENGTH:?}");
assert_eq!("line 4", line);
let line = codec.decode(buf).unwrap().unwrap();
assert!(line.len() <= MAX_LENGTH, "{line:?}.len() <= {MAX_LENGTH:?}");
assert_eq!("", line);
assert_eq!(None, codec.decode(buf).unwrap());
assert_eq!(None, codec.decode_eof(buf).unwrap());
buf.put_slice(b"k");
assert_eq!(None, codec.decode(buf).unwrap());
let line = codec.decode_eof(buf).unwrap().unwrap();
assert!(line.len() <= MAX_LENGTH, "{line:?}.len() <= {MAX_LENGTH:?}");
assert_eq!("\rk", line);
assert_eq!(None, codec.decode(buf).unwrap());
assert_eq!(None, codec.decode_eof(buf).unwrap());
// Line that's one character too long. This could cause an out of bounds
// error if we peek at the next characters using slice indexing.
buf.put_slice(b"aaabbbc");
assert!(codec.decode(buf).is_err());
}
// Over-long line detection must trigger as soon as the limit is exceeded, even
// when the line arrives in several partial reads, and the remainder of the
// over-long line must be discarded silently.
#[test]
fn lines_decoder_max_length_underrun() {
const MAX_LENGTH: usize = 6;
let mut codec = LinesCodec::new_with_max_length(MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"line ");
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"too l");
// Buffered length now exceeds MAX_LENGTH with no newline -> error.
assert!(codec.decode(buf).is_err());
buf.put_slice(b"ong\n");
// Tail of the discarded line is swallowed, not emitted.
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"line 2");
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"\n");
assert_eq!("line 2", codec.decode(buf).unwrap().unwrap());
}
// With a larger limit, partial reads below the limit stay pending; the error
// fires only when the completed line turns out to be too long.
#[test]
fn lines_decoder_max_length_bursts() {
const MAX_LENGTH: usize = 10;
let mut codec = LinesCodec::new_with_max_length(MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"line ");
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"too l");
// Exactly at the limit, still no newline: not an error yet.
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"ong\n");
assert!(codec.decode(buf).is_err());
}
// Same as above but the limit-exceeding tail arrives in one burst together
// with the terminating newline.
#[test]
fn lines_decoder_max_length_big_burst() {
const MAX_LENGTH: usize = 10;
let mut codec = LinesCodec::new_with_max_length(MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"line ");
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"too long!\n");
assert!(codec.decode(buf).is_err());
}
// A line of exactly MAX_LENGTH bytes whose newline arrives in a later read must
// still decode successfully (the limit is on line content, not buffer size).
#[test]
fn lines_decoder_max_length_newline_between_decodes() {
const MAX_LENGTH: usize = 5;
let mut codec = LinesCodec::new_with_max_length(MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"hello");
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"\nworld");
assert_eq!("hello", codec.decode(buf).unwrap().unwrap());
}
// Regression test for [infinite loop bug](https://github.com/tokio-rs/tokio/issues/1483)
#[test]
fn lines_decoder_discard_repeat() {
const MAX_LENGTH: usize = 1;
let mut codec = LinesCodec::new_with_max_length(MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"aa");
assert!(codec.decode(buf).is_err());
buf.put_slice(b"a");
// Still discarding the over-long line; must return None, not loop forever.
assert_eq!(None, codec.decode(buf).unwrap());
}
// Regression test for [subsequent calls to LinesCodec decode does not return the desired results bug](https://github.com/tokio-rs/tokio/issues/3555)
#[test]
fn lines_decoder_max_length_underrun_twice() {
const MAX_LENGTH: usize = 11;
let mut codec = LinesCodec::new_with_max_length(MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"line ");
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"too very l");
// Limit exceeded mid-line -> error; codec enters discard mode.
assert!(codec.decode(buf).is_err());
buf.put_slice(b"aaaaaaaaaaaaaaaaaaaaaaa");
// Still discarding the same over-long line across multiple decode calls.
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"ong\nshort\n");
// Discard ends at the newline; the next line decodes correctly.
assert_eq!("short", codec.decode(buf).unwrap().unwrap());
}
// `LinesCodec::encode` appends each item followed by a single '\n',
// accumulating output across calls into the same buffer.
#[test]
fn lines_encoder() {
    let mut codec = LinesCodec::new();
    let mut output = BytesMut::new();

    codec.encode("line 1", &mut output).unwrap();
    assert_eq!("line 1\n", output);

    codec.encode("line 2", &mut output).unwrap();
    assert_eq!("line 1\nline 2\n", output);
}
// `AnyDelimiterCodec` splits on any byte of the seek-delimiter set (",;\n\r"
// here); consecutive delimiters yield empty chunks, and a trailing fragment is
// flushed only by `decode_eof`.
#[test]
fn any_delimiters_decoder_any_character() {
let mut codec = AnyDelimiterCodec::new(b",;\n\r".to_vec(), b",".to_vec());
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"chunk 1,chunk 2;chunk 3\n\r");
assert_eq!("chunk 1", codec.decode(buf).unwrap().unwrap());
assert_eq!("chunk 2", codec.decode(buf).unwrap().unwrap());
assert_eq!("chunk 3", codec.decode(buf).unwrap().unwrap());
// "\n" then "\r": the empty span between two delimiters is its own chunk.
assert_eq!("", codec.decode(buf).unwrap().unwrap());
assert_eq!(None, codec.decode(buf).unwrap());
assert_eq!(None, codec.decode_eof(buf).unwrap());
buf.put_slice(b"k");
assert_eq!(None, codec.decode(buf).unwrap());
// EOF flushes the undelimited tail.
assert_eq!("k", codec.decode_eof(buf).unwrap().unwrap());
assert_eq!(None, codec.decode(buf).unwrap());
assert_eq!(None, codec.decode_eof(buf).unwrap());
}
// Length-limited variant: over-long chunks error out and are discarded, while
// all in-limit chunks (including empty ones between delimiters) still decode.
#[test]
fn any_delimiters_decoder_max_length() {
const MAX_LENGTH: usize = 7;
let mut codec =
AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"chunk 1 is too long\nchunk 2\nchunk 3\r\nchunk 4\n\r\n");
// First chunk exceeds MAX_LENGTH -> error, chunk discarded.
assert!(codec.decode(buf).is_err());
let chunk = codec.decode(buf).unwrap().unwrap();
assert!(
chunk.len() <= MAX_LENGTH,
"{chunk:?}.len() <= {MAX_LENGTH:?}"
);
assert_eq!("chunk 2", chunk);
let chunk = codec.decode(buf).unwrap().unwrap();
assert!(
chunk.len() <= MAX_LENGTH,
"{chunk:?}.len() <= {MAX_LENGTH:?}"
);
assert_eq!("chunk 3", chunk);
// \r\n cause empty chunk
let chunk = codec.decode(buf).unwrap().unwrap();
assert!(
chunk.len() <= MAX_LENGTH,
"{chunk:?}.len() <= {MAX_LENGTH:?}"
);
assert_eq!("", chunk);
let chunk = codec.decode(buf).unwrap().unwrap();
assert!(
chunk.len() <= MAX_LENGTH,
"{chunk:?}.len() <= {MAX_LENGTH:?}"
);
assert_eq!("chunk 4", chunk);
let chunk = codec.decode(buf).unwrap().unwrap();
assert!(
chunk.len() <= MAX_LENGTH,
"{chunk:?}.len() <= {MAX_LENGTH:?}"
);
assert_eq!("", chunk);
let chunk = codec.decode(buf).unwrap().unwrap();
assert!(
chunk.len() <= MAX_LENGTH,
"{chunk:?}.len() <= {MAX_LENGTH:?}"
);
assert_eq!("", chunk);
assert_eq!(None, codec.decode(buf).unwrap());
assert_eq!(None, codec.decode_eof(buf).unwrap());
buf.put_slice(b"k");
assert_eq!(None, codec.decode(buf).unwrap());
// EOF flushes the undelimited tail, still subject to the length check.
let chunk = codec.decode_eof(buf).unwrap().unwrap();
assert!(
chunk.len() <= MAX_LENGTH,
"{chunk:?}.len() <= {MAX_LENGTH:?}"
);
assert_eq!("k", chunk);
assert_eq!(None, codec.decode(buf).unwrap());
assert_eq!(None, codec.decode_eof(buf).unwrap());
// Delimiter that's one character too long. This could cause an out of bounds
// error if we peek at the next characters using slice indexing.
buf.put_slice(b"aaabbbcc");
assert!(codec.decode(buf).is_err());
}
// Over-long chunk detection across partial reads: error as soon as the limit is
// exceeded, then silently discard the rest of that chunk.
#[test]
fn any_delimiter_decoder_max_length_underrun() {
const MAX_LENGTH: usize = 7;
let mut codec =
AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"chunk ");
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"too l");
// Buffered length now exceeds MAX_LENGTH with no delimiter -> error.
assert!(codec.decode(buf).is_err());
buf.put_slice(b"ong\n");
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"chunk 2");
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b",");
assert_eq!("chunk 2", codec.decode(buf).unwrap().unwrap());
}
// Discard mode must persist across several decode calls until a delimiter
// finally terminates the over-long chunk.
#[test]
fn any_delimiter_decoder_max_length_underrun_twice() {
const MAX_LENGTH: usize = 11;
let mut codec =
AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"chunk ");
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"too very l");
assert!(codec.decode(buf).is_err());
buf.put_slice(b"aaaaaaaaaaaaaaaaaaaaaaa");
// Still discarding the same over-long chunk.
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"ong\nshort\n");
assert_eq!("short", codec.decode(buf).unwrap().unwrap());
}
// Partial reads below the limit stay pending; the error fires only when the
// completed chunk turns out to be too long.
#[test]
fn any_delimiter_decoder_max_length_bursts() {
const MAX_LENGTH: usize = 11;
let mut codec =
AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"chunk ");
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"too l");
// Exactly at the limit, no delimiter yet: not an error.
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"ong\n");
assert!(codec.decode(buf).is_err());
}
// Same as above but the limit-exceeding tail arrives in one burst together
// with the terminating delimiter.
#[test]
fn any_delimiter_decoder_max_length_big_burst() {
const MAX_LENGTH: usize = 11;
let mut codec =
AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"chunk ");
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b"too long!\n");
assert!(codec.decode(buf).is_err());
}
// A chunk of exactly MAX_LENGTH bytes whose delimiter arrives in a later read
// must still decode successfully.
#[test]
fn any_delimiter_decoder_max_length_delimiter_between_decodes() {
const MAX_LENGTH: usize = 5;
let mut codec =
AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"hello");
assert_eq!(None, codec.decode(buf).unwrap());
buf.put_slice(b",world");
assert_eq!("hello", codec.decode(buf).unwrap().unwrap());
}
// Regression-style check mirroring lines_decoder_discard_repeat: repeated
// decodes while discarding an over-long chunk must not error again or loop.
#[test]
fn any_delimiter_decoder_discard_repeat() {
const MAX_LENGTH: usize = 1;
let mut codec =
AnyDelimiterCodec::new_with_max_length(b",;\n\r".to_vec(), b",".to_vec(), MAX_LENGTH);
let buf = &mut BytesMut::new();
buf.reserve(200);
buf.put_slice(b"aa");
assert!(codec.decode(buf).is_err());
buf.put_slice(b"a");
assert_eq!(None, codec.decode(buf).unwrap());
}
// `AnyDelimiterCodec::encode` writes the item followed by the configured
// sequence-writer delimiter (";--;" here), accumulating across calls.
#[test]
fn any_delimiter_encoder() {
    let mut codec = AnyDelimiterCodec::new(b",".to_vec(), b";--;".to_vec());
    let mut output = BytesMut::new();

    codec.encode("chunk 1", &mut output).unwrap();
    assert_eq!("chunk 1;--;", output);

    codec.encode("chunk 2", &mut output).unwrap();
    assert_eq!("chunk 1;--;chunk 2;--;", output);
}

44
vendor/tokio-util/tests/compat.rs vendored Normal file
View File

@@ -0,0 +1,44 @@
#![cfg(feature = "compat")]
#![cfg(not(target_os = "wasi"))] // WASI does not support all fs operations
#![warn(rust_2018_idioms)]
use futures_io::SeekFrom;
use futures_util::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt};
use tempfile::NamedTempFile;
use tokio::fs::OpenOptions;
use tokio_util::compat::TokioAsyncWriteCompatExt;
// End-to-end exercise of the tokio -> futures-io compat adapter: write, seek
// backwards to overwrite, seek to the end/start, and read everything back.
#[tokio::test]
async fn compat_file_seek() -> futures_util::io::Result<()> {
let temp_file = NamedTempFile::new()?;
// NOTE(review): `open` takes `temp_file` by value (via AsRef<Path>), so the
// NamedTempFile guard is consumed here; the already-opened descriptor keeps
// working for the rest of the test — confirm this is intended cross-platform.
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(temp_file)
.await?
.compat_write();
file.write_all(&[0, 1, 2, 3, 4, 5]).await?;
file.write_all(&[6, 7]).await?;
assert_eq!(file.stream_position().await?, 8);
// Modify elements at position 2.
assert_eq!(file.seek(SeekFrom::Start(2)).await?, 2);
file.write_all(&[8, 9]).await?;
file.flush().await?;
// Verify we still have 8 elements.
assert_eq!(file.seek(SeekFrom::End(0)).await?, 8);
// Seek back to the start of the file to read and verify contents.
file.seek(SeekFrom::Start(0)).await?;
let mut buf = Vec::new();
let num_bytes = file.read_to_end(&mut buf).await?;
assert_eq!(&buf[..num_bytes], &[0, 1, 8, 9, 4, 5, 6, 7]);
Ok(())
}

25
vendor/tokio-util/tests/context.rs vendored Normal file
View File

@@ -0,0 +1,25 @@
#![cfg(feature = "rt")]
#![cfg(not(target_os = "wasi"))] // Wasi doesn't support threads
#![warn(rust_2018_idioms)]
use tokio::runtime::Builder;
use tokio::time::*;
use tokio_util::context::RuntimeExt;
// `RuntimeExt::wrap` must make a future run with the wrapped runtime's
// services (here: the timer), even when polled from a different runtime.
#[test]
fn tokio_context_with_another_runtime() {
let rt1 = Builder::new_multi_thread()
.worker_threads(1)
// no timer!
.build()
.unwrap();
let rt2 = Builder::new_multi_thread()
.worker_threads(1)
.enable_all()
.build()
.unwrap();
// Without the `HandleExt.wrap()` there would be a panic because there is
// no timer running, since it would be referencing runtime rt1.
rt1.block_on(rt2.wrap(async move { sleep(Duration::from_millis(2)).await }));
}

152
vendor/tokio-util/tests/framed.rs vendored Normal file
View File

@@ -0,0 +1,152 @@
#![warn(rust_2018_idioms)]
use tokio_stream::StreamExt;
use tokio_test::assert_ok;
use tokio_util::codec::{Decoder, Encoder, Framed, FramedParts};
use bytes::{Buf, BufMut, BytesMut};
use std::io::{self, Read};
use std::pin::Pin;
use std::task::{Context, Poll};
// Mirrors the default read-buffer capacity used by Framed; the external-buffer
// tests below assert against this value.
const INITIAL_CAPACITY: usize = 8 * 1024;
/// Encode and decode u32 values.
#[derive(Default)]
struct U32Codec {
// Total bytes consumed by `decode`; lets tests observe codec state carried
// across a `map_codec` swap.
read_bytes: usize,
}
impl Decoder for U32Codec {
type Item = u32;
type Error = io::Error;
fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<u32>> {
// A frame is exactly 4 big-endian bytes; anything shorter means "need more".
if buf.len() < 4 {
return Ok(None);
}
let n = buf.split_to(4).get_u32();
self.read_bytes += 4;
Ok(Some(n))
}
}
impl Encoder<u32> for U32Codec {
type Error = io::Error;
fn encode(&mut self, item: u32, dst: &mut BytesMut) -> io::Result<()> {
// Reserve space
dst.reserve(4);
dst.put_u32(item);
Ok(())
}
}
/// Encode and decode u64 values.
#[derive(Default)]
struct U64Codec {
// Total bytes consumed by `decode`; seeded from U32Codec in map_codec tests.
read_bytes: usize,
}
impl Decoder for U64Codec {
type Item = u64;
type Error = io::Error;
fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<u64>> {
// A frame is exactly 8 big-endian bytes.
if buf.len() < 8 {
return Ok(None);
}
let n = buf.split_to(8).get_u64();
self.read_bytes += 8;
Ok(Some(n))
}
}
impl Encoder<u64> for U64Codec {
type Error = io::Error;
fn encode(&mut self, item: u64, dst: &mut BytesMut) -> io::Result<()> {
// Reserve space
dst.reserve(8);
dst.put_u64(item);
Ok(())
}
}
/// This value should never be used
struct DontReadIntoThis;
impl Read for DontReadIntoThis {
fn read(&mut self, _: &mut [u8]) -> io::Result<usize> {
// Fail loudly if a test ever reads from the underlying IO instead of the
// pre-seeded `read_buf`.
Err(io::Error::new(
io::ErrorKind::Other,
"Read into something you weren't supposed to.",
))
}
}
impl tokio::io::AsyncRead for DontReadIntoThis {
fn poll_read(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
_buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<io::Result<()>> {
// Reaching here means Framed bypassed the buffered data — a test bug.
unreachable!()
}
}
// `Framed::from_parts` must decode frames already present in the supplied
// `read_buf` without touching the underlying IO at all.
#[tokio::test]
async fn can_read_from_existing_buf() {
let mut parts = FramedParts::new(DontReadIntoThis, U32Codec::default());
parts.read_buf = BytesMut::from(&[0, 0, 0, 42][..]);
let mut framed = Framed::from_parts(parts);
let num = assert_ok!(framed.next().await.unwrap());
assert_eq!(num, 42);
assert_eq!(framed.codec().read_bytes, 4);
}
// `map_codec` must preserve the pending read buffer and carry over codec state
// (read_bytes), so the remaining bytes decode with the new codec.
#[tokio::test]
async fn can_read_from_existing_buf_after_codec_changed() {
let mut parts = FramedParts::new(DontReadIntoThis, U32Codec::default());
// 4 bytes for the u32 frame (42), then 8 bytes for the u64 frame (84).
parts.read_buf = BytesMut::from(&[0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0, 84][..]);
let mut framed = Framed::from_parts(parts);
let num = assert_ok!(framed.next().await.unwrap());
assert_eq!(num, 42);
assert_eq!(framed.codec().read_bytes, 4);
let mut framed = framed.map_codec(|codec| U64Codec {
read_bytes: codec.read_bytes,
});
let num = assert_ok!(framed.next().await.unwrap());
assert_eq!(num, 84);
// 4 bytes via U32Codec + 8 via U64Codec.
assert_eq!(framed.codec().read_bytes, 12);
}
// A caller-supplied read buffer smaller than INITIAL_CAPACITY is grown to the
// default capacity once Framed takes ownership of it.
#[test]
fn external_buf_grows_to_init() {
    let mut parts = FramedParts::new(DontReadIntoThis, U32Codec::default());
    parts.read_buf = BytesMut::from(&[0, 0, 0, 42][..]);

    let recovered = Framed::from_parts(parts).into_parts().read_buf;
    assert_eq!(recovered.capacity(), INITIAL_CAPACITY);
}
// A caller-supplied read buffer larger than INITIAL_CAPACITY keeps its original
// capacity; Framed never shrinks it.
#[test]
fn external_buf_does_not_shrink() {
    let mut parts = FramedParts::new(DontReadIntoThis, U32Codec::default());
    parts.read_buf = BytesMut::from(&vec![0; INITIAL_CAPACITY * 2][..]);

    let recovered = Framed::from_parts(parts).into_parts().read_buf;
    assert_eq!(recovered.capacity(), INITIAL_CAPACITY * 2);
}

339
vendor/tokio-util/tests/framed_read.rs vendored Normal file
View File

@@ -0,0 +1,339 @@
#![warn(rust_2018_idioms)]
use tokio::io::{AsyncRead, ReadBuf};
use tokio_test::assert_ready;
use tokio_test::task;
use tokio_util::codec::{Decoder, FramedRead};
use bytes::{Buf, BytesMut};
use futures::Stream;
use std::collections::VecDeque;
use std::io;
use std::pin::Pin;
use std::task::Poll::{Pending, Ready};
use std::task::{Context, Poll};
// Builds a `Mock` reader whose queued `poll_read` results are the listed
// expressions (Ok(bytes) -> data, Err(WouldBlock) -> Pending, Err -> error).
macro_rules! mock {
($($x:expr,)*) => {{
let mut v = VecDeque::new();
v.extend(vec![$($x),*]);
Mock { calls: v }
}};
}
// Asserts a poll expression is Ready(Some(Ok(v))) with v == $n.
macro_rules! assert_read {
($e:expr, $n:expr) => {{
let val = assert_ready!($e);
assert_eq!(val.unwrap().unwrap(), $n);
}};
}
// Shorthand for `Pin::new(&mut ident)` on a local.
macro_rules! pin {
($id:ident) => {
Pin::new(&mut $id)
};
}
// Stateless decoder for 4-byte big-endian u32 frames.
struct U32Decoder;
impl Decoder for U32Decoder {
type Item = u32;
type Error = io::Error;
fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<u32>> {
// Fewer than 4 bytes buffered means "need more data".
if buf.len() < 4 {
return Ok(None);
}
let n = buf.split_to(4).get_u32();
Ok(Some(n))
}
}
// Stateless decoder for 8-byte big-endian u64 frames (used by map_decoder).
struct U64Decoder;
impl Decoder for U64Decoder {
type Item = u64;
type Error = io::Error;
fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<u64>> {
if buf.len() < 8 {
return Ok(None);
}
let n = buf.split_to(8).get_u64();
Ok(Some(n))
}
}
// Several complete frames arriving in one read are all decoded before the
// stream reports EOF (None).
#[test]
fn read_multi_frame_in_packet() {
let mut task = task::spawn(());
let mock = mock! {
Ok(b"\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02".to_vec()),
};
let mut framed = FramedRead::new(mock, U32Decoder);
task.enter(|cx, _| {
assert_read!(pin!(framed).poll_next(cx), 0);
assert_read!(pin!(framed).poll_next(cx), 1);
assert_read!(pin!(framed).poll_next(cx), 2);
assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none());
});
}
// Frames split one-per-read also decode in order; FramedRead keeps reading
// until a frame completes.
#[test]
fn read_multi_frame_across_packets() {
let mut task = task::spawn(());
let mock = mock! {
Ok(b"\x00\x00\x00\x00".to_vec()),
Ok(b"\x00\x00\x00\x01".to_vec()),
Ok(b"\x00\x00\x00\x02".to_vec()),
};
let mut framed = FramedRead::new(mock, U32Decoder);
task.enter(|cx, _| {
assert_read!(pin!(framed).poll_next(cx), 0);
assert_read!(pin!(framed).poll_next(cx), 1);
assert_read!(pin!(framed).poll_next(cx), 2);
assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none());
});
}
// `map_decoder` must keep the already-buffered bytes so the remainder of the
// packet decodes with the new (u64) decoder.
#[test]
fn read_multi_frame_in_packet_after_codec_changed() {
let mut task = task::spawn(());
let mock = mock! {
// 4 bytes (u32 0x04) followed by 8 bytes (u64 0x08) in a single read.
Ok(b"\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08".to_vec()),
};
let mut framed = FramedRead::new(mock, U32Decoder);
task.enter(|cx, _| {
assert_read!(pin!(framed).poll_next(cx), 0x04);
let mut framed = framed.map_decoder(|_| U64Decoder);
assert_read!(pin!(framed).poll_next(cx), 0x08);
assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none());
});
}
// A WouldBlock from the reader surfaces as Pending; subsequent polls resume
// decoding normally.
#[test]
fn read_not_ready() {
let mut task = task::spawn(());
let mock = mock! {
Err(io::Error::new(io::ErrorKind::WouldBlock, "")),
Ok(b"\x00\x00\x00\x00".to_vec()),
Ok(b"\x00\x00\x00\x01".to_vec()),
};
let mut framed = FramedRead::new(mock, U32Decoder);
task.enter(|cx, _| {
assert!(pin!(framed).poll_next(cx).is_pending());
assert_read!(pin!(framed).poll_next(cx), 0);
assert_read!(pin!(framed).poll_next(cx), 1);
assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none());
});
}
// A partial frame followed by WouldBlock stays buffered; later reads complete
// it and the following frames.
#[test]
fn read_partial_then_not_ready() {
let mut task = task::spawn(());
let mock = mock! {
Ok(b"\x00\x00".to_vec()),
Err(io::Error::new(io::ErrorKind::WouldBlock, "")),
Ok(b"\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02".to_vec()),
};
let mut framed = FramedRead::new(mock, U32Decoder);
task.enter(|cx, _| {
assert!(pin!(framed).poll_next(cx).is_pending());
assert_read!(pin!(framed).poll_next(cx), 0);
assert_read!(pin!(framed).poll_next(cx), 1);
assert_read!(pin!(framed).poll_next(cx), 2);
assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none());
});
}
// An IO error from the reader is yielded as Some(Err(..)) with its kind intact.
#[test]
fn read_err() {
let mut task = task::spawn(());
let mock = mock! {
Err(io::Error::new(io::ErrorKind::Other, "")),
};
let mut framed = FramedRead::new(mock, U32Decoder);
task.enter(|cx, _| {
assert_eq!(
io::ErrorKind::Other,
assert_ready!(pin!(framed).poll_next(cx))
.unwrap()
.unwrap_err()
.kind()
)
});
}
// An IO error after a partial frame still surfaces immediately; the partial
// bytes do not mask it.
#[test]
fn read_partial_then_err() {
let mut task = task::spawn(());
let mock = mock! {
Ok(b"\x00\x00".to_vec()),
Err(io::Error::new(io::ErrorKind::Other, "")),
};
let mut framed = FramedRead::new(mock, U32Decoder);
task.enter(|cx, _| {
assert_eq!(
io::ErrorKind::Other,
assert_ready!(pin!(framed).poll_next(cx))
.unwrap()
.unwrap_err()
.kind()
)
});
}
// Partial frame -> WouldBlock (Pending) -> real error: the error must surface
// on the next poll rather than being lost behind the buffered partial frame.
#[test]
fn read_partial_would_block_then_err() {
let mut task = task::spawn(());
let mock = mock! {
Ok(b"\x00\x00".to_vec()),
Err(io::Error::new(io::ErrorKind::WouldBlock, "")),
Err(io::Error::new(io::ErrorKind::Other, "")),
};
let mut framed = FramedRead::new(mock, U32Decoder);
task.enter(|cx, _| {
assert!(pin!(framed).poll_next(cx).is_pending());
assert_eq!(
io::ErrorKind::Other,
assert_ready!(pin!(framed).poll_next(cx))
.unwrap()
.unwrap_err()
.kind()
)
});
}
// The read buffer must keep growing until a decoder that needs 32KB in one
// frame can be satisfied (exercises internal buffer growth, not one read).
#[test]
fn huge_size() {
let mut task = task::spawn(());
let data = &[0; 32 * 1024][..];
let mut framed = FramedRead::new(data, BigDecoder);
task.enter(|cx, _| {
assert_read!(pin!(framed).poll_next(cx), 0);
assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none());
});
// Decoder that refuses to emit a frame until 32KB are buffered.
struct BigDecoder;
impl Decoder for BigDecoder {
type Item = u32;
type Error = io::Error;
fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<u32>> {
if buf.len() < 32 * 1024 {
return Ok(None);
}
buf.advance(32 * 1024);
Ok(Some(0))
}
}
}
// Bytes left over at EOF that do not form a complete frame must be reported as
// an error (default `decode_eof` errors on trailing data).
#[test]
fn data_remaining_is_error() {
let mut task = task::spawn(());
// 5 bytes: one complete u32 frame plus 1 trailing byte.
let slice = &[0; 5][..];
let mut framed = FramedRead::new(slice, U32Decoder);
task.enter(|cx, _| {
assert_read!(pin!(framed).poll_next(cx), 0);
assert!(assert_ready!(pin!(framed).poll_next(cx)).unwrap().is_err());
});
}
// After the reader hits EOF, `decode_eof` is called repeatedly and may emit
// multiple frames before finally returning None.
#[test]
fn multi_frames_on_eof() {
let mut task = task::spawn(());
struct MyDecoder(Vec<u32>);
impl Decoder for MyDecoder {
type Item = u32;
type Error = io::Error;
fn decode(&mut self, _buf: &mut BytesMut) -> io::Result<Option<u32>> {
// The mock yields EOF immediately, so regular decode is never reached.
unreachable!();
}
fn decode_eof(&mut self, _buf: &mut BytesMut) -> io::Result<Option<u32>> {
if self.0.is_empty() {
return Ok(None);
}
Ok(Some(self.0.remove(0)))
}
}
let mut framed = FramedRead::new(mock!(), MyDecoder(vec![0, 1, 2, 3]));
task.enter(|cx, _| {
assert_read!(pin!(framed).poll_next(cx), 0);
assert_read!(pin!(framed).poll_next(cx), 1);
assert_read!(pin!(framed).poll_next(cx), 2);
assert_read!(pin!(framed).poll_next(cx), 3);
assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none());
});
}
// A zero-length read signals EOF (None), but the stream must resume decoding
// if later reads produce data again — and can hit EOF multiple times.
#[test]
fn read_eof_then_resume() {
let mut task = task::spawn(());
let mock = mock! {
Ok(b"\x00\x00\x00\x01".to_vec()),
Ok(b"".to_vec()),
Ok(b"\x00\x00\x00\x02".to_vec()),
Ok(b"".to_vec()),
Ok(b"\x00\x00\x00\x03".to_vec()),
};
let mut framed = FramedRead::new(mock, U32Decoder);
task.enter(|cx, _| {
assert_read!(pin!(framed).poll_next(cx), 1);
assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none());
assert_read!(pin!(framed).poll_next(cx), 2);
assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none());
assert_read!(pin!(framed).poll_next(cx), 3);
assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none());
assert!(assert_ready!(pin!(framed).poll_next(cx)).is_none());
});
}
// ===== Mock ======
// Scripted AsyncRead: each poll_read pops the next queued result.
// Ok(data) -> copy into buf; Err(WouldBlock) -> Pending; Err -> error;
// queue empty -> EOF (Ready(Ok) with nothing written).
struct Mock {
calls: VecDeque<io::Result<Vec<u8>>>,
}
impl AsyncRead for Mock {
fn poll_read(
mut self: Pin<&mut Self>,
_cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
use io::ErrorKind::WouldBlock;
match self.calls.pop_front() {
Some(Ok(data)) => {
// Tests are written so each scripted chunk fits in the caller's buffer.
debug_assert!(buf.remaining() >= data.len());
buf.put_slice(&data);
Ready(Ok(()))
}
Some(Err(ref e)) if e.kind() == WouldBlock => Pending,
Some(Err(e)) => Ready(Err(e)),
None => Ready(Ok(())),
}
}
}

View File

@@ -0,0 +1,38 @@
use futures_core::stream::Stream;
use std::{io, pin::Pin};
use tokio_test::{assert_ready, io::Builder, task};
use tokio_util::codec::{BytesCodec, FramedRead};
// Shorthand for `Pin::new(&mut ident)` on a local.
macro_rules! pin {
($id:ident) => {
Pin::new(&mut $id)
};
}
// Asserts a poll expression is Ready(Some(Ok(v))) with v == $n.
macro_rules! assert_read {
($e:expr, $n:expr) => {{
let val = assert_ready!($e);
assert_eq!(val.unwrap().unwrap(), $n);
}};
}
// After yielding an IO error, FramedRead reports None once — but it is not
// fused: data arriving after that can still be decoded.
#[tokio::test]
async fn return_none_after_error() {
let mut io = FramedRead::new(
Builder::new()
.read(b"abcdef")
.read_error(io::Error::new(io::ErrorKind::Other, "Resource errored out"))
.read(b"more data")
.build(),
BytesCodec::new(),
);
let mut task = task::spawn(());
task.enter(|cx, _| {
assert_read!(pin!(io).poll_next(cx), b"abcdef".to_vec());
assert!(assert_ready!(pin!(io).poll_next(cx)).unwrap().is_err());
// One None immediately after the error...
assert!(assert_ready!(pin!(io).poll_next(cx)).is_none());
// ...then the stream resumes with the remaining scripted data.
assert_read!(pin!(io).poll_next(cx), b"more data".to_vec());
})
}

212
vendor/tokio-util/tests/framed_write.rs vendored Normal file
View File

@@ -0,0 +1,212 @@
#![warn(rust_2018_idioms)]
use tokio::io::AsyncWrite;
use tokio_test::{assert_ready, task};
use tokio_util::codec::{Encoder, FramedWrite};
use bytes::{BufMut, BytesMut};
use futures_sink::Sink;
use std::collections::VecDeque;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::Poll::{Pending, Ready};
use std::task::{Context, Poll};
// Builds a `Mock` writer whose queued write expectations are the listed
// expressions (Ok(bytes) -> expected data, Err(WouldBlock) -> block once).
macro_rules! mock {
($($x:expr,)*) => {{
let mut v = VecDeque::new();
v.extend(vec![$($x),*]);
Mock { calls: v }
}};
}
// Shorthand for `Pin::new(&mut ident)` on a local.
macro_rules! pin {
($id:ident) => {
Pin::new(&mut $id)
};
}
// Stateless encoder writing u32 values as 4 big-endian bytes.
struct U32Encoder;
impl Encoder<u32> for U32Encoder {
type Error = io::Error;
fn encode(&mut self, item: u32, dst: &mut BytesMut) -> io::Result<()> {
// Reserve space
dst.reserve(4);
dst.put_u32(item);
Ok(())
}
}
// Stateless encoder writing u64 values as 8 big-endian bytes (for map_encoder).
struct U64Encoder;
impl Encoder<u64> for U64Encoder {
type Error = io::Error;
fn encode(&mut self, item: u64, dst: &mut BytesMut) -> io::Result<()> {
// Reserve space
dst.reserve(8);
dst.put_u64(item);
Ok(())
}
}
// Multiple start_send calls buffer frames; nothing reaches the writer until
// poll_flush, which writes all buffered frames as one packet.
#[test]
fn write_multi_frame_in_packet() {
let mut task = task::spawn(());
let mock = mock! {
Ok(b"\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02".to_vec()),
};
let mut framed = FramedWrite::new(mock, U32Encoder);
task.enter(|cx, _| {
assert!(assert_ready!(pin!(framed).poll_ready(cx)).is_ok());
assert!(pin!(framed).start_send(0).is_ok());
assert!(assert_ready!(pin!(framed).poll_ready(cx)).is_ok());
assert!(pin!(framed).start_send(1).is_ok());
assert!(assert_ready!(pin!(framed).poll_ready(cx)).is_ok());
assert!(pin!(framed).start_send(2).is_ok());
// Nothing written yet
assert_eq!(1, framed.get_ref().calls.len());
// Flush the writes
assert!(assert_ready!(pin!(framed).poll_flush(cx)).is_ok());
assert_eq!(0, framed.get_ref().calls.len());
});
}
// `map_encoder` keeps the already-buffered (u32) bytes, so the u64 frame is
// appended and both flush together as one write.
#[test]
fn write_multi_frame_after_codec_changed() {
let mut task = task::spawn(());
let mock = mock! {
Ok(b"\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08".to_vec()),
};
let mut framed = FramedWrite::new(mock, U32Encoder);
task.enter(|cx, _| {
assert!(assert_ready!(pin!(framed).poll_ready(cx)).is_ok());
assert!(pin!(framed).start_send(0x04).is_ok());
let mut framed = framed.map_encoder(|_| U64Encoder);
assert!(assert_ready!(pin!(framed).poll_ready(cx)).is_ok());
assert!(pin!(framed).start_send(0x08).is_ok());
// Nothing written yet
assert_eq!(1, framed.get_ref().calls.len());
// Flush the writes
assert!(assert_ready!(pin!(framed).poll_flush(cx)).is_ok());
assert_eq!(0, framed.get_ref().calls.len());
});
}
// Once the internal buffer reaches the backpressure boundary, poll_ready must
// force a flush; a WouldBlock from the writer then makes poll_ready Pending.
#[test]
fn write_hits_backpressure() {
const ITER: usize = 2 * 1024;
let mut mock = mock! {
// Block the `ITER*2`th write
Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready")),
Ok(b"".to_vec()),
};
// Script the expected writes: ITER*2 + 1 encoded u32s, grouped into 2KB
// chunks because the mock asserts each write() against one queued chunk.
for i in 0..=ITER * 2 {
let mut b = BytesMut::with_capacity(4);
b.put_u32(i as u32);
// Append to the end
match mock.calls.back_mut().unwrap() {
Ok(ref mut data) => {
// Write in 2kb chunks
if data.len() < ITER {
data.extend_from_slice(&b[..]);
continue;
} // else fall through and create a new buffer
}
_ => unreachable!(),
}
// Push a new chunk
mock.calls.push_back(Ok(b[..].to_vec()));
}
// 1 'wouldblock', 8 * 2KB buffers, and one final 4-byte buffer
assert_eq!(mock.calls.len(), 10);
let mut task = task::spawn(());
let mut framed = FramedWrite::new(mock, U32Encoder);
framed.set_backpressure_boundary(ITER * 8);
task.enter(|cx, _| {
// Send 16KB. This fills up FramedWrite buffer
for i in 0..ITER * 2 {
assert!(assert_ready!(pin!(framed).poll_ready(cx)).is_ok());
assert!(pin!(framed).start_send(i as u32).is_ok());
}
// Now we poll_ready which forces a flush. The mock pops the front message
// and decides to block.
assert!(pin!(framed).poll_ready(cx).is_pending());
// We poll again, forcing another flush, which this time succeeds
// The whole 16KB buffer is flushed
assert!(assert_ready!(pin!(framed).poll_ready(cx)).is_ok());
// Send more data. This matches the final message expected by the mock
assert!(pin!(framed).start_send((ITER * 2) as u32).is_ok());
// Flush the rest of the buffer
assert!(assert_ready!(pin!(framed).poll_flush(cx)).is_ok());
// Ensure the mock is empty
assert_eq!(0, framed.get_ref().calls.len());
})
}
// ===== Mock =====
// Scripted writer: each write() pops the next expectation and asserts the
// written bytes start with the expected chunk, consuming exactly that many.
struct Mock {
calls: VecDeque<io::Result<Vec<u8>>>,
}
impl Write for Mock {
fn write(&mut self, src: &[u8]) -> io::Result<usize> {
match self.calls.pop_front() {
Some(Ok(data)) => {
// The caller must supply at least the expected chunk, and the
// prefix must match byte-for-byte; report a partial write of
// exactly the chunk's length.
assert!(src.len() >= data.len());
assert_eq!(&data[..], &src[..data.len()]);
Ok(data.len())
}
Some(Err(e)) => Err(e),
None => panic!("unexpected write; {src:?}"),
}
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl AsyncWrite for Mock {
    // Delegates to the blocking `Write` impl above, translating a scripted
    // `WouldBlock` error into `Poll::Pending`. No waker is registered; the
    // tests poll manually.
    fn poll_write(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        match Pin::get_mut(self).write(buf) {
            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => Pending,
            other => Ready(other),
        }
    }
    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        match Pin::get_mut(self).flush() {
            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => Pending,
            other => Ready(other),
        }
    }
    // Shutdown is never exercised by these tests.
    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        unimplemented!()
    }
}

268
vendor/tokio-util/tests/future.rs vendored Normal file
View File

@@ -0,0 +1,268 @@
use std::{
future::{pending, ready, Future},
task::{Context, Poll},
};
use futures_test::task::new_count_waker;
use tokio::pin;
use tokio_test::{assert_pending, assert_ready_eq};
use tokio_util::{future::FutureExt, sync::CancellationToken};
// Test future: `Pending` on its first poll, `Ready(())` on every poll after.
#[derive(Default)]
struct ReadyOnTheSecondPollFuture {
    polled: bool,
}
impl Future for ReadyOnTheSecondPollFuture {
    type Output = ();

    // Pending on the first poll, ready on every poll after that.
    fn poll(mut self: std::pin::Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
        if self.polled {
            Poll::Ready(())
        } else {
            self.polled = true;
            Poll::Pending
        }
    }
}
// An already-complete future wins over a live (non-cancelled) token.
#[test]
fn ready_fut_with_cancellation_token_test() {
    let (waker, _) = new_count_waker();
    let token = CancellationToken::new();
    let ready_fut = ready(());
    let ready_with_token_fut = ready_fut.with_cancellation_token(&token);
    pin!(ready_with_token_fut);
    let res = ready_with_token_fut
        .as_mut()
        .poll(&mut Context::from_waker(&waker));
    assert_ready_eq!(res, Some(()));
}
// A pending future with a live token stays pending.
#[test]
fn pending_fut_with_cancellation_token_test() {
    let (waker, _) = new_count_waker();
    let token = CancellationToken::new();
    let pending_fut = pending::<()>();
    let pending_with_token_fut = pending_fut.with_cancellation_token(&token);
    pin!(pending_with_token_fut);
    let res = pending_with_token_fut
        .as_mut()
        .poll(&mut Context::from_waker(&waker));
    assert_pending!(res);
}
// A token cancelled before the first poll yields `None`, even though the
// wrapped future is ready.
#[test]
fn ready_fut_with_already_cancelled_token_test() {
    let (waker, _) = new_count_waker();
    let token = CancellationToken::new();
    token.cancel();
    let ready_fut = ready(());
    let ready_fut_with_token_fut = ready_fut.with_cancellation_token(&token);
    pin!(ready_fut_with_token_fut);
    let res = ready_fut_with_token_fut
        .as_mut()
        .poll(&mut Context::from_waker(&waker));
    assert_ready_eq!(res, None);
}
// Pre-cancelled token resolves a pending future to `None` on the first poll,
// without any wakeup having been needed.
#[test]
fn pending_fut_with_already_cancelled_token_test() {
    let (waker, wake_count) = new_count_waker();
    let token = CancellationToken::new();
    token.cancel();
    let pending_fut = pending::<()>();
    let pending_with_token_fut = pending_fut.with_cancellation_token(&token);
    pin!(pending_with_token_fut);
    let res = pending_with_token_fut
        .as_mut()
        .poll(&mut Context::from_waker(&waker));
    assert_ready_eq!(res, None);
    assert_eq!(wake_count, 0);
}
// Cancelling after the first poll wakes the task exactly once and the next
// poll yields `None`.
#[test]
fn pending_fut_with_token_cancelled_test() {
    let (waker, wake_count) = new_count_waker();
    let token = CancellationToken::new();
    let pending_fut = pending::<()>();
    let pending_with_token_fut = pending_fut.with_cancellation_token(&token);
    pin!(pending_with_token_fut);
    let res = pending_with_token_fut
        .as_mut()
        .poll(&mut Context::from_waker(&waker));
    assert_pending!(res);
    token.cancel();
    let res = pending_with_token_fut
        .as_mut()
        .poll(&mut Context::from_waker(&waker));
    assert_ready_eq!(res, None);
    assert_eq!(wake_count, 1);
}
#[test]
fn pending_only_on_first_poll_with_cancellation_token_test() {
    let (waker, wake_count) = new_count_waker();
    let token = CancellationToken::new();
    let fut = ReadyOnTheSecondPollFuture::default().with_cancellation_token(&token);
    pin!(fut);
    // first poll, ReadyOnTheSecondPollFuture returned Pending
    let res = fut.as_mut().poll(&mut Context::from_waker(&waker));
    assert_pending!(res);
    token.cancel();
    assert_eq!(wake_count, 1);
    // due to the polling fairness (biased behavior) of `WithCancellationToken` Future,
    // subsequent polls are biased toward polling ReadyOnTheSecondPollFuture,
    // which results in always returning Ready.
    let res = fut.as_mut().poll(&mut Context::from_waker(&waker));
    assert_ready_eq!(res, Some(()));
}
// ===== Owned-token variants: same scenarios via `with_cancellation_token_owned`. =====
#[test]
fn ready_fut_with_cancellation_owned_token_test() {
    let (waker, _) = new_count_waker();
    let token = CancellationToken::new();
    let ready_fut = ready(());
    let ready_with_token_fut = ready_fut.with_cancellation_token_owned(token);
    pin!(ready_with_token_fut);
    let res = ready_with_token_fut
        .as_mut()
        .poll(&mut Context::from_waker(&waker));
    assert_ready_eq!(res, Some(()));
}
#[test]
fn pending_fut_with_cancellation_token_owned_test() {
    let (waker, _) = new_count_waker();
    let token = CancellationToken::new();
    let pending_fut = pending::<()>();
    let pending_with_token_fut = pending_fut.with_cancellation_token_owned(token);
    pin!(pending_with_token_fut);
    let res = pending_with_token_fut
        .as_mut()
        .poll(&mut Context::from_waker(&waker));
    assert_pending!(res);
}
#[test]
fn ready_fut_with_already_cancelled_token_owned_test() {
    let (waker, _) = new_count_waker();
    let token = CancellationToken::new();
    token.cancel();
    let ready_fut = ready(());
    let ready_fut_with_token_fut = ready_fut.with_cancellation_token_owned(token);
    pin!(ready_fut_with_token_fut);
    let res = ready_fut_with_token_fut
        .as_mut()
        .poll(&mut Context::from_waker(&waker));
    assert_ready_eq!(res, None);
}
#[test]
fn pending_fut_with_already_cancelled_token_owned_test() {
    let (waker, wake_count) = new_count_waker();
    let token = CancellationToken::new();
    token.cancel();
    let pending_fut = pending::<()>();
    let pending_with_token_fut = pending_fut.with_cancellation_token_owned(token);
    pin!(pending_with_token_fut);
    let res = pending_with_token_fut
        .as_mut()
        .poll(&mut Context::from_waker(&waker));
    assert_ready_eq!(res, None);
    assert_eq!(wake_count, 0);
}
// The clone keeps the owned token's cancellation observable after the
// original is moved into the wrapper.
#[test]
fn pending_fut_with_owned_token_cancelled_test() {
    let (waker, wake_count) = new_count_waker();
    let token = CancellationToken::new();
    let pending_fut = pending::<()>();
    let pending_with_token_fut = pending_fut.with_cancellation_token_owned(token.clone());
    pin!(pending_with_token_fut);
    let res = pending_with_token_fut
        .as_mut()
        .poll(&mut Context::from_waker(&waker));
    assert_pending!(res);
    token.cancel();
    let res = pending_with_token_fut
        .as_mut()
        .poll(&mut Context::from_waker(&waker));
    assert_ready_eq!(res, None);
    assert_eq!(wake_count, 1);
}
#[test]
fn pending_only_on_first_poll_with_cancellation_token_owned_test() {
    let (waker, wake_count) = new_count_waker();
    let token = CancellationToken::new();
    let fut = ReadyOnTheSecondPollFuture::default().with_cancellation_token_owned(token.clone());
    pin!(fut);
    // first poll, ReadyOnTheSecondPollFuture returned Pending
    let res = fut.as_mut().poll(&mut Context::from_waker(&waker));
    assert_pending!(res);
    token.cancel();
    assert_eq!(wake_count, 1);
    // due to the polling fairness (biased behavior) of `WithCancellationToken` Future,
    // subsequent polls are biased toward polling ReadyOnTheSecondPollFuture,
    // which results in always returning Ready.
    let res = fut.as_mut().poll(&mut Context::from_waker(&waker));
    assert_ready_eq!(res, Some(()));
}

194
vendor/tokio-util/tests/io_inspect.rs vendored Normal file
View File

@@ -0,0 +1,194 @@
use std::{
future::poll_fn,
io::IoSlice,
pin::Pin,
task::{Context, Poll},
};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf};
use tokio_util::io::{InspectReader, InspectWriter};
/// An AsyncRead implementation that works byte-by-byte, to catch out callers
/// who don't allow for `buf` being part-filled before the call
struct SmallReader {
    contents: Vec<u8>,
}
impl Unpin for SmallReader {}
impl AsyncRead for SmallReader {
    // Yields one byte per poll, popped from the END of `contents` (so the
    // stream is the vector reversed); an empty vector reads as EOF.
    fn poll_read(
        mut self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        if let Some(byte) = self.contents.pop() {
            buf.put_slice(&[byte])
        }
        Poll::Ready(Ok(()))
    }
}
#[tokio::test]
async fn read_tee() {
    // Reading through an `InspectReader` must feed the inspector exactly
    // the bytes delivered to the caller.
    let source = b"This could be really long, you know".to_vec();
    let reader = SmallReader {
        contents: source.clone(),
    };
    let mut inspected: Vec<u8> = Vec::new();
    let mut output = Vec::new();
    let mut tee = InspectReader::new(reader, |chunk| inspected.extend(chunk));
    tee.read_to_end(&mut output).await.unwrap();
    drop(tee); // release the closure's borrow of `inspected` before asserting
    assert_eq!(output, inspected);
    assert_eq!(inspected.len(), source.len());
}
/// An AsyncWrite implementation that works byte-by-byte for poll_write, and
/// that reads the whole of the first buffer plus one byte from the second in
/// poll_write_vectored.
///
/// This is designed to catch bugs in handling partially written buffers
#[derive(Debug)]
struct SmallWriter {
    contents: Vec<u8>,
}
impl Unpin for SmallWriter {}
impl AsyncWrite for SmallWriter {
    fn poll_write(
        mut self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        // Just write one byte at a time
        if buf.is_empty() {
            return Poll::Ready(Ok(0));
        }
        self.contents.push(buf[0]);
        Poll::Ready(Ok(1))
    }
    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        Poll::Ready(Ok(()))
    }
    fn poll_shutdown(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        Poll::Ready(Ok(()))
    }
    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        // Write all of the first buffer, then one byte from the second buffer
        // This should trip up anything that doesn't correctly handle multiple
        // buffers.
        if bufs.is_empty() {
            return Poll::Ready(Ok(0));
        }
        let mut written_len = bufs[0].len();
        self.contents.extend_from_slice(&bufs[0]);
        if bufs.len() > 1 {
            let buf = bufs[1];
            if !buf.is_empty() {
                written_len += 1;
                self.contents.push(buf[0]);
            }
        }
        Poll::Ready(Ok(written_len))
    }
    // Advertise vectored-write support so wrappers forward
    // `poll_write_vectored` instead of falling back to `poll_write`.
    fn is_write_vectored(&self) -> bool {
        true
    }
}
#[tokio::test]
async fn write_tee() {
    // Writing through an `InspectWriter` must feed the inspector exactly the
    // bytes that reach the underlying writer, even when that writer accepts
    // only one byte per call.
    let mut altout: Vec<u8> = Vec::new();
    let mut writeout = SmallWriter {
        contents: Vec::new(),
    };
    {
        let mut tee = InspectWriter::new(&mut writeout, |bytes| altout.extend(bytes));
        tee.write_all(b"A testing string, very testing")
            .await
            .unwrap();
    }
    assert_eq!(altout, writeout.contents);
}
// This is inefficient, but works well enough for test use.
// If you want something similar for real code, you'll want to avoid all the
// fun of manipulating `bufs` - ideally, by the time you read this,
// IoSlice::advance_slices will be stable, and you can use that.
//
// Repeatedly calls `poll_write_vectored` until every buffer is consumed,
// returning the total number of bytes written.
async fn write_all_vectored<W: AsyncWrite + Unpin>(
    mut writer: W,
    mut bufs: Vec<Vec<u8>>,
) -> Result<usize, std::io::Error> {
    let mut res = 0;
    while !bufs.is_empty() {
        let mut written = poll_fn(|cx| {
            let bufs: Vec<IoSlice> = bufs.iter().map(|v| IoSlice::new(v)).collect();
            Pin::new(&mut writer).poll_write_vectored(cx, &bufs)
        })
        .await?;
        res += written;
        // Consume `written` bytes from the front of `bufs`: drop buffers that
        // were fully written and trim the partially-written one in place.
        while written > 0 {
            let buf_len = bufs[0].len();
            if buf_len <= written {
                bufs.remove(0);
                written -= buf_len;
            } else {
                let buf = &mut bufs[0];
                let drain_len = written.min(buf.len());
                buf.drain(..drain_len);
                written -= drain_len;
            }
        }
    }
    Ok(res)
}
#[tokio::test]
async fn write_tee_vectored() {
    let mut altout: Vec<u8> = Vec::new();
    let mut writeout = SmallWriter {
        contents: Vec::new(),
    };
    // Split on whitespace so the vectored write sees several non-trivial
    // buffers; the separator bytes themselves are dropped by `split`.
    let original = b"A very long string split up";
    let bufs: Vec<Vec<u8>> = original
        .split(|b| b.is_ascii_whitespace())
        .map(Vec::from)
        .collect();
    assert!(bufs.len() > 1);
    let expected: Vec<u8> = {
        let mut out = Vec::new();
        for item in &bufs {
            out.extend_from_slice(item)
        }
        out
    };
    {
        let mut bufcount = 0;
        let tee = InspectWriter::new(&mut writeout, |bytes| {
            bufcount += 1;
            altout.extend(bytes)
        });
        assert!(tee.is_write_vectored());
        write_all_vectored(tee, bufs.clone()).await.unwrap();
        // The inspector must run at least once per input buffer.
        assert!(bufcount >= bufs.len());
    }
    assert_eq!(altout, writeout.contents);
    assert_eq!(writeout.contents, expected);
}

View File

@@ -0,0 +1,65 @@
#![warn(rust_2018_idioms)]
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
use tokio_stream::StreamExt;
/// Produces `remaining` zero bytes, at most 31 per read; once exhausted,
/// every further read fails with raw OS error 22 instead of reporting EOF.
struct Reader {
    remaining: usize,
}
impl AsyncRead for Reader {
    fn poll_read(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        let this = Pin::into_inner(self);
        assert_ne!(buf.remaining(), 0);
        if this.remaining > 0 {
            // Serve min(remaining, caller capacity, 31) zero bytes.
            let n = std::cmp::min(this.remaining, buf.remaining());
            let n = std::cmp::min(n, 31);
            for x in &mut buf.initialize_unfilled_to(n)[..n] {
                *x = 0;
            }
            buf.advance(n);
            this.remaining -= n;
            Poll::Ready(Ok(()))
        } else {
            // Exhausted: fail so the stream's error path gets exercised.
            Poll::Ready(Err(std::io::Error::from_raw_os_error(22)))
        }
    }
}
#[tokio::test]
async fn correct_behavior_on_errors() {
    // The stream must yield all 8000 zero bytes, then exactly one error,
    // then terminate (`None`) rather than repeating the error forever.
    let reader = Reader { remaining: 8000 };
    let mut stream = tokio_util::io::ReaderStream::new(reader);
    let mut zeros_received = 0;
    let mut had_error = false;
    loop {
        let item = stream.next().await.unwrap();
        println!("{item:?}");
        match item {
            Ok(bytes) => {
                let bytes = &*bytes;
                for byte in bytes {
                    assert_eq!(*byte, 0);
                    zeros_received += 1;
                }
            }
            Err(_) => {
                // Only one error item is allowed.
                assert!(!had_error);
                had_error = true;
                break;
            }
        }
    }
    assert!(had_error);
    assert_eq!(zeros_received, 8000);
    assert!(stream.next().await.is_none());
}

356
vendor/tokio-util/tests/io_simplex.rs vendored Normal file
View File

@@ -0,0 +1,356 @@
use futures::pin_mut;
use futures_test::task::noop_context;
use std::io::IoSlice;
use std::task::Poll;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf};
use tokio_test::task::spawn;
use tokio_test::{assert_pending, assert_ready};
use tokio_util::io::simplex;
/// Sanity check for single-threaded operation.
#[tokio::test]
async fn single_thread() {
    const N: usize = 64;
    const MSG: &[u8] = b"Hello, world!";
    const CAPS: &[usize] = &[1, MSG.len() / 2, MSG.len() - 1, MSG.len(), MSG.len() + 1];
    // test different buffer capacities to cover edge cases
    for &capacity in CAPS {
        let (mut tx, mut rx) = simplex::new(capacity);
        for _ in 0..N {
            let mut read = 0;
            let mut write = 0;
            let mut buf = [0; MSG.len()];
            // Interleave partial writes and reads so a capacity smaller than
            // the message cannot deadlock the single task.
            while read < MSG.len() || write < MSG.len() {
                if write < MSG.len() {
                    let n = tx.write(&MSG[write..]).await.unwrap();
                    write += n;
                }
                if read < MSG.len() {
                    let n = rx.read(&mut buf[read..]).await.unwrap();
                    read += n;
                }
            }
            assert_eq!(&buf[..], MSG);
        }
    }
}
/// Sanity check for multi-threaded operation.
#[test]
#[cfg(not(target_os = "wasi"))] // No thread on wasi.
fn multi_thread() {
    use futures::executor::block_on;
    use std::thread;
    const N: usize = 64;
    const MSG: &[u8] = b"Hello, world!";
    const CAPS: &[usize] = &[1, MSG.len() / 2, MSG.len() - 1, MSG.len(), MSG.len() + 1];
    // test different buffer capacities to cover edge cases
    for &capacity in CAPS {
        let (mut tx, mut rx) = simplex::new(capacity);
        // Reader thread: expects N copies of MSG, back to back.
        let jh0 = thread::spawn(move || {
            block_on(async {
                let mut buf = vec![0; MSG.len()];
                for _ in 0..N {
                    rx.read_exact(&mut buf).await.unwrap();
                    assert_eq!(&buf[..], MSG);
                    buf.clear();
                    buf.resize(MSG.len(), 0);
                }
            });
        });
        // Writer thread: sends N copies of MSG.
        let jh1 = thread::spawn(move || {
            block_on(async {
                for _ in 0..N {
                    tx.write_all(MSG).await.unwrap();
                }
            });
        });
        jh0.join().unwrap();
        jh1.join().unwrap();
    }
}
// Constructing a zero-capacity simplex is a programming error and panics.
#[test]
#[should_panic(expected = "capacity must be greater than zero")]
fn zero_capacity() {
    let _ = simplex::new(0);
}
/// The `Receiver::poll_read` should return `Poll::Ready(Ok(()))`
/// if the `ReadBuf` has zero remaining capacity.
#[tokio::test]
async fn read_buf_is_full() {
    let (_tx, rx) = simplex::new(32);
    let mut buf = ReadBuf::new(&mut []);
    tokio::pin!(rx);
    assert_ready!(rx.as_mut().poll_read(&mut noop_context(), &mut buf)).unwrap();
    assert_eq!(buf.filled().len(), 0);
}
/// The `Sender::poll_write` should return `Poll::Ready(Ok(0))`
/// if the input buffer has zero length.
#[tokio::test]
async fn write_buf_is_empty() {
    let (tx, _rx) = simplex::new(32);
    tokio::pin!(tx);
    let n = assert_ready!(tx.as_mut().poll_write(&mut noop_context(), &[])).unwrap();
    assert_eq!(n, 0);
}
/// The `Sender` should return an error if the `Receiver` has been dropped.
#[tokio::test]
async fn drop_receiver_0() {
    let (mut tx, rx) = simplex::new(32);
    drop(rx);
    tx.write_u8(1).await.unwrap_err();
}
/// The `Sender` should be woken up if the `Receiver` has been dropped.
#[tokio::test]
async fn drop_receiver_1() {
    // Capacity 1 so a 2-byte write parks waiting for buffer space.
    let (mut tx, rx) = simplex::new(1);
    let mut write_task = spawn(tx.write_u16(1));
    assert_pending!(write_task.poll());
    assert!(!write_task.is_woken());
    drop(rx);
    assert!(write_task.is_woken());
}
/// The `Receiver` should return error if:
///
/// - The `Sender` has been dropped.
/// - AND there is no remaining data in the buffer.
#[tokio::test]
async fn drop_sender_0() {
    const MSG: &[u8] = b"Hello, world!";
    let (tx, mut rx) = simplex::new(32);
    drop(tx);
    let mut buf = vec![0; MSG.len()];
    rx.read_exact(&mut buf).await.unwrap_err();
}
/// The `Receiver` should be woken up if:
///
/// - The `Sender` has been dropped.
/// - AND there is still remaining data in the buffer.
#[tokio::test]
async fn drop_sender_1() {
    let (mut tx, mut rx) = simplex::new(2);
    let mut buf = vec![];
    let mut read_task = spawn(rx.read_to_end(&mut buf));
    assert_pending!(read_task.poll());
    tx.write_u8(1).await.unwrap();
    // `read_to_end` stays pending until EOF, which dropping `tx` provides.
    assert_pending!(read_task.poll());
    assert!(!read_task.is_woken());
    drop(tx);
    assert!(read_task.is_woken());
    read_task.await.unwrap();
    assert_eq!(buf, vec![1]);
}
/// All following calls to `Sender::poll_write` and `Sender::poll_flush`
/// should return error after `shutdown` has been called.
#[tokio::test]
async fn shutdown_sender_0() {
    const MSG: &[u8] = b"Hello, world!";
    let (mut tx, _rx) = simplex::new(32);
    tx.shutdown().await.unwrap();
    tx.write_all(MSG).await.unwrap_err();
    tx.flush().await.unwrap_err();
}
/// The `Sender::poll_shutdown` should be called multiple times
/// without error.
#[tokio::test]
async fn shutdown_sender_1() {
    let (mut tx, _rx) = simplex::new(32);
    tx.shutdown().await.unwrap();
    tx.shutdown().await.unwrap();
}
/// The `Sender::poll_shutdown` should wake up the `Receiver`
#[tokio::test]
async fn shutdown_sender_2() {
    let (mut tx, mut rx) = simplex::new(32);
    let mut buf = vec![];
    let mut read_task = spawn(rx.read_to_end(&mut buf));
    assert_pending!(read_task.poll());
    tx.write_u8(1).await.unwrap();
    // `read_to_end` waits for EOF; shutdown must provide it and wake the reader.
    assert_pending!(read_task.poll());
    assert!(!read_task.is_woken());
    tx.shutdown().await.unwrap();
    assert!(read_task.is_woken());
    read_task.await.unwrap();
    assert_eq!(buf, vec![1]);
}
/// Both `Sender` and `Receiver` should yield periodically
/// in a tight-loop.
#[tokio::test]
#[cfg(feature = "rt")]
async fn cooperative_scheduling() {
    // this magic number is copied from
    // https://github.com/tokio-rs/tokio/blob/925c614c89d0a26777a334612e2ed6ad0e7935c3/tokio/src/task/coop/mod.rs#L116
    const INITIAL_BUDGET: usize = 128;
    // Capacity is twice the budget, so the only reason a write can return
    // Pending within the first BUDGET+1 polls is budget exhaustion.
    let (tx, _rx) = simplex::new(INITIAL_BUDGET * 2);
    pin_mut!(tx);
    let mut is_pending = false;
    for _ in 0..INITIAL_BUDGET + 1 {
        match tx.as_mut().poll_write(&mut noop_context(), &[0u8; 1]) {
            Poll::Pending => {
                is_pending = true;
                break;
            }
            Poll::Ready(Ok(1)) => {}
            Poll::Ready(Ok(n)) => panic!("wrote too many bytes: {n}"),
            Poll::Ready(Err(e)) => panic!("{e}"),
        }
    }
    assert!(is_pending);
    // Same check for the vectored write path.
    let (tx, _rx) = simplex::new(INITIAL_BUDGET * 2);
    pin_mut!(tx);
    let mut is_pending = false;
    let io_slices = &[IoSlice::new(&[0u8; 1])];
    for _ in 0..INITIAL_BUDGET + 1 {
        match tx
            .as_mut()
            .poll_write_vectored(&mut noop_context(), io_slices)
        {
            Poll::Pending => {
                is_pending = true;
                break;
            }
            Poll::Ready(Ok(1)) => {}
            Poll::Ready(Ok(n)) => panic!("wrote too many bytes: {n}"),
            Poll::Ready(Err(e)) => panic!("{e}"),
        }
    }
    assert!(is_pending);
    // And for the read path: pre-fill more bytes than the budget allows
    // reading in one scheduling slice.
    let (mut tx, rx) = simplex::new(INITIAL_BUDGET * 2);
    tx.write_all(&[0u8; INITIAL_BUDGET + 2]).await.unwrap();
    pin_mut!(rx);
    let mut is_pending = false;
    for _ in 0..INITIAL_BUDGET + 1 {
        let mut buf = [0u8; 1];
        let mut buf = ReadBuf::new(&mut buf);
        match rx.as_mut().poll_read(&mut noop_context(), &mut buf) {
            Poll::Pending => {
                is_pending = true;
                break;
            }
            Poll::Ready(Ok(())) => assert_eq!(buf.filled().len(), 1),
            Poll::Ready(Err(e)) => panic!("{e}"),
        }
    }
    assert!(is_pending);
}
/// The capacity is exactly same as the total length of the vectored buffers.
#[tokio::test]
async fn poll_write_vectored_0() {
    const MSG1: &[u8] = b"1";
    const MSG2: &[u8] = b"22";
    const MSG3: &[u8] = b"333";
    const MSG_LEN: usize = MSG1.len() + MSG2.len() + MSG3.len();
    let io_slices = &[IoSlice::new(MSG1), IoSlice::new(MSG2), IoSlice::new(MSG3)];
    let (tx, mut rx) = simplex::new(MSG_LEN);
    tokio::pin!(tx);
    let res = tx.poll_write_vectored(&mut noop_context(), io_slices);
    let n = assert_ready!(res).unwrap();
    assert_eq!(n, MSG_LEN);
    let mut buf = [0; MSG_LEN];
    let n = rx.read_exact(&mut buf).await.unwrap();
    assert_eq!(n, MSG_LEN);
    assert_eq!(&buf, b"122333");
}
/// The capacity is smaller than the total length of the vectored buffers.
#[tokio::test]
async fn poll_write_vectored_1() {
    const MSG1: &[u8] = b"1";
    const MSG2: &[u8] = b"22";
    const MSG3: &[u8] = b"333";
    // Room for MSG1, MSG2, and just one byte of MSG3.
    const CAPACITY: usize = MSG1.len() + MSG2.len() + 1;
    let io_slices = &[IoSlice::new(MSG1), IoSlice::new(MSG2), IoSlice::new(MSG3)];
    let (tx, mut rx) = simplex::new(CAPACITY);
    tokio::pin!(tx);
    // ==== The poll_write_vectored should write MSG1 and MSG2 fully, and MSG3 partially. ====
    let res = tx.poll_write_vectored(&mut noop_context(), io_slices);
    let n = assert_ready!(res).unwrap();
    assert_eq!(n, CAPACITY);
    let mut buf = [0; CAPACITY];
    let n = rx.read_exact(&mut buf).await.unwrap();
    assert_eq!(n, CAPACITY);
    assert_eq!(&buf, b"1223");
}
/// There are two empty buffers in the vectored buffers.
#[tokio::test]
async fn poll_write_vectored_2() {
    const MSG1: &[u8] = b"1";
    const MSG2: &[u8] = b"";
    const MSG3: &[u8] = b"22";
    const MSG4: &[u8] = b"";
    const MSG5: &[u8] = b"333";
    const MSG_LEN: usize = MSG1.len() + MSG2.len() + MSG3.len() + MSG4.len() + MSG5.len();
    let io_slices = &[
        IoSlice::new(MSG1),
        IoSlice::new(MSG2),
        IoSlice::new(MSG3),
        IoSlice::new(MSG4),
        IoSlice::new(MSG5),
    ];
    let (tx, mut rx) = simplex::new(MSG_LEN);
    tokio::pin!(tx);
    let res = tx.poll_write_vectored(&mut noop_context(), io_slices);
    let n = assert_ready!(res).unwrap();
    assert_eq!(n, MSG_LEN);
    let mut buf = [0; MSG_LEN];
    let n = rx.read_exact(&mut buf).await.unwrap();
    assert_eq!(n, MSG_LEN);
    assert_eq!(&buf, b"122333");
}
/// The `Sender::poll_write_vectored` should return `Poll::Ready(Ok(0))`
/// if all the input buffers have zero length.
#[tokio::test]
async fn poll_write_vectored_3() {
    let io_slices = &[IoSlice::new(&[]), IoSlice::new(&[]), IoSlice::new(&[])];
    let (tx, _rx) = simplex::new(32);
    tokio::pin!(tx);
    let n = assert_ready!(tx.poll_write_vectored(&mut noop_context(), io_slices)).unwrap();
    assert_eq!(n, 0);
}

View File

@@ -0,0 +1,72 @@
#![warn(rust_2018_idioms)]
use bytes::Bytes;
use futures_util::SinkExt;
use std::io::{self, Error, ErrorKind};
use tokio::io::AsyncWriteExt;
use tokio_util::codec::{Encoder, FramedWrite};
use tokio_util::io::{CopyToBytes, SinkWriter};
use tokio_util::sync::PollSender;
#[tokio::test]
async fn test_copied_sink_writer() -> Result<(), Error> {
    // Construct a channel pair to send data across and wrap a pollable sink.
    // Note that the sink must mimic a writable object, e.g. have `std::io::Error`
    // as its error type.
    // As `PollSender` requires an owned copy of the buffer, we wrap it additionally
    // with a `CopyToBytes` helper.
    let (tx, mut rx) = tokio::sync::mpsc::channel::<Bytes>(1);
    let mut writer = SinkWriter::new(CopyToBytes::new(
        PollSender::new(tx).sink_map_err(|_| io::Error::from(ErrorKind::BrokenPipe)),
    ));
    // Write data to our interface...
    // Previously the result was discarded (`let _ =`), so a failed or short
    // write could not fail the test. Propagate errors and pin the count.
    let data: [u8; 4] = [1, 2, 3, 4];
    let n = writer.write(&data).await?;
    assert_eq!(n, data.len());
    // ... and receive it.
    assert_eq!(data.to_vec(), rx.recv().await.unwrap().to_vec());
    Ok(())
}
/// A trivial encoder.
struct SliceEncoder;
impl SliceEncoder {
    fn new() -> Self {
        Self {}
    }
}
impl<'a> Encoder<&'a [u8]> for SliceEncoder {
    type Error = Error;
    fn encode(&mut self, item: &'a [u8], dst: &mut bytes::BytesMut) -> Result<(), Self::Error> {
        // This is where we'd write packet headers, lengths, etc. in a real encoder.
        // For simplicity and demonstration purposes, we just pack a copy of
        // the slice at the end of a buffer.
        dst.extend_from_slice(item);
        Ok(())
    }
}
#[tokio::test]
async fn test_direct_sink_writer() -> Result<(), Error> {
    // We define a framed writer which accepts byte slices
    // and 'reverse' this construction immediately.
    let framed_byte_lc = FramedWrite::new(Vec::new(), SliceEncoder::new());
    let mut writer = SinkWriter::new(framed_byte_lc);
    // Write multiple slices to the sink...
    // Previously the results were discarded (`let _ =`), so a failed or
    // short write could not fail the test. Assert both count and success.
    assert_eq!(writer.write(&[1, 2, 3]).await?, 3);
    assert_eq!(writer.write(&[4, 5, 6]).await?, 3);
    // ... and compare it with the buffer.
    assert_eq!(
        writer.into_inner().write_buffer().to_vec().as_slice(),
        &[1, 2, 3, 4, 5, 6]
    );
    Ok(())
}

View File

@@ -0,0 +1,35 @@
#![warn(rust_2018_idioms)]
use bytes::Bytes;
use tokio::io::AsyncReadExt;
use tokio_stream::iter;
use tokio_util::io::StreamReader;
#[tokio::test]
async fn test_stream_reader() -> std::io::Result<()> {
    // Interleave empty chunks to check that `StreamReader` skips them rather
    // than treating them as EOF; each plain `read` returns at most the bytes
    // left over from one chunk.
    let stream = iter(vec![
        std::io::Result::Ok(Bytes::from_static(&[])),
        Ok(Bytes::from_static(&[0, 1, 2, 3])),
        Ok(Bytes::from_static(&[])),
        Ok(Bytes::from_static(&[4, 5, 6, 7])),
        Ok(Bytes::from_static(&[])),
        Ok(Bytes::from_static(&[8, 9, 10, 11])),
        Ok(Bytes::from_static(&[])),
    ]);
    let mut read = StreamReader::new(stream);
    let mut buf = [0; 5];
    // `read_exact` crosses a chunk boundary: 4 bytes + 1 from the next chunk.
    read.read_exact(&mut buf).await?;
    assert_eq!(buf, [0, 1, 2, 3, 4]);
    assert_eq!(read.read(&mut buf).await?, 3);
    assert_eq!(&buf[..3], [5, 6, 7]);
    assert_eq!(read.read(&mut buf).await?, 4);
    assert_eq!(&buf[..4], [8, 9, 10, 11]);
    assert_eq!(read.read(&mut buf).await?, 0);
    Ok(())
}

View File

@@ -0,0 +1,74 @@
#![cfg(feature = "io-util")]
#![cfg(not(target_os = "wasi"))] // Wasi doesn't support threads
use std::error::Error;
use std::io::{Cursor, Read, Result as IoResult, Write};
use tokio::io::{AsyncRead, AsyncReadExt};
use tokio_util::io::SyncIoBridge;
// Reads `r` to completion through `SyncIoBridge` on a blocking thread and
// asserts the total byte count.
async fn test_reader_len(
    r: impl AsyncRead + Unpin + Send + 'static,
    expected_len: usize,
) -> IoResult<()> {
    let mut r = SyncIoBridge::new(r);
    let res = tokio::task::spawn_blocking(move || {
        let mut buf = Vec::new();
        r.read_to_end(&mut buf)?;
        Ok::<_, std::io::Error>(buf)
    })
    .await?;
    assert_eq!(res?.len(), expected_len);
    Ok(())
}
// Bridged sync reads see the async reader's full contents (empty and not).
#[tokio::test]
async fn test_async_read_to_sync() -> Result<(), Box<dyn Error>> {
    test_reader_len(tokio::io::empty(), 0).await?;
    let buf = b"hello world";
    test_reader_len(Cursor::new(buf), buf.len()).await?;
    Ok(())
}
// `std::io::copy` into a bridged writer must deliver every byte.
#[tokio::test]
async fn test_async_write_to_sync() -> Result<(), Box<dyn Error>> {
    let mut dest = Vec::new();
    let src = b"hello world";
    let dest = tokio::task::spawn_blocking(move || -> Result<_, String> {
        let mut w = SyncIoBridge::new(Cursor::new(&mut dest));
        std::io::copy(&mut Cursor::new(src), &mut w).map_err(|e| e.to_string())?;
        Ok(dest)
    })
    .await??;
    assert_eq!(dest.as_slice(), src);
    Ok(())
}
// `into_inner` hands back the wrapped async reader, still usable.
#[tokio::test]
async fn test_into_inner() -> Result<(), Box<dyn Error>> {
    let mut buf = Vec::new();
    SyncIoBridge::new(tokio::io::empty())
        .into_inner()
        .read_to_end(&mut buf)
        .await
        .unwrap();
    assert_eq!(buf.len(), 0);
    Ok(())
}
// `shutdown` closes the write half: later writes fail and the peer sees EOF
// after the bytes written before shutdown.
#[tokio::test]
async fn test_shutdown() -> Result<(), Box<dyn Error>> {
    let (s1, mut s2) = tokio::io::duplex(1024);
    let (_rh, wh) = tokio::io::split(s1);
    tokio::task::spawn_blocking(move || -> std::io::Result<_> {
        let mut wh = SyncIoBridge::new(wh);
        wh.write_all(b"hello")?;
        wh.shutdown()?;
        assert!(wh.write_all(b" world").is_err());
        Ok(())
    })
    .await??;
    let mut buf = vec![];
    s2.read_to_end(&mut buf).await?;
    assert_eq!(buf, b"hello");
    Ok(())
}

View File

@@ -0,0 +1,836 @@
#![warn(rust_2018_idioms)]
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio_test::task;
use tokio_test::{
assert_err, assert_ok, assert_pending, assert_ready, assert_ready_err, assert_ready_ok,
};
use tokio_util::codec::*;
use bytes::{BufMut, Bytes, BytesMut};
use futures::{pin_mut, Sink, Stream};
use std::collections::VecDeque;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
// Builds a `Mock` whose scripted calls are the given expressions, in order.
macro_rules! mock {
    ($($x:expr,)*) => {{
        let mut v = VecDeque::new();
        v.extend(vec![$($x),*]);
        Mock { calls: v }
    }};
}
// Polls the stream once; asserts it yields a frame equal to `$expect`.
macro_rules! assert_next_eq {
    ($io:ident, $expect:expr) => {{
        task::spawn(()).enter(|cx, _| {
            let res = assert_ready!($io.as_mut().poll_next(cx));
            match res {
                Some(Ok(v)) => assert_eq!(v, $expect.as_ref()),
                Some(Err(e)) => panic!("error = {:?}", e),
                None => panic!("none"),
            }
        });
    }};
}
// Polls the stream once; asserts it is not ready yet.
macro_rules! assert_next_pending {
    ($io:ident) => {{
        task::spawn(()).enter(|cx, _| match $io.as_mut().poll_next(cx) {
            Poll::Ready(Some(Ok(v))) => panic!("value = {:?}", v),
            Poll::Ready(Some(Err(e))) => panic!("error = {:?}", e),
            Poll::Ready(None) => panic!("done"),
            Poll::Pending => {}
        });
    }};
}
// Polls the stream once; asserts it yields an error item.
macro_rules! assert_next_err {
    ($io:ident) => {{
        task::spawn(()).enter(|cx, _| match $io.as_mut().poll_next(cx) {
            Poll::Ready(Some(Ok(v))) => panic!("value = {:?}", v),
            Poll::Ready(Some(Err(_))) => {}
            Poll::Ready(None) => panic!("done"),
            Poll::Pending => panic!("pending"),
        });
    }};
}
// Polls the stream once; asserts it has terminated (`None`).
macro_rules! assert_done {
    ($io:ident) => {{
        task::spawn(()).enter(|cx, _| {
            let res = assert_ready!($io.as_mut().poll_next(cx));
            match res {
                Some(Ok(v)) => panic!("value = {:?}", v),
                Some(Err(e)) => panic!("error = {:?}", e),
                None => {}
            }
        });
    }};
}
#[test]
fn read_empty_io_yields_nothing() {
    // An empty transport terminates the stream immediately.
    // No `Box::pin` needed: `pin_mut!` pins on the stack, matching every
    // other test in this file (the previous `Box::pin` was a redundant
    // heap allocation immediately re-pinned).
    let io = FramedRead::new(mock!(), LengthDelimitedCodec::new());
    pin_mut!(io);
    assert_done!(io);
}
// Frames are a length prefix (default: big-endian u32) followed by payload.
#[test]
fn read_single_frame_one_packet() {
    let io = FramedRead::new(
        mock! {
            data(b"\x00\x00\x00\x09abcdefghi"),
        },
        LengthDelimitedCodec::new(),
    );
    pin_mut!(io);
    assert_next_eq!(io, b"abcdefghi");
    assert_done!(io);
}
#[test]
fn read_single_frame_one_packet_little_endian() {
    let io = length_delimited::Builder::new()
        .little_endian()
        .new_read(mock! {
            data(b"\x09\x00\x00\x00abcdefghi"),
        });
    pin_mut!(io);
    assert_next_eq!(io, b"abcdefghi");
    assert_done!(io);
}
#[test]
fn read_single_frame_one_packet_native_endian() {
    // Pick the wire encoding that matches the host's endianness.
    let d = if cfg!(target_endian = "big") {
        b"\x00\x00\x00\x09abcdefghi"
    } else {
        b"\x09\x00\x00\x00abcdefghi"
    };
    let io = length_delimited::Builder::new()
        .native_endian()
        .new_read(mock! {
            data(d),
        });
    pin_mut!(io);
    assert_next_eq!(io, b"abcdefghi");
    assert_done!(io);
}
// Several complete frames delivered in a single read.
#[test]
fn read_single_multi_frame_one_packet() {
    let mut d: Vec<u8> = vec![];
    d.extend_from_slice(b"\x00\x00\x00\x09abcdefghi");
    d.extend_from_slice(b"\x00\x00\x00\x03123");
    d.extend_from_slice(b"\x00\x00\x00\x0bhello world");
    let io = FramedRead::new(
        mock! {
            data(&d),
        },
        LengthDelimitedCodec::new(),
    );
    pin_mut!(io);
    assert_next_eq!(io, b"abcdefghi");
    assert_next_eq!(io, b"123");
    assert_next_eq!(io, b"hello world");
    assert_done!(io);
}
// One frame split across reads, including a split inside the length header.
#[test]
fn read_single_frame_multi_packet() {
    let io = FramedRead::new(
        mock! {
            data(b"\x00\x00"),
            data(b"\x00\x09abc"),
            data(b"defghi"),
        },
        LengthDelimitedCodec::new(),
    );
    pin_mut!(io);
    assert_next_eq!(io, b"abcdefghi");
    assert_done!(io);
}
// Multiple frames whose boundaries do not line up with read boundaries.
#[test]
fn read_multi_frame_multi_packet() {
    let io = FramedRead::new(
        mock! {
            data(b"\x00\x00"),
            data(b"\x00\x09abc"),
            data(b"defghi"),
            data(b"\x00\x00\x00\x0312"),
            data(b"3\x00\x00\x00\x0bhello world"),
        },
        LengthDelimitedCodec::new(),
    );
    pin_mut!(io);
    assert_next_eq!(io, b"abcdefghi");
    assert_next_eq!(io, b"123");
    assert_next_eq!(io, b"hello world");
    assert_done!(io);
}
// Pending reads in the middle of a frame must surface as stream Pending,
// and decoding resumes where it left off.
#[test]
fn read_single_frame_multi_packet_wait() {
    let io = FramedRead::new(
        mock! {
            data(b"\x00\x00"),
            Poll::Pending,
            data(b"\x00\x09abc"),
            Poll::Pending,
            data(b"defghi"),
            Poll::Pending,
        },
        LengthDelimitedCodec::new(),
    );
    pin_mut!(io);
    assert_next_pending!(io);
    assert_next_pending!(io);
    assert_next_eq!(io, b"abcdefghi");
    assert_next_pending!(io);
    assert_done!(io);
}
#[test]
fn read_multi_frame_multi_packet_wait() {
    let io = FramedRead::new(
        mock! {
            data(b"\x00\x00"),
            Poll::Pending,
            data(b"\x00\x09abc"),
            Poll::Pending,
            data(b"defghi"),
            Poll::Pending,
            data(b"\x00\x00\x00\x0312"),
            Poll::Pending,
            data(b"3\x00\x00\x00\x0bhello world"),
            Poll::Pending,
        },
        LengthDelimitedCodec::new(),
    );
    pin_mut!(io);
    assert_next_pending!(io);
    assert_next_pending!(io);
    assert_next_eq!(io, b"abcdefghi");
    assert_next_pending!(io);
    assert_next_pending!(io);
    assert_next_eq!(io, b"123");
    assert_next_eq!(io, b"hello world");
    assert_next_pending!(io);
    assert_done!(io);
}
// EOF in the middle of a length header is an error, not a clean end.
#[test]
fn read_incomplete_head() {
    let io = FramedRead::new(
        mock! {
            data(b"\x00\x00"),
        },
        LengthDelimitedCodec::new(),
    );
    pin_mut!(io);
    assert_next_err!(io);
}
#[test]
fn read_incomplete_head_multi() {
    let io = FramedRead::new(
        mock! {
            Poll::Pending,
            data(b"\x00"),
            Poll::Pending,
        },
        LengthDelimitedCodec::new(),
    );
    pin_mut!(io);
    assert_next_pending!(io);
    assert_next_pending!(io);
    assert_next_err!(io);
}
// EOF in the middle of a frame payload is also an error.
#[test]
fn read_incomplete_payload() {
    let io = FramedRead::new(
        mock! {
            data(b"\x00\x00\x00\x09ab"),
            Poll::Pending,
            data(b"cd"),
            Poll::Pending,
        },
        LengthDelimitedCodec::new(),
    );
    pin_mut!(io);
    assert_next_pending!(io);
    assert_next_pending!(io);
    assert_next_err!(io);
}
#[test]
fn read_max_frame_len() {
let io = length_delimited::Builder::new()
.max_frame_length(5)
.new_read(mock! {
data(b"\x00\x00\x00\x09abcdefghi"),
});
pin_mut!(io);
assert_next_err!(io);
}
#[test]
fn read_update_max_frame_len_at_rest() {
let io = length_delimited::Builder::new().new_read(mock! {
data(b"\x00\x00\x00\x09abcdefghi"),
data(b"\x00\x00\x00\x09abcdefghi"),
});
pin_mut!(io);
assert_next_eq!(io, b"abcdefghi");
io.decoder_mut().set_max_frame_length(5);
assert_next_err!(io);
}
#[test]
fn read_update_max_frame_len_in_flight() {
let io = length_delimited::Builder::new().new_read(mock! {
data(b"\x00\x00\x00\x09abcd"),
Poll::Pending,
data(b"efghi"),
data(b"\x00\x00\x00\x09abcdefghi"),
});
pin_mut!(io);
assert_next_pending!(io);
io.decoder_mut().set_max_frame_length(5);
assert_next_eq!(io, b"abcdefghi");
assert_next_err!(io);
}
#[test]
fn read_one_byte_length_field() {
let io = length_delimited::Builder::new()
.length_field_length(1)
.new_read(mock! {
data(b"\x09abcdefghi"),
});
pin_mut!(io);
assert_next_eq!(io, b"abcdefghi");
assert_done!(io);
}
#[test]
fn read_header_offset() {
let io = length_delimited::Builder::new()
.length_field_length(2)
.length_field_offset(4)
.new_read(mock! {
data(b"zzzz\x00\x09abcdefghi"),
});
pin_mut!(io);
assert_next_eq!(io, b"abcdefghi");
assert_done!(io);
}
#[test]
fn read_single_multi_frame_one_packet_skip_none_adjusted() {
let mut d: Vec<u8> = vec![];
d.extend_from_slice(b"xx\x00\x09abcdefghi");
d.extend_from_slice(b"yy\x00\x03123");
d.extend_from_slice(b"zz\x00\x0bhello world");
let io = length_delimited::Builder::new()
.length_field_length(2)
.length_field_offset(2)
.num_skip(0)
.length_adjustment(4)
.new_read(mock! {
data(&d),
});
pin_mut!(io);
assert_next_eq!(io, b"xx\x00\x09abcdefghi");
assert_next_eq!(io, b"yy\x00\x03123");
assert_next_eq!(io, b"zz\x00\x0bhello world");
assert_done!(io);
}
#[test]
fn read_single_frame_length_adjusted() {
let mut d: Vec<u8> = vec![];
d.extend_from_slice(b"\x00\x00\x0b\x0cHello world");
let io = length_delimited::Builder::new()
.length_field_offset(0)
.length_field_length(3)
.length_adjustment(0)
.num_skip(4)
.new_read(mock! {
data(&d),
});
pin_mut!(io);
assert_next_eq!(io, b"Hello world");
assert_done!(io);
}
#[test]
fn read_single_multi_frame_one_packet_length_includes_head() {
let mut d: Vec<u8> = vec![];
d.extend_from_slice(b"\x00\x0babcdefghi");
d.extend_from_slice(b"\x00\x05123");
d.extend_from_slice(b"\x00\x0dhello world");
let io = length_delimited::Builder::new()
.length_field_length(2)
.length_adjustment(-2)
.new_read(mock! {
data(&d),
});
pin_mut!(io);
assert_next_eq!(io, b"abcdefghi");
assert_next_eq!(io, b"123");
assert_next_eq!(io, b"hello world");
assert_done!(io);
}
#[test]
fn write_single_frame_length_adjusted() {
let io = length_delimited::Builder::new()
.length_adjustment(-2)
.new_write(mock! {
data(b"\x00\x00\x00\x0b"),
data(b"abcdefghi"),
flush(),
});
pin_mut!(io);
task::spawn(()).enter(|cx, _| {
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("abcdefghi")));
assert_ready_ok!(io.as_mut().poll_flush(cx));
assert!(io.get_ref().calls.is_empty());
});
}
#[test]
fn write_nothing_yields_nothing() {
let io = FramedWrite::new(mock!(), LengthDelimitedCodec::new());
pin_mut!(io);
task::spawn(()).enter(|cx, _| {
assert_ready_ok!(io.poll_flush(cx));
});
}
#[test]
fn write_single_frame_one_packet() {
let io = FramedWrite::new(
mock! {
data(b"\x00\x00\x00\x09"),
data(b"abcdefghi"),
flush(),
},
LengthDelimitedCodec::new(),
);
pin_mut!(io);
task::spawn(()).enter(|cx, _| {
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("abcdefghi")));
assert_ready_ok!(io.as_mut().poll_flush(cx));
assert!(io.get_ref().calls.is_empty());
});
}
#[test]
fn write_single_multi_frame_one_packet() {
let io = FramedWrite::new(
mock! {
data(b"\x00\x00\x00\x09"),
data(b"abcdefghi"),
data(b"\x00\x00\x00\x03"),
data(b"123"),
data(b"\x00\x00\x00\x0b"),
data(b"hello world"),
flush(),
},
LengthDelimitedCodec::new(),
);
pin_mut!(io);
task::spawn(()).enter(|cx, _| {
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("abcdefghi")));
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("123")));
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("hello world")));
assert_ready_ok!(io.as_mut().poll_flush(cx));
assert!(io.get_ref().calls.is_empty());
});
}
#[test]
fn write_single_multi_frame_multi_packet() {
let io = FramedWrite::new(
mock! {
data(b"\x00\x00\x00\x09"),
data(b"abcdefghi"),
flush(),
data(b"\x00\x00\x00\x03"),
data(b"123"),
flush(),
data(b"\x00\x00\x00\x0b"),
data(b"hello world"),
flush(),
},
LengthDelimitedCodec::new(),
);
pin_mut!(io);
task::spawn(()).enter(|cx, _| {
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("abcdefghi")));
assert_ready_ok!(io.as_mut().poll_flush(cx));
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("123")));
assert_ready_ok!(io.as_mut().poll_flush(cx));
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("hello world")));
assert_ready_ok!(io.as_mut().poll_flush(cx));
assert!(io.get_ref().calls.is_empty());
});
}
#[test]
fn write_single_frame_would_block() {
let io = FramedWrite::new(
mock! {
Poll::Pending,
data(b"\x00\x00"),
Poll::Pending,
data(b"\x00\x09"),
data(b"abcdefghi"),
flush(),
},
LengthDelimitedCodec::new(),
);
pin_mut!(io);
task::spawn(()).enter(|cx, _| {
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("abcdefghi")));
assert_pending!(io.as_mut().poll_flush(cx));
assert_pending!(io.as_mut().poll_flush(cx));
assert_ready_ok!(io.as_mut().poll_flush(cx));
assert!(io.get_ref().calls.is_empty());
});
}
#[test]
fn write_single_frame_little_endian() {
let io = length_delimited::Builder::new()
.little_endian()
.new_write(mock! {
data(b"\x09\x00\x00\x00"),
data(b"abcdefghi"),
flush(),
});
pin_mut!(io);
task::spawn(()).enter(|cx, _| {
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("abcdefghi")));
assert_ready_ok!(io.as_mut().poll_flush(cx));
assert!(io.get_ref().calls.is_empty());
});
}
#[test]
fn write_single_frame_with_short_length_field() {
let io = length_delimited::Builder::new()
.length_field_length(1)
.new_write(mock! {
data(b"\x09"),
data(b"abcdefghi"),
flush(),
});
pin_mut!(io);
task::spawn(()).enter(|cx, _| {
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("abcdefghi")));
assert_ready_ok!(io.as_mut().poll_flush(cx));
assert!(io.get_ref().calls.is_empty());
});
}
#[test]
fn write_max_frame_len() {
let io = length_delimited::Builder::new()
.max_frame_length(5)
.new_write(mock! {});
pin_mut!(io);
task::spawn(()).enter(|cx, _| {
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_err!(io.as_mut().start_send(Bytes::from("abcdef")));
assert!(io.get_ref().calls.is_empty());
});
}
#[test]
fn write_update_max_frame_len_at_rest() {
let io = length_delimited::Builder::new().new_write(mock! {
data(b"\x00\x00\x00\x06"),
data(b"abcdef"),
flush(),
});
pin_mut!(io);
task::spawn(()).enter(|cx, _| {
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("abcdef")));
assert_ready_ok!(io.as_mut().poll_flush(cx));
io.encoder_mut().set_max_frame_length(5);
assert_err!(io.as_mut().start_send(Bytes::from("abcdef")));
assert!(io.get_ref().calls.is_empty());
});
}
#[test]
fn write_update_max_frame_len_in_flight() {
let io = length_delimited::Builder::new().new_write(mock! {
data(b"\x00\x00\x00\x06"),
data(b"ab"),
Poll::Pending,
data(b"cdef"),
flush(),
});
pin_mut!(io);
task::spawn(()).enter(|cx, _| {
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("abcdef")));
assert_pending!(io.as_mut().poll_flush(cx));
io.encoder_mut().set_max_frame_length(5);
assert_ready_ok!(io.as_mut().poll_flush(cx));
assert_err!(io.as_mut().start_send(Bytes::from("abcdef")));
assert!(io.get_ref().calls.is_empty());
});
}
#[test]
fn write_zero() {
let io = length_delimited::Builder::new().new_write(mock! {});
pin_mut!(io);
task::spawn(()).enter(|cx, _| {
assert_ready_ok!(io.as_mut().poll_ready(cx));
assert_ok!(io.as_mut().start_send(Bytes::from("abcdef")));
assert_ready_err!(io.as_mut().poll_flush(cx));
assert!(io.get_ref().calls.is_empty());
});
}
#[test]
fn encode_overflow() {
    // Test reproducing tokio-rs/tokio#681: encoding into a buffer whose spare
    // capacity cannot hold the length header must grow the buffer, not panic.
    let mut codec = length_delimited::Builder::new().new_codec();
    let mut buf = BytesMut::with_capacity(1024);
    // Fill the buffer exactly to its capacity without resizing it.
    buf.put_slice(&vec![b'a'; 1024]);
    // Trying to encode the length header should resize the buffer if it won't fit.
    codec.encode(Bytes::from("hello"), &mut buf).unwrap();
}
#[test]
fn frame_does_not_fit() {
    // A 1-byte length field can express at most 255, so a larger configured
    // max frame length is clamped down to what the header can encode.
    let codec = LengthDelimitedCodec::builder()
        .length_field_length(1)
        .max_frame_length(256)
        .new_codec();
    assert_eq!(codec.max_frame_length(), 255);
}
#[test]
fn neg_adjusted_frame_does_not_fit() {
    // With a 1-byte length field and a -1 adjustment, the effective maximum
    // frame length is clamped one lower than the unadjusted case (254, not
    // 255) — presumably because the adjustment shifts what the header value
    // can represent; confirm against the builder's docs.
    let codec = LengthDelimitedCodec::builder()
        .length_field_length(1)
        .length_adjustment(-1)
        .new_codec();
    assert_eq!(codec.max_frame_length(), 254);
}
#[test]
fn pos_adjusted_frame_does_not_fit() {
    // Mirror of the negative-adjustment case: a +1 adjustment raises the
    // clamped maximum to 256 even though the raw 1-byte field tops out at 255.
    let codec = LengthDelimitedCodec::builder()
        .length_field_length(1)
        .length_adjustment(1)
        .new_codec();
    assert_eq!(codec.max_frame_length(), 256);
}
#[test]
fn max_allowed_frame_fits() {
    // A length field as wide as usize can represent usize::MAX, so the
    // configured maximum is kept without clamping.
    let codec = LengthDelimitedCodec::builder()
        .length_field_length(std::mem::size_of::<usize>())
        .max_frame_length(usize::MAX)
        .new_codec();
    assert_eq!(codec.max_frame_length(), usize::MAX);
}
#[test]
fn smaller_frame_len_not_adjusted() {
    // An explicitly configured max smaller than what the field could encode
    // is respected as-is — clamping only ever lowers the limit.
    let codec = LengthDelimitedCodec::builder()
        .max_frame_length(10)
        .length_field_length(std::mem::size_of::<usize>())
        .new_codec();
    assert_eq!(codec.max_frame_length(), 10);
}
#[test]
fn max_allowed_length_field() {
    // Same as max_allowed_frame_fits but with an explicit 8-byte field width
    // rather than size_of::<usize>().
    let codec = LengthDelimitedCodec::builder()
        .length_field_length(8)
        .max_frame_length(usize::MAX)
        .new_codec();
    assert_eq!(codec.max_frame_length(), usize::MAX);
}
// ===== Test utils =====
/// Scripted `AsyncRead`/`AsyncWrite` test double: each poll consumes the
/// next expected call from the front of `calls` and replays it.
struct Mock {
    // FIFO script of poll outcomes (data, flush, pending, or error).
    calls: VecDeque<Poll<io::Result<Op>>>,
}
/// A single scripted I/O operation for `Mock`.
enum Op {
    // Bytes produced by a read, or expected from a write.
    Data(Vec<u8>),
    // An expected flush call (write side only).
    Flush,
}
impl AsyncRead for Mock {
    fn poll_read(
        mut self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        dst: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Replay the next scripted call: a Data op is copied into `dst`,
        // Pending/Err ops are surfaced as-is, and an exhausted script reads
        // as EOF (Ready(Ok) with nothing written).
        match self.calls.pop_front() {
            Some(Poll::Ready(Ok(Op::Data(data)))) => {
                debug_assert!(dst.remaining() >= data.len());
                dst.put_slice(&data);
                Poll::Ready(Ok(()))
            }
            // A Flush op in a read script is a test-authoring bug.
            Some(Poll::Ready(Ok(_))) => panic!(),
            Some(Poll::Ready(Err(e))) => Poll::Ready(Err(e)),
            Some(Poll::Pending) => Poll::Pending,
            None => Poll::Ready(Ok(())),
        }
    }
}
impl AsyncWrite for Mock {
    fn poll_write(
        mut self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        src: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        // Match `src` against the next scripted op: a Data op asserts that the
        // written bytes start with the expected bytes and reports a partial
        // write of that length; an exhausted script reports 0 bytes written.
        match self.calls.pop_front() {
            Some(Poll::Ready(Ok(Op::Data(data)))) => {
                let len = data.len();
                assert!(src.len() >= len, "expect={data:?}; actual={src:?}");
                assert_eq!(&data[..], &src[..len]);
                Poll::Ready(Ok(len))
            }
            // The script expected a flush here, not a write.
            Some(Poll::Ready(Ok(_))) => panic!(),
            Some(Poll::Ready(Err(e))) => Poll::Ready(Err(e)),
            Some(Poll::Pending) => Poll::Pending,
            None => Poll::Ready(Ok(0)),
        }
    }
    fn poll_flush(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        // A flush must line up with a scripted Flush op; an empty script
        // accepts the flush trivially.
        match self.calls.pop_front() {
            Some(Poll::Ready(Ok(Op::Flush))) => Poll::Ready(Ok(())),
            Some(Poll::Ready(Ok(_))) => panic!(),
            Some(Poll::Ready(Err(e))) => Poll::Ready(Err(e)),
            Some(Poll::Pending) => Poll::Pending,
            None => Poll::Ready(Ok(())),
        }
    }
    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        // Shutdown is not part of any script; it always succeeds immediately.
        Poll::Ready(Ok(()))
    }
}
/// Converts a borrowed byte slice into an expected-data op by copying it.
// The explicit `'a` lifetime in the original was needless (clippy
// `needless_lifetimes`); elision produces the identical impl.
impl From<&[u8]> for Op {
    fn from(src: &[u8]) -> Op {
        Op::Data(src.into())
    }
}
/// Converts an owned byte vector into an expected-data op without copying.
impl From<Vec<u8>> for Op {
    fn from(src: Vec<u8>) -> Op {
        Op::Data(src)
    }
}
fn data(bytes: &[u8]) -> Poll<io::Result<Op>> {
Poll::Ready(Ok(bytes.into()))
}
/// Script entry: an immediately-ready expected flush.
fn flush() -> Poll<io::Result<Op>> {
    Poll::Ready(Ok(Op::Flush))
}

349
vendor/tokio-util/tests/mpsc.rs vendored Normal file
View File

@@ -0,0 +1,349 @@
use futures::sink::SinkExt;
use std::future::poll_fn;
use tokio::sync::mpsc::channel;
use tokio_test::task::spawn;
use tokio_test::{
assert_ok, assert_pending, assert_ready, assert_ready_eq, assert_ready_err, assert_ready_ok,
};
use tokio_util::sync::PollSender;
#[tokio::test]
async fn simple() {
    // Fill the channel to capacity via poll_reserve/send_item, check that the
    // next reserve stays pending until the receiver frees a slot, and that an
    // already-reserved slot lets send_item succeed even after the receiver is
    // dropped.
    let (send, mut recv) = channel(3);
    let mut send = PollSender::new(send);
    for i in 1..=3i32 {
        let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
        assert_ready_ok!(reserve.poll());
        send.send_item(i).unwrap();
    }
    let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
    assert_pending!(reserve.poll());
    assert_eq!(recv.recv().await.unwrap(), 1);
    assert!(reserve.is_woken());
    assert_ready_ok!(reserve.poll());
    drop(recv);
    // The slot was reserved before the receiver was dropped, so this succeeds.
    send.send_item(42).unwrap();
}
#[tokio::test]
async fn simple_ref() {
let v = [1, 2, 3i32];
let (send, mut recv) = channel(3);
let mut send = PollSender::new(send);
for vi in v.iter() {
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_ready_ok!(reserve.poll());
send.send_item(vi).unwrap();
}
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_pending!(reserve.poll());
assert_eq!(*recv.recv().await.unwrap(), 1);
assert!(reserve.is_woken());
assert_ready_ok!(reserve.poll());
drop(recv);
send.send_item(&42).unwrap();
}
#[tokio::test]
async fn repeated_poll_reserve() {
let (send, mut recv) = channel::<i32>(1);
let mut send = PollSender::new(send);
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_ready_ok!(reserve.poll());
assert_ready_ok!(reserve.poll());
send.send_item(1).unwrap();
assert_eq!(recv.recv().await.unwrap(), 1);
}
#[tokio::test]
async fn abort_send() {
let (send, mut recv) = channel(3);
let mut send = PollSender::new(send);
let send2 = send.get_ref().cloned().unwrap();
for i in 1..=3i32 {
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_ready_ok!(reserve.poll());
send.send_item(i).unwrap();
}
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_pending!(reserve.poll());
assert_eq!(recv.recv().await.unwrap(), 1);
assert!(reserve.is_woken());
assert_ready_ok!(reserve.poll());
let mut send2_send = spawn(send2.send(5));
assert_pending!(send2_send.poll());
assert!(send.abort_send());
assert!(send2_send.is_woken());
assert_ready_ok!(send2_send.poll());
assert_eq!(recv.recv().await.unwrap(), 2);
assert_eq!(recv.recv().await.unwrap(), 3);
assert_eq!(recv.recv().await.unwrap(), 5);
}
#[tokio::test]
async fn close_sender_last() {
let (send, mut recv) = channel::<i32>(3);
let mut send = PollSender::new(send);
let mut recv_task = spawn(recv.recv());
assert_pending!(recv_task.poll());
send.close();
assert!(recv_task.is_woken());
assert!(assert_ready!(recv_task.poll()).is_none());
}
#[tokio::test]
async fn close_sender_not_last() {
let (send, mut recv) = channel::<i32>(3);
let mut send = PollSender::new(send);
let send2 = send.get_ref().cloned().unwrap();
let mut recv_task = spawn(recv.recv());
assert_pending!(recv_task.poll());
send.close();
assert!(!recv_task.is_woken());
assert_pending!(recv_task.poll());
drop(send2);
assert!(recv_task.is_woken());
assert!(assert_ready!(recv_task.poll()).is_none());
}
#[tokio::test]
async fn close_sender_before_reserve() {
let (send, mut recv) = channel::<i32>(3);
let mut send = PollSender::new(send);
let mut recv_task = spawn(recv.recv());
assert_pending!(recv_task.poll());
send.close();
assert!(recv_task.is_woken());
assert!(assert_ready!(recv_task.poll()).is_none());
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_ready_err!(reserve.poll());
}
#[tokio::test]
async fn close_sender_after_pending_reserve() {
let (send, mut recv) = channel::<i32>(1);
let mut send = PollSender::new(send);
let mut recv_task = spawn(recv.recv());
assert_pending!(recv_task.poll());
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_ready_ok!(reserve.poll());
send.send_item(1).unwrap();
assert!(recv_task.is_woken());
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_pending!(reserve.poll());
drop(reserve);
send.close();
assert!(send.is_closed());
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_ready_err!(reserve.poll());
}
#[tokio::test]
async fn close_sender_after_successful_reserve() {
let (send, mut recv) = channel::<i32>(3);
let mut send = PollSender::new(send);
let mut recv_task = spawn(recv.recv());
assert_pending!(recv_task.poll());
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_ready_ok!(reserve.poll());
drop(reserve);
send.close();
assert!(send.is_closed());
assert!(!recv_task.is_woken());
assert_pending!(recv_task.poll());
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_ready_ok!(reserve.poll());
}
#[tokio::test]
async fn abort_send_after_pending_reserve() {
let (send, mut recv) = channel::<i32>(1);
let mut send = PollSender::new(send);
let mut recv_task = spawn(recv.recv());
assert_pending!(recv_task.poll());
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_ready_ok!(reserve.poll());
send.send_item(1).unwrap();
assert_eq!(send.get_ref().unwrap().capacity(), 0);
assert!(!send.abort_send());
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_pending!(reserve.poll());
assert!(send.abort_send());
assert_eq!(send.get_ref().unwrap().capacity(), 0);
}
#[tokio::test]
async fn abort_send_after_successful_reserve() {
let (send, mut recv) = channel::<i32>(1);
let mut send = PollSender::new(send);
let mut recv_task = spawn(recv.recv());
assert_pending!(recv_task.poll());
assert_eq!(send.get_ref().unwrap().capacity(), 1);
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_ready_ok!(reserve.poll());
assert_eq!(send.get_ref().unwrap().capacity(), 0);
assert!(send.abort_send());
assert_eq!(send.get_ref().unwrap().capacity(), 1);
}
#[tokio::test]
async fn closed_when_receiver_drops() {
let (send, _) = channel::<i32>(1);
let mut send = PollSender::new(send);
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_ready_err!(reserve.poll());
}
#[should_panic]
#[test]
fn start_send_panics_when_idle() {
let (send, _) = channel::<i32>(3);
let mut send = PollSender::new(send);
send.send_item(1).unwrap();
}
#[should_panic]
#[test]
fn start_send_panics_when_acquiring() {
let (send, _) = channel::<i32>(1);
let mut send = PollSender::new(send);
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_ready_ok!(reserve.poll());
send.send_item(1).unwrap();
let mut reserve = spawn(poll_fn(|cx| send.poll_reserve(cx)));
assert_pending!(reserve.poll());
send.send_item(2).unwrap();
}
#[test]
fn sink_send_then_flush() {
let (send, mut recv) = channel(1);
let mut send = PollSender::new(send);
let mut recv_task = spawn(recv.recv());
assert_pending!(recv_task.poll());
let mut ready = spawn(poll_fn(|cx| send.poll_ready_unpin(cx)));
assert_ready_ok!(ready.poll());
assert_ok!(send.start_send_unpin(()));
let mut ready = spawn(poll_fn(|cx| send.poll_ready_unpin(cx)));
assert_pending!(ready.poll());
let mut flush = spawn(poll_fn(|cx| send.poll_flush_unpin(cx)));
assert_ready_ok!(flush.poll());
// Flushing does not mean that the sender becomes ready.
let mut ready = spawn(poll_fn(|cx| send.poll_ready_unpin(cx)));
assert_pending!(ready.poll());
assert_ready_eq!(recv_task.poll(), Some(()));
assert!(ready.is_woken());
assert_ready_ok!(ready.poll());
}
#[test]
fn sink_send_then_close() {
let (send, mut recv) = channel(1);
let mut send = PollSender::new(send);
let mut recv_task = spawn(recv.recv());
assert_pending!(recv_task.poll());
let mut ready = spawn(poll_fn(|cx| send.poll_ready_unpin(cx)));
assert_ready_ok!(ready.poll());
assert_ok!(send.start_send_unpin(1));
let mut ready = spawn(poll_fn(|cx| send.poll_ready_unpin(cx)));
assert_pending!(ready.poll());
assert!(recv_task.is_woken());
assert_ready_eq!(recv_task.poll(), Some(1));
assert!(ready.is_woken());
assert_ready_ok!(ready.poll());
drop(recv_task);
let mut recv_task = spawn(recv.recv());
assert_pending!(recv_task.poll());
assert_ok!(send.start_send_unpin(2));
let mut close = spawn(poll_fn(|cx| send.poll_close_unpin(cx)));
assert_ready_ok!(close.poll());
assert!(recv_task.is_woken());
assert_ready_eq!(recv_task.poll(), Some(2));
drop(recv_task);
let mut recv_task = spawn(recv.recv());
assert_ready_eq!(recv_task.poll(), None);
}
#[test]
fn sink_send_ref() {
let data = "data".to_owned();
let (send, mut recv) = channel(1);
let mut send = PollSender::new(send);
let mut recv_task = spawn(recv.recv());
assert_pending!(recv_task.poll());
let mut ready = spawn(poll_fn(|cx| send.poll_ready_unpin(cx)));
assert_ready_ok!(ready.poll());
assert_ok!(send.start_send_unpin(data.as_str()));
let mut flush = spawn(poll_fn(|cx| send.poll_flush_unpin(cx)));
assert_ready_ok!(flush.poll());
assert_ready_eq!(recv_task.poll(), Some("data"));
}

242
vendor/tokio-util/tests/panic.rs vendored Normal file
View File

@@ -0,0 +1,242 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi doesn't support panic recovery
#![cfg(panic = "unwind")]
use parking_lot::{const_mutex, Mutex};
use std::error::Error;
use std::panic;
use std::sync::Arc;
use tokio::runtime::Runtime;
use tokio::sync::mpsc::channel;
use tokio::time::{Duration, Instant};
use tokio_test::task;
use tokio_util::io::SyncIoBridge;
use tokio_util::sync::PollSender;
use tokio_util::task::LocalPoolHandle;
use tokio_util::time::DelayQueue;
// Taken from tokio-util::time::wheel; if that value changes, this constant must be updated to match.
const MAX_DURATION_MS: u64 = (1 << (36)) - 1;
/// Runs `func` under `catch_unwind` and returns `Some(source_file)` of the
/// panic it raised, or `None` if it completed without panicking.
///
/// The panic hook is process-global state, so a static mutex serializes
/// concurrent uses of this helper against each other.
fn test_panic<Func: FnOnce() + panic::UnwindSafe>(func: Func) -> Option<String> {
    static PANIC_MUTEX: Mutex<()> = const_mutex(());
    {
        let _guard = PANIC_MUTEX.lock();
        // Shared slot the temporary hook writes the panic's file path into.
        let panic_file: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let prev_hook = panic::take_hook();
        {
            let panic_file = panic_file.clone();
            panic::set_hook(Box::new(move |panic_info| {
                let panic_location = panic_info.location().unwrap();
                panic_file
                    .lock()
                    .clone_from(&Some(panic_location.file().to_string()));
            }));
        }
        let result = panic::catch_unwind(func);
        // Return to the previously set panic hook (maybe default) so that we get nice error
        // messages in the tests.
        panic::set_hook(prev_hook);
        if result.is_err() {
            panic_file.lock().clone()
        } else {
            None
        }
    }
}
#[test]
fn sync_bridge_new_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let _ = SyncIoBridge::new(tokio::io::empty());
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn poll_sender_send_item_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let (send, _) = channel::<u32>(3);
let mut send = PollSender::new(send);
let _ = send.send_item(42);
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn local_pool_handle_new_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let _ = LocalPoolHandle::new(0);
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn local_pool_handle_spawn_pinned_by_idx_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = basic();
rt.block_on(async {
let handle = LocalPoolHandle::new(2);
handle.spawn_pinned_by_idx(|| async { "test" }, 3);
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn delay_queue_insert_at_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = basic();
rt.block_on(async {
let mut queue = task::spawn(DelayQueue::with_capacity(3));
//let st = std::time::Instant::from(SystemTime::UNIX_EPOCH);
let _k = queue.insert_at(
"1",
Instant::now() + Duration::from_millis(MAX_DURATION_MS + 1),
);
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn delay_queue_insert_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = basic();
rt.block_on(async {
let mut queue = task::spawn(DelayQueue::with_capacity(3));
let _k = queue.insert("1", Duration::from_millis(MAX_DURATION_MS + 1));
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn delay_queue_remove_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = basic();
rt.block_on(async {
let mut queue = task::spawn(DelayQueue::with_capacity(3));
let key = queue.insert_at("1", Instant::now());
queue.remove(&key);
queue.remove(&key);
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn delay_queue_reset_at_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = basic();
rt.block_on(async {
let mut queue = task::spawn(DelayQueue::with_capacity(3));
let key = queue.insert_at("1", Instant::now());
queue.reset_at(
&key,
Instant::now() + Duration::from_millis(MAX_DURATION_MS + 1),
);
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn delay_queue_reset_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = basic();
rt.block_on(async {
let mut queue = task::spawn(DelayQueue::with_capacity(3));
let key = queue.insert_at("1", Instant::now());
queue.reset(&key, Duration::from_millis(MAX_DURATION_MS + 1));
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn delay_queue_reserve_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = basic();
rt.block_on(async {
let mut queue = task::spawn(DelayQueue::<u32>::with_capacity(3));
queue.reserve((1 << 30) as usize);
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn future_ext_to_panic_caller() -> Result<(), Box<dyn Error>> {
use tokio::{sync::oneshot, time::Duration};
use tokio_util::future::FutureExt;
let panic_location_file = test_panic(|| {
let (_tx, rx) = oneshot::channel::<()>();
// this panics because there is no runtime available
let _res = rx.timeout(Duration::from_millis(10));
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
/// Builds a current-thread tokio runtime with all drivers enabled, used by
/// the tests above that need `block_on` without a multi-threaded scheduler.
fn basic() -> Runtime {
    tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .unwrap()
}

View File

@@ -0,0 +1,84 @@
use std::future::Future;
use std::sync::Arc;
use std::task::Poll;
use tokio::sync::{OwnedSemaphorePermit, Semaphore};
use tokio_util::sync::PollSemaphore;
type SemRet = Option<OwnedSemaphorePermit>;
fn semaphore_poll(
sem: &mut PollSemaphore,
) -> tokio_test::task::Spawn<impl Future<Output = SemRet> + '_> {
let fut = std::future::poll_fn(move |cx| sem.poll_acquire(cx));
tokio_test::task::spawn(fut)
}
fn semaphore_poll_many(
sem: &mut PollSemaphore,
permits: u32,
) -> tokio_test::task::Spawn<impl Future<Output = SemRet> + '_> {
let fut = std::future::poll_fn(move |cx| sem.poll_acquire_many(cx, permits));
tokio_test::task::spawn(fut)
}
#[tokio::test]
async fn it_works() {
let sem = Arc::new(Semaphore::new(1));
let mut poll_sem = PollSemaphore::new(sem.clone());
let permit = sem.acquire().await.unwrap();
let mut poll = semaphore_poll(&mut poll_sem);
assert!(poll.poll().is_pending());
drop(permit);
assert!(matches!(poll.poll(), Poll::Ready(Some(_))));
drop(poll);
sem.close();
assert!(semaphore_poll(&mut poll_sem).await.is_none());
// Check that it is fused.
assert!(semaphore_poll(&mut poll_sem).await.is_none());
assert!(semaphore_poll(&mut poll_sem).await.is_none());
}
#[tokio::test]
async fn can_acquire_many_permits() {
let sem = Arc::new(Semaphore::new(4));
let mut poll_sem = PollSemaphore::new(sem.clone());
let permit1 = semaphore_poll(&mut poll_sem).poll();
assert!(matches!(permit1, Poll::Ready(Some(_))));
let permit2 = semaphore_poll_many(&mut poll_sem, 2).poll();
assert!(matches!(permit2, Poll::Ready(Some(_))));
assert_eq!(sem.available_permits(), 1);
drop(permit2);
let mut permit4 = semaphore_poll_many(&mut poll_sem, 4);
assert!(permit4.poll().is_pending());
drop(permit1);
let permit4 = permit4.poll();
assert!(matches!(permit4, Poll::Ready(Some(_))));
assert_eq!(sem.available_permits(), 0);
}
#[tokio::test]
async fn can_poll_different_amounts_of_permits() {
let sem = Arc::new(Semaphore::new(4));
let mut poll_sem = PollSemaphore::new(sem.clone());
assert!(semaphore_poll_many(&mut poll_sem, 5).poll().is_pending());
assert!(semaphore_poll_many(&mut poll_sem, 4).poll().is_ready());
let permit = sem.acquire_many(4).await.unwrap();
assert!(semaphore_poll_many(&mut poll_sem, 5).poll().is_pending());
assert!(semaphore_poll_many(&mut poll_sem, 4).poll().is_pending());
drop(permit);
assert!(semaphore_poll_many(&mut poll_sem, 5).poll().is_pending());
assert!(semaphore_poll_many(&mut poll_sem, 4).poll().is_ready());
}

85
vendor/tokio-util/tests/reusable_box.rs vendored Normal file
View File

@@ -0,0 +1,85 @@
use futures::future::FutureExt;
use std::alloc::Layout;
use std::future::Future;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use tokio_util::sync::ReusableBoxFuture;
#[test]
// Clippy false positive; it's useful to be able to test the trait impls for any lifetime
#[allow(clippy::extra_unused_lifetimes)]
fn traits<'a>() {
fn assert_traits<T: Send + Sync + Unpin>() {}
// Use a type that is !Unpin
assert_traits::<ReusableBoxFuture<'a, PhantomPinned>>();
// Use a type that is !Send + !Sync
assert_traits::<ReusableBoxFuture<'a, Rc<()>>>();
}
#[test]
fn test_different_futures() {
let fut = async move { 10 };
// Not zero sized!
assert_eq!(Layout::for_value(&fut).size(), 1);
let mut b = ReusableBoxFuture::new(fut);
assert_eq!(b.get_pin().now_or_never(), Some(10));
b.try_set(async move { 20 })
.unwrap_or_else(|_| panic!("incorrect size"));
assert_eq!(b.get_pin().now_or_never(), Some(20));
b.try_set(async move { 30 })
.unwrap_or_else(|_| panic!("incorrect size"));
assert_eq!(b.get_pin().now_or_never(), Some(30));
}
#[test]
fn test_different_sizes() {
let fut1 = async move { 10 };
let val = [0u32; 1000];
let fut2 = async move { val[0] };
let fut3 = ZeroSizedFuture {};
assert_eq!(Layout::for_value(&fut1).size(), 1);
assert_eq!(Layout::for_value(&fut2).size(), 4004);
assert_eq!(Layout::for_value(&fut3).size(), 0);
let mut b = ReusableBoxFuture::new(fut1);
assert_eq!(b.get_pin().now_or_never(), Some(10));
b.set(fut2);
assert_eq!(b.get_pin().now_or_never(), Some(0));
b.set(fut3);
assert_eq!(b.get_pin().now_or_never(), Some(5));
}
/// A future with no state at all; polling it always yields `5` immediately.
struct ZeroSizedFuture {}

impl Future for ZeroSizedFuture {
    type Output = u32;

    fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<u32> {
        // Stateless and always ready — the waker is never registered.
        let result: u32 = 5;
        Poll::Ready(result)
    }
}
#[test]
fn test_zero_sized() {
    let fut = ZeroSizedFuture {};
    // Zero sized!
    assert_eq!(Layout::for_value(&fut).size(), 0);
    let mut b = ReusableBoxFuture::new(fut);
    // The stored future can be polled repeatedly (it is stateless and always
    // ready) and replaced in place with another future of the same layout.
    assert_eq!(b.get_pin().now_or_never(), Some(5));
    assert_eq!(b.get_pin().now_or_never(), Some(5));
    b.try_set(ZeroSizedFuture {})
        .unwrap_or_else(|_| panic!("incorrect size"));
    assert_eq!(b.get_pin().now_or_never(), Some(5));
    assert_eq!(b.get_pin().now_or_never(), Some(5));
}

239
vendor/tokio-util/tests/spawn_pinned.rs vendored Normal file
View File

@@ -0,0 +1,239 @@
#![warn(rust_2018_idioms)]
#![cfg(not(target_os = "wasi"))] // Wasi doesn't support threads
use std::rc::Rc;
use std::sync::Arc;
use tokio::sync::Barrier;
use tokio_util::task;
/// Simple test of running a !Send future via spawn_pinned
#[tokio::test]
async fn can_spawn_not_send_future() {
    let pool = task::LocalPoolHandle::new(1);

    let result = pool
        .spawn_pinned(|| {
            // `Rc` is neither `Send` nor `Sync`, which makes the future `!Send`.
            let data = Rc::new("test");
            async move { data.to_string() }
        })
        .await
        .unwrap();

    assert_eq!(result, "test");
}
/// Dropping the join handle still lets the task execute
#[test]
fn can_drop_future_and_still_get_output() {
    let pool = task::LocalPoolHandle::new(1);
    let (tx, rx) = std::sync::mpsc::channel();

    // The returned join handle is discarded immediately; the task must run anyway.
    pool.spawn_pinned(move || {
        // `Rc` makes the future `!Send`.
        let data = Rc::new("test");
        async move {
            let _ = tx.send(data.to_string());
        }
    });

    assert_eq!(rx.recv(), Ok("test".to_string()));
}
/// Constructing a pool with zero workers is a programming error: the
/// constructor asserts `pool_size > 0` and panics.
#[test]
#[should_panic(expected = "assertion failed: pool_size > 0")]
fn cannot_create_zero_sized_pool() {
    let _pool = task::LocalPoolHandle::new(0);
}
/// We should be able to spawn multiple futures onto the pool at the same time.
#[tokio::test]
async fn can_spawn_multiple_futures() {
    let pool = task::LocalPoolHandle::new(2);

    let first = pool.spawn_pinned(|| {
        let data = Rc::new("test1");
        async move { data.to_string() }
    });
    let second = pool.spawn_pinned(|| {
        let data = Rc::new("test2");
        async move { data.to_string() }
    });

    assert_eq!(first.await.unwrap(), "test1");
    assert_eq!(second.await.unwrap(), "test2");
}
/// A panic in the spawned task causes the join handle to return an error.
/// But, you can continue to spawn tasks.
#[tokio::test]
#[cfg(panic = "unwind")]
async fn task_panic_propagates() {
    let pool = task::LocalPoolHandle::new(1);

    let handle = pool.spawn_pinned(|| async {
        panic!("Test panic");
    });

    // The panic surfaces as a `JoinError` whose payload we can inspect.
    let result = handle.await;
    assert!(result.is_err());
    let error = result.unwrap_err();
    assert!(error.is_panic());
    let payload = error.into_panic().downcast::<&'static str>().unwrap();
    assert_eq!(*payload, "Test panic");

    // The worker survived the panic: a "safe" task still completes.
    let handle = pool.spawn_pinned(|| async { "test" });
    assert_eq!(handle.await.unwrap(), "test");
}
/// A panic during task creation causes the join handle to return an error.
/// But, you can continue to spawn tasks.
#[tokio::test]
#[cfg(panic = "unwind")]
async fn callback_panic_does_not_kill_worker() {
    let pool = task::LocalPoolHandle::new(1);

    let handle = pool.spawn_pinned(|| {
        // The panic fires inside the callback, before any future is created.
        panic!("Test panic");
        #[allow(unreachable_code)]
        async {}
    });

    let result = handle.await;
    assert!(result.is_err());
    let error = result.unwrap_err();
    assert!(error.is_panic());
    let payload = error.into_panic().downcast::<&'static str>().unwrap();
    assert_eq!(*payload, "Test panic");

    // The worker remains usable: a "safe" callback still works.
    let handle = pool.spawn_pinned(|| async { "test" });
    assert_eq!(handle.await.unwrap(), "test");
}
/// Canceling the task via the returned join handle cancels the spawned task
/// (which has a different, internal join handle).
#[tokio::test]
async fn task_cancellation_propagates() {
    let pool = task::LocalPoolHandle::new(1);
    // The weak handle lets us observe (at the end) that the task's Arc
    // was actually dropped, i.e. the inner task was destroyed.
    let notify_dropped = Arc::new(());
    let weak_notify_dropped = Arc::downgrade(&notify_dropped);
    let (start_sender, start_receiver) = tokio::sync::oneshot::channel();
    let (drop_sender, drop_receiver) = tokio::sync::oneshot::channel::<()>();
    let join_handle = pool.spawn_pinned(|| async move {
        // Dropped only when the task itself is torn down.
        let _drop_sender = drop_sender;
        // Move the Arc into the task
        let _notify_dropped = notify_dropped;
        let _ = start_sender.send(());
        // Keep the task running until it gets aborted
        futures::future::pending::<()>().await;
    });
    // Wait for the task to start
    let _ = start_receiver.await;
    join_handle.abort();
    // Wait for the inner task to abort, dropping the sender.
    // The top level join handle aborts quicker than the inner task (the abort
    // needs to propagate and get processed on the worker thread), so we can't
    // just await the top level join handle.
    let _ = drop_receiver.await;
    // Check that the Arc has been dropped. This verifies that the inner task
    // was canceled as well.
    assert!(weak_notify_dropped.upgrade().is_none());
}
/// Tasks should be given to the least burdened worker. When spawning two tasks
/// on a pool with two empty workers the tasks should be spawned on separate
/// workers.
#[tokio::test]
async fn tasks_are_balanced() {
    let pool = task::LocalPoolHandle::new(2);
    // Spawn a task so one thread has a task count of 1
    let (start_sender1, start_receiver1) = tokio::sync::oneshot::channel();
    let (end_sender1, end_receiver1) = tokio::sync::oneshot::channel();
    let join_handle1 = pool.spawn_pinned(|| async move {
        let _ = start_sender1.send(());
        // Park until the test releases us, keeping this worker's load at 1.
        let _ = end_receiver1.await;
        std::thread::current().id()
    });
    // Wait for the first task to start up
    let _ = start_receiver1.await;
    // This task should be spawned on the other thread
    let (start_sender2, start_receiver2) = tokio::sync::oneshot::channel();
    let join_handle2 = pool.spawn_pinned(|| async move {
        let _ = start_sender2.send(());
        std::thread::current().id()
    });
    // Wait for the second task to start up
    let _ = start_receiver2.await;
    // Allow the first task to end
    let _ = end_sender1.send(());
    // Each task reports the worker thread it ran on.
    let thread_id1 = join_handle1.await.unwrap();
    let thread_id2 = join_handle2.await.unwrap();
    // Since the first task was active when the second task spawned, they should
    // be on separate workers/threads.
    assert_ne!(thread_id1, thread_id2);
}
/// `spawn_pinned_by_idx` targets an explicit worker instead of the least
/// burdened one, so worker loads can become intentionally unbalanced.
#[tokio::test]
async fn spawn_by_idx() {
    let pool = task::LocalPoolHandle::new(3);
    // Barrier of 4: the three spawned tasks plus the test task itself.
    let barrier = Arc::new(Barrier::new(4));
    let barrier1 = barrier.clone();
    let barrier2 = barrier.clone();
    let barrier3 = barrier.clone();
    // Two tasks pinned to worker 0, one to worker 1, none to worker 2.
    let handle1 = pool.spawn_pinned_by_idx(
        || async move {
            barrier1.wait().await;
            std::thread::current().id()
        },
        0,
    );
    pool.spawn_pinned_by_idx(
        || async move {
            barrier2.wait().await;
            std::thread::current().id()
        },
        0,
    );
    let handle2 = pool.spawn_pinned_by_idx(
        || async move {
            barrier3.wait().await;
            std::thread::current().id()
        },
        1,
    );
    // Snapshot the per-worker loads while all tasks are still parked on
    // the barrier, then release them.
    let loads = pool.get_task_loads_for_each_worker();
    barrier.wait().await;
    assert_eq!(loads[0], 2);
    assert_eq!(loads[1], 1);
    assert_eq!(loads[2], 0);
    let thread_id1 = handle1.await.unwrap();
    let thread_id2 = handle2.await.unwrap();
    // Workers 0 and 1 are distinct threads.
    assert_ne!(thread_id1, thread_id2);
}

View File

@@ -0,0 +1,565 @@
#![warn(rust_2018_idioms)]
use tokio::pin;
use tokio::sync::oneshot;
use tokio_util::sync::{CancellationToken, WaitForCancellationFuture};
use core::future::Future;
use core::task::{Context, Poll};
use futures_test::task::new_count_waker;
/// Cancelling a token wakes a polled waiter exactly once and makes every
/// `cancelled()` future (polled or not) resolve immediately afterwards.
#[test]
fn cancel_token() {
    let (waker, wake_counter) = new_count_waker();
    let token = CancellationToken::new();
    assert!(!token.is_cancelled());
    let wait_fut = token.cancelled();
    pin!(wait_fut);
    assert_eq!(
        Poll::Pending,
        wait_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(wake_counter, 0);
    // Never polled before the cancel, so it registers no waker.
    let wait_fut_2 = token.cancelled();
    pin!(wait_fut_2);
    token.cancel();
    // Only the future that was actually polled gets woken.
    assert_eq!(wake_counter, 1);
    assert!(token.is_cancelled());
    assert_eq!(
        Poll::Ready(()),
        wait_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Ready(()),
        wait_fut_2.as_mut().poll(&mut Context::from_waker(&waker))
    );
}
/// Same as `cancel_token`, but using the owned variant `cancelled_owned()`,
/// which consumes a clone of the token instead of borrowing it.
#[test]
fn cancel_token_owned() {
    let (waker, wake_counter) = new_count_waker();
    let token = CancellationToken::new();
    assert!(!token.is_cancelled());
    let wait_fut = token.clone().cancelled_owned();
    pin!(wait_fut);
    assert_eq!(
        Poll::Pending,
        wait_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(wake_counter, 0);
    // Never polled before the cancel, so it registers no waker.
    let wait_fut_2 = token.clone().cancelled_owned();
    pin!(wait_fut_2);
    token.cancel();
    // Only the previously-polled future is woken.
    assert_eq!(wake_counter, 1);
    assert!(token.is_cancelled());
    assert_eq!(
        Poll::Ready(()),
        wait_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Ready(()),
        wait_fut_2.as_mut().poll(&mut Context::from_waker(&waker))
    );
}
/// Dropping a pinned, still-pending `cancelled_owned()` future must be safe
/// (this mainly exists for sanitizer/Miri runs to catch memory bugs).
#[test]
fn cancel_token_owned_drop_test() {
    let (waker, wake_counter) = new_count_waker();
    let token = CancellationToken::new();
    let future = token.cancelled_owned();
    pin!(future);
    assert_eq!(
        Poll::Pending,
        future.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(wake_counter, 0);
    // let future be dropped while pinned and under pending state to
    // find potential memory related bugs.
}
/// Cancelling a parent token cancels its child token, waking waiters on both.
#[test]
fn cancel_child_token_through_parent() {
    let (waker, wake_counter) = new_count_waker();
    let token = CancellationToken::new();
    let child_token = token.child_token();
    assert!(!child_token.is_cancelled());
    let child_fut = child_token.cancelled();
    pin!(child_fut);
    let parent_fut = token.cancelled();
    pin!(parent_fut);
    assert_eq!(
        Poll::Pending,
        child_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Pending,
        parent_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(wake_counter, 0);
    token.cancel();
    // Both registered waiters (parent and child) are woken.
    assert_eq!(wake_counter, 2);
    assert!(token.is_cancelled());
    assert!(child_token.is_cancelled());
    assert_eq!(
        Poll::Ready(()),
        child_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Ready(()),
        parent_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
}
/// Cancellation still reaches a grandchild token even after the intermediate
/// token in the chain has been dropped.
#[test]
fn cancel_grandchild_token_through_parent_if_child_was_dropped() {
    let (waker, wake_counter) = new_count_waker();
    let token = CancellationToken::new();
    let intermediate_token = token.child_token();
    let child_token = intermediate_token.child_token();
    // Dropping the middle link must not detach the grandchild from the root.
    drop(intermediate_token);
    assert!(!child_token.is_cancelled());
    let child_fut = child_token.cancelled();
    pin!(child_fut);
    let parent_fut = token.cancelled();
    pin!(parent_fut);
    assert_eq!(
        Poll::Pending,
        child_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Pending,
        parent_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(wake_counter, 0);
    token.cancel();
    // Both waiters are woken despite the missing intermediate token.
    assert_eq!(wake_counter, 2);
    assert!(token.is_cancelled());
    assert!(child_token.is_cancelled());
    assert_eq!(
        Poll::Ready(()),
        child_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Ready(()),
        parent_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
}
/// Cancelling a child token must not cancel its parent, and the parent can
/// still spawn and cancel further children afterwards.
#[test]
fn cancel_child_token_without_parent() {
    let (waker, wake_counter) = new_count_waker();
    let token = CancellationToken::new();
    let child_token_1 = token.child_token();
    let child_fut = child_token_1.cancelled();
    pin!(child_fut);
    let parent_fut = token.cancelled();
    pin!(parent_fut);
    assert_eq!(
        Poll::Pending,
        child_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Pending,
        parent_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(wake_counter, 0);
    // Cancel only the child: exactly one waiter wakes; the parent stays live.
    child_token_1.cancel();
    assert_eq!(wake_counter, 1);
    assert!(!token.is_cancelled());
    assert!(child_token_1.is_cancelled());
    assert_eq!(
        Poll::Ready(()),
        child_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Pending,
        parent_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    // A fresh child created after the first child's cancellation starts
    // uncancelled.
    let child_token_2 = token.child_token();
    let child_fut_2 = child_token_2.cancelled();
    pin!(child_fut_2);
    assert_eq!(
        Poll::Pending,
        child_fut_2.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Pending,
        parent_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    // Cancelling the parent wakes both remaining waiters (total: 1 + 2).
    token.cancel();
    assert_eq!(wake_counter, 3);
    assert!(token.is_cancelled());
    assert!(child_token_2.is_cancelled());
    assert_eq!(
        Poll::Ready(()),
        child_fut_2.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Ready(()),
        parent_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
}
/// A child token created from an already-cancelled parent is born cancelled,
/// and the pair can be dropped in either order without issue.
#[test]
fn create_child_token_after_parent_was_cancelled() {
    for drop_child_first in [true, false].iter().cloned() {
        let (waker, wake_counter) = new_count_waker();
        let token = CancellationToken::new();
        token.cancel();
        let child_token = token.child_token();
        assert!(child_token.is_cancelled());
        {
            let child_fut = child_token.cancelled();
            pin!(child_fut);
            let parent_fut = token.cancelled();
            pin!(parent_fut);
            // Both futures resolve on the first poll; no wakeups are needed.
            assert_eq!(
                Poll::Ready(()),
                child_fut.as_mut().poll(&mut Context::from_waker(&waker))
            );
            assert_eq!(
                Poll::Ready(()),
                parent_fut.as_mut().poll(&mut Context::from_waker(&waker))
            );
            assert_eq!(wake_counter, 0);
        }
        // Exercise both drop orders to catch ordering-sensitive bugs.
        if drop_child_first {
            drop(child_token);
            drop(token);
        } else {
            drop(token);
            drop(child_token);
        }
    }
}
/// Dropping several child tokens, front-to-back or back-to-front, must never
/// cancel the parent or corrupt its child bookkeeping.
#[test]
fn drop_multiple_child_tokens() {
    for drop_first_child_first in &[true, false] {
        let token = CancellationToken::new();
        let mut child_tokens = [None, None, None];
        for child in &mut child_tokens {
            *child = Some(token.child_token());
        }
        assert!(!token.is_cancelled());
        assert!(!child_tokens[0].as_ref().unwrap().is_cancelled());
        // Drop children one at a time in the chosen order; the parent must
        // stay uncancelled after every drop.
        for i in 0..child_tokens.len() {
            if *drop_first_child_first {
                child_tokens[i] = None;
            } else {
                child_tokens[child_tokens.len() - 1 - i] = None;
            }
            assert!(!token.is_cancelled());
        }
        drop(token);
    }
}
/// Cancelling a token in the middle of a family tree cancels exactly that
/// token and all of its descendants — never its parent or its siblings.
#[test]
fn cancel_only_all_descendants() {
    // ARRANGE
    //
    // Tree under test:
    //   parent_token
    //   ├── token (cancelled below)
    //   │   ├── child1_token
    //   │   │   ├── grandchild_token
    //   │   │   │   └── great_grandchild_token
    //   │   │   └── grandchild2_token
    //   │   └── child2_token
    //   └── sibling_token
    let (waker, wake_counter) = new_count_waker();
    let parent_token = CancellationToken::new();
    let token = parent_token.child_token();
    let sibling_token = parent_token.child_token();
    let child1_token = token.child_token();
    let child2_token = token.child_token();
    let grandchild_token = child1_token.child_token();
    let grandchild2_token = child1_token.child_token();
    let great_grandchild_token = grandchild_token.child_token();
    assert!(!parent_token.is_cancelled());
    assert!(!token.is_cancelled());
    assert!(!sibling_token.is_cancelled());
    assert!(!child1_token.is_cancelled());
    assert!(!child2_token.is_cancelled());
    assert!(!grandchild_token.is_cancelled());
    assert!(!grandchild2_token.is_cancelled());
    assert!(!great_grandchild_token.is_cancelled());
    // Register a waiter on every node in the tree.
    let parent_fut = parent_token.cancelled();
    let fut = token.cancelled();
    let sibling_fut = sibling_token.cancelled();
    let child1_fut = child1_token.cancelled();
    let child2_fut = child2_token.cancelled();
    let grandchild_fut = grandchild_token.cancelled();
    let grandchild2_fut = grandchild2_token.cancelled();
    let great_grandchild_fut = great_grandchild_token.cancelled();
    pin!(parent_fut);
    pin!(fut);
    pin!(sibling_fut);
    pin!(child1_fut);
    pin!(child2_fut);
    pin!(grandchild_fut);
    pin!(grandchild2_fut);
    pin!(great_grandchild_fut);
    assert_eq!(
        Poll::Pending,
        parent_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Pending,
        fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Pending,
        sibling_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Pending,
        child1_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Pending,
        child2_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Pending,
        grandchild_fut
            .as_mut()
            .poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Pending,
        grandchild2_fut
            .as_mut()
            .poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Pending,
        great_grandchild_fut
            .as_mut()
            .poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(wake_counter, 0);
    // ACT
    token.cancel();
    // ASSERT
    // Exactly the six waiters in `token`'s subtree are woken; the parent and
    // sibling waiters are not.
    assert_eq!(wake_counter, 6);
    assert!(!parent_token.is_cancelled());
    assert!(token.is_cancelled());
    assert!(!sibling_token.is_cancelled());
    assert!(child1_token.is_cancelled());
    assert!(child2_token.is_cancelled());
    assert!(grandchild_token.is_cancelled());
    assert!(grandchild2_token.is_cancelled());
    assert!(great_grandchild_token.is_cancelled());
    assert_eq!(
        Poll::Ready(()),
        fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Ready(()),
        child1_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Ready(()),
        child2_fut.as_mut().poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Ready(()),
        grandchild_fut
            .as_mut()
            .poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Ready(()),
        grandchild2_fut
            .as_mut()
            .poll(&mut Context::from_waker(&waker))
    );
    assert_eq!(
        Poll::Ready(()),
        great_grandchild_fut
            .as_mut()
            .poll(&mut Context::from_waker(&waker))
    );
    // Re-polling ready futures must not trigger further wakeups.
    assert_eq!(wake_counter, 6);
}
#[test]
fn drop_parent_before_child_tokens() {
    let parent = CancellationToken::new();
    let first_child = parent.child_token();
    let second_child = parent.child_token();

    // Dropping the parent must neither cancel nor invalidate its children.
    drop(parent);
    assert!(!first_child.is_cancelled());

    drop(first_child);
    drop(second_child);
}
#[test]
fn derives_send_sync() {
    // Compile-time checks: both the token and its borrowed wait future
    // must be `Send` and `Sync`.
    fn require_send<T: Send>() {}
    fn require_sync<T: Sync>() {}

    require_send::<CancellationToken>();
    require_sync::<CancellationToken>();
    require_send::<WaitForCancellationFuture<'static>>();
    require_sync::<WaitForCancellationFuture<'static>>();
}
/// `run_until_cancelled` yields `None` when the token is cancelled first,
/// `Some(output)` when the wrapped future finishes first, and must not poll
/// the wrapped future at all if the token is already cancelled.
#[test]
fn run_until_cancelled_test() {
    let (waker, _) = new_count_waker();
    // Case 1: cancellation wins over a never-completing future -> None.
    {
        let token = CancellationToken::new();
        let fut = token.run_until_cancelled(std::future::pending::<()>());
        pin!(fut);
        assert_eq!(
            Poll::Pending,
            fut.as_mut().poll(&mut Context::from_waker(&waker))
        );
        token.cancel();
        assert_eq!(
            Poll::Ready(None),
            fut.as_mut().poll(&mut Context::from_waker(&waker))
        );
    }
    // Case 2: the wrapped future completes first -> Some(output).
    {
        let (tx, rx) = oneshot::channel::<()>();
        let token = CancellationToken::new();
        let fut = token.run_until_cancelled(async move {
            rx.await.unwrap();
            42
        });
        pin!(fut);
        assert_eq!(
            Poll::Pending,
            fut.as_mut().poll(&mut Context::from_waker(&waker))
        );
        tx.send(()).unwrap();
        assert_eq!(
            Poll::Ready(Some(42)),
            fut.as_mut().poll(&mut Context::from_waker(&waker))
        );
    }
    // Do not poll the future when token is already cancelled.
    {
        let token = CancellationToken::new();
        let fut = token.run_until_cancelled(async { panic!("fut polled after cancellation") });
        pin!(fut);
        token.cancel();
        assert_eq!(
            Poll::Ready(None),
            fut.as_mut().poll(&mut Context::from_waker(&waker))
        );
    }
}
/// Same contract as `run_until_cancelled`, via the owned variant
/// `run_until_cancelled_owned`, which consumes the token (useful when the
/// token must be moved, e.g. into a closure).
#[test]
fn run_until_cancelled_owned_test() {
    let (waker, _) = new_count_waker();
    // Case 1: cancellation (through a clone) wins -> None.
    {
        let token = CancellationToken::new();
        let to_cancel = token.clone();
        // The closure takes the token by value to prove the owned API works.
        let takes_ownership = move |token: CancellationToken| {
            token.run_until_cancelled_owned(std::future::pending::<()>())
        };
        let fut = takes_ownership(token);
        pin!(fut);
        assert_eq!(
            Poll::Pending,
            fut.as_mut().poll(&mut Context::from_waker(&waker))
        );
        to_cancel.cancel();
        assert_eq!(
            Poll::Ready(None),
            fut.as_mut().poll(&mut Context::from_waker(&waker))
        );
    }
    // Case 2: the wrapped future completes first -> Some(output).
    {
        let (tx, rx) = oneshot::channel::<()>();
        let token = CancellationToken::new();
        let takes_ownership = move |token: CancellationToken, rx: oneshot::Receiver<()>| {
            token.run_until_cancelled_owned(async move {
                rx.await.unwrap();
                42
            })
        };
        let fut = takes_ownership(token, rx);
        pin!(fut);
        assert_eq!(
            Poll::Pending,
            fut.as_mut().poll(&mut Context::from_waker(&waker))
        );
        tx.send(()).unwrap();
        assert_eq!(
            Poll::Ready(Some(42)),
            fut.as_mut().poll(&mut Context::from_waker(&waker))
        );
    }
}

671
vendor/tokio-util/tests/task_join_map.rs vendored Normal file
View File

@@ -0,0 +1,671 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "join-map")]
use std::panic::AssertUnwindSafe;
use futures::future::{pending, FutureExt};
use tokio::sync::oneshot;
use tokio::task::LocalSet;
use tokio::time::Duration;
use tokio_util::task::JoinMap;
/// Builds a fresh current-thread runtime for tests that need to control the
/// runtime's lifetime explicitly (e.g. dropping it while tasks are pending).
fn rt() -> tokio::runtime::Runtime {
    tokio::runtime::Builder::new_current_thread().build().unwrap()
}
// Spawn `n` tasks that each resolve to their own index, either directly on
// the current local context or on the provided `LocalSet`.
fn spawn_index_tasks(map: &mut JoinMap<usize, usize>, n: usize, on: Option<&LocalSet>) {
    for i in 0..n {
        // `Rc` makes the future `!Send`, forcing the local-spawn path.
        let idx = std::rc::Rc::new(i);
        let fut = async move { *idx };
        if let Some(local) = on {
            map.spawn_local_on(i, fut, local);
        } else {
            map.spawn_local(i, fut);
        }
    }
}
// Spawn `n` never-completing tasks, each owning a `oneshot::Sender`.
// Aborting a task drops its sender, which the caller observes through the
// matching `Receiver` pushed into `receivers`.
fn spawn_pending_tasks(
    map: &mut JoinMap<usize, ()>,
    receivers: &mut Vec<oneshot::Receiver<()>>,
    n: usize,
    on: Option<&LocalSet>,
) {
    for i in 0..n {
        let (tx, rx) = oneshot::channel::<()>();
        receivers.push(rx);
        // The `drop(tx)` is unreachable; `tx` is only dropped on abort.
        let fut = async move {
            pending::<()>().await;
            drop(tx);
        };
        if let Some(local) = on {
            map.spawn_local_on(i, fut, local);
        } else {
            map.spawn_local(i, fut);
        }
    }
}
/// Await every task in the [`JoinMap`] and assert each task returned its own
/// key, leaving the map empty.
async fn drain_joinmap_and_assert(mut map: JoinMap<usize, usize>, n: usize) {
    let mut observed = vec![false; n];
    while let Some((key, result)) = map.join_next().await {
        let value = result.expect("task panicked");
        assert_eq!(key, value);
        observed[value] = true;
    }
    // Every index 0..n must have been joined exactly once.
    assert!(observed.iter().all(|&seen| seen));
    assert!(map.is_empty());
}
// Await every receiver; each must yield `Err` because its sender lived inside
// a task that was aborted (and therefore dropped).
async fn await_receivers_and_assert(receivers: Vec<oneshot::Receiver<()>>) {
    for receiver in receivers {
        let outcome = receiver.await;
        assert!(
            outcome.is_err(),
            "task should have been aborted and sender dropped"
        );
    }
}
/// End-to-end exercise of spawn / `detach_all` / `join_next` with tasks that
/// complete in timer order (virtual time: the test runtime starts paused).
#[tokio::test(start_paused = true)]
async fn test_with_sleep() {
    let mut map = JoinMap::new();
    for i in 0..10 {
        map.spawn(i, async move { i });
        assert_eq!(map.len(), 1 + i);
    }
    // Detaching forgets all tasks without aborting them; the map is empty.
    map.detach_all();
    assert_eq!(map.len(), 0);
    assert!(map.join_next().await.is_none());
    for i in 0..10 {
        map.spawn(i, async move {
            tokio::time::sleep(Duration::from_secs(i as u64)).await;
            i
        });
        assert_eq!(map.len(), 1 + i);
    }
    let mut seen = [false; 10];
    while let Some((k, res)) = map.join_next().await {
        seen[k] = true;
        assert_eq!(res.expect("task should have completed successfully"), k);
    }
    for was_seen in &seen {
        assert!(was_seen);
    }
    assert!(map.join_next().await.is_none());
    // Do it again.
    for i in 0..10 {
        map.spawn(i, async move {
            tokio::time::sleep(Duration::from_secs(i as u64)).await;
            i
        });
    }
    let mut seen = [false; 10];
    while let Some((k, res)) = map.join_next().await {
        seen[k] = true;
        assert_eq!(res.expect("task should have completed successfully"), k);
    }
    for was_seen in &seen {
        assert!(was_seen);
    }
    assert!(map.join_next().await.is_none());
}
#[tokio::test]
async fn test_abort_on_drop() {
    let mut map = JoinMap::new();
    let mut receivers = Vec::new();

    for i in 0..16 {
        let (tx, rx) = oneshot::channel::<()>();
        receivers.push(rx);
        map.spawn(i, async {
            // This task will never complete on its own; `tx` is dropped
            // only when the task is aborted.
            futures::future::pending::<()>().await;
            drop(tx);
        });
    }

    // Dropping the map aborts every tracked task.
    drop(map);

    for rx in receivers {
        // The aborted task dropped its sender, so the receive fails.
        assert!(rx.await.is_err());
    }
}
#[tokio::test]
async fn alternating() {
    let mut map = JoinMap::new();
    assert_eq!(map.len(), 0);

    map.spawn(1, async {});
    assert_eq!(map.len(), 1);
    map.spawn(2, async {});
    assert_eq!(map.len(), 2);

    // Keep exactly two tasks in flight: reap one, spawn one, 16 times.
    for i in 0..16 {
        let (_, result) = map.join_next().await.unwrap();
        assert!(result.is_ok());
        assert_eq!(map.len(), 1);

        map.spawn(i, async {});
        assert_eq!(map.len(), 2);
    }
}
#[tokio::test]
async fn test_keys() {
    use std::collections::HashSet;

    let mut map = JoinMap::new();
    assert_eq!(map.len(), 0);
    map.spawn(1, async {});
    assert_eq!(map.len(), 1);
    map.spawn(2, async {});
    assert_eq!(map.len(), 2);

    // While tasks are tracked, both keys are visible.
    let keys: HashSet<&u32> = map.keys().collect();
    assert!(keys.contains(&1));
    assert!(keys.contains(&2));

    let _ = map.join_next().await.unwrap();
    let _ = map.join_next().await.unwrap();
    assert_eq!(map.len(), 0);

    // Joined tasks are removed from the map, so no keys remain.
    let keys: HashSet<&u32> = map.keys().collect();
    assert!(keys.is_empty());
}
/// Aborting individual tasks by key yields cancelled `JoinError`s for exactly
/// those keys, while the rest complete normally.
#[tokio::test(start_paused = true)]
async fn abort_by_key() {
    let mut map = JoinMap::new();
    let mut num_canceled = 0;
    let mut num_completed = 0;
    for i in 0..16 {
        map.spawn(i, async move {
            tokio::time::sleep(Duration::from_secs(i as u64)).await;
        });
    }
    for i in 0..16 {
        if i % 2 != 0 {
            // abort odd-numbered tasks.
            map.abort(&i);
        }
    }
    while let Some((key, res)) = map.join_next().await {
        match res {
            Ok(()) => {
                // Even keys were never aborted, so they complete.
                num_completed += 1;
                assert_eq!(key % 2, 0);
                assert!(!map.contains_key(&key));
            }
            Err(e) => {
                // Odd keys were aborted and surface as cancelled errors.
                num_canceled += 1;
                assert!(e.is_cancelled());
                assert_ne!(key % 2, 0);
                assert!(!map.contains_key(&key));
            }
        }
    }
    assert_eq!(num_canceled, 8);
    assert_eq!(num_completed, 8);
}
/// `abort_matching` aborts exactly the tasks whose keys satisfy the predicate;
/// all others run to completion.
#[tokio::test(start_paused = true)]
async fn abort_by_predicate() {
    let mut map = JoinMap::new();
    let mut num_canceled = 0;
    let mut num_completed = 0;
    for i in 0..16 {
        map.spawn(i, async move {
            tokio::time::sleep(Duration::from_secs(i as u64)).await;
        });
    }
    // abort odd-numbered tasks.
    map.abort_matching(|key| key % 2 != 0);
    while let Some((key, res)) = map.join_next().await {
        match res {
            Ok(()) => {
                // Even keys did not match the predicate and complete normally.
                num_completed += 1;
                assert_eq!(key % 2, 0);
                assert!(!map.contains_key(&key));
            }
            Err(e) => {
                // Odd keys matched and were cancelled.
                num_canceled += 1;
                assert!(e.is_cancelled());
                assert_ne!(key % 2, 0);
                assert!(!map.contains_key(&key));
            }
        }
    }
    assert_eq!(num_canceled, 8);
    assert_eq!(num_completed, 8);
}
/// A task spawned onto a runtime that is subsequently dropped is reported as
/// cancelled when joined (from a different runtime).
#[test]
fn runtime_gone() {
    let mut map = JoinMap::new();
    {
        let rt = rt();
        map.spawn_on("key", async { 1 }, rt.handle());
        // Dropping the runtime aborts the task before it can complete.
        drop(rt);
    }
    let (key, res) = rt().block_on(map.join_next()).unwrap();
    assert_eq!(key, "key");
    assert!(res.unwrap_err().is_cancelled());
}
// This ensures that `join_next` works correctly when the coop budget is
// exhausted.
#[tokio::test(flavor = "current_thread")]
async fn join_map_coop() {
    // Large enough to trigger coop.
    const TASK_NUM: u32 = 1000;
    static SEM: tokio::sync::Semaphore = tokio::sync::Semaphore::const_new(0);
    let mut map = JoinMap::new();
    for i in 0..TASK_NUM {
        map.spawn(i, async move {
            // Each completed task contributes one permit.
            SEM.add_permits(1);
            i
        });
    }
    // Wait for all tasks to complete.
    //
    // Since this is a `current_thread` runtime, there's no race condition
    // between the last permit being added and the task completing.
    let _ = SEM.acquire_many(TASK_NUM).await.unwrap();
    let mut count = 0;
    let mut coop_count = 0;
    loop {
        match map.join_next().now_or_never() {
            Some(Some((key, Ok(i)))) => assert_eq!(key, i),
            Some(Some((key, Err(err)))) => panic!("failed[{key}]: {err}"),
            // `None` here means `join_next` returned Pending despite all
            // tasks being done — i.e. the coop budget was exhausted.
            None => {
                coop_count += 1;
                tokio::task::yield_now().await;
                continue;
            }
            Some(None) => break,
        }
        count += 1;
    }
    // Coop must have kicked in at least once, and no result may be lost.
    assert!(coop_count >= 1);
    assert_eq!(count, TASK_NUM);
}
/// `abort_all` keeps aborted (and even already-finished) tasks joinable:
/// every key is still observed exactly once via `join_next`.
#[tokio::test(start_paused = true)]
async fn abort_all() {
    let mut map: JoinMap<usize, ()> = JoinMap::new();
    for i in 0..5 {
        map.spawn(i, futures::future::pending());
    }
    for i in 5..10 {
        map.spawn(i, async {
            tokio::time::sleep(Duration::from_secs(1)).await;
        });
    }
    // The join map will now have 5 pending tasks and 5 ready tasks.
    tokio::time::sleep(Duration::from_secs(2)).await;
    map.abort_all();
    // Aborting does not remove entries; they must still be joined.
    assert_eq!(map.len(), 10);
    let mut count = 0;
    let mut seen = [false; 10];
    while let Some((k, res)) = map.join_next().await {
        seen[k] = true;
        // Tasks that finished before the abort yield Ok; the rest are
        // cancelled.
        if let Err(err) = res {
            assert!(err.is_cancelled());
        }
        count += 1;
    }
    assert_eq!(count, 10);
    assert_eq!(map.len(), 0);
    for was_seen in &seen {
        assert!(was_seen);
    }
}
#[tokio::test]
async fn duplicate_keys() {
    let mut map = JoinMap::new();

    // Spawning under an existing key replaces (and aborts) the earlier task,
    // so only one entry remains.
    map.spawn(1, async { 1 });
    map.spawn(1, async { 2 });
    assert_eq!(map.len(), 1);

    // The surviving task is the second one.
    let (key, result) = map.join_next().await.unwrap();
    assert_eq!(key, 1);
    assert_eq!(result.unwrap(), 2);

    assert!(map.join_next().await.is_none());
}
/// When a key is re-used, the first task's (possibly already-available) result
/// must never leak out of `join_next`; only the replacement's result appears.
#[tokio::test]
async fn duplicate_keys2() {
    let (send, recv) = oneshot::channel::<()>();
    let mut map = JoinMap::new();
    map.spawn(1, async { 1 });
    // The replacement blocks on the channel so we can observe the interim
    // state where no completion should be visible.
    map.spawn(1, async {
        recv.await.unwrap();
        2
    });
    assert_eq!(map.len(), 1);
    // `biased` makes the select deterministic: `join_next` is tried first and
    // must be pending (the first task's result was discarded on replacement).
    tokio::select! {
        biased;
        res = map.join_next() => match res {
            Some((_key, res)) => panic!("Task {res:?} exited."),
            None => panic!("Phantom task completion."),
        },
        () = tokio::task::yield_now() => {},
    }
    send.send(()).unwrap();
    let (key, res) = map.join_next().await.unwrap();
    assert_eq!(key, 1);
    assert_eq!(res.unwrap(), 2);
    assert!(map.join_next().await.is_none());
}
/// Replacing an entry drops the old key and aborts the old task; the drop of
/// the displaced key happens inside the `spawn` call (observed here via a
/// panicking `Drop` impl).
#[cfg_attr(not(panic = "unwind"), ignore)]
#[tokio::test]
async fn duplicate_keys_drop() {
    #[derive(Hash, Debug, PartialEq, Eq)]
    struct Key;
    impl Drop for Key {
        fn drop(&mut self) {
            panic!("drop called for key");
        }
    }
    let (send, recv) = oneshot::channel::<()>();
    let mut map = JoinMap::new();
    map.spawn(Key, async { recv.await.unwrap() });
    // replace the task, force it to drop the key and abort the task
    // we should expect it to panic when dropping the key.
    let _ = std::panic::catch_unwind(AssertUnwindSafe(|| map.spawn(Key, async {}))).unwrap_err();
    // don't panic when this key drops.
    let (key, _) = map.join_next().await.unwrap();
    std::mem::forget(key);
    // original task should have been aborted, so the sender should be dangling.
    assert!(send.is_closed());
    assert!(map.join_next().await.is_none());
}
/// Tests for `JoinMap::spawn_local`: it requires a local execution context
/// (a `LocalSet` or, with `tokio_unstable`, a local runtime) and panics
/// otherwise.
mod spawn_local {
    use super::*;

    /// No runtime at all: `spawn_local` must panic.
    #[test]
    #[should_panic(
        expected = "`spawn_local` called from outside of a `task::LocalSet` or `runtime::LocalRuntime`"
    )]
    fn panic_outside_any_runtime() {
        let mut map = JoinMap::new();
        map.spawn_local((), async {});
    }

    /// A multi-thread runtime is not a local context: `spawn_local` must panic.
    #[tokio::test(flavor = "multi_thread")]
    #[should_panic(
        expected = "`spawn_local` called from outside of a `task::LocalSet` or `runtime::LocalRuntime`"
    )]
    async fn panic_in_multi_thread_runtime() {
        let mut map = JoinMap::new();
        map.spawn_local((), async {});
    }

    #[cfg(tokio_unstable)]
    mod local_runtime {
        use super::*;

        /// Spawn several tasks, and then join all tasks.
        #[tokio::test(flavor = "local")]
        async fn spawn_then_join_next() {
            const N: usize = 8;
            let mut map = JoinMap::new();
            spawn_index_tasks(&mut map, N, None);
            // Nothing can have completed yet without yielding to the runtime.
            assert!(map.join_next().now_or_never().is_none());
            drain_joinmap_and_assert(map, N).await;
        }

        /// Spawn several pending-forever tasks, and then shutdown the [`JoinMap`].
        #[tokio::test(flavor = "local")]
        async fn spawn_then_shutdown() {
            const N: usize = 8;
            let mut map = JoinMap::new();
            let mut receivers = Vec::new();
            spawn_pending_tasks(&mut map, &mut receivers, N, None);
            assert!(map.join_next().now_or_never().is_none());
            map.shutdown().await;
            assert!(map.is_empty());
            await_receivers_and_assert(receivers).await;
        }

        /// Spawn several pending-forever tasks, and then drop the [`JoinMap`].
        #[tokio::test(flavor = "local")]
        async fn spawn_then_drop() {
            const N: usize = 8;
            let mut map = JoinMap::new();
            let mut receivers = Vec::new();
            spawn_pending_tasks(&mut map, &mut receivers, N, None);
            assert!(map.join_next().now_or_never().is_none());
            drop(map);
            await_receivers_and_assert(receivers).await;
        }
    }

    mod local_set {
        use super::*;

        /// Spawn several tasks, and then join all tasks.
        #[tokio::test(flavor = "current_thread")]
        async fn spawn_then_join_next() {
            const N: usize = 8;
            let local = LocalSet::new();
            local
                .run_until(async move {
                    let mut map = JoinMap::new();
                    spawn_index_tasks(&mut map, N, None);
                    drain_joinmap_and_assert(map, N).await;
                })
                .await;
        }

        /// Spawn several pending-forever tasks, and then shutdown the [`JoinMap`].
        #[tokio::test(flavor = "current_thread")]
        async fn spawn_then_shutdown() {
            const N: usize = 8;
            let local = LocalSet::new();
            local
                .run_until(async {
                    let mut map = JoinMap::new();
                    let mut receivers = Vec::new();
                    spawn_pending_tasks(&mut map, &mut receivers, N, None);
                    assert!(map.join_next().now_or_never().is_none());
                    map.shutdown().await;
                    assert!(map.is_empty());
                    await_receivers_and_assert(receivers).await;
                })
                .await;
        }

        /// Spawn several pending-forever tasks, and then drop the [`JoinMap`].
        #[tokio::test(flavor = "current_thread")]
        async fn spawn_then_drop() {
            const N: usize = 8;
            let local = LocalSet::new();
            local
                .run_until(async {
                    let mut map = JoinMap::new();
                    let mut receivers = Vec::new();
                    spawn_pending_tasks(&mut map, &mut receivers, N, None);
                    assert!(map.join_next().now_or_never().is_none());
                    drop(map);
                    await_receivers_and_assert(receivers).await;
                })
                .await;
        }
    }
}
/// Tests for `JoinMap::spawn_local_on`: tasks are bound to an explicit
/// `LocalSet`, so they only make progress while that set is being driven.
mod spawn_local_on {
    use super::*;

    #[cfg(tokio_unstable)]
    mod local_runtime {
        use super::*;

        /// Spawn several tasks, and then join all tasks.
        #[tokio::test(flavor = "local")]
        async fn spawn_then_join_next() {
            const N: usize = 8;
            let local = LocalSet::new();
            let mut map = JoinMap::new();
            spawn_index_tasks(&mut map, N, Some(&local));
            // The LocalSet has not been driven yet, so nothing can be ready.
            assert!(map.join_next().now_or_never().is_none());
            local
                .run_until(async move {
                    drain_joinmap_and_assert(map, N).await;
                })
                .await;
        }
    }

    mod local_set {
        use super::*;

        /// Spawn several tasks, and then join all tasks.
        #[tokio::test(flavor = "current_thread")]
        async fn spawn_then_join_next() {
            const N: usize = 8;
            let local = LocalSet::new();
            let mut pending_map = JoinMap::new();
            spawn_index_tasks(&mut pending_map, N, Some(&local));
            // The LocalSet has not been driven yet, so nothing can be ready.
            assert!(pending_map.join_next().now_or_never().is_none());
            local
                .run_until(async move {
                    drain_joinmap_and_assert(pending_map, N).await;
                })
                .await;
        }

        /// Spawn several pending-forever tasks, and then shutdown the [`JoinMap`].
        #[tokio::test(flavor = "current_thread")]
        async fn spawn_then_shutdown() {
            const N: usize = 8;
            let local = LocalSet::new();
            let mut map = JoinMap::new();
            let mut receivers = Vec::new();
            spawn_pending_tasks(&mut map, &mut receivers, N, Some(&local));
            assert!(map.join_next().now_or_never().is_none());
            local
                .run_until(async move {
                    map.shutdown().await;
                    assert!(map.is_empty());
                    await_receivers_and_assert(receivers).await;
                })
                .await;
        }

        /// Spawn several pending-forever tasks and then drop the [`JoinMap`]
        /// before the `LocalSet` is driven and while the `LocalSet` is already driven.
        #[tokio::test(flavor = "current_thread")]
        async fn spawn_then_drop() {
            const N: usize = 8;
            // Case 1: drop the map before the LocalSet is ever driven.
            {
                let local = LocalSet::new();
                let mut map = JoinMap::new();
                let mut receivers = Vec::new();
                spawn_pending_tasks(&mut map, &mut receivers, N, Some(&local));
                assert!(map.join_next().now_or_never().is_none());
                drop(map);
                local
                    .run_until(async move { await_receivers_and_assert(receivers).await })
                    .await;
            }
            // Case 2: drop the map while the LocalSet is being driven.
            {
                let local = LocalSet::new();
                let mut map = JoinMap::new();
                let mut receivers = Vec::new();
                spawn_pending_tasks(&mut map, &mut receivers, N, Some(&local));
                assert!(map.join_next().now_or_never().is_none());
                local
                    .run_until(async move {
                        drop(map);
                        await_receivers_and_assert(receivers).await;
                    })
                    .await;
            }
        }
    }
}

View File

@@ -0,0 +1,379 @@
#![warn(rust_2018_idioms)]
use tokio::sync::oneshot;
use tokio::task::yield_now;
use tokio::time::Duration;
use tokio_test::{assert_pending, assert_ready, task};
use tokio_util::task::JoinQueue;
/// `join_next` must not report readiness (or wake the waiter) until a task
/// actually completes.
#[tokio::test]
async fn test_join_queue_no_spurious_wakeups() {
    let (tx, rx) = oneshot::channel::<()>();
    let mut join_queue = JoinQueue::new();
    join_queue.spawn(async move {
        let _ = rx.await;
        42
    });
    let mut join_next = task::spawn(join_queue.join_next());
    // The task is blocked on the channel: pending, and no wakeup yet.
    assert_pending!(join_next.poll());
    assert!(!join_next.is_woken());
    let _ = tx.send(());
    yield_now().await;
    // Completing the task wakes the waiter and yields its output.
    assert!(join_next.is_woken());
    let output = assert_ready!(join_next.poll());
    assert_eq!(output.unwrap().unwrap(), 42);
}
/// Dropping a `JoinQueue` must abort every task it still owns.
#[tokio::test]
async fn test_join_queue_abort_on_drop() {
    let mut queue = JoinQueue::new();
    // One receiver per spawned task; the matching sender only goes away
    // when the task itself is torn down.
    let receivers: Vec<_> = (0..16)
        .map(|_| {
            let (send, recv) = oneshot::channel::<()>();
            queue.spawn(async move {
                // This task will never complete on its own.
                futures::future::pending::<()>().await;
                drop(send);
            });
            recv
        })
        .collect();
    drop(queue);
    for recv in receivers {
        // The task is aborted soon and we will receive an error.
        assert!(recv.await.is_err());
    }
}
/// Interleaved `join_next` / `spawn` calls keep `len` accurate.
#[tokio::test]
async fn test_join_queue_alternating() {
    let mut queue = JoinQueue::new();
    assert_eq!(queue.len(), 0);
    for expected in 1..=2 {
        queue.spawn(async {});
        assert_eq!(queue.len(), expected);
    }
    for _ in 0..16 {
        // Retire one finished task...
        let res = queue.join_next().await.unwrap();
        assert!(res.is_ok());
        assert_eq!(queue.len(), 1);
        // ...and immediately replace it.
        queue.spawn(async {});
        assert_eq!(queue.len(), 2);
    }
}
/// `abort_all` cancels tasks that are still running while leaving
/// already-finished tasks joinable with their results.
#[tokio::test(start_paused = true)]
async fn test_join_queue_abort_all() {
    let mut queue: JoinQueue<()> = JoinQueue::new();
    for _ in 0..5 {
        queue.spawn(futures::future::pending());
    }
    for _ in 0..5 {
        queue.spawn(async {
            tokio::time::sleep(Duration::from_secs(1)).await;
        });
    }
    // The join queue will now have 5 pending tasks and 5 ready tasks.
    tokio::time::sleep(Duration::from_secs(2)).await;
    queue.abort_all();
    assert_eq!(queue.len(), 10);
    let mut count = 0;
    while let Some(res) = queue.join_next().await {
        if count < 5 {
            // The first 5 spawned tasks were still pending and got cancelled.
            assert!(res.unwrap_err().is_cancelled());
        } else {
            // The sleepers finished before `abort_all` and keep their results.
            assert!(res.is_ok());
        }
        count += 1;
    }
    assert_eq!(count, 10);
    assert!(queue.is_empty());
}
/// `join_all` returns results in spawn (FIFO) order even when the tasks
/// complete in the reverse order.
#[tokio::test]
async fn test_join_queue_join_all() {
    let mut queue = JoinQueue::new();
    let senders: Vec<_> = (0..5)
        .map(|i| {
            let (tx, rx) = oneshot::channel::<()>();
            queue.spawn(async move {
                let _ = rx.await;
                i
            });
            tx
        })
        .collect();
    // Fire completions back-to-front; the queue must still report FIFO order.
    for tx in senders.into_iter().rev() {
        let _ = tx.send(());
    }
    assert_eq!(queue.join_all().await, vec![0, 1, 2, 3, 4]);
}
/// `shutdown` aborts and drains every task, closing their channels.
#[tokio::test]
async fn test_join_queue_shutdown() {
    let mut queue = JoinQueue::new();
    let senders: Vec<_> = (0..5)
        .map(|_| {
            let (tx, rx) = oneshot::channel::<()>();
            queue.spawn(async move {
                let _ = rx.await;
            });
            tx
        })
        .collect();
    queue.shutdown().await;
    assert!(queue.is_empty());
    // Every task was torn down, so its receiver end is gone.
    for tx in senders {
        assert!(tx.is_closed());
    }
}
/// Manually aborted tasks surface as cancelled errors while the rest
/// complete with their results in FIFO order.
#[tokio::test]
async fn test_join_queue_with_manual_abort() {
    let mut queue = JoinQueue::new();
    let mut num_canceled = 0;
    let mut num_completed = 0;
    let mut senders = Vec::new();
    for i in 0..16 {
        let (tx, rx) = oneshot::channel::<()>();
        senders.push(tx);
        let abort = queue.spawn(async move {
            let _ = rx.await;
            i
        });
        if i % 2 != 0 {
            // abort odd-numbered tasks.
            abort.abort();
        }
    }
    // Complete all tasks in reverse order
    while let Some(tx) = senders.pop() {
        let _ = tx.send(());
    }
    while let Some(res) = queue.join_next().await {
        match res {
            Ok(res) => {
                // Even-numbered tasks complete; FIFO order means the k-th
                // success carries the value 2 * k.
                assert_eq!(res, num_completed * 2);
                num_completed += 1;
            }
            Err(e) => {
                assert!(e.is_cancelled());
                num_canceled += 1;
            }
        }
    }
    assert_eq!(num_canceled, 8);
    assert_eq!(num_completed, 8);
}
/// `join_next_with_id` yields results tagged with task IDs in spawn order.
#[tokio::test]
async fn test_join_queue_join_next_with_id() {
    const TASK_NUM: u32 = 1000;
    let (send, recv) = tokio::sync::watch::channel(());
    let mut queue = JoinQueue::new();
    let mut spawned = Vec::with_capacity(TASK_NUM as usize);
    for _ in 0..TASK_NUM {
        let mut recv = recv.clone();
        let handle = queue.spawn(async move { recv.changed().await.unwrap() });
        spawned.push(handle.id());
    }
    drop(recv);
    // Release all tasks at once, then wait for every clone of the receiver
    // (held by the tasks) to be dropped.
    send.send_replace(());
    send.closed().await;
    let mut count = 0;
    let mut joined = Vec::with_capacity(TASK_NUM as usize);
    while let Some(res) = queue.join_next_with_id().await {
        match res {
            Ok((id, ())) => {
                count += 1;
                joined.push(id);
            }
            Err(err) => panic!("failed: {err}"),
        }
    }
    assert_eq!(count, TASK_NUM);
    // IDs come back in the same order the tasks were spawned.
    assert_eq!(joined, spawned);
}
/// `try_join_next` only yields tasks that have already finished, and
/// completed tasks are handed back in spawn (FIFO) order.
#[tokio::test]
async fn test_join_queue_try_join_next() {
    let mut queue = JoinQueue::new();
    let (tx1, rx1) = oneshot::channel::<()>();
    queue.spawn(async {
        let _ = rx1.await;
    });
    let (tx2, rx2) = oneshot::channel::<()>();
    queue.spawn(async {
        let _ = rx2.await;
    });
    let (tx3, rx3) = oneshot::channel::<()>();
    queue.spawn(async {
        let _ = rx3.await;
    });
    // This function also checks that calling `queue.try_join_next()` repeatedly when
    // no task is ready is idempotent, i.e. that it does not change the queue state.
    fn check_try_join_next_is_noop(queue: &mut JoinQueue<()>) {
        let len = queue.len();
        for _ in 0..5 {
            assert!(queue.try_join_next().is_none());
            assert_eq!(queue.len(), len);
        }
    }
    assert_eq!(queue.len(), 3);
    check_try_join_next_is_noop(&mut queue);
    tx1.send(()).unwrap();
    tokio::task::yield_now().await;
    assert_eq!(queue.len(), 3);
    assert!(queue.try_join_next().is_some());
    assert_eq!(queue.len(), 2);
    check_try_join_next_is_noop(&mut queue);
    // Complete task 3 out of order: it is not at the head of the queue, so
    // `try_join_next` must not yield it yet.
    tx3.send(()).unwrap();
    tokio::task::yield_now().await;
    assert_eq!(queue.len(), 2);
    check_try_join_next_is_noop(&mut queue);
    tx2.send(()).unwrap();
    tokio::task::yield_now().await;
    assert_eq!(queue.len(), 2);
    // Now both remaining tasks are complete and drain in FIFO order.
    assert!(queue.try_join_next().is_some());
    assert_eq!(queue.len(), 1);
    assert!(queue.try_join_next().is_some());
    assert!(queue.is_empty());
    check_try_join_next_is_noop(&mut queue);
}
/// `try_join_next` must not be throttled by tokio's cooperative scheduling
/// budget: once all tasks have finished, draining never returns `None`.
#[tokio::test]
async fn test_join_queue_try_join_next_disabled_coop() {
    // This number is large enough to trigger coop. Without using `tokio::task::coop::unconstrained`
    // inside `try_join_next` this test fails on `assert!(coop_count == 0)`.
    const TASK_NUM: u32 = 1000;
    let sem: std::sync::Arc<tokio::sync::Semaphore> =
        std::sync::Arc::new(tokio::sync::Semaphore::new(0));
    let mut queue = JoinQueue::new();
    for _ in 0..TASK_NUM {
        let sem = sem.clone();
        queue.spawn(async move {
            sem.add_permits(1);
        });
    }
    // Each task adds one permit, so acquiring TASK_NUM permits proves that
    // every task has run to completion.
    let _ = sem.acquire_many(TASK_NUM).await.unwrap();
    let mut count = 0;
    let mut coop_count = 0;
    while !queue.is_empty() {
        match queue.try_join_next() {
            Some(Ok(())) => count += 1,
            Some(Err(err)) => panic!("failed: {err}"),
            None => {
                coop_count += 1;
                tokio::task::yield_now().await;
            }
        }
    }
    assert_eq!(coop_count, 0);
    assert_eq!(count, TASK_NUM);
}
/// Like `test_join_queue_try_join_next_disabled_coop`, but for the
/// ID-returning variant: draining must not be throttled by the coop budget,
/// and IDs come back in spawn order.
#[tokio::test]
async fn test_join_queue_try_join_next_with_id_disabled_coop() {
    // Note that this number is large enough to trigger coop as in
    // `test_join_queue_try_join_next_coop` test. Without using
    // `tokio::task::coop::unconstrained` inside `try_join_next_with_id`
    // this test fails on `assert_eq!(count, TASK_NUM)`.
    const TASK_NUM: u32 = 1000;
    let (send, recv) = tokio::sync::watch::channel(());
    let mut queue = JoinQueue::new();
    let mut spawned = Vec::with_capacity(TASK_NUM as usize);
    for _ in 0..TASK_NUM {
        let mut recv = recv.clone();
        let handle = queue.spawn(async move { recv.changed().await.unwrap() });
        spawned.push(handle.id());
    }
    drop(recv);
    // No task has finished yet.
    assert!(queue.try_join_next_with_id().is_none());
    // Release all tasks, then wait until every task dropped its receiver.
    send.send_replace(());
    send.closed().await;
    let mut count = 0;
    let mut coop_count = 0;
    let mut joined = Vec::with_capacity(TASK_NUM as usize);
    while !queue.is_empty() {
        match queue.try_join_next_with_id() {
            Some(Ok((id, ()))) => {
                count += 1;
                joined.push(id);
            }
            Some(Err(err)) => panic!("failed: {err}"),
            None => {
                coop_count += 1;
                tokio::task::yield_now().await;
            }
        }
    }
    assert_eq!(coop_count, 0);
    assert_eq!(count, TASK_NUM);
    assert_eq!(joined, spawned);
}
/// `spawn_local` outside of any runtime must panic with tokio's standard
/// message.
#[test]
#[should_panic(
    expected = "`spawn_local` called from outside of a `task::LocalSet` or `runtime::LocalRuntime`"
)]
fn spawn_local_panic_outside_any_runtime() {
    JoinQueue::new().spawn_local(async {});
}
/// `spawn_local` from a multi-thread runtime (no `LocalSet`) must panic with
/// tokio's standard message.
#[tokio::test(flavor = "multi_thread")]
#[should_panic(
    expected = "`spawn_local` called from outside of a `task::LocalSet` or `runtime::LocalRuntime`"
)]
async fn spawn_local_panic_in_multi_thread_runtime() {
    JoinQueue::new().spawn_local(async {});
}

380
vendor/tokio-util/tests/task_tracker.rs vendored Normal file
View File

@@ -0,0 +1,380 @@
#![warn(rust_2018_idioms)]
use futures::future::pending;
#[cfg(tokio_unstable)]
use std::rc::Rc;
use tokio::sync::mpsc;
use tokio::task::LocalSet;
use tokio_test::{assert_pending, assert_ready, task};
use tokio_util::task::TaskTracker;
/// Closing and reopening a `TaskTracker` is idempotent and never changes
/// its (empty) length.
#[test]
fn open_close() {
    let tracker = TaskTracker::new();

    let assert_empty = |t: &TaskTracker| {
        assert!(t.is_empty());
        assert_eq!(t.len(), 0);
    };

    assert!(!tracker.is_closed());
    assert_empty(&tracker);

    tracker.close();
    assert!(tracker.is_closed());
    assert_empty(&tracker);

    // Reopening twice in a row behaves like reopening once.
    tracker.reopen();
    assert!(!tracker.is_closed());
    tracker.reopen();
    assert!(!tracker.is_closed());
    assert_empty(&tracker);

    // Closing twice in a row behaves like closing once.
    tracker.close();
    assert!(tracker.is_closed());
    tracker.close();
    assert!(tracker.is_closed());
    assert_empty(&tracker);
}
/// Each outstanding token contributes exactly one to `len`.
#[test]
fn token_len() {
    let tracker = TaskTracker::new();
    let tokens: Vec<_> = (0..10)
        .map(|i| {
            assert_eq!(tracker.len(), i);
            tracker.token()
        })
        .collect();
    assert!(!tracker.is_empty());
    assert_eq!(tracker.len(), 10);
    // Dropping tokens one by one counts back down to zero.
    for (i, token) in tokens.into_iter().enumerate() {
        drop(token);
        assert_eq!(tracker.len(), 9 - i);
    }
}
/// `wait` on an already-closed, empty tracker is immediately ready.
#[test]
fn notify_immediately() {
    let tracker = TaskTracker::new();
    tracker.close();
    let mut wait = task::spawn(tracker.wait());
    assert_ready!(wait.poll());
}

/// Reopening after close does not retract readiness from an existing waiter.
#[test]
fn notify_immediately_on_reopen() {
    let tracker = TaskTracker::new();
    tracker.close();
    let mut wait = task::spawn(tracker.wait());
    tracker.reopen();
    assert_ready!(wait.poll());
}

/// A pending waiter becomes ready when the (empty) tracker is closed.
#[test]
fn notify_on_close() {
    let tracker = TaskTracker::new();
    let mut wait = task::spawn(tracker.wait());
    assert_pending!(wait.poll());
    tracker.close();
    assert_ready!(wait.poll());
}

/// Once the close notification fired, a subsequent reopen does not undo it.
#[test]
fn notify_on_close_reopen() {
    let tracker = TaskTracker::new();
    let mut wait = task::spawn(tracker.wait());
    assert_pending!(wait.poll());
    tracker.close();
    tracker.reopen();
    assert_ready!(wait.poll());
}
/// On a closed tracker, `wait` resolves when the last token is dropped.
#[test]
fn notify_on_last_task() {
    let tracker = TaskTracker::new();
    tracker.close();
    let token = tracker.token();
    let mut wait = task::spawn(tracker.wait());
    assert_pending!(wait.poll());
    drop(token);
    assert_ready!(wait.poll());
}

/// Once the tracker has hit closed-and-empty, creating a new token does not
/// retract readiness.
#[test]
fn notify_on_last_task_respawn() {
    let tracker = TaskTracker::new();
    tracker.close();
    let token = tracker.token();
    let mut wait = task::spawn(tracker.wait());
    assert_pending!(wait.poll());
    drop(token);
    // A new token taken after the count reached zero: `wait` is still ready.
    let token2 = tracker.token();
    assert_ready!(wait.poll());
    drop(token2);
}

/// While the tracker is still open, becoming empty does not complete `wait`.
#[test]
fn no_notify_on_respawn_if_open() {
    let tracker = TaskTracker::new();
    let token = tracker.token();
    let mut wait = task::spawn(tracker.wait());
    assert_pending!(wait.poll());
    drop(token);
    let token2 = tracker.token();
    assert_pending!(wait.poll());
    drop(token2);
}
/// Exhaustively close the tracker at every possible point during token
/// teardown, and check that `wait` resolves only once the tracker is both
/// closed and empty.
#[test]
fn close_during_exit() {
    const ITERS: usize = 5;
    for close_spot in 0..=ITERS {
        let tracker = TaskTracker::new();
        let tokens: Vec<_> = (0..ITERS).map(|_| tracker.token()).collect();
        let mut wait = task::spawn(tracker.wait());
        for (i, token) in tokens.into_iter().enumerate() {
            assert_pending!(wait.poll());
            if i == close_spot {
                tracker.close();
                // Still pending: tokens remain outstanding.
                assert_pending!(wait.poll());
            }
            drop(token);
        }
        if close_spot == ITERS {
            // The tracker was never closed inside the loop; close it now.
            assert_pending!(wait.poll());
            tracker.close();
        }
        assert_ready!(wait.poll());
    }
}
/// Closing wakes every pending waiter, not just one.
#[test]
fn notify_many() {
    let tracker = TaskTracker::new();
    let mut waiters: Vec<_> = (0..10).map(|_| task::spawn(tracker.wait())).collect();
    // All waiters start out pending.
    waiters.iter_mut().for_each(|w| {
        assert_pending!(w.poll());
    });
    tracker.close();
    // A single close makes every waiter ready.
    waiters.iter_mut().for_each(|w| {
        assert_ready!(w.poll());
    });
}
#[cfg(tokio_unstable)]
mod spawn {
    use super::*;

    /// Spawn several tasks, and then close the [`TaskTracker`].
    #[tokio::test(flavor = "local")]
    async fn spawn_then_close() {
        const N: usize = 8;
        let tracker = TaskTracker::new();
        for _ in 0..N {
            tracker.spawn(async {});
        }
        for _ in 0..N {
            // Spawning onto an explicit runtime handle is tracked the same way.
            tracker.spawn_on(async {}, &tokio::runtime::Handle::current());
        }
        tracker.close();
        tracker.wait().await;
        assert!(tracker.is_empty());
        assert!(tracker.is_closed());
    }
}
#[cfg(tokio_unstable)]
mod spawn_local {
    use super::*;

    /// `spawn_local` outside of any runtime panics.
    #[test]
    #[should_panic(
        expected = "`spawn_local` called from outside of a `task::LocalSet` or `runtime::LocalRuntime`"
    )]
    fn panic_outside_any_runtime() {
        let tracker = TaskTracker::new();
        tracker.spawn_local(async {});
    }

    /// `spawn_local` from a multi-thread runtime (no `LocalSet`) panics.
    #[tokio::test(flavor = "multi_thread")]
    #[should_panic(
        expected = "`spawn_local` called from outside of a `task::LocalSet` or `runtime::LocalRuntime`"
    )]
    async fn panic_in_multi_thread_runtime() {
        let tracker = TaskTracker::new();
        tracker.spawn_local(async {});
    }

    /// Spawn several tasks, and then close the [`TaskTracker`].
    #[tokio::test(flavor = "local")]
    async fn spawn_then_close() {
        const N: usize = 8;
        let tracker = TaskTracker::new();
        for _ in 0..N {
            // `Rc` is `!Send`, ensuring these futures really run locally.
            let rc = Rc::new(());
            tracker.spawn_local(async move {
                drop(rc);
            });
        }
        tracker.close();
        tracker.wait().await;
        assert!(tracker.is_empty());
        assert!(tracker.is_closed());
    }

    /// Close the [`TaskTracker`], and then spawn several tasks.
    #[tokio::test(flavor = "local")]
    async fn spawn_after_close() {
        const N: usize = 8;
        let tracker = TaskTracker::new();
        tracker.close();
        for _ in 0..N {
            let rc = Rc::new(());
            tracker.spawn_local(async move {
                drop(rc);
            });
        }
        tracker.wait().await;
        assert!(tracker.is_closed());
        assert!(tracker.is_empty());
    }
}
mod spawn_local_on {
    use super::*;

    #[cfg(tokio_unstable)]
    mod local_runtime {
        use super::*;

        /// Spawn several tasks, and then close the [`TaskTracker`].
        #[tokio::test(flavor = "local")]
        async fn spawn_then_close() {
            const N: usize = 8;
            let local_set = LocalSet::new();
            let tracker = TaskTracker::new();
            for _ in 0..N {
                // `Rc` is `!Send`, ensuring these futures really run locally.
                let rc = Rc::new(());
                tracker.spawn_local_on(
                    async move {
                        drop(rc);
                    },
                    &local_set,
                );
            }
            local_set
                .run_until(async {
                    tracker.close();
                    tracker.wait().await;
                    assert!(tracker.is_empty());
                    assert!(tracker.is_closed());
                })
                .await;
        }
    }

    mod local_set {
        use super::*;

        /// Spawn several pending-forever tasks, and then drop the [`TaskTracker`]
        /// while the `LocalSet` is already driven.
        #[tokio::test(flavor = "current_thread")]
        async fn spawn_then_drop() {
            const N: usize = 8;
            let local = LocalSet::new();
            let tracker = TaskTracker::new();
            let (tx, mut rx) = mpsc::unbounded_channel::<()>();
            for _i in 0..N {
                let tx = tx.clone();
                tracker.spawn_local_on(
                    async move {
                        pending::<()>().await;
                        drop(tx);
                    },
                    &local,
                );
            }
            drop(tx);
            local
                .run_until(async move {
                    // Dropping the tracker must NOT abort the tasks: they keep
                    // their senders alive, so the channel stays open and empty.
                    drop(tracker);
                    tokio::task::yield_now().await;
                    use tokio::sync::mpsc::error::TryRecvError;
                    assert!(matches!(rx.try_recv(), Err(TryRecvError::Empty)));
                })
                .await;
        }

        /// Close the tracker first, spawn several short-lived tasks,
        /// then wait while the `LocalSet` is driven.
        #[tokio::test(flavor = "current_thread")]
        async fn close_then_spawn() {
            const N: usize = 8;
            let local = LocalSet::new();
            let tracker = TaskTracker::new();
            tracker.close();
            for _ in 0..N {
                let rc = std::rc::Rc::new(());
                tracker.spawn_local_on(
                    async move {
                        drop(rc);
                    },
                    &local,
                );
            }
            local
                .run_until(async move {
                    tracker.wait().await;
                    assert!(tracker.is_closed());
                    assert!(tracker.is_empty());
                })
                .await;
        }
    }
}

View File

@@ -0,0 +1,899 @@
#![allow(clippy::disallowed_names)]
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use futures::StreamExt;
use tokio::time::{self, sleep, sleep_until, Duration, Instant};
use tokio_test::{assert_pending, assert_ready, task};
use tokio_util::time::DelayQueue;
// Polls `poll_expired` on the queue through the mock-task context so that
// wakeups can subsequently be observed with `is_woken`.
macro_rules! poll {
    ($queue:ident) => {
        $queue.enter(|cx, mut queue| queue.poll_expired(cx))
    };
}

// Asserts the poll result is `Ready(Some(_))` and returns the inner entry.
macro_rules! assert_ready_some {
    ($e:expr) => {{
        match assert_ready!($e) {
            Some(v) => v,
            None => panic!("None"),
        }
    }};
}
/// An entry inserted with an already-reached deadline expires on first poll.
#[tokio::test]
async fn single_immediate_delay() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let _key = queue.insert_at("foo", Instant::now());
    // Advance time by 1ms to handle the rounding
    sleep(ms(1)).await;
    assert_ready_some!(poll!(queue));
    // Queue is now drained: `Ready(None)`.
    let entry = assert_ready!(poll!(queue));
    assert!(entry.is_none())
}

/// Several immediately-due entries all expire (order unspecified).
#[tokio::test]
async fn multi_immediate_delays() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let _k = queue.insert_at("1", Instant::now());
    let _k = queue.insert_at("2", Instant::now());
    let _k = queue.insert_at("3", Instant::now());
    sleep(ms(1)).await;
    let mut res = vec![];
    while res.len() < 3 {
        let entry = assert_ready_some!(poll!(queue));
        res.push(entry.into_inner());
    }
    let entry = assert_ready!(poll!(queue));
    assert!(entry.is_none());
    // Expiration order is unspecified for equal deadlines; compare sorted.
    res.sort_unstable();
    assert_eq!("1", res[0]);
    assert_eq!("2", res[1]);
    assert_eq!("3", res[2]);
}

/// A short delay stays pending until its deadline, then wakes the task.
#[tokio::test]
async fn single_short_delay() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let _key = queue.insert_at("foo", Instant::now() + ms(5));
    assert_pending!(poll!(queue));
    sleep(ms(1)).await;
    // Deadline not reached yet: no wakeup.
    assert!(!queue.is_woken());
    sleep(ms(5)).await;
    assert!(queue.is_woken());
    let entry = assert_ready_some!(poll!(queue));
    assert_eq!(*entry.get_ref(), "foo");
    let entry = assert_ready!(poll!(queue));
    assert!(entry.is_none());
}
/// Insert entries across several timer-wheel levels and verify each fires at
/// (and only at) its own deadline, modulo wheel-cascade wakeups.
#[tokio::test]
#[cfg_attr(miri, ignore)] // Too slow on miri.
async fn multi_delay_at_start() {
    time::pause();
    let long = 262_144 + 9 * 4096;
    let delays = &[1000, 2, 234, long, 60, 10];
    let mut queue = task::spawn(DelayQueue::new());
    // Setup the delays
    for &i in delays {
        let _key = queue.insert_at(i, Instant::now() + ms(i));
    }
    assert_pending!(poll!(queue));
    assert!(!queue.is_woken());
    let start = Instant::now();
    for elapsed in 0..1200 {
        println!("elapsed: {elapsed:?}");
        let elapsed = elapsed + 1;
        tokio::time::sleep_until(start + ms(elapsed)).await;
        if delays.contains(&elapsed) {
            assert!(queue.is_woken());
            assert_ready!(poll!(queue));
            assert_pending!(poll!(queue));
        } else if queue.is_woken() {
            // Spurious wakeups are only permitted at the points where the
            // wheel cascades entries down to a finer-resolution level.
            let cascade = &[192, 960];
            assert!(
                cascade.contains(&elapsed),
                "elapsed={} dt={:?}",
                elapsed,
                Instant::now() - start
            );
            assert_pending!(poll!(queue));
        }
    }
    println!("finished multi_delay_start");
}

/// An entry whose deadline is already in the past is ready right away.
#[tokio::test]
async fn insert_in_past_fires_immediately() {
    println!("running insert_in_past_fires_immediately");
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    sleep(ms(10)).await;
    // `now` is 10ms in the past by the time of insertion.
    queue.insert_at("foo", now);
    assert_ready!(poll!(queue));
    println!("finished insert_in_past_fires_immediately");
}
/// A removed entry hands back its value and no longer expires.
#[tokio::test]
async fn remove_entry() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let key = queue.insert_at("foo", Instant::now() + ms(5));
    assert_pending!(poll!(queue));
    let entry = queue.remove(&key);
    assert_eq!(entry.into_inner(), "foo");
    sleep(ms(10)).await;
    // Past the original deadline the queue is empty, not expired.
    let entry = assert_ready!(poll!(queue));
    assert!(entry.is_none());
}

/// `reset_at` pushes an entry's deadline back; it fires at the new time only.
#[tokio::test]
async fn reset_entry() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    let key = queue.insert_at("foo", now + ms(5));
    assert_pending!(poll!(queue));
    sleep(ms(1)).await;
    queue.reset_at(&key, now + ms(10));
    assert_pending!(poll!(queue));
    sleep(ms(7)).await;
    // 8ms elapsed: past the original deadline but before the new one.
    assert!(!queue.is_woken());
    assert_pending!(poll!(queue));
    sleep(ms(3)).await;
    assert!(queue.is_woken());
    let entry = assert_ready_some!(poll!(queue));
    assert_eq!(*entry.get_ref(), "foo");
    let entry = assert_ready!(poll!(queue));
    assert!(entry.is_none())
}

// Reproduces tokio-rs/tokio#849.
#[tokio::test]
async fn reset_much_later() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    sleep(ms(1)).await;
    let key = queue.insert_at("foo", now + ms(200));
    assert_pending!(poll!(queue));
    sleep(ms(3)).await;
    // Pull the deadline much earlier than originally scheduled.
    queue.reset_at(&key, now + ms(10));
    sleep(ms(20)).await;
    assert!(queue.is_woken());
}

// Reproduces tokio-rs/tokio#849.
#[tokio::test]
async fn reset_twice() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    sleep(ms(1)).await;
    let key = queue.insert_at("foo", now + ms(200));
    assert_pending!(poll!(queue));
    sleep(ms(3)).await;
    queue.reset_at(&key, now + ms(50));
    sleep(ms(20)).await;
    // A second reset while the first is still pending must also take effect.
    queue.reset_at(&key, now + ms(40));
    sleep(ms(20)).await;
    assert!(queue.is_woken());
}
/// Regression test: Given an entry inserted with a deadline in the past, so
/// that it is placed directly on the expired queue, reset the entry to a
/// deadline in the future. Validate that this leaves the entry and queue in an
/// internally consistent state by running an additional reset on the entry
/// before polling it to completion.
#[tokio::test]
async fn repeatedly_reset_entry_inserted_as_expired() {
    time::pause();
    // Instants before the start of the test seem to break in wasm.
    time::sleep(ms(1000)).await;
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    let key = queue.insert_at("foo", now - ms(100));
    queue.reset_at(&key, now + ms(100));
    queue.reset_at(&key, now + ms(50));
    assert_pending!(poll!(queue));
    time::sleep_until(now + ms(60)).await;
    assert!(queue.is_woken());
    let entry = assert_ready_some!(poll!(queue)).into_inner();
    assert_eq!(entry, "foo");
    let entry = assert_ready!(poll!(queue));
    assert!(entry.is_none());
}

/// An entry that has already expired can still be removed by key.
#[tokio::test]
async fn remove_expired_item() {
    time::pause();
    let mut queue = DelayQueue::new();
    let now = Instant::now();
    sleep(ms(10)).await;
    let key = queue.insert_at("foo", now);
    let entry = queue.remove(&key);
    assert_eq!(entry.into_inner(), "foo");
}

/// Regression test: it should be possible to remove entries which fall in the
/// 0th slot of the internal timer wheel — that is, entries whose expiration
/// (a) falls at the beginning of one of the wheel's hierarchical levels and (b)
/// is equal to the wheel's current elapsed time.
#[tokio::test]
async fn remove_at_timer_wheel_threshold() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    let key1 = queue.insert_at("foo", now + ms(64));
    let key2 = queue.insert_at("bar", now + ms(64));
    sleep(ms(80)).await;
    // One of the two entries expires first (order unspecified for equal
    // deadlines); remove the other one by key while it sits at the threshold.
    let entry = assert_ready_some!(poll!(queue)).into_inner();
    match entry {
        "foo" => {
            let entry = queue.remove(&key2).into_inner();
            assert_eq!(entry, "bar");
        }
        "bar" => {
            let entry = queue.remove(&key1).into_inner();
            assert_eq!(entry, "foo");
        }
        other => panic!("other: {other:?}"),
    }
}
/// Inserting an entry due sooner than the current head must shorten the
/// queue's internal sleep.
#[tokio::test]
async fn expires_before_last_insert() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    queue.insert_at("foo", now + ms(10_000));
    // Delay should be set to 8.192s here.
    assert_pending!(poll!(queue));
    // Delay should be set to the delay of the new item here
    queue.insert_at("bar", now + ms(600));
    assert_pending!(poll!(queue));
    sleep(ms(600)).await;
    assert!(queue.is_woken());
    let entry = assert_ready_some!(poll!(queue)).into_inner();
    assert_eq!(entry, "bar");
}

/// Multiple resets on the same entries: only the latest deadline per entry
/// counts, and entries fire in deadline order.
#[tokio::test]
async fn multi_reset() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    let one = queue.insert_at("one", now + ms(200));
    let two = queue.insert_at("two", now + ms(250));
    assert_pending!(poll!(queue));
    queue.reset_at(&one, now + ms(300));
    queue.reset_at(&two, now + ms(350));
    // "one" is reset a second time, ending up after "two".
    queue.reset_at(&one, now + ms(400));
    sleep(ms(310)).await;
    assert_pending!(poll!(queue));
    sleep(ms(50)).await;
    let entry = assert_ready_some!(poll!(queue));
    assert_eq!(*entry.get_ref(), "two");
    assert_pending!(poll!(queue));
    sleep(ms(50)).await;
    let entry = assert_ready_some!(poll!(queue));
    assert_eq!(*entry.get_ref(), "one");
    let entry = assert_ready!(poll!(queue));
    assert!(entry.is_none())
}

/// Resetting the head entry to an earlier deadline fires it at the new time.
#[tokio::test]
async fn expire_first_key_when_reset_to_expire_earlier() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    let one = queue.insert_at("one", now + ms(200));
    queue.insert_at("two", now + ms(250));
    assert_pending!(poll!(queue));
    queue.reset_at(&one, now + ms(100));
    sleep(ms(100)).await;
    assert!(queue.is_woken());
    let entry = assert_ready_some!(poll!(queue)).into_inner();
    assert_eq!(entry, "one");
}

/// Resetting a non-head entry so that it becomes the earliest also works.
#[tokio::test]
async fn expire_second_key_when_reset_to_expire_earlier() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    queue.insert_at("one", now + ms(200));
    let two = queue.insert_at("two", now + ms(250));
    assert_pending!(poll!(queue));
    queue.reset_at(&two, now + ms(100));
    sleep(ms(100)).await;
    assert!(queue.is_woken());
    let entry = assert_ready_some!(poll!(queue)).into_inner();
    assert_eq!(entry, "two");
}

/// Pushing the head entry later promotes the next entry to fire first.
#[tokio::test]
async fn reset_first_expiring_item_to_expire_later() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    let one = queue.insert_at("one", now + ms(200));
    let _two = queue.insert_at("two", now + ms(250));
    assert_pending!(poll!(queue));
    queue.reset_at(&one, now + ms(300));
    sleep(ms(250)).await;
    assert!(queue.is_woken());
    let entry = assert_ready_some!(poll!(queue)).into_inner();
    assert_eq!(entry, "two");
}
/// Inserting an earlier entry after the queue was polled re-arms the timer.
#[tokio::test]
async fn insert_before_first_after_poll() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    let _one = queue.insert_at("one", now + ms(200));
    assert_pending!(poll!(queue));
    let _two = queue.insert_at("two", now + ms(100));
    sleep(ms(99)).await;
    assert_pending!(poll!(queue));
    sleep(ms(1)).await;
    assert!(queue.is_woken());
    let entry = assert_ready_some!(poll!(queue)).into_inner();
    assert_eq!(entry, "two");
}

/// Inserting new (future) entries while draining ready ones does not disturb
/// the already-ready set.
#[tokio::test]
async fn insert_after_ready_poll() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    queue.insert_at("1", now + ms(100));
    queue.insert_at("2", now + ms(100));
    queue.insert_at("3", now + ms(100));
    assert_pending!(poll!(queue));
    sleep(ms(100)).await;
    assert!(queue.is_woken());
    let mut res = vec![];
    while res.len() < 3 {
        let entry = assert_ready_some!(poll!(queue));
        res.push(entry.into_inner());
        // Interleave an insertion with a later deadline on every iteration.
        queue.insert_at("foo", now + ms(500));
    }
    res.sort_unstable();
    assert_eq!("1", res[0]);
    assert_eq!("2", res[1]);
    assert_eq!("3", res[2]);
}

/// Reset to a later deadline that lands in a coarser wheel slot.
#[tokio::test]
async fn reset_later_after_slot_starts() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    let foo = queue.insert_at("foo", now + ms(100));
    assert_pending!(poll!(queue));
    sleep_until(now + Duration::from_millis(80)).await;
    assert!(!queue.is_woken());
    // At this point the queue hasn't been polled, so `elapsed` on the wheel
    // for the queue is still at 0 and hence the 1ms resolution slots cover
    // [0-64). Resetting the time on the entry to 120 causes it to get put in
    // the [64-128) slot. As the queue knows that the first entry is within
    // that slot, but doesn't know when, it must wake immediately to advance
    // the wheel.
    queue.reset_at(&foo, now + ms(120));
    assert!(queue.is_woken());
    assert_pending!(poll!(queue));
    sleep_until(now + Duration::from_millis(119)).await;
    assert!(!queue.is_woken());
    sleep(ms(1)).await;
    assert!(queue.is_woken());
    let entry = assert_ready_some!(poll!(queue)).into_inner();
    assert_eq!(entry, "foo");
}

/// Resetting an entry that was inserted already-expired must not panic.
#[tokio::test]
async fn reset_inserted_expired() {
    time::pause();
    // Instants before the start of the test seem to break in wasm.
    time::sleep(ms(1000)).await;
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    let key = queue.insert_at("foo", now - ms(100));
    // this causes the panic described in #2473
    queue.reset_at(&key, now + ms(100));
    assert_eq!(1, queue.len());
    sleep(ms(200)).await;
    let entry = assert_ready_some!(poll!(queue)).into_inner();
    assert_eq!(entry, "foo");
    assert_eq!(queue.len(), 0);
}
/// Reset to an earlier deadline that still lands in a coarser wheel slot.
#[tokio::test]
async fn reset_earlier_after_slot_starts() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    let foo = queue.insert_at("foo", now + ms(200));
    assert_pending!(poll!(queue));
    sleep_until(now + Duration::from_millis(80)).await;
    assert!(!queue.is_woken());
    // At this point the queue hasn't been polled, so `elapsed` on the wheel
    // for the queue is still at 0 and hence the 1ms resolution slots cover
    // [0-64). Resetting the time on the entry to 120 causes it to get put in
    // the [64-128) slot. As the queue knows that the first entry is within
    // that slot, but doesn't know when, it must wake immediately to advance
    // the wheel.
    queue.reset_at(&foo, now + ms(120));
    assert!(queue.is_woken());
    assert_pending!(poll!(queue));
    sleep_until(now + Duration::from_millis(119)).await;
    assert!(!queue.is_woken());
    sleep(ms(1)).await;
    assert!(queue.is_woken());
    let entry = assert_ready_some!(poll!(queue)).into_inner();
    assert_eq!(entry, "foo");
}

/// An insertion whose deadline is already past, made after polling, must wake
/// the task immediately.
#[tokio::test]
async fn insert_in_past_after_poll_fires_immediately() {
    time::pause();
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    queue.insert_at("foo", now + ms(200));
    assert_pending!(poll!(queue));
    sleep(ms(80)).await;
    assert!(!queue.is_woken());
    // Deadline is 40ms in the past at insertion time.
    queue.insert_at("bar", now + ms(40));
    assert!(queue.is_woken());
    let entry = assert_ready_some!(poll!(queue)).into_inner();
    assert_eq!(entry, "bar");
}

/// Removing the only entry makes a subsequent poll return `Ready(None)`.
#[tokio::test]
async fn delay_queue_poll_expired_when_empty() {
    let mut delay_queue = task::spawn(DelayQueue::new());
    let key = delay_queue.insert(0, std::time::Duration::from_secs(10));
    assert_pending!(poll!(delay_queue));
    delay_queue.remove(&key);
    assert!(assert_ready!(poll!(delay_queue)).is_none());
}

/// After all entries expire and are drained, `compact` frees all capacity.
#[tokio::test(start_paused = true)]
async fn compact_expire_empty() {
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    queue.insert_at("foo1", now + ms(10));
    queue.insert_at("foo2", now + ms(10));
    sleep(ms(10)).await;
    let mut res = vec![];
    while res.len() < 2 {
        let entry = assert_ready_some!(poll!(queue));
        res.push(entry.into_inner());
    }
    queue.compact();
    assert_eq!(queue.len(), 0);
    assert_eq!(queue.capacity(), 0);
}

/// After all entries are removed, `compact` frees all capacity.
#[tokio::test(start_paused = true)]
async fn compact_remove_empty() {
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();
    let key1 = queue.insert_at("foo1", now + ms(10));
    let key2 = queue.insert_at("foo2", now + ms(10));
    queue.remove(&key1);
    queue.remove(&key2);
    queue.compact();
    assert_eq!(queue.len(), 0);
    assert_eq!(queue.capacity(), 0);
}
#[tokio::test(start_paused = true)]
// Trigger a re-mapping of keys in the slab due to a `compact` call and
// test removal of re-mapped keys
async fn compact_remove_remapped_keys() {
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();

    // Two short-lived entries that will expire before the compaction.
    queue.insert_at("foo1", now + ms(10));
    queue.insert_at("foo2", now + ms(10));

    // should be assigned indices 3 and 4
    let key3 = queue.insert_at("foo3", now + ms(20));
    let key4 = queue.insert_at("foo4", now + ms(20));

    sleep(ms(10)).await;

    // Drain the two expired entries so only `foo3`/`foo4` remain,
    // leaving holes in the slab for `compact` to close up.
    let mut res = vec![];
    while res.len() < 2 {
        let entry = assert_ready_some!(poll!(queue));
        res.push(entry.into_inner());
    }

    // items corresponding to `foo3` and `foo4` will be assigned
    // new indices here
    queue.compact();

    queue.insert_at("foo5", now + ms(10));

    // test removal of re-mapped keys — the pre-compaction handles
    // must still resolve to the entries they were issued for.
    let expired3 = queue.remove(&key3);
    let expired4 = queue.remove(&key4);

    assert_eq!(expired3.into_inner(), "foo3");
    assert_eq!(expired4.into_inner(), "foo4");

    // Only `foo5` remains after a second compaction.
    queue.compact();
    assert_eq!(queue.len(), 1);
    assert_eq!(queue.capacity(), 1);
}
#[tokio::test(start_paused = true)]
async fn compact_change_deadline() {
    let mut queue = task::spawn(DelayQueue::new());
    let mut now = Instant::now();

    // Two entries that come due at +10ms; drained below.
    queue.insert_at("foo1", now + ms(10));
    queue.insert_at("foo2", now + ms(10));

    // should be assigned indices 3 and 4
    queue.insert_at("foo3", now + ms(20));
    let key4 = queue.insert_at("foo4", now + ms(20));

    sleep(ms(10)).await;

    // Drain the two entries that have expired so far, leaving slab
    // holes for `compact` to close up.
    let mut res = vec![];
    while res.len() < 2 {
        let entry = assert_ready_some!(poll!(queue));
        res.push(entry.into_inner());
    }

    // items corresponding to `foo3` and `foo4` should be assigned
    // new indices
    queue.compact();

    now = Instant::now();

    queue.insert_at("foo5", now + ms(10));
    let key6 = queue.insert_at("foo6", now + ms(10));

    // `reset_at` must work both through a pre-compaction handle
    // (`key4`, whose slab slot was remapped) and a fresh one (`key6`).
    queue.reset_at(&key4, now + ms(20));
    queue.reset_at(&key6, now + ms(20));

    // foo3 and foo5 will expire
    sleep(ms(10)).await;
    while res.len() < 4 {
        let entry = assert_ready_some!(poll!(queue));
        res.push(entry.into_inner());
    }

    // The two reset entries (`foo4`, `foo6`) expire 10ms later.
    sleep(ms(10)).await;
    while res.len() < 6 {
        let entry = assert_ready_some!(poll!(queue));
        res.push(entry.into_inner());
    }

    // Queue is now exhausted.
    let entry = assert_ready!(poll!(queue));
    assert!(entry.is_none());
}
#[tokio::test(start_paused = true)]
async fn item_expiry_greater_than_wheel() {
    // This function tests that a delay queue that has existed for at least 2^36 milliseconds won't panic when a new item is inserted.
    let mut queue = DelayQueue::new();

    // Advance the paused clock past 2^36 ms in two 2^35 ms steps,
    // inserting and draining an immediate entry each step so the
    // queue observes the elapsed time.
    for _ in 0..2 {
        tokio::time::advance(Duration::from_millis(1 << 35)).await;
        queue.insert(0, Duration::from_millis(0));
        queue.next().await;
    }

    // This should not panic
    let no_panic = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
        queue.insert(1, Duration::from_millis(1));
    }));
    assert!(no_panic.is_ok());
}
#[cfg_attr(target_os = "wasi", ignore = "FIXME: Does not seem to work with WASI")]
#[tokio::test(start_paused = true)]
#[cfg(panic = "unwind")]
async fn remove_after_compact() {
    let start = Instant::now();
    let mut queue = DelayQueue::new();

    let stale_key = queue.insert_at("foo", start + ms(10));
    queue.insert_at("bar", start + ms(20));

    // Invalidate the key, then compact so its slab slot may be
    // reused by the remaining entry.
    queue.remove(&stale_key);
    queue.compact();

    // Removing through the stale key must panic rather than touch
    // whatever entry now occupies that slot.
    let outcome = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
        queue.remove(&stale_key);
    }));
    assert!(outcome.is_err());
}
#[cfg_attr(target_os = "wasi", ignore = "FIXME: Does not seem to work with WASI")]
#[tokio::test(start_paused = true)]
#[cfg(panic = "unwind")]
async fn remove_after_compact_poll() {
    let now = Instant::now();
    let mut queue = task::spawn(DelayQueue::new());

    let foo_key = queue.insert_at("foo", now + ms(10));
    queue.insert_at("bar", now + ms(20));

    // Let `foo` expire and poll it out of the queue; its key is now
    // stale.
    sleep(ms(10)).await;
    assert_eq!(assert_ready_some!(poll!(queue)).key(), foo_key);

    // Compaction may reuse the freed slab slot for `bar`.
    queue.compact();

    // Removing through the stale key must panic instead of removing
    // whatever entry now occupies the reused slot.
    let panic = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
        queue.remove(&foo_key);
    }));
    assert!(panic.is_err());
}
#[tokio::test(start_paused = true)]
async fn peek() {
    let mut queue = task::spawn(DelayQueue::new());
    let now = Instant::now();

    // Three entries with distinct deadlines; `bar` is due immediately.
    let key = queue.insert_at("foo", now + ms(5));
    let key2 = queue.insert_at("bar", now);
    let key3 = queue.insert_at("baz", now + ms(10));

    // `peek` reports the next entry to expire without removing it.
    assert_eq!(queue.peek(), Some(key2));

    sleep(ms(6)).await;

    // Both `bar` and `foo` have expired by now, but `bar` (earlier
    // deadline) stays at the front until it is polled out.
    assert_eq!(queue.peek(), Some(key2));

    let entry = assert_ready_some!(poll!(queue));
    assert_eq!(entry.get_ref(), &"bar");

    // With `bar` gone, the already-expired `foo` is next.
    assert_eq!(queue.peek(), Some(key));

    let entry = assert_ready_some!(poll!(queue));
    assert_eq!(entry.get_ref(), &"foo");

    // `baz` has not expired yet: peek still names it, but polling
    // is pending.
    assert_eq!(queue.peek(), Some(key3));

    assert_pending!(poll!(queue));

    sleep(ms(5)).await;

    assert_eq!(queue.peek(), Some(key3));

    let entry = assert_ready_some!(poll!(queue));
    assert_eq!(entry.get_ref(), &"baz");

    // Queue exhausted: nothing left to peek at.
    assert!(queue.peek().is_none());
}
#[tokio::test(start_paused = true)]
async fn wake_after_remove_last() {
    let mut queue = task::spawn(DelayQueue::new());
    let last_key = queue.insert("foo", ms(1000));

    // First poll parks the task waiting on the far-future entry.
    assert_pending!(poll!(queue));

    // Removing the final entry must wake the parked task so it can
    // observe the now-empty queue and resolve with `None`.
    queue.remove(&last_key);
    assert!(queue.is_woken());
    assert!(assert_ready!(poll!(queue)).is_none());
}
/// Shorthand used throughout these tests: a `Duration` of `n` milliseconds.
fn ms(n: u64) -> Duration {
    std::time::Duration::from_millis(n)
}

150
vendor/tokio-util/tests/udp.rs vendored Normal file
View File

@@ -0,0 +1,150 @@
#![warn(rust_2018_idioms)]
#![cfg(not(target_os = "wasi"))] // Wasi doesn't support UDP
#![cfg(not(miri))] // No `socket` in Miri.
#![cfg(not(loom))] // No udp / UdpFramed in loom
use tokio::net::UdpSocket;
use tokio_stream::StreamExt;
use tokio_util::codec::{Decoder, Encoder, LinesCodec};
use tokio_util::udp::UdpFramed;
use bytes::{BufMut, BytesMut};
use futures::future::try_join;
use futures::future::FutureExt;
use futures::sink::SinkExt;
use std::io;
use std::sync::Arc;
#[cfg_attr(
    any(
        target_os = "macos",
        target_os = "ios",
        target_os = "tvos",
        target_os = "watchos",
        target_os = "visionos"
    ),
    allow(unused_assignments)
)]
#[tokio::test]
async fn send_framed_byte_codec() -> std::io::Result<()> {
    let mut a_soc = UdpSocket::bind("127.0.0.1:0").await?;
    let mut b_soc = UdpSocket::bind("127.0.0.1:0").await?;

    let a_addr = a_soc.local_addr()?;
    let b_addr = b_soc.local_addr()?;

    // test sending & receiving bytes
    {
        let mut a = UdpFramed::new(a_soc, ByteCodec);
        let mut b = UdpFramed::new(b_soc, ByteCodec);

        let msg = b"4567";

        // Drive send and receive concurrently so neither blocks the
        // other; the datagram must arrive intact with the sender's
        // address attached.
        let send = a.send((msg, b_addr));
        let recv = b.next().map(|e| e.unwrap());
        let (_, received) = try_join(send, recv).await.unwrap();

        let (data, addr) = received;
        assert_eq!(msg, &*data);
        assert_eq!(a_addr, addr);

        // Recover the sockets so the second phase can reuse them.
        // On Apple targets that phase is compiled out, which is why
        // `allow(unused_assignments)` is applied to this function.
        a_soc = a.into_inner();
        b_soc = b.into_inner();
    }

    // Skipped on Apple platforms — presumably empty UDP datagrams do
    // not round-trip reliably there; TODO confirm.
    #[cfg(not(any(
        target_os = "macos",
        target_os = "ios",
        target_os = "tvos",
        target_os = "watchos",
        target_os = "visionos"
    )))]
    // test sending & receiving an empty message
    {
        let mut a = UdpFramed::new(a_soc, ByteCodec);
        let mut b = UdpFramed::new(b_soc, ByteCodec);

        let msg = b"";

        let send = a.send((msg, b_addr));
        let recv = b.next().map(|e| e.unwrap());
        let (_, received) = try_join(send, recv).await.unwrap();

        let (data, addr) = received;
        assert_eq!(msg, &*data);
        assert_eq!(a_addr, addr);
    }

    Ok(())
}
/// Codec that treats every datagram as an opaque byte payload.
pub struct ByteCodec;

impl Decoder for ByteCodec {
    type Item = Vec<u8>;
    type Error = io::Error;

    fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<Vec<u8>>, io::Error> {
        // Always yields the whole buffer, even when it is empty.
        // With `UdpFramed` an empty buffer represents an empty
        // datagram rather than "no data yet", so returning
        // `Some(vec![])` is deliberate. NOTE(review): a streaming
        // `Framed` transport would loop forever on this — only use
        // this codec over UDP.
        let len = buf.len();
        Ok(Some(buf.split_to(len).to_vec()))
    }
}

impl Encoder<&[u8]> for ByteCodec {
    type Error = io::Error;

    fn encode(&mut self, data: &[u8], buf: &mut BytesMut) -> Result<(), io::Error> {
        // Copy the payload verbatim into the outgoing datagram buffer.
        buf.reserve(data.len());
        buf.put_slice(data);
        Ok(())
    }
}
#[tokio::test]
async fn send_framed_lines_codec() -> std::io::Result<()> {
    // One raw-byte sender, one line-decoding receiver.
    let sender_sock = UdpSocket::bind("127.0.0.1:0").await?;
    let receiver_sock = UdpSocket::bind("127.0.0.1:0").await?;
    let sender_addr = sender_sock.local_addr()?;
    let receiver_addr = receiver_sock.local_addr()?;

    let mut sender = UdpFramed::new(sender_sock, ByteCodec);
    let mut receiver = UdpFramed::new(receiver_sock, LinesCodec::new());

    // Three CRLF-terminated lines packed into a single datagram.
    let payload = b"1\r\n2\r\n3\r\n".to_vec();
    sender.send((&payload, receiver_addr)).await?;

    // The line codec splits the datagram into one frame per line,
    // each tagged with the sender's address.
    for expected in ["1", "2", "3"] {
        assert_eq!(
            receiver.next().await.unwrap().unwrap(),
            (expected.to_string(), sender_addr)
        );
    }

    Ok(())
}
#[tokio::test]
async fn framed_half() -> std::io::Result<()> {
    // Writer and reader wrap two `Arc` handles to the *same* socket,
    // so datagrams sent by the writer loop straight back to the reader.
    let sock = Arc::new(UdpSocket::bind("127.0.0.1:0").await?);
    let sock_clone = sock.clone();
    let local_addr = sock.local_addr()?;
    let target_addr = sock_clone.local_addr()?;

    let mut writer = UdpFramed::new(sock, ByteCodec);
    let mut reader = UdpFramed::new(sock_clone, LinesCodec::new());

    // Two datagrams, each carrying three CRLF-terminated lines.
    let first_batch = b"1\r\n2\r\n3\r\n".to_vec();
    writer.send((&first_batch, target_addr)).await?;

    let second_batch = b"4\r\n5\r\n6\r\n".to_vec();
    writer.send((&second_batch, target_addr)).await?;

    // Lines arrive in order, each tagged with the (shared) local address.
    for expected in ["1", "2", "3", "4", "5", "6"] {
        assert_eq!(
            reader.next().await.unwrap().unwrap(),
            (expected.to_string(), local_addr)
        );
    }

    Ok(())
}