chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

63
vendor/tokio/src/blocking.rs vendored Normal file
View File

@@ -0,0 +1,63 @@
cfg_rt! {
    // With the `rt` feature enabled, blocking helpers come from the real
    // runtime implementation.
    pub(crate) use crate::runtime::spawn_blocking;

    cfg_fs! {
        #[allow(unused_imports)]
        pub(crate) use crate::runtime::spawn_mandatory_blocking;
    }

    pub(crate) use crate::task::JoinHandle;
}
cfg_not_rt! {
    use std::fmt;
    use std::future::Future;
    use std::pin::Pin;
    use std::task::{Context, Poll};

    /// Stub used when the `rt` feature is disabled: always panics.
    ///
    /// Exists so code gated on other features still type-checks without the
    /// runtime.
    pub(crate) fn spawn_blocking<F, R>(_f: F) -> JoinHandle<R>
    where
        F: FnOnce() -> R + Send + 'static,
        R: Send + 'static,
    {
        // Compile-time check that the stub `JoinHandle` is `Send + Sync`
        // even for a payload that is `Send` but not `Sync` (`Cell<()>`),
        // matching the `T: Send` bounds on the unsafe impls below.
        assert_send_sync::<JoinHandle<std::cell::Cell<()>>>();
        panic!("requires the `rt` Tokio feature flag")
    }

    cfg_fs! {
        /// Stub used when the `rt` feature is disabled: always panics.
        pub(crate) fn spawn_mandatory_blocking<F, R>(_f: F) -> Option<JoinHandle<R>>
        where
            F: FnOnce() -> R + Send + 'static,
            R: Send + 'static,
        {
            panic!("requires the `rt` Tokio feature flag")
        }
    }

    /// Placeholder join handle. Never actually constructed: the spawn stubs
    /// above panic before returning one.
    pub(crate) struct JoinHandle<R> {
        _p: std::marker::PhantomData<R>,
    }

    // SAFETY: the type only contains `PhantomData<R>` and is never
    // instantiated; the `T: Send` bounds mirror the real runtime handle.
    unsafe impl<T: Send> Send for JoinHandle<T> {}
    unsafe impl<T: Send> Sync for JoinHandle<T> {}

    impl<R> Future for JoinHandle<R> {
        type Output = Result<R, std::io::Error>;

        fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
            // No `JoinHandle` value can exist in this configuration.
            unreachable!()
        }
    }

    impl<T> fmt::Debug for JoinHandle<T>
    where
        T: fmt::Debug,
    {
        fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
            fmt.debug_struct("JoinHandle").finish()
        }
    }

    // Helper used only for the compile-time `Send + Sync` assertion above.
    fn assert_send_sync<T: Send + Sync>() {
    }
}

47
vendor/tokio/src/doc/mod.rs vendored Normal file
View File

@@ -0,0 +1,47 @@
//! Types which are documented locally in the Tokio crate, but do not actually
//! live here.
//!
//! **Note** this module is only visible on docs.rs, you cannot use it directly
//! in your own code.

/// The name of a type which is not defined here.
///
/// This is typically used as an alias for another type, like so:
///
/// ```rust,ignore
/// /// See [some::other::location](https://example.com).
/// type DEFINED_ELSEWHERE = crate::doc::NotDefinedHere;
/// ```
///
/// This type is uninhabitable like the [`never` type] to ensure that no one
/// will ever accidentally use it.
///
/// [`never` type]: https://doc.rust-lang.org/std/primitive.never.html
#[derive(Debug)]
pub enum NotDefinedHere {}
// When the `net` feature is enabled, `NotDefinedHere` must satisfy the
// `mio::event::Source` bound required by the types it stands in for.
// Since the enum is uninhabited, these methods can never actually run;
// each is a trivial no-op returning `Ok(())`.
#[cfg(feature = "net")]
impl mio::event::Source for NotDefinedHere {
    fn register(
        &mut self,
        _registry: &mio::Registry,
        _token: mio::Token,
        _interests: mio::Interest,
    ) -> std::io::Result<()> {
        Ok(())
    }

    fn reregister(
        &mut self,
        _registry: &mio::Registry,
        _token: mio::Token,
        _interests: mio::Interest,
    ) -> std::io::Result<()> {
        Ok(())
    }

    fn deregister(&mut self, _registry: &mio::Registry) -> std::io::Result<()> {
        Ok(())
    }
}

// Doc-only mirrors of `std::os` items; see `os.rs`.
#[cfg(any(feature = "net", feature = "fs"))]
pub mod os;

68
vendor/tokio/src/doc/os.rs vendored Normal file
View File

@@ -0,0 +1,68 @@
//! See [`std::os`](https://doc.rust-lang.org/std/os/index.html).

/// Platform-specific extensions to `std` for Windows.
///
/// See [`std::os::windows`](https://doc.rust-lang.org/std/os/windows/index.html).
pub mod windows {
    /// Windows-specific extensions to general I/O primitives.
    ///
    /// See [`std::os::windows::io`](https://doc.rust-lang.org/std/os/windows/io/index.html).
    pub mod io {
        // Every item below is a documentation-only mirror of the
        // corresponding `std` item; the aliases all point at the
        // uninhabited `crate::doc::NotDefinedHere`.

        /// See [`std::os::windows::io::RawHandle`](https://doc.rust-lang.org/std/os/windows/io/type.RawHandle.html)
        pub type RawHandle = crate::doc::NotDefinedHere;

        /// See [`std::os::windows::io::OwnedHandle`](https://doc.rust-lang.org/std/os/windows/io/struct.OwnedHandle.html)
        pub type OwnedHandle = crate::doc::NotDefinedHere;

        /// See [`std::os::windows::io::AsRawHandle`](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawHandle.html)
        pub trait AsRawHandle {
            /// See [`std::os::windows::io::AsRawHandle::as_raw_handle`](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawHandle.html#tymethod.as_raw_handle)
            fn as_raw_handle(&self) -> RawHandle;
        }

        /// See [`std::os::windows::io::FromRawHandle`](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawHandle.html)
        pub trait FromRawHandle {
            /// See [`std::os::windows::io::FromRawHandle::from_raw_handle`](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawHandle.html#tymethod.from_raw_handle)
            unsafe fn from_raw_handle(handle: RawHandle) -> Self;
        }

        /// See [`std::os::windows::io::RawSocket`](https://doc.rust-lang.org/std/os/windows/io/type.RawSocket.html)
        pub type RawSocket = crate::doc::NotDefinedHere;

        /// See [`std::os::windows::io::AsRawSocket`](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html)
        pub trait AsRawSocket {
            /// See [`std::os::windows::io::AsRawSocket::as_raw_socket`](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html#tymethod.as_raw_socket)
            fn as_raw_socket(&self) -> RawSocket;
        }

        /// See [`std::os::windows::io::FromRawSocket`](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawSocket.html)
        pub trait FromRawSocket {
            /// See [`std::os::windows::io::FromRawSocket::from_raw_socket`](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawSocket.html#tymethod.from_raw_socket)
            unsafe fn from_raw_socket(sock: RawSocket) -> Self;
        }

        /// See [`std::os::windows::io::IntoRawSocket`](https://doc.rust-lang.org/std/os/windows/io/trait.IntoRawSocket.html)
        pub trait IntoRawSocket {
            /// See [`std::os::windows::io::IntoRawSocket::into_raw_socket`](https://doc.rust-lang.org/std/os/windows/io/trait.IntoRawSocket.html#tymethod.into_raw_socket)
            fn into_raw_socket(self) -> RawSocket;
        }

        /// See [`std::os::windows::io::BorrowedHandle`](https://doc.rust-lang.org/std/os/windows/io/struct.BorrowedHandle.html)
        pub type BorrowedHandle<'handle> = crate::doc::NotDefinedHere;

        /// See [`std::os::windows::io::AsHandle`](https://doc.rust-lang.org/std/os/windows/io/trait.AsHandle.html)
        pub trait AsHandle {
            /// See [`std::os::windows::io::AsHandle::as_handle`](https://doc.rust-lang.org/std/os/windows/io/trait.AsHandle.html#tymethod.as_handle)
            fn as_handle(&self) -> BorrowedHandle<'_>;
        }

        /// See [`std::os::windows::io::BorrowedSocket`](https://doc.rust-lang.org/std/os/windows/io/struct.BorrowedSocket.html)
        pub type BorrowedSocket<'socket> = crate::doc::NotDefinedHere;

        /// See [`std::os::windows::io::AsSocket`](https://doc.rust-lang.org/std/os/windows/io/trait.AsSocket.html)
        pub trait AsSocket {
            /// See [`std::os::windows::io::AsSocket::as_socket`](https://doc.rust-lang.org/std/os/windows/io/trait.AsSocket.html#tymethod.as_socket)
            fn as_socket(&self) -> BorrowedSocket<'_>;
        }
    }
}

49
vendor/tokio/src/fs/canonicalize.rs vendored Normal file
View File

@@ -0,0 +1,49 @@
use crate::fs::asyncify;
use std::io;
use std::path::{Path, PathBuf};
/// Returns the canonical, absolute form of a path with all intermediate
/// components normalized and symbolic links resolved.
///
/// This is an async version of [`std::fs::canonicalize`].
///
/// # Platform-specific behavior
///
/// This function currently corresponds to the `realpath` function on Unix
/// and the `CreateFile` and `GetFinalPathNameByHandle` functions on Windows.
/// Note that, this [may change in the future][changes].
///
/// On Windows, this converts the path to use [extended length path][path]
/// syntax, which allows your program to use longer path names, but means you
/// can only join backslash-delimited paths to it, and it may be incompatible
/// with other applications (if passed to the application on the command-line,
/// or written to a file another application may read).
///
/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
/// [path]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
///
/// # Errors
///
/// This function will return an error in the following situations, but is not
/// limited to just these cases:
///
/// * `path` does not exist.
/// * A non-final component in path is not a directory.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let path = fs::canonicalize("../a/../foo.txt").await?;
/// Ok(())
/// }
/// ```
pub async fn canonicalize(path: impl AsRef<Path>) -> io::Result<PathBuf> {
    // Copy the path into an owned buffer so the closure can be `'static`.
    let owned: PathBuf = path.as_ref().to_path_buf();
    asyncify(move || std::fs::canonicalize(owned)).await
}

24
vendor/tokio/src/fs/copy.rs vendored Normal file
View File

@@ -0,0 +1,24 @@
use crate::fs::asyncify;
use std::path::Path;
/// Copies the contents of one file to another. This function will also copy the permission bits
/// of the original file to the destination file.
/// This function will overwrite the contents of to.
///
/// This is the async equivalent of [`std::fs::copy`].
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// fs::copy("foo.txt", "bar.txt").await?;
/// # Ok(())
/// # }
/// ```
pub async fn copy(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<u64, std::io::Error> {
    // Own both paths so the blocking closure can be `'static`.
    let (src, dst) = (from.as_ref().to_path_buf(), to.as_ref().to_path_buf());
    asyncify(move || std::fs::copy(src, dst)).await
}

50
vendor/tokio/src/fs/create_dir.rs vendored Normal file
View File

@@ -0,0 +1,50 @@
use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Creates a new, empty directory at the provided path.
///
/// This is an async version of [`std::fs::create_dir`].
///
/// # Platform-specific behavior
///
/// This function currently corresponds to the `mkdir` function on Unix
/// and the `CreateDirectory` function on Windows.
/// Note that, this [may change in the future][changes].
///
/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
///
/// **NOTE**: If a parent of the given path doesn't exist, this function will
/// return an error. To create a directory and all its missing parents at the
/// same time, use the [`create_dir_all`] function.
///
/// # Errors
///
/// This function will return an error in the following situations, but is not
/// limited to just these cases:
///
/// * User lacks permissions to create directory at `path`.
/// * A parent of the given path doesn't exist. (To create a directory and all
/// its missing parents at the same time, use the [`create_dir_all`]
/// function.)
/// * `path` already exists.
///
/// [`create_dir_all`]: super::create_dir_all()
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// fs::create_dir("/some/dir").await?;
/// Ok(())
/// }
/// ```
pub async fn create_dir(path: impl AsRef<Path>) -> io::Result<()> {
    // Own the path so the blocking closure can be `'static`.
    let dir = path.as_ref().to_path_buf();
    asyncify(move || std::fs::create_dir(dir)).await
}

51
vendor/tokio/src/fs/create_dir_all.rs vendored Normal file
View File

@@ -0,0 +1,51 @@
use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Recursively creates a directory and all of its parent components if they
/// are missing.
///
/// This is an async version of [`std::fs::create_dir_all`].
///
/// # Platform-specific behavior
///
/// This function currently corresponds to the `mkdir` function on Unix
/// and the `CreateDirectory` function on Windows.
/// Note that, this [may change in the future][changes].
///
/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
///
/// # Errors
///
/// This function will return an error in the following situations, but is not
/// limited to just these cases:
///
/// * If any directory in the path specified by `path` does not already exist
/// and it could not be created otherwise. The specific error conditions for
/// when a directory is being created (after it is determined to not exist) are
/// outlined by [`fs::create_dir`].
///
/// Notable exception is made for situations where any of the directories
/// specified in the `path` could not be created as it was being created concurrently.
/// Such cases are considered to be successful. That is, calling `create_dir_all`
/// concurrently from multiple threads or processes is guaranteed not to fail
/// due to a race condition with itself.
///
/// [`fs::create_dir`]: std::fs::create_dir
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
///
/// #[tokio::main]
/// async fn main() -> std::io::Result<()> {
/// fs::create_dir_all("/some/dir").await?;
/// Ok(())
/// }
/// ```
pub async fn create_dir_all(path: impl AsRef<Path>) -> io::Result<()> {
    // Own the path so the blocking closure can be `'static`.
    let target = path.as_ref().to_path_buf();
    asyncify(move || std::fs::create_dir_all(target)).await
}

129
vendor/tokio/src/fs/dir_builder.rs vendored Normal file
View File

@@ -0,0 +1,129 @@
use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// A builder for creating directories in various manners.
///
/// This is a specialized version of [`std::fs::DirBuilder`] for usage on
/// the Tokio runtime.
#[derive(Debug, Default)]
pub struct DirBuilder {
    /// Indicates whether to create parent directories if they are missing.
    recursive: bool,

    /// Sets the Unix mode for newly created directories.
    ///
    /// `None` leaves the platform default; set via `DirBuilder::mode`
    /// (Unix-only) and applied in `create`.
    #[cfg(unix)]
    pub(super) mode: Option<u32>,
}
impl DirBuilder {
    /// Returns a builder with default settings: non-recursive, with the
    /// platform's default mode/security configuration.
    ///
    /// This is an async version of [`std::fs::DirBuilder::new`].
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use tokio::fs::DirBuilder;
    ///
    /// let builder = DirBuilder::new();
    /// ```
    pub fn new() -> Self {
        Self::default()
    }

    /// Controls whether missing parent directories are created as well
    /// (they inherit the same security and permission settings).
    ///
    /// Defaults to `false`.
    ///
    /// This is an async version of [`std::fs::DirBuilder::recursive`].
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use tokio::fs::DirBuilder;
    ///
    /// let mut builder = DirBuilder::new();
    /// builder.recursive(true);
    /// ```
    pub fn recursive(&mut self, recursive: bool) -> &mut Self {
        self.recursive = recursive;
        self
    }

    /// Creates the directory at `path` using the configured options.
    ///
    /// Unless recursive mode is enabled, an already-existing directory is
    /// reported as an error.
    ///
    /// This is an async version of [`std::fs::DirBuilder::create`].
    ///
    /// # Errors
    ///
    /// An error will be returned under the following circumstances:
    ///
    /// * Path already points to an existing file.
    /// * Path already points to an existing directory and the mode is
    ///   non-recursive.
    /// * The calling process doesn't have permissions to create the directory
    ///   or its missing parents.
    /// * Other I/O error occurred.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use tokio::fs::DirBuilder;
    /// use std::io;
    ///
    /// #[tokio::main]
    /// async fn main() -> io::Result<()> {
    ///     DirBuilder::new()
    ///         .recursive(true)
    ///         .create("/tmp/foo/bar/baz")
    ///         .await?;
    ///
    ///     Ok(())
    /// }
    /// ```
    pub async fn create(&self, path: impl AsRef<Path>) -> io::Result<()> {
        let dest = path.as_ref().to_path_buf();

        // Mirror this builder's configuration onto a blocking std builder.
        let mut sync_builder = std::fs::DirBuilder::new();
        sync_builder.recursive(self.recursive);
        #[cfg(unix)]
        {
            if let Some(mode) = self.mode {
                std::os::unix::fs::DirBuilderExt::mode(&mut sync_builder, mode);
            }
        }

        // Run the actual directory creation on the blocking thread pool.
        asyncify(move || sync_builder.create(dest)).await
    }
}
feature! {
    #![unix]

    // Unix-only extension: configurable directory mode.
    impl DirBuilder {
        /// Sets the mode to create new directories with.
        ///
        /// This option defaults to 0o777.
        ///
        /// # Examples
        ///
        /// ```no_run
        /// use tokio::fs::DirBuilder;
        ///
        /// let mut builder = DirBuilder::new();
        /// builder.mode(0o775);
        /// ```
        pub fn mode(&mut self, mode: u32) -> &mut Self {
            // Stored here; applied in `create` via `DirBuilderExt::mode`.
            self.mode = Some(mode);
            self
        }
    }
}

995
vendor/tokio/src/fs/file.rs vendored Normal file
View File

@@ -0,0 +1,995 @@
//! Types for working with [`File`].
//!
//! [`File`]: File
use crate::fs::{asyncify, OpenOptions};
use crate::io::blocking::{Buf, DEFAULT_MAX_BUF_SIZE};
use crate::io::{AsyncRead, AsyncSeek, AsyncWrite, ReadBuf};
use crate::sync::Mutex;
use std::cmp;
use std::fmt;
use std::fs::{Metadata, Permissions};
use std::future::Future;
use std::io::{self, Seek, SeekFrom};
use std::path::Path;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{ready, Context, Poll};
#[cfg(test)]
use super::mocks::JoinHandle;
#[cfg(test)]
use super::mocks::MockFile as StdFile;
#[cfg(test)]
use super::mocks::{spawn_blocking, spawn_mandatory_blocking};
#[cfg(not(test))]
use crate::blocking::JoinHandle;
#[cfg(not(test))]
use crate::blocking::{spawn_blocking, spawn_mandatory_blocking};
#[cfg(not(test))]
use std::fs::File as StdFile;
/// A reference to an open file on the filesystem.
///
/// This is a specialized version of [`std::fs::File`] for usage from the
/// Tokio runtime.
///
/// An instance of a `File` can be read and/or written depending on what options
/// it was opened with. Files also implement [`AsyncSeek`] to alter the logical
/// cursor that the file contains internally.
///
/// A file will not be closed immediately when it goes out of scope if there
/// are any IO operations that have not yet completed. To ensure that a file is
/// closed immediately when it is dropped, you should call [`flush`] before
/// dropping it. Note that this does not ensure that the file has been fully
/// written to disk; the operating system might keep the changes around in an
/// in-memory buffer. See the [`sync_all`] method for telling the OS to write
/// the data to disk.
///
/// Reading and writing to a `File` is usually done using the convenience
/// methods found on the [`AsyncReadExt`] and [`AsyncWriteExt`] traits.
///
/// [`AsyncSeek`]: trait@crate::io::AsyncSeek
/// [`flush`]: fn@crate::io::AsyncWriteExt::flush
/// [`sync_all`]: fn@crate::fs::File::sync_all
/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
///
/// # Examples
///
/// Create a new file and asynchronously write bytes to it:
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt; // for write_all()
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::create("foo.txt").await?;
/// file.write_all(b"hello, world!").await?;
/// # Ok(())
/// # }
/// ```
///
/// Read the contents of a file into a buffer:
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncReadExt; // for read_to_end()
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::open("foo.txt").await?;
///
/// let mut contents = vec![];
/// file.read_to_end(&mut contents).await?;
///
/// println!("len = {}", contents.len());
/// # Ok(())
/// # }
/// ```
pub struct File {
    /// The underlying std (or, under `cfg(test)`, mocked) file handle.
    /// Shared via `Arc` so in-flight blocking operations can hold a clone.
    std: Arc<StdFile>,
    /// Buffer/state shared between reads, writes, and seeks, guarded by an
    /// async mutex.
    inner: Mutex<Inner>,
    /// Upper bound on the intermediate buffer used for a single blocking
    /// read/write operation.
    max_buf_size: usize,
}
struct Inner {
    /// Whether the file is idle (buffer available) or a blocking operation
    /// is in flight.
    state: State,

    /// Errors from writes/flushes are returned in write/flush calls. If a write
    /// error is observed while performing a read, it is saved until the next
    /// write / flush call.
    last_write_err: Option<io::ErrorKind>,

    /// Logical position, updated from the result of completed seek
    /// operations.
    pos: u64,
}
#[derive(Debug)]
enum State {
    /// No blocking operation in flight. The reusable buffer lives here; the
    /// `Option` is only `None` transiently while ownership is being moved.
    Idle(Option<Buf>),
    /// A blocking operation is running; its handle yields the operation's
    /// outcome together with the buffer so the buffer can be reused.
    Busy(JoinHandle<(Operation, Buf)>),
}
#[derive(Debug)]
enum Operation {
    /// A completed read: number of bytes read into the buffer.
    Read(io::Result<usize>),
    /// A completed write.
    Write(io::Result<()>),
    /// A completed seek: the resulting file position.
    Seek(io::Result<u64>),
}
impl File {
/// Attempts to open a file in read-only mode.
///
/// See [`OpenOptions`] for more details.
///
/// # Errors
///
/// This function will return an error if called from outside of the Tokio
/// runtime or if path does not already exist. Other errors may also be
/// returned according to `OpenOptions::open`.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncReadExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::open("foo.txt").await?;
///
/// let mut contents = vec![];
/// file.read_to_end(&mut contents).await?;
///
/// println!("len = {}", contents.len());
/// # Ok(())
/// # }
/// ```
///
/// The [`read_to_end`] method is defined on the [`AsyncReadExt`] trait.
///
/// [`read_to_end`]: fn@crate::io::AsyncReadExt::read_to_end
/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt
pub async fn open(path: impl AsRef<Path>) -> io::Result<File> {
    // Delegate to `OpenOptions` with only the `read` flag set.
    Self::options().read(true).open(path).await
}
/// Opens a file in write-only mode.
///
/// This function will create a file if it does not exist, and will truncate
/// it if it does.
///
/// See [`OpenOptions`] for more details.
///
/// # Errors
///
/// Results in an error if called from outside of the Tokio runtime or if
/// the underlying [`create`] call results in an error.
///
/// [`create`]: std::fs::File::create
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::create("foo.txt").await?;
/// file.write_all(b"hello, world!").await?;
/// # Ok(())
/// # }
/// ```
///
/// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait.
///
/// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
pub async fn create(path: impl AsRef<Path>) -> io::Result<File> {
    // write + create + truncate: make the file if missing, empty it if not.
    Self::options()
        .write(true)
        .create(true)
        .truncate(true)
        .open(path)
        .await
}
/// Opens a file in read-write mode.
///
/// This function will create a file if it does not exist, or return an error
/// if it does. This way, if the call succeeds, the file returned is guaranteed
/// to be new.
///
/// This option is useful because it is atomic. Otherwise between checking
/// whether a file exists and creating a new one, the file may have been
/// created by another process (a TOCTOU race condition / attack).
///
/// This can also be written using `File::options().read(true).write(true).create_new(true).open(...)`.
///
/// See [`OpenOptions`] for more details.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::create_new("foo.txt").await?;
/// file.write_all(b"hello, world!").await?;
/// # Ok(())
/// # }
/// ```
///
/// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait.
///
/// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
pub async fn create_new<P: AsRef<Path>>(path: P) -> std::io::Result<File> {
    // `create_new` makes the open fail if the file already exists, so a
    // successful open atomically guarantees a fresh file.
    Self::options()
        .read(true)
        .write(true)
        .create_new(true)
        .open(path)
        .await
}
/// Returns a new [`OpenOptions`] object.
///
/// This function returns a new `OpenOptions` object that you can use to
/// open or create a file with specific options if `open()` or `create()`
/// are not appropriate.
///
/// It is equivalent to `OpenOptions::new()`, but allows you to write more
/// readable code. Instead of
/// `OpenOptions::new().append(true).open("example.log")`,
/// you can write `File::options().append(true).open("example.log")`. This
/// also avoids the need to import `OpenOptions`.
///
/// See the [`OpenOptions::new`] function for more details.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut f = File::options().append(true).open("example.log").await?;
/// f.write_all(b"new line\n").await?;
/// # Ok(())
/// # }
/// ```
#[must_use]
pub fn options() -> OpenOptions {
    // Thin, more readable alias for `OpenOptions::new()`.
    OpenOptions::new()
}
/// Converts a [`std::fs::File`] to a [`tokio::fs::File`](File).
///
/// # Examples
///
/// ```no_run
/// // This line could block. It is not recommended to do this on the Tokio
/// // runtime.
/// let std_file = std::fs::File::open("foo.txt").unwrap();
/// let file = tokio::fs::File::from_std(std_file);
/// ```
pub fn from_std(std: StdFile) -> File {
    File {
        std: Arc::new(std),
        inner: Mutex::new(Inner {
            // Start idle, holding an empty reusable buffer.
            state: State::Idle(Some(Buf::with_capacity(0))),
            last_write_err: None,
            pos: 0,
        }),
        max_buf_size: DEFAULT_MAX_BUF_SIZE,
    }
}
/// Attempts to sync all OS-internal metadata to disk.
///
/// This function will attempt to ensure that all in-core data reaches the
/// filesystem before returning.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::create("foo.txt").await?;
/// file.write_all(b"hello, world!").await?;
/// file.sync_all().await?;
/// # Ok(())
/// # }
/// ```
///
/// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait.
///
/// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
pub async fn sync_all(&self) -> io::Result<()> {
    let mut inner = self.inner.lock().await;
    // Drain any in-flight blocking operation before issuing the sync.
    inner.complete_inflight().await;

    // Clone the Arc so the blocking closure can own a handle.
    let std = self.std.clone();
    asyncify(move || std.sync_all()).await
}
/// This function is similar to `sync_all`, except that it may not
/// synchronize file metadata to the filesystem.
///
/// This is intended for use cases that must synchronize content, but don't
/// need the metadata on disk. The goal of this method is to reduce disk
/// operations.
///
/// Note that some platforms may simply implement this in terms of `sync_all`.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::create("foo.txt").await?;
/// file.write_all(b"hello, world!").await?;
/// file.sync_data().await?;
/// # Ok(())
/// # }
/// ```
///
/// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait.
///
/// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
pub async fn sync_data(&self) -> io::Result<()> {
    let mut inner = self.inner.lock().await;
    // Drain any in-flight blocking operation before issuing the sync.
    inner.complete_inflight().await;

    let std = self.std.clone();
    asyncify(move || std.sync_data()).await
}
/// Truncates or extends the underlying file, updating the size of this file to become size.
///
/// If the size is less than the current file's size, then the file will be
/// shrunk. If it is greater than the current file's size, then the file
/// will be extended to size and have all of the intermediate data filled in
/// with 0s.
///
/// # Errors
///
/// This function will return an error if the file is not opened for
/// writing.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::create("foo.txt").await?;
/// file.write_all(b"hello, world!").await?;
/// file.set_len(10).await?;
/// # Ok(())
/// # }
/// ```
///
/// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait.
///
/// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
pub async fn set_len(&self, size: u64) -> io::Result<()> {
    let mut inner = self.inner.lock().await;
    inner.complete_inflight().await;
    let mut buf = match inner.state {
        // After `complete_inflight` the state must be idle; take the buffer.
        State::Idle(ref mut buf_cell) => buf_cell.take().unwrap(),
        _ => unreachable!(),
    };

    // If the buffer still holds unread (read-ahead) data, correct the real
    // file cursor first so it matches the logical position.
    // NOTE(review): `discard_read` appears to return the relative offset to
    // apply — confirm against `io::blocking::Buf`.
    let seek = if !buf.is_empty() {
        Some(SeekFrom::Current(buf.discard_read()))
    } else {
        None
    };

    let std = self.std.clone();

    inner.state = State::Busy(spawn_blocking(move || {
        let res = if let Some(seek) = seek {
            (&*std).seek(seek).and_then(|_| std.set_len(size))
        } else {
            std.set_len(size)
        }
        .map(|()| 0); // the value is discarded later

        // Return the result as a seek
        (Operation::Seek(res), buf)
    }));

    // Await the blocking task inline and restore the idle state.
    let (op, buf) = match inner.state {
        State::Idle(_) => unreachable!(),
        State::Busy(ref mut rx) => rx.await?,
    };

    inner.state = State::Idle(Some(buf));

    match op {
        Operation::Seek(res) => res.map(|pos| {
            inner.pos = pos;
        }),
        _ => unreachable!(),
    }
}
/// Queries metadata about the underlying file.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
///
/// # async fn dox() -> std::io::Result<()> {
/// let file = File::open("foo.txt").await?;
/// let metadata = file.metadata().await?;
///
/// println!("{:?}", metadata);
/// # Ok(())
/// # }
/// ```
pub async fn metadata(&self) -> io::Result<Metadata> {
    // Metadata queries don't touch the shared read/write state, so no lock.
    let std = self.std.clone();
    asyncify(move || std.metadata()).await
}
/// Creates a new `File` instance that shares the same underlying file handle
/// as the existing `File` instance. Reads, writes, and seeks will affect both
/// File instances simultaneously.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
///
/// # async fn dox() -> std::io::Result<()> {
/// let file = File::open("foo.txt").await?;
/// let file_clone = file.try_clone().await?;
/// # Ok(())
/// # }
/// ```
pub async fn try_clone(&self) -> io::Result<File> {
    // Finish any pending blocking operation before duplicating the handle.
    self.inner.lock().await.complete_inflight().await;
    let std = self.std.clone();
    let std_file = asyncify(move || std.try_clone()).await?;
    let mut file = File::from_std(std_file);
    // Propagate the configured buffer cap to the clone.
    file.set_max_buf_size(self.max_buf_size);
    Ok(file)
}
/// Destructures `File` into a [`std::fs::File`]. This function is
/// async to allow any in-flight operations to complete.
///
/// Use `File::try_into_std` to attempt conversion immediately.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
///
/// # async fn dox() -> std::io::Result<()> {
/// let tokio_file = File::open("foo.txt").await?;
/// let std_file = tokio_file.into_std().await;
/// # Ok(())
/// # }
/// ```
pub async fn into_std(mut self) -> StdFile {
    self.inner.get_mut().complete_inflight().await;
    // Panics if another clone of the Arc is still alive; `complete_inflight`
    // above should have released any held by in-flight operations.
    Arc::try_unwrap(self.std).expect("Arc::try_unwrap failed")
}
/// Tries to immediately destructure `File` into a [`std::fs::File`].
///
/// # Errors
///
/// This function will return an error containing the file if some
/// operation is in-flight.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
///
/// # async fn dox() -> std::io::Result<()> {
/// let tokio_file = File::open("foo.txt").await?;
/// let std_file = tokio_file.try_into_std().unwrap();
/// # Ok(())
/// # }
/// ```
#[allow(clippy::result_large_err)]
pub fn try_into_std(mut self) -> Result<StdFile, Self> {
    match Arc::try_unwrap(self.std) {
        Ok(file) => Ok(file),
        Err(std_file_arc) => {
            // Another reference (an in-flight operation) still holds the
            // file; restore the Arc and hand the whole handle back.
            self.std = std_file_arc;
            Err(self)
        }
    }
}
/// Changes the permissions on the underlying file.
///
/// # Platform-specific behavior
///
/// This function currently corresponds to the `fchmod` function on Unix and
/// the `SetFileInformationByHandle` function on Windows. Note that, this
/// [may change in the future][changes].
///
/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
///
/// # Errors
///
/// This function will return an error if the user lacks permission change
/// attributes on the underlying file. It may also return an error in other
/// os-specific unspecified cases.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
///
/// # async fn dox() -> std::io::Result<()> {
/// let file = File::open("foo.txt").await?;
/// let mut perms = file.metadata().await?.permissions();
/// perms.set_readonly(true);
/// file.set_permissions(perms).await?;
/// # Ok(())
/// # }
/// ```
pub async fn set_permissions(&self, perm: Permissions) -> io::Result<()> {
    // Runs on the blocking pool; does not touch the shared read/write state.
    let std = self.std.clone();
    asyncify(move || std.set_permissions(perm)).await
}
/// Set the maximum buffer size for the underlying [`AsyncRead`] / [`AsyncWrite`] operation.
///
/// Although Tokio uses a sensible default value for this buffer size, this function would be
/// useful for changing that default depending on the situation.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::open("foo.txt").await?;
///
/// // Set maximum buffer size to 8 MiB
/// file.set_max_buf_size(8 * 1024 * 1024);
///
/// let mut buf = vec![1; 1024 * 1024 * 1024];
///
/// // Write the 1 GiB buffer in chunks up to 8 MiB each.
/// file.write_all(&mut buf).await?;
/// # Ok(())
/// # }
/// ```
pub fn set_max_buf_size(&mut self, max_buf_size: usize) {
    // Takes effect for subsequent read/write operations (see `poll_read`'s
    // `cmp::min` against `dst.remaining()`).
    self.max_buf_size = max_buf_size;
}

/// Get the maximum buffer size for the underlying [`AsyncRead`] / [`AsyncWrite`] operation.
pub fn max_buf_size(&self) -> usize {
    self.max_buf_size
}
}
impl AsyncRead for File {
    // State machine: Idle -> serve buffered data or spawn a blocking read;
    // Busy -> poll the blocking task and dispatch on which operation
    // finished (a read may first have to drain a prior write or seek).
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        dst: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        ready!(crate::trace::trace_leaf(cx));
        let me = self.get_mut();
        let inner = me.inner.get_mut();

        loop {
            match inner.state {
                State::Idle(ref mut buf_cell) => {
                    let mut buf = buf_cell.take().unwrap();

                    // Serve already-buffered data (or a zero-length request)
                    // without spawning a blocking task.
                    if !buf.is_empty() || dst.remaining() == 0 {
                        buf.copy_to(dst);
                        *buf_cell = Some(buf);
                        return Poll::Ready(Ok(()));
                    }

                    let std = me.std.clone();

                    // Never read more than the caller asked for, capped by
                    // the configured max buffer size.
                    let max_buf_size = cmp::min(dst.remaining(), me.max_buf_size);
                    inner.state = State::Busy(spawn_blocking(move || {
                        // SAFETY: the `Read` implementation of `std` does not
                        // read from the buffer it is borrowing and correctly
                        // reports the length of the data written into the buffer.
                        let res = unsafe { buf.read_from(&mut &*std, max_buf_size) };
                        (Operation::Read(res), buf)
                    }));
                }
                State::Busy(ref mut rx) => {
                    let (op, mut buf) = ready!(Pin::new(rx).poll(cx))?;

                    match op {
                        Operation::Read(Ok(_)) => {
                            buf.copy_to(dst);
                            inner.state = State::Idle(Some(buf));
                            return Poll::Ready(Ok(()));
                        }
                        Operation::Read(Err(e)) => {
                            assert!(buf.is_empty());

                            inner.state = State::Idle(Some(buf));
                            return Poll::Ready(Err(e));
                        }
                        Operation::Write(Ok(())) => {
                            assert!(buf.is_empty());
                            inner.state = State::Idle(Some(buf));
                            // A previous write finished; retry the read.
                            continue;
                        }
                        Operation::Write(Err(e)) => {
                            // Stash the write error; it is surfaced by the
                            // next write/flush call (see `last_write_err`).
                            assert!(inner.last_write_err.is_none());
                            inner.last_write_err = Some(e.kind());
                            inner.state = State::Idle(Some(buf));
                        }
                        Operation::Seek(result) => {
                            assert!(buf.is_empty());
                            inner.state = State::Idle(Some(buf));
                            if let Ok(pos) = result {
                                inner.pos = pos;
                            }
                            // Seek completed; loop back and start the read.
                            continue;
                        }
                    }
                }
            }
        }
    }
}
impl AsyncSeek for File {
    fn start_seek(self: Pin<&mut Self>, mut pos: SeekFrom) -> io::Result<()> {
        let me = self.get_mut();
        let inner = me.inner.get_mut();
        match inner.state {
            // A seek cannot be queued behind an in-flight operation; the
            // caller must drain it with `poll_complete` first.
            State::Busy(_) => Err(io::Error::new(
                io::ErrorKind::Other,
                "other file operation is pending, call poll_complete before start_seek",
            )),
            State::Idle(ref mut buf_cell) => {
                let mut buf = buf_cell.take().unwrap();
                // Factor in any unread data from the buf: the OS cursor is
                // ahead of the logical position by the number of buffered,
                // unconsumed bytes, so `Current`-relative seeks are adjusted.
                if !buf.is_empty() {
                    let n = buf.discard_read();
                    if let SeekFrom::Current(ref mut offset) = pos {
                        *offset += n;
                    }
                }
                let std = me.std.clone();
                inner.state = State::Busy(spawn_blocking(move || {
                    let res = (&*std).seek(pos);
                    (Operation::Seek(res), buf)
                }));
                Ok(())
            }
        }
    }
    fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
        ready!(crate::trace::trace_leaf(cx));
        let inner = self.inner.get_mut();
        loop {
            match inner.state {
                // Nothing in flight: report the last known position.
                State::Idle(_) => return Poll::Ready(Ok(inner.pos)),
                State::Busy(ref mut rx) => {
                    let (op, buf) = ready!(Pin::new(rx).poll(cx))?;
                    inner.state = State::Idle(Some(buf));
                    match op {
                        Operation::Read(_) => {}
                        Operation::Write(Err(e)) => {
                            // Stash the failure; it surfaces on the next
                            // write/flush, not from `poll_complete`.
                            assert!(inner.last_write_err.is_none());
                            inner.last_write_err = Some(e.kind());
                        }
                        Operation::Write(_) => {}
                        Operation::Seek(res) => {
                            if let Ok(pos) = res {
                                inner.pos = pos;
                            }
                            return Poll::Ready(res);
                        }
                    }
                }
            }
        }
    }
}
impl AsyncWrite for File {
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        src: &[u8],
    ) -> Poll<io::Result<usize>> {
        ready!(crate::trace::trace_leaf(cx));
        let me = self.get_mut();
        let inner = me.inner.get_mut();
        // Surface any error recorded by a previously completed background
        // write before accepting new data.
        if let Some(e) = inner.last_write_err.take() {
            return Poll::Ready(Err(e.into()));
        }
        loop {
            match inner.state {
                State::Idle(ref mut buf_cell) => {
                    let mut buf = buf_cell.take().unwrap();
                    // Unread buffered data means the OS cursor ran ahead of
                    // the logical position; rewind before writing.
                    let seek = if !buf.is_empty() {
                        Some(SeekFrom::Current(buf.discard_read()))
                    } else {
                        None
                    };
                    // The caller's bytes (up to `max_buf_size`) are copied so
                    // the write can complete in the background; this is why
                    // `Ready(Ok(n))` is returned immediately below.
                    let n = buf.copy_from(src, me.max_buf_size);
                    let std = me.std.clone();
                    // "Mandatory" spawn: returns `None` if the job could not
                    // be queued, reported as an error rather than losing data.
                    let blocking_task_join_handle = spawn_mandatory_blocking(move || {
                        let res = if let Some(seek) = seek {
                            (&*std).seek(seek).and_then(|_| buf.write_to(&mut &*std))
                        } else {
                            buf.write_to(&mut &*std)
                        };
                        (Operation::Write(res), buf)
                    })
                    .ok_or_else(|| {
                        io::Error::new(io::ErrorKind::Other, "background task failed")
                    })?;
                    inner.state = State::Busy(blocking_task_join_handle);
                    return Poll::Ready(Ok(n));
                }
                State::Busy(ref mut rx) => {
                    let (op, buf) = ready!(Pin::new(rx).poll(cx))?;
                    inner.state = State::Idle(Some(buf));
                    match op {
                        Operation::Read(_) => {
                            // We don't care about the result here. The fact
                            // that the cursor has advanced will be reflected in
                            // the next iteration of the loop
                            continue;
                        }
                        Operation::Write(res) => {
                            // If the previous write was successful, continue.
                            // Otherwise, error.
                            res?;
                            continue;
                        }
                        Operation::Seek(_) => {
                            // Ignore the seek
                            continue;
                        }
                    }
                }
            }
        }
    }
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> Poll<Result<usize, io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        let me = self.get_mut();
        let inner = me.inner.get_mut();
        if let Some(e) = inner.last_write_err.take() {
            return Poll::Ready(Err(e.into()));
        }
        loop {
            match inner.state {
                State::Idle(ref mut buf_cell) => {
                    let mut buf = buf_cell.take().unwrap();
                    let seek = if !buf.is_empty() {
                        Some(SeekFrom::Current(buf.discard_read()))
                    } else {
                        None
                    };
                    // Same flow as `poll_write`, but gathers from multiple
                    // slices into the intermediate buffer.
                    let n = buf.copy_from_bufs(bufs, me.max_buf_size);
                    let std = me.std.clone();
                    let blocking_task_join_handle = spawn_mandatory_blocking(move || {
                        let res = if let Some(seek) = seek {
                            (&*std).seek(seek).and_then(|_| buf.write_to(&mut &*std))
                        } else {
                            buf.write_to(&mut &*std)
                        };
                        (Operation::Write(res), buf)
                    })
                    .ok_or_else(|| {
                        io::Error::new(io::ErrorKind::Other, "background task failed")
                    })?;
                    inner.state = State::Busy(blocking_task_join_handle);
                    return Poll::Ready(Ok(n));
                }
                State::Busy(ref mut rx) => {
                    let (op, buf) = ready!(Pin::new(rx).poll(cx))?;
                    inner.state = State::Idle(Some(buf));
                    match op {
                        Operation::Read(_) => {
                            // We don't care about the result here. The fact
                            // that the cursor has advanced will be reflected in
                            // the next iteration of the loop
                            continue;
                        }
                        Operation::Write(res) => {
                            // If the previous write was successful, continue.
                            // Otherwise, error.
                            res?;
                            continue;
                        }
                        Operation::Seek(_) => {
                            // Ignore the seek
                            continue;
                        }
                    }
                }
            }
        }
    }
    fn is_write_vectored(&self) -> bool {
        // Vectored writes are accepted: they gather into the intermediate
        // buffer rather than issuing a true vectored syscall.
        true
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        let inner = self.inner.get_mut();
        inner.poll_flush(cx)
    }
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        // Files have no shutdown handshake; flushing is sufficient.
        self.poll_flush(cx)
    }
}
impl From<StdFile> for File {
fn from(std: StdFile) -> Self {
Self::from_std(std)
}
}
impl fmt::Debug for File {
    // The debug representation intentionally exposes only the wrapped std
    // handle; internal buffering state is omitted.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut repr = f.debug_struct("tokio::fs::File");
        repr.field("std", &self.std);
        repr.finish()
    }
}
#[cfg(unix)]
impl std::os::unix::io::AsRawFd for File {
    // Exposes the raw fd of the wrapped std file; ownership is not transferred.
    fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
        self.std.as_raw_fd()
    }
}
#[cfg(unix)]
impl std::os::unix::io::AsFd for File {
    /// Borrows the file descriptor of the wrapped [`std::fs::File`].
    fn as_fd(&self) -> std::os::unix::io::BorrowedFd<'_> {
        // `std::fs::File` implements `AsFd` (stable since Rust 1.63), so
        // delegate directly instead of round-tripping through the raw fd
        // with an `unsafe` `BorrowedFd::borrow_raw`. The returned borrow is
        // tied to `&self`, preserving the original lifetime.
        std::os::unix::io::AsFd::as_fd(&*self.std)
    }
}
#[cfg(unix)]
impl std::os::unix::io::FromRawFd for File {
    /// # Safety
    ///
    /// Exactly the same safety contract as
    /// [`std::os::unix::io::FromRawFd::from_raw_fd`].
    unsafe fn from_raw_fd(fd: std::os::unix::io::RawFd) -> Self {
        // Safety: exactly the same safety contract as
        // `std::os::unix::io::FromRawFd::from_raw_fd`.
        unsafe { StdFile::from_raw_fd(fd).into() }
    }
}
// Windows raw-handle conversions, mirroring the Unix fd impls above.
cfg_windows! {
    use crate::os::windows::io::{AsRawHandle, FromRawHandle, RawHandle, AsHandle, BorrowedHandle};
    impl AsRawHandle for File {
        // Exposes the raw handle of the wrapped std file; ownership stays
        // with `File`.
        fn as_raw_handle(&self) -> RawHandle {
            self.std.as_raw_handle()
        }
    }
    impl AsHandle for File {
        fn as_handle(&self) -> BorrowedHandle<'_> {
            // SAFETY: the handle is valid for as long as `self` is alive,
            // which bounds the lifetime of the returned `BorrowedHandle`.
            unsafe {
                BorrowedHandle::borrow_raw(
                    AsRawHandle::as_raw_handle(self),
                )
            }
        }
    }
    impl FromRawHandle for File {
        unsafe fn from_raw_handle(handle: RawHandle) -> Self {
            // Safety: exactly the same safety contract as
            // `FromRawHandle::from_raw_handle`.
            unsafe { StdFile::from_raw_handle(handle).into() }
        }
    }
}
impl Inner {
    /// Waits for any in-flight background operation to finish, stashing a
    /// failed write's error kind in `last_write_err` for later reporting.
    async fn complete_inflight(&mut self) {
        use std::future::poll_fn;
        poll_fn(|cx| self.poll_complete_inflight(cx)).await;
    }
    fn poll_complete_inflight(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        ready!(crate::trace::trace_leaf(cx));
        match self.poll_flush(cx) {
            Poll::Ready(Err(e)) => {
                // Remember the failure; it surfaces on the next write/flush.
                self.last_write_err = Some(e.kind());
                Poll::Ready(())
            }
            Poll::Ready(Ok(())) => Poll::Ready(()),
            Poll::Pending => Poll::Pending,
        }
    }
    /// Drives the pending background operation (if any) to completion,
    /// reporting a pending write's outcome or a previously deferred error.
    fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        if let Some(e) = self.last_write_err.take() {
            return Poll::Ready(Err(e.into()));
        }
        let (op, buf) = match self.state {
            State::Idle(_) => return Poll::Ready(Ok(())),
            State::Busy(ref mut rx) => ready!(Pin::new(rx).poll(cx))?,
        };
        // The buffer is not used here
        self.state = State::Idle(Some(buf));
        match op {
            // Only write results matter for flushing; reads/seeks are
            // considered complete once the pool task returned.
            Operation::Read(_) => Poll::Ready(Ok(())),
            Operation::Write(res) => Poll::Ready(res),
            Operation::Seek(_) => Poll::Ready(Ok(())),
        }
    }
}
#[cfg(test)]
mod tests;

978
vendor/tokio/src/fs/file/tests.rs vendored Normal file
View File

@@ -0,0 +1,978 @@
use super::*;
use crate::{
fs::mocks::*,
io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt},
};
use mockall::{predicate::eq, Sequence};
use tokio_test::{assert_pending, assert_ready_err, assert_ready_ok, task};
const HELLO: &[u8] = b"hello world...";
const FOO: &[u8] = b"foo bar baz...";
// A read on a fresh file is dispatched to the blocking pool and completes
// only after the pool runs the queued job.
#[test]
fn open_read() {
    let mut file = MockFile::default();
    file.expect_inner_read().once().returning(|buf| {
        buf[0..HELLO.len()].copy_from_slice(HELLO);
        Ok(HELLO.len())
    });
    let mut file = File::from_std(file);
    let mut buf = [0; 1024];
    let mut t = task::spawn(file.read(&mut buf));
    assert_eq!(0, pool::len());
    assert_pending!(t.poll());
    assert_eq!(1, pool::len());
    pool::run_one();
    assert!(t.is_woken());
    let n = assert_ready_ok!(t.poll());
    assert_eq!(n, HELLO.len());
    assert_eq!(&buf[..n], HELLO);
}
// Polling a pending read twice must not enqueue a second pool job.
#[test]
fn read_twice_before_dispatch() {
    let mut file = MockFile::default();
    file.expect_inner_read().once().returning(|buf| {
        buf[0..HELLO.len()].copy_from_slice(HELLO);
        Ok(HELLO.len())
    });
    let mut file = File::from_std(file);
    let mut buf = [0; 1024];
    let mut t = task::spawn(file.read(&mut buf));
    assert_pending!(t.poll());
    assert_pending!(t.poll());
    assert_eq!(pool::len(), 1);
    pool::run_one();
    assert!(t.is_woken());
    let n = assert_ready_ok!(t.poll());
    assert_eq!(&buf[..n], HELLO);
}
// When the destination is smaller than the buffered data, the surplus is
// retained and served by subsequent reads without touching the pool.
#[test]
fn read_with_smaller_buf() {
    let mut file = MockFile::default();
    file.expect_inner_read().once().returning(|buf| {
        buf[0..HELLO.len()].copy_from_slice(HELLO);
        Ok(HELLO.len())
    });
    let mut file = File::from_std(file);
    {
        let mut buf = [0; 32];
        let mut t = task::spawn(file.read(&mut buf));
        assert_pending!(t.poll());
    }
    pool::run_one();
    {
        let mut buf = [0; 4];
        let mut t = task::spawn(file.read(&mut buf));
        let n = assert_ready_ok!(t.poll());
        assert_eq!(n, 4);
        assert_eq!(&buf[..], &HELLO[..n]);
    }
    // Calling again immediately succeeds with the rest of the buffer
    let mut buf = [0; 32];
    let mut t = task::spawn(file.read(&mut buf));
    let n = assert_ready_ok!(t.poll());
    assert_eq!(n, 10);
    assert_eq!(&buf[..n], &HELLO[4..]);
    assert_eq!(0, pool::len());
}
// A 4-byte destination caps the first pool read; a later, larger read drains
// the leftover and then dispatches a second pool job for the remainder.
#[test]
fn read_with_bigger_buf() {
    let mut seq = Sequence::new();
    let mut file = MockFile::default();
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(|buf| {
            buf[0..4].copy_from_slice(&HELLO[..4]);
            Ok(4)
        });
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(|buf| {
            buf[0..HELLO.len() - 4].copy_from_slice(&HELLO[4..]);
            Ok(HELLO.len() - 4)
        });
    let mut file = File::from_std(file);
    {
        let mut buf = [0; 4];
        let mut t = task::spawn(file.read(&mut buf));
        assert_pending!(t.poll());
    }
    pool::run_one();
    {
        let mut buf = [0; 32];
        let mut t = task::spawn(file.read(&mut buf));
        let n = assert_ready_ok!(t.poll());
        assert_eq!(n, 4);
        assert_eq!(&buf[..n], &HELLO[..n]);
    }
    // Calling again immediately succeeds with the rest of the buffer
    let mut buf = [0; 32];
    let mut t = task::spawn(file.read(&mut buf));
    assert_pending!(t.poll());
    assert_eq!(1, pool::len());
    pool::run_one();
    assert!(t.is_woken());
    let n = assert_ready_ok!(t.poll());
    assert_eq!(n, 10);
    assert_eq!(&buf[..n], &HELLO[4..]);
    assert_eq!(0, pool::len());
}
// A read error is reported once; the file remains usable afterwards.
#[test]
fn read_err_then_read_success() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(|_| Err(io::ErrorKind::Other.into()));
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(|buf| {
            buf[0..HELLO.len()].copy_from_slice(HELLO);
            Ok(HELLO.len())
        });
    let mut file = File::from_std(file);
    {
        let mut buf = [0; 32];
        let mut t = task::spawn(file.read(&mut buf));
        assert_pending!(t.poll());
        pool::run_one();
        assert_ready_err!(t.poll());
    }
    {
        let mut buf = [0; 32];
        let mut t = task::spawn(file.read(&mut buf));
        assert_pending!(t.poll());
        pool::run_one();
        let n = assert_ready_ok!(t.poll());
        assert_eq!(n, HELLO.len());
        assert_eq!(&buf[..n], HELLO);
    }
}
// Writes complete immediately (data is buffered); the pool job runs later and
// a subsequent flush observes its completion.
#[test]
fn open_write() {
    let mut file = MockFile::default();
    file.expect_inner_write()
        .once()
        .with(eq(HELLO))
        .returning(|buf| Ok(buf.len()));
    let mut file = File::from_std(file);
    let mut t = task::spawn(file.write(HELLO));
    assert_eq!(0, pool::len());
    assert_ready_ok!(t.poll());
    assert_eq!(1, pool::len());
    pool::run_one();
    assert!(!t.is_woken());
    let mut t = task::spawn(file.flush());
    assert_ready_ok!(t.poll());
}
// Flushing with no pending operation completes immediately.
#[test]
fn flush_while_idle() {
    let file = MockFile::default();
    let mut file = File::from_std(file);
    let mut t = task::spawn(file.flush());
    assert_ready_ok!(t.poll());
}
// Reads larger than DEFAULT_MAX_BUF_SIZE are split into max-sized chunks,
// one pool dispatch per chunk.
#[test]
#[cfg_attr(miri, ignore)] // takes a really long time with miri
fn read_with_buffer_larger_than_max() {
    // Chunks
    let chunk_a = crate::io::blocking::DEFAULT_MAX_BUF_SIZE;
    let chunk_b = chunk_a * 2;
    let chunk_c = chunk_a * 3;
    let chunk_d = chunk_a * 4;
    assert_eq!(chunk_d / 1024 / 1024, 8);
    let mut data = vec![];
    for i in 0..(chunk_d - 1) {
        data.push((i % 151) as u8);
    }
    let data = Arc::new(data);
    let d0 = data.clone();
    let d1 = data.clone();
    let d2 = data.clone();
    let d3 = data.clone();
    let mut seq = Sequence::new();
    let mut file = MockFile::default();
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(move |buf| {
            buf[0..chunk_a].copy_from_slice(&d0[0..chunk_a]);
            Ok(chunk_a)
        });
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(move |buf| {
            buf[..chunk_a].copy_from_slice(&d1[chunk_a..chunk_b]);
            Ok(chunk_b - chunk_a)
        });
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(move |buf| {
            buf[..chunk_a].copy_from_slice(&d2[chunk_b..chunk_c]);
            Ok(chunk_c - chunk_b)
        });
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(move |buf| {
            buf[..chunk_a - 1].copy_from_slice(&d3[chunk_c..]);
            Ok(chunk_a - 1)
        });
    let mut file = File::from_std(file);
    let mut actual = vec![0; chunk_d];
    let mut pos = 0;
    while pos < data.len() {
        let mut t = task::spawn(file.read(&mut actual[pos..]));
        assert_pending!(t.poll());
        pool::run_one();
        assert!(t.is_woken());
        let n = assert_ready_ok!(t.poll());
        // Each dispatch returns at most one max-sized chunk.
        assert!(n <= chunk_a);
        pos += n;
    }
    assert_eq!(&data[..], &actual[..data.len()]);
}
// Writes larger than DEFAULT_MAX_BUF_SIZE are likewise chunked; after the
// first write, each subsequent write waits for the pending pool job.
#[test]
#[cfg_attr(miri, ignore)] // takes a really long time with miri
fn write_with_buffer_larger_than_max() {
    // Chunks
    let chunk_a = crate::io::blocking::DEFAULT_MAX_BUF_SIZE;
    let chunk_b = chunk_a * 2;
    let chunk_c = chunk_a * 3;
    let chunk_d = chunk_a * 4;
    assert_eq!(chunk_d / 1024 / 1024, 8);
    let mut data = vec![];
    for i in 0..(chunk_d - 1) {
        data.push((i % 151) as u8);
    }
    let data = Arc::new(data);
    let d0 = data.clone();
    let d1 = data.clone();
    let d2 = data.clone();
    let d3 = data.clone();
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .withf(move |buf| buf == &d0[0..chunk_a])
        .returning(|buf| Ok(buf.len()));
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .withf(move |buf| buf == &d1[chunk_a..chunk_b])
        .returning(|buf| Ok(buf.len()));
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .withf(move |buf| buf == &d2[chunk_b..chunk_c])
        .returning(|buf| Ok(buf.len()));
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .withf(move |buf| buf == &d3[chunk_c..chunk_d - 1])
        .returning(|buf| Ok(buf.len()));
    let mut file = File::from_std(file);
    let mut rem = &data[..];
    let mut first = true;
    while !rem.is_empty() {
        let mut task = task::spawn(file.write(rem));
        if !first {
            assert_pending!(task.poll());
            pool::run_one();
            assert!(task.is_woken());
        }
        first = false;
        let n = assert_ready_ok!(task.poll());
        rem = &rem[n..];
    }
    pool::run_one();
}
// A second write (and a flush) queued behind an in-flight write waits for
// the pool and then proceeds in order.
#[test]
fn write_twice_before_dispatch() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .with(eq(HELLO))
        .returning(|buf| Ok(buf.len()));
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .with(eq(FOO))
        .returning(|buf| Ok(buf.len()));
    let mut file = File::from_std(file);
    let mut t = task::spawn(file.write(HELLO));
    assert_ready_ok!(t.poll());
    let mut t = task::spawn(file.write(FOO));
    assert_pending!(t.poll());
    assert_eq!(pool::len(), 1);
    pool::run_one();
    assert!(t.is_woken());
    assert_ready_ok!(t.poll());
    let mut t = task::spawn(file.flush());
    assert_pending!(t.poll());
    assert_eq!(pool::len(), 1);
    pool::run_one();
    assert!(t.is_woken());
    assert_ready_ok!(t.poll());
}
// Writing after a fully buffered (unconsumed) read must first seek back over
// all the unread bytes.
#[test]
fn incomplete_read_followed_by_write() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(|buf| {
            buf[0..HELLO.len()].copy_from_slice(HELLO);
            Ok(HELLO.len())
        });
    file.expect_inner_seek()
        .once()
        .with(eq(SeekFrom::Current(-(HELLO.len() as i64))))
        .in_sequence(&mut seq)
        .returning(|_| Ok(0));
    file.expect_inner_write()
        .once()
        .with(eq(FOO))
        .returning(|_| Ok(FOO.len()));
    let mut file = File::from_std(file);
    let mut buf = [0; 32];
    let mut t = task::spawn(file.read(&mut buf));
    assert_pending!(t.poll());
    pool::run_one();
    let mut t = task::spawn(file.write(FOO));
    assert_ready_ok!(t.poll());
    assert_eq!(pool::len(), 1);
    pool::run_one();
    let mut t = task::spawn(file.flush());
    assert_ready_ok!(t.poll());
}
// If part of the buffered read was consumed, the rewind only covers the
// remaining unread bytes (-10 here, after 4 of the 14 were read).
#[test]
fn incomplete_partial_read_followed_by_write() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(|buf| {
            buf[0..HELLO.len()].copy_from_slice(HELLO);
            Ok(HELLO.len())
        });
    file.expect_inner_seek()
        .once()
        .in_sequence(&mut seq)
        .with(eq(SeekFrom::Current(-10)))
        .returning(|_| Ok(0));
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .with(eq(FOO))
        .returning(|_| Ok(FOO.len()));
    let mut file = File::from_std(file);
    let mut buf = [0; 32];
    let mut t = task::spawn(file.read(&mut buf));
    assert_pending!(t.poll());
    pool::run_one();
    let mut buf = [0; 4];
    let mut t = task::spawn(file.read(&mut buf));
    assert_ready_ok!(t.poll());
    let mut t = task::spawn(file.write(FOO));
    assert_ready_ok!(t.poll());
    assert_eq!(pool::len(), 1);
    pool::run_one();
    let mut t = task::spawn(file.flush());
    assert_ready_ok!(t.poll());
}
// Flush after a buffered read completes without forcing the rewind; the seek
// happens lazily when the next write is issued.
#[test]
fn incomplete_read_followed_by_flush() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(|buf| {
            buf[0..HELLO.len()].copy_from_slice(HELLO);
            Ok(HELLO.len())
        });
    file.expect_inner_seek()
        .once()
        .in_sequence(&mut seq)
        .with(eq(SeekFrom::Current(-(HELLO.len() as i64))))
        .returning(|_| Ok(0));
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .with(eq(FOO))
        .returning(|_| Ok(FOO.len()));
    let mut file = File::from_std(file);
    let mut buf = [0; 32];
    let mut t = task::spawn(file.read(&mut buf));
    assert_pending!(t.poll());
    pool::run_one();
    let mut t = task::spawn(file.flush());
    assert_ready_ok!(t.poll());
    let mut t = task::spawn(file.write(FOO));
    assert_ready_ok!(t.poll());
    pool::run_one();
}
// A write does not need to wait for a still-pending flush to finish.
#[test]
fn incomplete_flush_followed_by_write() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .with(eq(HELLO))
        .returning(|_| Ok(HELLO.len()));
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .with(eq(FOO))
        .returning(|_| Ok(FOO.len()));
    let mut file = File::from_std(file);
    let mut t = task::spawn(file.write(HELLO));
    let n = assert_ready_ok!(t.poll());
    assert_eq!(n, HELLO.len());
    let mut t = task::spawn(file.flush());
    assert_pending!(t.poll());
    // TODO: Move under write
    pool::run_one();
    let mut t = task::spawn(file.write(FOO));
    assert_ready_ok!(t.poll());
    pool::run_one();
    let mut t = task::spawn(file.flush());
    assert_ready_ok!(t.poll());
}
// A failed background read surfaces as an error on the awaiting read.
#[test]
fn read_err() {
    let mut file = MockFile::default();
    file.expect_inner_read()
        .once()
        .returning(|_| Err(io::ErrorKind::Other.into()));
    let mut file = File::from_std(file);
    let mut buf = [0; 1024];
    let mut t = task::spawn(file.read(&mut buf));
    assert_pending!(t.poll());
    pool::run_one();
    assert!(t.is_woken());
    assert_ready_err!(t.poll());
}
// A background write failure is deferred and reported by the NEXT write.
#[test]
fn write_write_err() {
    let mut file = MockFile::default();
    file.expect_inner_write()
        .once()
        .returning(|_| Err(io::ErrorKind::Other.into()));
    let mut file = File::from_std(file);
    let mut t = task::spawn(file.write(HELLO));
    assert_ready_ok!(t.poll());
    pool::run_one();
    let mut t = task::spawn(file.write(FOO));
    assert_ready_err!(t.poll());
}
// A deferred write error survives an intervening read and still fails the
// next write.
#[test]
fn write_read_write_err() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .returning(|_| Err(io::ErrorKind::Other.into()));
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(|buf| {
            buf[0..HELLO.len()].copy_from_slice(HELLO);
            Ok(HELLO.len())
        });
    let mut file = File::from_std(file);
    let mut t = task::spawn(file.write(HELLO));
    assert_ready_ok!(t.poll());
    pool::run_one();
    let mut buf = [0; 1024];
    let mut t = task::spawn(file.read(&mut buf));
    assert_pending!(t.poll());
    pool::run_one();
    let mut t = task::spawn(file.write(FOO));
    assert_ready_err!(t.poll());
}
// A deferred write error also fails a subsequent flush after a read.
#[test]
fn write_read_flush_err() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .returning(|_| Err(io::ErrorKind::Other.into()));
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(|buf| {
            buf[0..HELLO.len()].copy_from_slice(HELLO);
            Ok(HELLO.len())
        });
    let mut file = File::from_std(file);
    let mut t = task::spawn(file.write(HELLO));
    assert_ready_ok!(t.poll());
    pool::run_one();
    let mut buf = [0; 1024];
    let mut t = task::spawn(file.read(&mut buf));
    assert_pending!(t.poll());
    pool::run_one();
    let mut t = task::spawn(file.flush());
    assert_ready_err!(t.poll());
}
// A deferred write error survives an intervening seek and fails the next write.
#[test]
fn write_seek_write_err() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .returning(|_| Err(io::ErrorKind::Other.into()));
    file.expect_inner_seek()
        .once()
        .with(eq(SeekFrom::Start(0)))
        .in_sequence(&mut seq)
        .returning(|_| Ok(0));
    let mut file = File::from_std(file);
    let mut t = task::spawn(file.write(HELLO));
    assert_ready_ok!(t.poll());
    pool::run_one();
    {
        let mut t = task::spawn(file.seek(SeekFrom::Start(0)));
        assert_pending!(t.poll());
    }
    pool::run_one();
    let mut t = task::spawn(file.write(FOO));
    assert_ready_err!(t.poll());
}
// A deferred write error survives an intervening seek and fails the next flush.
#[test]
fn write_seek_flush_err() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .returning(|_| Err(io::ErrorKind::Other.into()));
    file.expect_inner_seek()
        .once()
        .with(eq(SeekFrom::Start(0)))
        .in_sequence(&mut seq)
        .returning(|_| Ok(0));
    let mut file = File::from_std(file);
    let mut t = task::spawn(file.write(HELLO));
    assert_ready_ok!(t.poll());
    pool::run_one();
    {
        let mut t = task::spawn(file.seek(SeekFrom::Start(0)));
        assert_pending!(t.poll());
    }
    pool::run_one();
    let mut t = task::spawn(file.flush());
    assert_ready_err!(t.poll());
}
// sync_all waits for the pending background write (first pool job) before
// running the sync itself (second pool job).
#[test]
fn sync_all_ordered_after_write() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .with(eq(HELLO))
        .returning(|_| Ok(HELLO.len()));
    file.expect_sync_all().once().returning(|| Ok(()));
    let mut file = File::from_std(file);
    let mut t = task::spawn(file.write(HELLO));
    assert_ready_ok!(t.poll());
    let mut t = task::spawn(file.sync_all());
    assert_pending!(t.poll());
    assert_eq!(1, pool::len());
    pool::run_one();
    assert!(t.is_woken());
    assert_pending!(t.poll());
    assert_eq!(1, pool::len());
    pool::run_one();
    assert!(t.is_woken());
    assert_ready_ok!(t.poll());
}
// As above, but the sync step itself fails.
#[test]
fn sync_all_err_ordered_after_write() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .with(eq(HELLO))
        .returning(|_| Ok(HELLO.len()));
    file.expect_sync_all()
        .once()
        .returning(|| Err(io::ErrorKind::Other.into()));
    let mut file = File::from_std(file);
    let mut t = task::spawn(file.write(HELLO));
    assert_ready_ok!(t.poll());
    let mut t = task::spawn(file.sync_all());
    assert_pending!(t.poll());
    assert_eq!(1, pool::len());
    pool::run_one();
    assert!(t.is_woken());
    assert_pending!(t.poll());
    assert_eq!(1, pool::len());
    pool::run_one();
    assert!(t.is_woken());
    assert_ready_err!(t.poll());
}
// sync_data has the same ordering guarantee as sync_all.
#[test]
fn sync_data_ordered_after_write() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .with(eq(HELLO))
        .returning(|_| Ok(HELLO.len()));
    file.expect_sync_data().once().returning(|| Ok(()));
    let mut file = File::from_std(file);
    let mut t = task::spawn(file.write(HELLO));
    assert_ready_ok!(t.poll());
    let mut t = task::spawn(file.sync_data());
    assert_pending!(t.poll());
    assert_eq!(1, pool::len());
    pool::run_one();
    assert!(t.is_woken());
    assert_pending!(t.poll());
    assert_eq!(1, pool::len());
    pool::run_one();
    assert!(t.is_woken());
    assert_ready_ok!(t.poll());
}
// As above, but the sync_data step itself fails.
#[test]
fn sync_data_err_ordered_after_write() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .with(eq(HELLO))
        .returning(|_| Ok(HELLO.len()));
    file.expect_sync_data()
        .once()
        .returning(|| Err(io::ErrorKind::Other.into()));
    let mut file = File::from_std(file);
    let mut t = task::spawn(file.write(HELLO));
    assert_ready_ok!(t.poll());
    let mut t = task::spawn(file.sync_data());
    assert_pending!(t.poll());
    assert_eq!(1, pool::len());
    pool::run_one();
    assert!(t.is_woken());
    assert_pending!(t.poll());
    assert_eq!(1, pool::len());
    pool::run_one();
    assert!(t.is_woken());
    assert_ready_err!(t.poll());
}
// set_len is dispatched to the pool and resolves once the job runs.
#[test]
fn open_set_len_ok() {
    let mut file = MockFile::default();
    file.expect_set_len().with(eq(123)).returning(|_| Ok(()));
    let file = File::from_std(file);
    let mut t = task::spawn(file.set_len(123));
    assert_pending!(t.poll());
    pool::run_one();
    assert!(t.is_woken());
    assert_ready_ok!(t.poll());
}
// A failing set_len propagates its error to the caller.
#[test]
fn open_set_len_err() {
    let mut file = MockFile::default();
    file.expect_set_len()
        .with(eq(123))
        .returning(|_| Err(io::ErrorKind::Other.into()));
    let file = File::from_std(file);
    let mut t = task::spawn(file.set_len(123));
    assert_pending!(t.poll());
    pool::run_one();
    assert!(t.is_woken());
    assert_ready_err!(t.poll());
}
// set_len after a buffered read first rewinds over the unread bytes, then
// truncates; a following read starts from the rewound position.
#[test]
fn partial_read_set_len_ok() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(|buf| {
            buf[0..HELLO.len()].copy_from_slice(HELLO);
            Ok(HELLO.len())
        });
    file.expect_inner_seek()
        .once()
        .with(eq(SeekFrom::Current(-(HELLO.len() as i64))))
        .in_sequence(&mut seq)
        .returning(|_| Ok(0));
    file.expect_set_len()
        .once()
        .in_sequence(&mut seq)
        .with(eq(123))
        .returning(|_| Ok(()));
    file.expect_inner_read()
        .once()
        .in_sequence(&mut seq)
        .returning(|buf| {
            buf[0..FOO.len()].copy_from_slice(FOO);
            Ok(FOO.len())
        });
    let mut buf = [0; 32];
    let mut file = File::from_std(file);
    {
        let mut t = task::spawn(file.read(&mut buf));
        assert_pending!(t.poll());
    }
    pool::run_one();
    {
        let mut t = task::spawn(file.set_len(123));
        assert_pending!(t.poll());
        pool::run_one();
        assert_ready_ok!(t.poll());
    }
    let mut t = task::spawn(file.read(&mut buf));
    assert_pending!(t.poll());
    pool::run_one();
    let n = assert_ready_ok!(t.poll());
    assert_eq!(n, FOO.len());
    assert_eq!(&buf[..n], FOO);
}
// Seeking through a BufReader while the file has a deferred write error
// fails immediately instead of issuing the seek.
#[test]
fn busy_file_seek_error() {
    let mut file = MockFile::default();
    let mut seq = Sequence::new();
    file.expect_inner_write()
        .once()
        .in_sequence(&mut seq)
        .returning(|_| Err(io::ErrorKind::Other.into()));
    let mut file = crate::io::BufReader::new(File::from_std(file));
    {
        let mut t = task::spawn(file.write(HELLO));
        assert_ready_ok!(t.poll());
    }
    pool::run_one();
    let mut t = task::spawn(file.seek(SeekFrom::Start(0)));
    assert_ready_err!(t.poll());
}

44
vendor/tokio/src/fs/hard_link.rs vendored Normal file
View File

@@ -0,0 +1,44 @@
use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Creates a new hard link on the filesystem.
///
/// This is an async version of [`std::fs::hard_link`].
///
/// The `link` path will be a link pointing to the `original` path. Note that systems
/// often require these two paths to both be located on the same filesystem.
///
/// # Platform-specific behavior
///
/// This function currently corresponds to the `link` function on Unix
/// and the `CreateHardLink` function on Windows.
/// Note that, this [may change in the future][changes].
///
/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
///
/// # Errors
///
/// This function will return an error in the following situations, but is not
/// limited to just these cases:
///
/// * The `original` path is not a file or doesn't exist.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
///
/// #[tokio::main]
/// async fn main() -> std::io::Result<()> {
///     fs::hard_link("a.txt", "b.txt").await?; // Hard link a.txt to b.txt
///     Ok(())
/// }
/// ```
pub async fn hard_link(original: impl AsRef<Path>, link: impl AsRef<Path>) -> io::Result<()> {
    // Take owned copies up front so the closure can be `'static` when it is
    // shipped to the blocking pool.
    let (original, link) = (original.as_ref().to_owned(), link.as_ref().to_owned());
    asyncify(move || std::fs::hard_link(original, link)).await
}

46
vendor/tokio/src/fs/metadata.rs vendored Normal file
View File

@@ -0,0 +1,46 @@
use crate::fs::asyncify;
use std::fs::Metadata;
use std::io;
use std::path::Path;
/// Given a path, queries the file system to get information about a file,
/// directory, etc.
///
/// This is an async version of [`std::fs::metadata`].
///
/// This function will traverse symbolic links to query information about the
/// destination file.
///
/// # Platform-specific behavior
///
/// This function currently corresponds to the `stat` function on Unix and the
/// `GetFileAttributesEx` function on Windows. Note that, this [may change in
/// the future][changes].
///
/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
///
/// # Errors
///
/// This function will return an error in the following situations, but is not
/// limited to just these cases:
///
/// * The user lacks permissions to perform `metadata` call on `path`.
/// * `path` does not exist.
///
/// # Examples
///
/// ```rust,no_run
/// use tokio::fs;
///
/// #[tokio::main]
/// async fn main() -> std::io::Result<()> {
///     let attr = fs::metadata("/some/file/path.txt").await?;
///     // inspect attr ...
///     Ok(())
/// }
/// ```
pub async fn metadata(path: impl AsRef<Path>) -> io::Result<Metadata> {
    // Owned copy so the closure is `'static` for the blocking pool.
    let owned = path.as_ref().to_owned();
    asyncify(move || std::fs::metadata(owned)).await
}

176
vendor/tokio/src/fs/mocks.rs vendored Normal file
View File

@@ -0,0 +1,176 @@
//! Mock version of std::fs::File;
use mockall::mock;
use crate::sync::oneshot;
#[cfg(all(test, unix))]
use std::os::fd::{AsRawFd, FromRawFd, OwnedFd};
use std::{
cell::RefCell,
collections::VecDeque,
fs::{Metadata, Permissions},
future::Future,
io::{self, Read, Seek, SeekFrom, Write},
path::PathBuf,
pin::Pin,
task::{Context, Poll},
};
// Generated mock of `std::fs::File`: the `mockall::mock!` invocation produces
// a `MockFile` type with `expect_*` methods for each function listed below,
// plus the raw-handle/fd conversion traits matching the real `File`.
mock! {
    #[derive(Debug)]
    pub File {
        pub fn create(pb: PathBuf) -> io::Result<Self>;
        // These inner_ methods exist because std::fs::File has two
        // implementations for each of these methods: one on "&mut self" and
        // one on "&&self". Defining both of those in terms of an inner_ method
        // allows us to specify the expectation the same way, regardless of
        // which method is used.
        pub fn inner_flush(&self) -> io::Result<()>;
        pub fn inner_read(&self, dst: &mut [u8]) -> io::Result<usize>;
        pub fn inner_seek(&self, pos: SeekFrom) -> io::Result<u64>;
        pub fn inner_write(&self, src: &[u8]) -> io::Result<usize>;
        pub fn metadata(&self) -> io::Result<Metadata>;
        pub fn open(pb: PathBuf) -> io::Result<Self>;
        pub fn set_len(&self, size: u64) -> io::Result<()>;
        pub fn set_permissions(&self, _perm: Permissions) -> io::Result<()>;
        pub fn set_max_buf_size(&self, max_buf_size: usize);
        pub fn sync_all(&self) -> io::Result<()>;
        pub fn sync_data(&self) -> io::Result<()>;
        pub fn try_clone(&self) -> io::Result<Self>;
    }
    #[cfg(windows)]
    impl std::os::windows::io::AsRawHandle for File {
        fn as_raw_handle(&self) -> std::os::windows::io::RawHandle;
    }
    #[cfg(windows)]
    impl std::os::windows::io::FromRawHandle for File {
        unsafe fn from_raw_handle(h: std::os::windows::io::RawHandle) -> Self;
    }
    #[cfg(unix)]
    impl std::os::unix::io::AsRawFd for File {
        fn as_raw_fd(&self) -> std::os::unix::io::RawFd;
    }
    #[cfg(unix)]
    impl std::os::unix::io::FromRawFd for File {
        unsafe fn from_raw_fd(h: std::os::unix::io::RawFd) -> Self;
    }
}
// `&mut self` flavor of `read`; forwards to the single `inner_read`
// expectation (see the inner_ comment in `mock!` above).
impl Read for MockFile {
    fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
        // Placate Miri. Tokio will call this method with an uninitialized
        // buffer, which is ok because std::io::Read::read implementations don't usually read
        // from their input buffers. But Mockall 0.12-0.13 will try to Debug::fmt the
        // buffer, even if there is no failure, triggering an uninitialized data access alert from
        // Miri. Initialize the data here just to prevent those Miri alerts.
        // This can be removed after upgrading to Mockall 0.14.
        dst.fill(0);
        self.inner_read(dst)
    }
}
// `&&self` flavor of `read`, matching `impl Read for &std::fs::File`;
// forwards to the same `inner_read` expectation as the `&mut self` impl.
impl Read for &'_ MockFile {
    fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
        // Placate Miri. Tokio will call this method with an uninitialized
        // buffer, which is ok because std::io::Read::read implementations don't usually read
        // from their input buffers. But Mockall 0.12-0.13 will try to Debug::fmt the
        // buffer, even if there is no failure, triggering an uninitialized data access alert from
        // Miri. Initialize the data here just to prevent those Miri alerts.
        // This can be removed after upgrading to Mockall 0.14.
        dst.fill(0);
        self.inner_read(dst)
    }
}
// Seeking on a shared reference, matching `impl Seek for &std::fs::File`.
impl Seek for &'_ MockFile {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        self.inner_seek(pos)
    }
}
// Writing through a shared reference, matching `impl Write for &std::fs::File`;
// both methods forward to the shared inner_ expectations.
impl Write for &'_ MockFile {
    fn write(&mut self, src: &[u8]) -> io::Result<usize> {
        self.inner_write(src)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.inner_flush()
    }
}
#[cfg(all(test, unix))]
impl From<MockFile> for OwnedFd {
    #[inline]
    fn from(file: MockFile) -> OwnedFd {
        // SAFETY: `from_raw_fd` takes ownership of the descriptor returned by
        // the mocked `as_raw_fd` expectation. This presumably relies on each
        // test wiring the expectation to an fd it actually owns — NOTE(review):
        // confirm against the tests that use this conversion.
        unsafe { OwnedFd::from_raw_fd(file.as_raw_fd()) }
    }
}
// Thread-local FIFO of pending blocking tasks. Tests drive it manually
// through `pool::run_one` instead of a real threadpool.
tokio_thread_local! {
    static QUEUE: RefCell<VecDeque<Box<dyn FnOnce() + Send>>> = RefCell::new(VecDeque::new())
}
// Handle returned by the mock `spawn_blocking`; resolves once the queued
// closure has been executed by `pool::run_one`.
#[derive(Debug)]
pub(super) struct JoinHandle<T> {
    // Receives the closure's result after the task runs.
    rx: oneshot::Receiver<T>,
}
/// Mock replacement for the runtime's `spawn_blocking`: instead of running
/// `f` on a threadpool, enqueue it on the thread-local `QUEUE` and hand the
/// caller a handle that resolves once `pool::run_one` executes the task.
pub(super) fn spawn_blocking<F, R>(f: F) -> JoinHandle<R>
where
    F: FnOnce() -> R + Send + 'static,
    R: Send + 'static,
{
    let (result_tx, result_rx) = oneshot::channel();
    // Box the task and push it onto the per-thread queue; the send result is
    // ignored because the receiver may have been dropped by the caller.
    QUEUE.with(|queue| {
        queue.borrow_mut().push_back(Box::new(move || {
            let _ = result_tx.send(f());
        }))
    });
    JoinHandle { rx: result_rx }
}
pub(super) fn spawn_mandatory_blocking<F, R>(f: F) -> Option<JoinHandle<R>>
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
let (tx, rx) = oneshot::channel();
let task = Box::new(move || {
let _ = tx.send(f());
});
QUEUE.with(|cell| cell.borrow_mut().push_back(task));
Some(JoinHandle { rx })
}
impl<T> Future for JoinHandle<T> {
    type Output = Result<T, io::Error>;

    /// Forward readiness to the oneshot receiver.
    ///
    /// An `Err` from the receiver means the queued task was dropped without
    /// ever sending a result; that indicates a broken test, so panic loudly.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Note: the redundant function-local `use std::task::Poll;` was
        // removed — `Poll` is already imported at the top of this file.
        match Pin::new(&mut self.rx).poll(cx) {
            Poll::Ready(Ok(v)) => Poll::Ready(Ok(v)),
            Poll::Ready(Err(e)) => panic!("error = {e:?}"),
            Poll::Pending => Poll::Pending,
        }
    }
}
// Test-only stand-in for the blocking pool: tasks queued by the mock
// `spawn_blocking` are executed synchronously, one at a time, on demand.
pub(super) mod pool {
    use super::*;

    // Number of tasks queued on this thread but not yet run.
    pub(in super::super) fn len() -> usize {
        QUEUE.with(|cell| cell.borrow().len())
    }

    // Pop and execute the oldest queued task. Panics if the queue is empty so
    // a test expecting work that was never submitted fails loudly.
    pub(in super::super) fn run_one() {
        let task = QUEUE
            .with(|cell| cell.borrow_mut().pop_front())
            .expect("expected task to run, but none ready");
        task();
    }
}

324
vendor/tokio/src/fs/mod.rs vendored Normal file
View File

@@ -0,0 +1,324 @@
#![cfg(not(loom))]
//! Asynchronous file utilities.
//!
//! This module contains utility methods for working with the file system
//! asynchronously. This includes reading/writing to files, and working with
//! directories.
//!
//! Be aware that most operating systems do not provide asynchronous file system
//! APIs. Because of that, Tokio will use ordinary blocking file operations
//! behind the scenes. This is done using the [`spawn_blocking`] threadpool to
//! run them in the background.
//!
//! The `tokio::fs` module should only be used for ordinary files. Trying to use
//! it with e.g., a named pipe on Linux can result in surprising behavior,
//! such as hangs during runtime shutdown. For special files, you should use a
//! dedicated type such as [`tokio::net::unix::pipe`] or [`AsyncFd`] instead.
//!
//! Currently, Tokio will always use [`spawn_blocking`] on all platforms, but it
//! may be changed to use asynchronous file system APIs such as io_uring in the
//! future.
//!
//! # Usage
//!
//! The easiest way to use this module is to use the utility functions that
//! operate on entire files:
//!
//! * [`tokio::fs::read`](fn@crate::fs::read)
//! * [`tokio::fs::read_to_string`](fn@crate::fs::read_to_string)
//! * [`tokio::fs::write`](fn@crate::fs::write)
//!
//! The two `read` functions read the entire file and return its contents.
//! The `write` function takes the contents of the file and writes those
//! contents to the file. It overwrites the existing file, if any.
//!
//! For example, to read the file:
//!
//! ```
//! # async fn dox() -> std::io::Result<()> {
//! let contents = tokio::fs::read_to_string("my_file.txt").await?;
//!
//! println!("File has {} lines.", contents.lines().count());
//! # Ok(())
//! # }
//! ```
//!
//! To overwrite the file:
//!
//! ```
//! # async fn dox() -> std::io::Result<()> {
//! let contents = "First line.\nSecond line.\nThird line.\n";
//!
//! tokio::fs::write("my_file.txt", contents.as_bytes()).await?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Using `File`
//!
//! The main type for interacting with files is [`File`]. It can be used to read
//! from and write to a given file. This is done using the [`AsyncRead`] and
//! [`AsyncWrite`] traits. This type is generally used when you want to do
//! something more complex than just reading or writing the entire contents in
//! one go.
//!
//! **Note:** It is important to use [`flush`] when writing to a Tokio
//! [`File`]. This is because calls to `write` will return before the write has
//! finished, and [`flush`] will wait for the write to finish. (The write will
//! happen even if you don't flush; it will just happen later.) This is
//! different from [`std::fs::File`], and is due to the fact that `File` uses
//! `spawn_blocking` behind the scenes.
//!
//! For example, to count the number of lines in a file without loading the
//! entire file into memory:
//!
//! ```no_run
//! use tokio::fs::File;
//! use tokio::io::AsyncReadExt;
//!
//! # async fn dox() -> std::io::Result<()> {
//! let mut file = File::open("my_file.txt").await?;
//!
//! let mut chunk = vec![0; 4096];
//! let mut number_of_lines = 0;
//! loop {
//! let len = file.read(&mut chunk).await?;
//! if len == 0 {
//! // Length of zero means end of file.
//! break;
//! }
//! for &b in &chunk[..len] {
//! if b == b'\n' {
//! number_of_lines += 1;
//! }
//! }
//! }
//!
//! println!("File has {} lines.", number_of_lines);
//! # Ok(())
//! # }
//! ```
//!
//! For example, to write a file line-by-line:
//!
//! ```no_run
//! use tokio::fs::File;
//! use tokio::io::AsyncWriteExt;
//!
//! # async fn dox() -> std::io::Result<()> {
//! let mut file = File::create("my_file.txt").await?;
//!
//! file.write_all(b"First line.\n").await?;
//! file.write_all(b"Second line.\n").await?;
//! file.write_all(b"Third line.\n").await?;
//!
//! // Remember to call `flush` after writing!
//! file.flush().await?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Tuning your file IO
//!
//! Tokio's file uses [`spawn_blocking`] behind the scenes, and this has serious
//! performance consequences. To get good performance with file IO on Tokio, it
//! is recommended to batch your operations into as few `spawn_blocking` calls
//! as possible.
//!
//! One example of this difference can be seen by comparing the two reading
//! examples above. The first example uses [`tokio::fs::read`], which reads the
//! entire file in a single `spawn_blocking` call, and then returns it. The
//! second example will read the file in chunks using many `spawn_blocking`
//! calls. This means that the second example will most likely be more expensive
//! for large files. (Of course, using chunks may be necessary for very large
//! files that don't fit in memory.)
//!
//! The following examples will show some strategies for this:
//!
//! When creating a file, write the data to a `String` or `Vec<u8>` and then
//! write the entire file in a single `spawn_blocking` call with
//! `tokio::fs::write`.
//!
//! ```no_run
//! # async fn dox() -> std::io::Result<()> {
//! let mut contents = String::new();
//!
//! contents.push_str("First line.\n");
//! contents.push_str("Second line.\n");
//! contents.push_str("Third line.\n");
//!
//! tokio::fs::write("my_file.txt", contents.as_bytes()).await?;
//! # Ok(())
//! # }
//! ```
//!
//! Use [`BufReader`] and [`BufWriter`] to buffer many small reads or writes
//! into a few large ones. This example will most likely only perform one
//! `spawn_blocking` call.
//!
//! ```no_run
//! use tokio::fs::File;
//! use tokio::io::{AsyncWriteExt, BufWriter};
//!
//! # async fn dox() -> std::io::Result<()> {
//! let mut file = BufWriter::new(File::create("my_file.txt").await?);
//!
//! file.write_all(b"First line.\n").await?;
//! file.write_all(b"Second line.\n").await?;
//! file.write_all(b"Third line.\n").await?;
//!
//! // Due to the BufWriter, the actual write and spawn_blocking
//! // call happens when you flush.
//! file.flush().await?;
//! # Ok(())
//! # }
//! ```
//!
//! Manually use [`std::fs`] inside [`spawn_blocking`].
//!
//! ```no_run
//! use std::fs::File;
//! use std::io::{self, Write};
//! use tokio::task::spawn_blocking;
//!
//! # async fn dox() -> std::io::Result<()> {
//! spawn_blocking(move || {
//! let mut file = File::create("my_file.txt")?;
//!
//! file.write_all(b"First line.\n")?;
//! file.write_all(b"Second line.\n")?;
//! file.write_all(b"Third line.\n")?;
//!
//! // Unlike Tokio's file, the std::fs file does
//! // not need flush.
//!
//! io::Result::Ok(())
//! }).await.unwrap()?;
//! # Ok(())
//! # }
//! ```
//!
//! It's also good to be aware of [`File::set_max_buf_size`], which controls the
//! maximum amount of bytes that Tokio's [`File`] will read or write in a single
//! [`spawn_blocking`] call. The default is two megabytes, but this is subject
//! to change.
//!
//! [`spawn_blocking`]: fn@crate::task::spawn_blocking
//! [`AsyncRead`]: trait@crate::io::AsyncRead
//! [`AsyncWrite`]: trait@crate::io::AsyncWrite
//! [`BufReader`]: struct@crate::io::BufReader
//! [`BufWriter`]: struct@crate::io::BufWriter
//! [`tokio::net::unix::pipe`]: crate::net::unix::pipe
//! [`AsyncFd`]: crate::io::unix::AsyncFd
//! [`flush`]: crate::io::AsyncWriteExt::flush
//! [`tokio::fs::read`]: fn@crate::fs::read
mod canonicalize;
pub use self::canonicalize::canonicalize;
mod create_dir;
pub use self::create_dir::create_dir;
mod create_dir_all;
pub use self::create_dir_all::create_dir_all;
mod dir_builder;
pub use self::dir_builder::DirBuilder;
mod file;
pub use self::file::File;
mod hard_link;
pub use self::hard_link::hard_link;
mod metadata;
pub use self::metadata::metadata;
mod open_options;
pub use self::open_options::OpenOptions;
mod read;
pub use self::read::read;
mod read_dir;
pub use self::read_dir::{read_dir, DirEntry, ReadDir};
mod read_link;
pub use self::read_link::read_link;
mod read_to_string;
pub use self::read_to_string::read_to_string;
mod remove_dir;
pub use self::remove_dir::remove_dir;
mod remove_dir_all;
pub use self::remove_dir_all::remove_dir_all;
mod remove_file;
pub use self::remove_file::remove_file;
mod rename;
pub use self::rename::rename;
mod set_permissions;
pub use self::set_permissions::set_permissions;
mod symlink_metadata;
pub use self::symlink_metadata::symlink_metadata;
mod write;
pub use self::write::write;
mod copy;
pub use self::copy::copy;
mod try_exists;
pub use self::try_exists::try_exists;
#[cfg(test)]
mod mocks;
feature! {
#![unix]
mod symlink;
pub use self::symlink::symlink;
}
cfg_windows! {
mod symlink_dir;
pub use self::symlink_dir::symlink_dir;
mod symlink_file;
pub use self::symlink_file::symlink_file;
}
cfg_io_uring! {
pub(crate) mod read_uring;
pub(crate) use self::read_uring::read_uring;
pub(crate) use self::open_options::UringOpenOptions;
}
use std::io;
#[cfg(not(test))]
use crate::blocking::spawn_blocking;
#[cfg(test)]
use mocks::spawn_blocking;
/// Run the blocking closure `f` on the blocking pool and await its result.
///
/// A failed join (the background task never produced a value) is surfaced to
/// the caller as a generic I/O error rather than a panic.
pub(crate) async fn asyncify<F, T>(f: F) -> io::Result<T>
where
    F: FnOnce() -> io::Result<T> + Send + 'static,
    T: Send + 'static,
{
    spawn_blocking(f)
        .await
        .map_err(|_| io::Error::new(io::ErrorKind::Other, "background task failed"))
        .and_then(|result| result)
}

856
vendor/tokio/src/fs/open_options.rs vendored Normal file
View File

@@ -0,0 +1,856 @@
use crate::fs::{asyncify, File};
use std::io;
use std::path::Path;
cfg_io_uring! {
mod uring_open_options;
pub(crate) use uring_open_options::UringOpenOptions;
use crate::runtime::driver::op::Op;
}
#[cfg(test)]
mod mock_open_options;
#[cfg(test)]
use mock_open_options::MockOpenOptions as StdOpenOptions;
#[cfg(not(test))]
use std::fs::OpenOptions as StdOpenOptions;
#[cfg(unix)]
use std::os::unix::fs::OpenOptionsExt;
#[cfg(windows)]
use std::os::windows::fs::OpenOptionsExt;
/// Options and flags which can be used to configure how a file is opened.
///
/// This builder exposes the ability to configure how a [`File`] is opened and
/// what operations are permitted on the open file. The [`File::open`] and
/// [`File::create`] methods are aliases for commonly used options using this
/// builder.
///
/// Generally speaking, when using `OpenOptions`, you'll first call [`new`],
/// then chain calls to methods to set each option, then call [`open`], passing
/// the path of the file you're trying to open. This will give you a
/// [`io::Result`] with a [`File`] inside that you can further operate
/// on.
///
/// This is a specialized version of [`std::fs::OpenOptions`] for usage from
/// the Tokio runtime.
///
/// `From<std::fs::OpenOptions>` is implemented for more advanced configuration
/// than the methods provided here.
///
/// [`new`]: OpenOptions::new
/// [`open`]: OpenOptions::open
/// [`File`]: File
/// [`File::open`]: File::open
/// [`File::create`]: File::create
///
/// # Examples
///
/// Opening a file to read:
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .read(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
///
/// Opening a file for both reading and writing, as well as creating it if it
/// doesn't exist:
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .read(true)
/// .write(true)
/// .create(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
#[derive(Clone, Debug)]
pub struct OpenOptions {
    // Which backend these options target: plain std (via the blocking pool)
    // or io_uring on supported Linux configurations.
    inner: Kind,
}
/// Backend selected for a set of `OpenOptions`.
#[derive(Debug, Clone)]
enum Kind {
    // `std::fs::OpenOptions`, executed on the blocking pool.
    Std(StdOpenOptions),
    // io_uring-backed options; only compiled with the full unstable
    // io-uring feature stack on Linux.
    #[cfg(all(
        tokio_unstable,
        feature = "io-uring",
        feature = "rt",
        feature = "fs",
        target_os = "linux"
    ))]
    Uring(UringOpenOptions),
}
impl OpenOptions {
/// Creates a blank new set of options ready for configuration.
///
/// All options are initially set to `false`.
///
/// This is an async version of [`std::fs::OpenOptions::new`][std]
///
/// [std]: std::fs::OpenOptions::new
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
///
/// let mut options = OpenOptions::new();
/// let future = options.read(true).open("foo.txt");
/// ```
pub fn new() -> OpenOptions {
    // Default to the io_uring backend when the full unstable feature stack is
    // compiled in; `open` still falls back to the std path at runtime if the
    // kernel lacks support (see the `check_and_init` branch there).
    #[cfg(all(
        tokio_unstable,
        feature = "io-uring",
        feature = "rt",
        feature = "fs",
        target_os = "linux"
    ))]
    let inner = Kind::Uring(UringOpenOptions::new());
    // Everywhere else, wrap the plain std builder.
    #[cfg(not(all(
        tokio_unstable,
        feature = "io-uring",
        feature = "rt",
        feature = "fs",
        target_os = "linux"
    )))]
    let inner = Kind::Std(StdOpenOptions::new());

    OpenOptions { inner }
}
/// Sets the option for read access.
///
/// This option, when true, will indicate that the file should be
/// `read`-able if opened.
///
/// This is an async version of [`std::fs::OpenOptions::read`][std]
///
/// [std]: std::fs::OpenOptions::read
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .read(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
pub fn read(&mut self, read: bool) -> &mut OpenOptions {
    // Apply the flag to whichever backend these options were built for.
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.read(read);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.read(read);
        }
    }
    self
}
/// Sets the option for write access.
///
/// This option, when true, will indicate that the file should be
/// `write`-able if opened.
///
/// This is an async version of [`std::fs::OpenOptions::write`][std]
///
/// [std]: std::fs::OpenOptions::write
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .write(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
pub fn write(&mut self, write: bool) -> &mut OpenOptions {
    // Apply the flag to whichever backend these options were built for.
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.write(write);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.write(write);
        }
    }
    self
}
/// Sets the option for the append mode.
///
/// This option, when true, means that writes will append to a file instead
/// of overwriting previous contents. Note that setting
/// `.write(true).append(true)` has the same effect as setting only
/// `.append(true)`.
///
/// For most filesystems, the operating system guarantees that all writes are
/// atomic: no writes get mangled because another process writes at the same
/// time.
///
/// One maybe obvious note when using append-mode: make sure that all data
/// that belongs together is written to the file in one operation. This
/// can be done by concatenating strings before passing them to [`write()`],
/// or using a buffered writer (with a buffer of adequate size),
/// and calling [`flush()`] when the message is complete.
///
/// If a file is opened with both read and append access, beware that after
/// opening, and after every write, the position for reading may be set at the
/// end of the file. So, before writing, save the current position (using
/// [`seek`]`(`[`SeekFrom`]`::`[`Current`]`(0))`), and restore it before the next read.
///
/// This is an async version of [`std::fs::OpenOptions::append`][std]
///
/// [std]: std::fs::OpenOptions::append
///
/// ## Note
///
/// This function doesn't create the file if it doesn't exist. Use the [`create`]
/// method to do so.
///
/// [`write()`]: crate::io::AsyncWriteExt::write
/// [`flush()`]: crate::io::AsyncWriteExt::flush
/// [`seek`]: crate::io::AsyncSeekExt::seek
/// [`SeekFrom`]: std::io::SeekFrom
/// [`Current`]: std::io::SeekFrom::Current
/// [`create`]: OpenOptions::create
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .append(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
pub fn append(&mut self, append: bool) -> &mut OpenOptions {
    // Apply the flag to whichever backend these options were built for.
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.append(append);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.append(append);
        }
    }
    self
}
/// Sets the option for truncating a previous file.
///
/// If a file is successfully opened with this option set it will truncate
/// the file to 0 length if it already exists.
///
/// The file must be opened with write access for truncate to work.
///
/// This is an async version of [`std::fs::OpenOptions::truncate`][std]
///
/// [std]: std::fs::OpenOptions::truncate
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .write(true)
/// .truncate(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions {
    // Apply the flag to whichever backend these options were built for.
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.truncate(truncate);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.truncate(truncate);
        }
    }
    self
}
/// Sets the option for creating a new file.
///
/// This option indicates whether a new file will be created if the file
/// does not yet already exist.
///
/// In order for the file to be created, [`write`] or [`append`] access must
/// be used.
///
/// This is an async version of [`std::fs::OpenOptions::create`][std]
///
/// [std]: std::fs::OpenOptions::create
/// [`write`]: OpenOptions::write
/// [`append`]: OpenOptions::append
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .write(true)
/// .create(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
pub fn create(&mut self, create: bool) -> &mut OpenOptions {
    // Apply the flag to whichever backend these options were built for.
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.create(create);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.create(create);
        }
    }
    self
}
/// Sets the option to always create a new file.
///
/// This option indicates whether a new file will be created. No file is
/// allowed to exist at the target location, also no (dangling) symlink.
///
/// This option is useful because it is atomic. Otherwise between checking
/// whether a file exists and creating a new one, the file may have been
/// created by another process (a TOCTOU race condition / attack).
///
/// If `.create_new(true)` is set, [`.create()`] and [`.truncate()`] are
/// ignored.
///
/// The file must be opened with write or append access in order to create a
/// new file.
///
/// This is an async version of [`std::fs::OpenOptions::create_new`][std]
///
/// [std]: std::fs::OpenOptions::create_new
/// [`.create()`]: OpenOptions::create
/// [`.truncate()`]: OpenOptions::truncate
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .write(true)
/// .create_new(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
pub fn create_new(&mut self, create_new: bool) -> &mut OpenOptions {
    // Apply the flag to whichever backend these options were built for.
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.create_new(create_new);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.create_new(create_new);
        }
    }
    self
}
/// Opens a file at `path` with the options specified by `self`.
///
/// This is an async version of [`std::fs::OpenOptions::open`][std]
///
/// [std]: std::fs::OpenOptions::open
///
/// # Errors
///
/// This function will return an error under a number of different
/// circumstances. Some of these error conditions are listed here, together
/// with their [`ErrorKind`]. The mapping to [`ErrorKind`]s is not part of
/// the compatibility contract of the function, especially the `Other` kind
/// might change to more specific kinds in the future.
///
/// * [`NotFound`]: The specified file does not exist and neither `create`
/// or `create_new` is set.
/// * [`NotFound`]: One of the directory components of the file path does
/// not exist.
/// * [`PermissionDenied`]: The user lacks permission to get the specified
/// access rights for the file.
/// * [`PermissionDenied`]: The user lacks permission to open one of the
/// directory components of the specified path.
/// * [`AlreadyExists`]: `create_new` was specified and the file already
/// exists.
/// * [`InvalidInput`]: Invalid combinations of open options (truncate
/// without write access, no access mode set, etc.).
/// * [`Other`]: One of the directory components of the specified file path
/// was not, in fact, a directory.
/// * [`Other`]: Filesystem-level errors: full disk, write permission
/// requested on a read-only file system, exceeded disk quota, too many
/// open files, too long filename, too many symbolic links in the
/// specified path (Unix-like systems only), etc.
///
/// # io_uring support
///
/// On Linux, you can also use `io_uring` for executing system calls.
/// To enable `io_uring`, you need to specify the `--cfg tokio_unstable`
/// flag at compile time, enable the `io-uring` cargo feature, and set the
/// `Builder::enable_io_uring` runtime option.
///
/// Support for `io_uring` is currently experimental, so its behavior may
/// change or it may be removed in future versions.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new().open("foo.txt").await?;
/// Ok(())
/// }
/// ```
///
/// [`ErrorKind`]: std::io::ErrorKind
/// [`AlreadyExists`]: std::io::ErrorKind::AlreadyExists
/// [`InvalidInput`]: std::io::ErrorKind::InvalidInput
/// [`NotFound`]: std::io::ErrorKind::NotFound
/// [`Other`]: std::io::ErrorKind::Other
/// [`PermissionDenied`]: std::io::ErrorKind::PermissionDenied
pub async fn open(&self, path: impl AsRef<Path>) -> io::Result<File> {
    match &self.inner {
        // std backend: clone the options and open on the blocking pool.
        Kind::Std(opts) => Self::std_open(opts, path).await,
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            let handle = crate::runtime::Handle::current();
            let driver_handle = handle.inner.driver().io();
            // Ask the io driver whether the kernel supports the OpenAt
            // opcode (initializing uring state if needed). If it does not,
            // convert the uring options back into std options and fall back
            // to the blocking-pool path.
            if driver_handle
                .check_and_init(io_uring::opcode::OpenAt::CODE)
                .await?
            {
                Op::open(path.as_ref(), opts)?.await
            } else {
                let opts = opts.clone().into();
                Self::std_open(&opts, path).await
            }
        }
    }
}
/// Open `path` with the std options on the blocking pool and wrap the
/// resulting handle in a Tokio [`File`].
async fn std_open(opts: &StdOpenOptions, path: impl AsRef<Path>) -> io::Result<File> {
    // Owned copies so the closure is `'static` for the blocking pool.
    let owned_path = path.as_ref().to_owned();
    let options = opts.clone();
    asyncify(move || options.open(owned_path))
        .await
        .map(File::from_std)
}
#[cfg(windows)]
pub(super) fn as_inner_mut(&mut self) -> &mut StdOpenOptions {
    // The `Uring` variant is only compiled on Linux, so on Windows this
    // single-arm match is exhaustive.
    match &mut self.inner {
        Kind::Std(ref mut opts) => opts,
    }
}
}
feature! {
#![unix]
impl OpenOptions {
/// Sets the mode bits that a new file will be created with.
///
/// If a new file is created as part of an `OpenOptions::open` call then this
/// specified `mode` will be used as the permission bits for the new file.
/// If no `mode` is set, the default of `0o666` will be used.
/// The operating system masks out bits with the system's `umask`, to produce
/// the final permissions.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let mut options = OpenOptions::new();
/// options.mode(0o644); // Give read/write for owner and read for others.
/// let file = options.open("foo.txt").await?;
///
/// Ok(())
/// }
/// ```
pub fn mode(&mut self, mode: u32) -> &mut OpenOptions {
    // Apply the mode bits to whichever backend these options were built for.
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.mode(mode);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.mode(mode);
        }
    }
    self
}
/// Passes custom flags to the `flags` argument of `open`.
///
/// The bits that define the access mode are masked out with `O_ACCMODE`, to
/// ensure they do not interfere with the access mode set by Rust's options.
///
/// Custom flags can only set flags, not remove flags set by Rust's options.
/// This option overwrites any previously set custom flags.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let mut options = OpenOptions::new();
/// options.write(true);
/// if cfg!(unix) {
/// options.custom_flags(libc::O_NOFOLLOW);
/// }
/// let file = options.open("foo.txt").await?;
///
/// Ok(())
/// }
/// ```
pub fn custom_flags(&mut self, flags: i32) -> &mut OpenOptions {
    // Apply the raw flags to whichever backend these options were built for.
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.custom_flags(flags);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.custom_flags(flags);
        }
    }
    self
}
}
}
cfg_windows! {
impl OpenOptions {
/// Overrides the `dwDesiredAccess` argument to the call to [`CreateFile`]
/// with the specified value.
///
/// This will override the `read`, `write`, and `append` flags on the
/// `OpenOptions` structure. This method provides fine-grained control over
/// the permissions to read, write and append data, attributes (like hidden
/// and system), and extended attributes.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
///
/// # #[tokio::main]
/// # async fn main() -> std::io::Result<()> {
/// // Open without read and write permission, for example if you only need
/// // to call `stat` on the file
/// let file = OpenOptions::new().access_mode(0).open("foo.txt").await?;
/// # Ok(())
/// # }
/// ```
///
/// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
pub fn access_mode(&mut self, access: u32) -> &mut OpenOptions {
    // Forward straight to the underlying std options builder.
    let std_opts = self.as_inner_mut();
    std_opts.access_mode(access);
    self
}
/// Overrides the `dwShareMode` argument to the call to [`CreateFile`] with
/// the specified value.
///
/// By default `share_mode` is set to
/// `FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE`. This allows
/// other processes to read, write, and delete/rename the same file
/// while it is open. Removing any of the flags will prevent other
/// processes from performing the corresponding operation until the file
/// handle is closed.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
///
/// # #[tokio::main]
/// # async fn main() -> std::io::Result<()> {
/// // Do not allow others to read or modify this file while we have it open
/// // for writing.
/// let file = OpenOptions::new()
/// .write(true)
/// .share_mode(0)
/// .open("foo.txt").await?;
/// # Ok(())
/// # }
/// ```
///
/// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
pub fn share_mode(&mut self, share: u32) -> &mut OpenOptions {
    // Forward straight to the underlying std options builder.
    let std_opts = self.as_inner_mut();
    std_opts.share_mode(share);
    self
}
/// Sets extra flags for the `dwFileFlags` argument to the call to
/// [`CreateFile2`] to the specified value (or combines it with
/// `attributes` and `security_qos_flags` to set the `dwFlagsAndAttributes`
/// for [`CreateFile`]).
///
/// Custom flags can only set flags, not remove flags set by Rust's options.
/// This option overwrites any previously set custom flags.
///
/// # Examples
///
/// ```no_run
/// use windows_sys::Win32::Storage::FileSystem::FILE_FLAG_DELETE_ON_CLOSE;
/// use tokio::fs::OpenOptions;
///
/// # #[tokio::main]
/// # async fn main() -> std::io::Result<()> {
/// let file = OpenOptions::new()
/// .create(true)
/// .write(true)
/// .custom_flags(FILE_FLAG_DELETE_ON_CLOSE)
/// .open("foo.txt").await?;
/// # Ok(())
/// # }
/// ```
///
/// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
/// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2
pub fn custom_flags(&mut self, flags: u32) -> &mut OpenOptions {
    // Forward straight to the underlying std options builder.
    let std_opts = self.as_inner_mut();
    std_opts.custom_flags(flags);
    self
}
/// Sets the `dwFileAttributes` argument to the call to [`CreateFile2`] to
/// the specified value (or combines it with `custom_flags` and
/// `security_qos_flags` to set the `dwFlagsAndAttributes` for
/// [`CreateFile`]).
///
/// If a _new_ file is created because it does not yet exist and
/// `.create(true)` or `.create_new(true)` are specified, the new file is
/// given the attributes declared with `.attributes()`.
///
/// If an _existing_ file is opened with `.create(true).truncate(true)`, its
/// existing attributes are preserved and combined with the ones declared
/// with `.attributes()`.
///
/// In all other cases the attributes get ignored.
///
/// # Examples
///
/// ```no_run
/// use windows_sys::Win32::Storage::FileSystem::FILE_ATTRIBUTE_HIDDEN;
/// use tokio::fs::OpenOptions;
///
/// # #[tokio::main]
/// # async fn main() -> std::io::Result<()> {
/// let file = OpenOptions::new()
/// .write(true)
/// .create(true)
/// .attributes(FILE_ATTRIBUTE_HIDDEN)
/// .open("foo.txt").await?;
/// # Ok(())
/// # }
/// ```
///
/// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
/// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2
pub fn attributes(&mut self, attributes: u32) -> &mut OpenOptions {
    // Record the value on the inner builder and return `self` for chaining.
    self.as_inner_mut().attributes(attributes);
    self
}
/// Sets the `dwSecurityQosFlags` argument to the call to [`CreateFile2`] to
/// the specified value (or combines it with `custom_flags` and `attributes`
/// to set the `dwFlagsAndAttributes` for [`CreateFile`]).
///
/// By default `security_qos_flags` is not set. It should be specified when
/// opening a named pipe, to control to which degree a server process can
/// act on behalf of a client process (security impersonation level).
///
/// When `security_qos_flags` is not set, a malicious program can gain the
/// elevated privileges of a privileged Rust process when it allows opening
/// user-specified paths, by tricking it into opening a named pipe. So
/// arguably `security_qos_flags` should also be set when opening arbitrary
/// paths. However the bits can then conflict with other flags, specifically
/// `FILE_FLAG_OPEN_NO_RECALL`.
///
/// For information about possible values, see [Impersonation Levels] on the
/// Windows Dev Center site. The `SECURITY_SQOS_PRESENT` flag is set
/// automatically when using this method.
///
/// # Examples
///
/// ```no_run
/// use windows_sys::Win32::Storage::FileSystem::SECURITY_IDENTIFICATION;
/// use tokio::fs::OpenOptions;
///
/// # #[tokio::main]
/// # async fn main() -> std::io::Result<()> {
/// let file = OpenOptions::new()
/// .write(true)
/// .create(true)
///
/// // Sets the flag value to `SecurityIdentification`.
/// .security_qos_flags(SECURITY_IDENTIFICATION)
///
/// .open(r"\\.\pipe\MyPipe").await?;
/// # Ok(())
/// # }
/// ```
///
/// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
/// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2
/// [Impersonation Levels]:
/// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level
pub fn security_qos_flags(&mut self, flags: u32) -> &mut OpenOptions {
    // Record the value on the inner builder and return `self` for chaining.
    // Per the doc comment above, the inner implementation also sets
    // `SECURITY_SQOS_PRESENT` automatically.
    self.as_inner_mut().security_qos_flags(flags);
    self
}
}
}
impl From<StdOpenOptions> for OpenOptions {
    /// Wraps an already-configured `std::fs::OpenOptions` in Tokio's builder.
    fn from(options: StdOpenOptions) -> OpenOptions {
        // TODO: Add support for converting `StdOpenOptions` to `UringOpenOptions`
        // if user enables `io-uring` cargo feature. It is blocked by:
        // * https://github.com/rust-lang/rust/issues/74943
        // * https://github.com/rust-lang/rust/issues/76801
        let inner = Kind::Std(options);
        OpenOptions { inner }
    }
}
impl Default for OpenOptions {
fn default() -> Self {
Self::new()
}
}

View File

@@ -0,0 +1,39 @@
#![allow(unreachable_pub)]
//! Mock version of `std::fs::OpenOptions`.
use mockall::mock;
use crate::fs::mocks::MockFile;
#[cfg(unix)]
use std::os::unix::fs::OpenOptionsExt;
#[cfg(windows)]
use std::os::windows::fs::OpenOptionsExt;
use std::{io, path::Path};
// Generates `MockOpenOptions`: a mockall double for `std::fs::OpenOptions`
// exposing the builder methods (and platform extension traits) that the
// fs code under test exercises.
mock! {
    #[derive(Debug)]
    pub OpenOptions {
        pub fn append(&mut self, append: bool) -> &mut Self;
        pub fn create(&mut self, create: bool) -> &mut Self;
        pub fn create_new(&mut self, create_new: bool) -> &mut Self;
        // Unlike the real type, `open` yields a `MockFile`.
        pub fn open<P: AsRef<Path> + 'static>(&self, path: P) -> io::Result<MockFile>;
        pub fn read(&mut self, read: bool) -> &mut Self;
        pub fn truncate(&mut self, truncate: bool) -> &mut Self;
        pub fn write(&mut self, write: bool) -> &mut Self;
    }
    impl Clone for OpenOptions {
        fn clone(&self) -> Self;
    }
    // Platform extension traits, mirroring std's `OpenOptionsExt`.
    #[cfg(unix)]
    impl OpenOptionsExt for OpenOptions {
        fn custom_flags(&mut self, flags: i32) -> &mut Self;
        fn mode(&mut self, mode: u32) -> &mut Self;
    }
    #[cfg(windows)]
    impl OpenOptionsExt for OpenOptions {
        fn access_mode(&mut self, access: u32) -> &mut Self;
        fn share_mode(&mut self, val: u32) -> &mut Self;
        fn custom_flags(&mut self, flags: u32) -> &mut Self;
        fn attributes(&mut self, val: u32) -> &mut Self;
        fn security_qos_flags(&mut self, flags: u32) -> &mut Self;
    }
}

View File

@@ -0,0 +1,128 @@
use std::{io, os::unix::fs::OpenOptionsExt};
#[cfg(test)]
use super::mock_open_options::MockOpenOptions as StdOpenOptions;
#[cfg(not(test))]
use std::fs::OpenOptions as StdOpenOptions;
/// Builder state for opening files through io_uring; mirrors the subset of
/// `std::fs::OpenOptions` flags that this path supports.
#[derive(Debug, Clone)]
pub(crate) struct UringOpenOptions {
    pub(crate) read: bool,
    pub(crate) write: bool,
    pub(crate) append: bool,
    pub(crate) truncate: bool,
    pub(crate) create: bool,
    pub(crate) create_new: bool,
    // Permission bits for newly created files (0o666 by default, see `new`).
    pub(crate) mode: libc::mode_t,
    // Extra raw flags supplied via `custom_flags`; presumably combined with
    // the computed open flags at open time — see the `OpenOptionsExt` contract.
    pub(crate) custom_flags: libc::c_int,
}
impl UringOpenOptions {
    /// Creates a builder with the same defaults as `std::fs::OpenOptions`:
    /// all flags off, mode `0o666`, no custom flags.
    pub(crate) fn new() -> Self {
        Self {
            read: false,
            write: false,
            append: false,
            truncate: false,
            create: false,
            create_new: false,
            mode: 0o666,
            custom_flags: 0,
        }
    }

    pub(crate) fn append(&mut self, append: bool) -> &mut Self {
        self.append = append;
        self
    }

    pub(crate) fn create(&mut self, create: bool) -> &mut Self {
        self.create = create;
        self
    }

    pub(crate) fn create_new(&mut self, create_new: bool) -> &mut Self {
        self.create_new = create_new;
        self
    }

    pub(crate) fn read(&mut self, read: bool) -> &mut Self {
        self.read = read;
        self
    }

    pub(crate) fn write(&mut self, write: bool) -> &mut Self {
        self.write = write;
        self
    }

    pub(crate) fn truncate(&mut self, truncate: bool) -> &mut Self {
        self.truncate = truncate;
        self
    }

    pub(crate) fn mode(&mut self, mode: u32) -> &mut Self {
        self.mode = mode as libc::mode_t;
        self
    }

    pub(crate) fn custom_flags(&mut self, flags: i32) -> &mut Self {
        self.custom_flags = flags;
        self
    }

    /// Maps the read/write/append flags to an `open(2)` access mode,
    /// returning `EINVAL` when none of them was requested.
    // Equivalent to https://github.com/rust-lang/rust/blob/64c81fd10509924ca4da5d93d6052a65b75418a5/library/std/src/sys/fs/unix.rs#L1118-L1127
    pub(crate) fn access_mode(&self) -> io::Result<libc::c_int> {
        match (self.read, self.write, self.append) {
            (true, false, false) => Ok(libc::O_RDONLY),
            (false, true, false) => Ok(libc::O_WRONLY),
            (true, true, false) => Ok(libc::O_RDWR),
            // `append` implies write access.
            (false, _, true) => Ok(libc::O_WRONLY | libc::O_APPEND),
            (true, _, true) => Ok(libc::O_RDWR | libc::O_APPEND),
            (false, false, false) => Err(io::Error::from_raw_os_error(libc::EINVAL)),
        }
    }

    /// Computes the creation flags (`O_CREAT`/`O_TRUNC`/`O_EXCL`),
    /// rejecting the same invalid combinations std rejects.
    // Equivalent to https://github.com/rust-lang/rust/blob/64c81fd10509924ca4da5d93d6052a65b75418a5/library/std/src/sys/fs/unix.rs#L1129-L1151
    pub(crate) fn creation_mode(&self) -> io::Result<libc::c_int> {
        match (self.write, self.append) {
            (true, false) => {}
            (false, false) => {
                // truncate/create/create_new require write access.
                if self.truncate || self.create || self.create_new {
                    return Err(io::Error::from_raw_os_error(libc::EINVAL));
                }
            }
            (_, true) => {
                // Truncating in append mode is contradictory unless
                // `create_new` guarantees a fresh (empty) file.
                if self.truncate && !self.create_new {
                    return Err(io::Error::from_raw_os_error(libc::EINVAL));
                }
            }
        }
        Ok(match (self.create, self.truncate, self.create_new) {
            (false, false, false) => 0,
            (true, false, false) => libc::O_CREAT,
            (false, true, false) => libc::O_TRUNC,
            (true, true, false) => libc::O_CREAT | libc::O_TRUNC,
            // `create_new` takes precedence over `create` and `truncate`.
            (_, _, true) => libc::O_CREAT | libc::O_EXCL,
        })
    }
}
impl From<UringOpenOptions> for StdOpenOptions {
    /// Rebuilds an equivalent `std::fs::OpenOptions` by replaying every
    /// recorded flag onto a fresh std builder.
    fn from(value: UringOpenOptions) -> Self {
        let mut options = StdOpenOptions::new();
        options
            .append(value.append)
            .create(value.create)
            .create_new(value.create_new)
            .read(value.read)
            .truncate(value.truncate)
            .write(value.write)
            .mode(value.mode)
            .custom_flags(value.custom_flags);
        options
    }
}

80
vendor/tokio/src/fs/read.rs vendored Normal file
View File

@@ -0,0 +1,80 @@
use crate::fs::asyncify;
use std::{io, path::Path};
/// Reads the entire contents of a file into a bytes vector.
///
/// This is an async version of [`std::fs::read`].
///
/// This is a convenience function for using [`File::open`] and [`read_to_end`]
/// with fewer imports and without an intermediate variable. It pre-allocates a
/// buffer based on the file size when available, so it is generally faster than
/// reading into a vector created with `Vec::new()`.
///
/// This operation is implemented by running the equivalent blocking operation
/// on a separate thread pool using [`spawn_blocking`].
///
/// [`File::open`]: super::File::open
/// [`read_to_end`]: crate::io::AsyncReadExt::read_to_end
/// [`spawn_blocking`]: crate::task::spawn_blocking
///
/// # Errors
///
/// This function will return an error if `path` does not already exist.
/// Other errors may also be returned according to [`OpenOptions::open`].
///
/// [`OpenOptions::open`]: super::OpenOptions::open
///
/// It will also return an error if, while reading, it encounters an error
/// of a kind other than [`ErrorKind::Interrupted`].
///
/// [`ErrorKind::Interrupted`]: std::io::ErrorKind::Interrupted
///
/// # io_uring support
///
/// On Linux, you can also use io_uring for executing system calls. To enable
/// io_uring, you need to specify the `--cfg tokio_unstable` flag at compile time,
/// enable the io-uring cargo feature, and set the `Builder::enable_io_uring`
/// runtime option.
///
/// Support for io_uring is currently experimental, so its behavior may change
/// or it may be removed in future versions.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
/// use std::net::SocketAddr;
///
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn std::error::Error + 'static>> {
/// let contents = fs::read("address.txt").await?;
/// let foo: SocketAddr = String::from_utf8_lossy(&contents).parse()?;
/// Ok(())
/// }
/// ```
pub async fn read(path: impl AsRef<Path>) -> io::Result<Vec<u8>> {
    // Take ownership so the path can be moved into a 'static closure/future.
    let path = path.as_ref().to_owned();

    #[cfg(all(
        tokio_unstable,
        feature = "io-uring",
        feature = "rt",
        feature = "fs",
        target_os = "linux"
    ))]
    {
        use crate::fs::read_uring;

        let handle = crate::runtime::Handle::current();
        let driver_handle = handle.inner.driver().io();

        // Prefer io_uring when the driver reports the `Read` opcode as
        // available; otherwise fall through to the blocking-pool path.
        if driver_handle
            .check_and_init(io_uring::opcode::Read::CODE)
            .await?
        {
            return read_uring(&path).await;
        }
    }

    // Fallback: run the blocking std read on the blocking thread pool.
    asyncify(move || std::fs::read(path)).await
}

357
vendor/tokio/src/fs/read_dir.rs vendored Normal file
View File

@@ -0,0 +1,357 @@
use crate::fs::asyncify;
use std::collections::VecDeque;
use std::ffi::OsString;
use std::fs::{FileType, Metadata};
use std::future::Future;
use std::io;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::Arc;
use std::task::{ready, Context, Poll};
#[cfg(test)]
use super::mocks::spawn_blocking;
#[cfg(test)]
use super::mocks::JoinHandle;
#[cfg(not(test))]
use crate::blocking::spawn_blocking;
#[cfg(not(test))]
use crate::blocking::JoinHandle;
const CHUNK_SIZE: usize = 32;
/// Returns a stream over the entries within a directory.
///
/// This is an async version of [`std::fs::read_dir`].
///
/// This operation is implemented by running the equivalent blocking
/// operation on a separate thread pool using [`spawn_blocking`].
///
/// [`spawn_blocking`]: crate::task::spawn_blocking
pub async fn read_dir(path: impl AsRef<Path>) -> io::Result<ReadDir> {
    let path = path.as_ref().to_owned();
    asyncify(move || -> io::Result<ReadDir> {
        // Open the directory and eagerly buffer the first chunk of entries
        // while we are already on the blocking pool.
        let mut std_dir = std::fs::read_dir(path)?;
        let mut entries = VecDeque::with_capacity(CHUNK_SIZE);
        let more = ReadDir::next_chunk(&mut entries, &mut std_dir);
        Ok(ReadDir(State::Idle(Some((entries, std_dir, more)))))
    })
    .await
}
/// Reads the entries in a directory.
///
/// This struct is returned from the [`read_dir`] function of this module and
/// will yield instances of [`DirEntry`]. Through a [`DirEntry`] information
/// like the entry's path and possibly other metadata can be learned.
///
/// A `ReadDir` can be turned into a `Stream` with [`ReadDirStream`].
///
/// [`ReadDirStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.ReadDirStream.html
///
/// # Errors
///
/// This stream will return an [`Err`] if there's some sort of intermittent
/// IO error during iteration.
///
/// [`read_dir`]: read_dir
/// [`DirEntry`]: DirEntry
/// [`Err`]: std::result::Result::Err
#[derive(Debug)]
#[must_use = "streams do nothing unless polled"]
pub struct ReadDir(State);

/// Internal state machine: either holding buffered entries (`Idle`) or
/// waiting on a `spawn_blocking` task that fetches the next chunk.
#[derive(Debug)]
enum State {
    // (buffered entries, underlying std iterator, "may have more entries" flag)
    Idle(Option<(VecDeque<io::Result<DirEntry>>, std::fs::ReadDir, bool)>),
    Pending(JoinHandle<(VecDeque<io::Result<DirEntry>>, std::fs::ReadDir, bool)>),
}
impl ReadDir {
    /// Returns the next entry in the directory stream.
    ///
    /// # Cancel safety
    ///
    /// This method is cancellation safe.
    pub async fn next_entry(&mut self) -> io::Result<Option<DirEntry>> {
        use std::future::poll_fn;
        poll_fn(|cx| self.poll_next_entry(cx)).await
    }

    /// Polls for the next directory entry in the stream.
    ///
    /// This method returns:
    ///
    ///  * `Poll::Pending` if the next directory entry is not yet available.
    ///  * `Poll::Ready(Ok(Some(entry)))` if the next directory entry is available.
    ///  * `Poll::Ready(Ok(None))` if there are no more directory entries in this
    ///    stream.
    ///  * `Poll::Ready(Err(err))` if an IO error occurred while reading the next
    ///    directory entry.
    ///
    /// When the method returns `Poll::Pending`, the `Waker` in the provided
    /// `Context` is scheduled to receive a wakeup when the next directory entry
    /// becomes available on the underlying IO resource.
    ///
    /// Note that on multiple calls to `poll_next_entry`, only the `Waker` from
    /// the `Context` passed to the most recent call is scheduled to receive a
    /// wakeup.
    pub fn poll_next_entry(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<Option<DirEntry>>> {
        loop {
            match self.0 {
                State::Idle(ref mut data) => {
                    let (buf, _, ref remain) = data.as_mut().unwrap();

                    // Serve already-buffered entries first.
                    if let Some(ent) = buf.pop_front() {
                        return Poll::Ready(ent.map(Some));
                    } else if !remain {
                        // The previous chunk exhausted the underlying
                        // iterator: the stream is finished.
                        return Poll::Ready(Ok(None));
                    }

                    // Buffer drained but more entries may remain: fetch the
                    // next chunk on the blocking pool.
                    let (mut buf, mut std, _) = data.take().unwrap();
                    self.0 = State::Pending(spawn_blocking(move || {
                        let remain = ReadDir::next_chunk(&mut buf, &mut std);
                        (buf, std, remain)
                    }));
                }
                State::Pending(ref mut rx) => {
                    // Wait for the in-flight chunk fetch, then loop to drain it.
                    self.0 = State::Idle(Some(ready!(Pin::new(rx).poll(cx))?));
                }
            }
        }
    }

    // Pulls up to `CHUNK_SIZE` entries from `std` into `buf`.
    //
    // Returns `false` once the iterator is exhausted, `true` otherwise. A
    // chunk is cut short after an `Err` entry, but `true` is still returned
    // so iteration can continue past per-entry errors.
    fn next_chunk(buf: &mut VecDeque<io::Result<DirEntry>>, std: &mut std::fs::ReadDir) -> bool {
        for _ in 0..CHUNK_SIZE {
            let ret = match std.next() {
                Some(ret) => ret,
                None => return false,
            };

            let success = ret.is_ok();

            buf.push_back(ret.map(|std| DirEntry {
                // Capture the file type now, while we are on the blocking
                // pool, on platforms where std exposes it from the entry.
                #[cfg(not(any(
                    target_os = "solaris",
                    target_os = "illumos",
                    target_os = "haiku",
                    target_os = "vxworks",
                    target_os = "aix",
                    target_os = "nto",
                    target_os = "vita",
                )))]
                file_type: std.file_type().ok(),
                std: Arc::new(std),
            }));

            if !success {
                break;
            }
        }

        true
    }
}
feature! {
    #![unix]

    use std::os::unix::fs::DirEntryExt;

    impl DirEntry {
        /// Returns the underlying `d_ino` field in the contained `dirent`
        /// structure.
        ///
        /// # Examples
        ///
        /// ```
        /// use tokio::fs;
        ///
        /// # #[tokio::main]
        /// # async fn main() -> std::io::Result<()> {
        /// let mut entries = fs::read_dir(".").await?;
        /// while let Some(entry) = entries.next_entry().await? {
        ///     // Here, `entry` is a `DirEntry`.
        ///     println!("{:?}: {}", entry.file_name(), entry.ino());
        /// }
        /// # Ok(())
        /// # }
        /// ```
        pub fn ino(&self) -> u64 {
            // Delegates to `std::os::unix::fs::DirEntryExt::ino`.
            self.as_inner().ino()
        }
    }
}
/// Entries returned by the [`ReadDir`] stream.
///
/// [`ReadDir`]: struct@ReadDir
///
/// This is a specialized version of [`std::fs::DirEntry`] for usage from the
/// Tokio runtime.
///
/// An instance of `DirEntry` represents an entry inside of a directory on the
/// filesystem. Each entry can be inspected via methods to learn about the full
/// path or possibly other metadata through per-platform extension traits.
#[derive(Debug)]
pub struct DirEntry {
    // File type captured when the entry was read (see `ReadDir::next_chunk`),
    // on platforms where std exposes it from the entry; `None` if that
    // call failed.
    #[cfg(not(any(
        target_os = "solaris",
        target_os = "illumos",
        target_os = "haiku",
        target_os = "vxworks",
        target_os = "aix",
        target_os = "nto",
        target_os = "vita",
    )))]
    file_type: Option<FileType>,
    // Shared so async methods can move a handle onto the blocking pool.
    std: Arc<std::fs::DirEntry>,
}
impl DirEntry {
/// Returns the full path to the file that this entry represents.
///
/// The full path is created by joining the original path to `read_dir`
/// with the filename of this entry.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut entries = fs::read_dir(".").await?;
///
/// while let Some(entry) = entries.next_entry().await? {
/// println!("{:?}", entry.path());
/// }
/// # Ok(())
/// # }
/// ```
///
/// This prints output like:
///
/// ```text
/// "./whatever.txt"
/// "./foo.html"
/// "./hello_world.rs"
/// ```
///
/// The exact text, of course, depends on what files you have in `.`.
pub fn path(&self) -> PathBuf {
self.std.path()
}
/// Returns the bare file name of this directory entry without any other
/// leading path component.
///
/// # Examples
///
/// ```
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut entries = fs::read_dir(".").await?;
///
/// while let Some(entry) = entries.next_entry().await? {
/// println!("{:?}", entry.file_name());
/// }
/// # Ok(())
/// # }
/// ```
pub fn file_name(&self) -> OsString {
self.std.file_name()
}
/// Returns the metadata for the file that this entry points at.
///
/// This function will not traverse symlinks if this entry points at a
/// symlink.
///
/// # Platform-specific behavior
///
/// On Windows this function is cheap to call (no extra system calls
/// needed), but on Unix platforms this function is the equivalent of
/// calling `symlink_metadata` on the path.
///
/// # Examples
///
/// ```
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut entries = fs::read_dir(".").await?;
///
/// while let Some(entry) = entries.next_entry().await? {
/// if let Ok(metadata) = entry.metadata().await {
/// // Now let's show our entry's permissions!
/// println!("{:?}: {:?}", entry.path(), metadata.permissions());
/// } else {
/// println!("Couldn't get file type for {:?}", entry.path());
/// }
/// }
/// # Ok(())
/// # }
/// ```
pub async fn metadata(&self) -> io::Result<Metadata> {
let std = self.std.clone();
asyncify(move || std.metadata()).await
}
/// Returns the file type for the file that this entry points at.
///
/// This function will not traverse symlinks if this entry points at a
/// symlink.
///
/// # Platform-specific behavior
///
/// On Windows and most Unix platforms this function is free (no extra
/// system calls needed), but some Unix platforms may require the equivalent
/// call to `symlink_metadata` to learn about the target file type.
///
/// # Examples
///
/// ```
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut entries = fs::read_dir(".").await?;
///
/// while let Some(entry) = entries.next_entry().await? {
/// if let Ok(file_type) = entry.file_type().await {
/// // Now let's show our entry's file type!
/// println!("{:?}: {:?}", entry.path(), file_type);
/// } else {
/// println!("Couldn't get file type for {:?}", entry.path());
/// }
/// }
/// # Ok(())
/// # }
/// ```
pub async fn file_type(&self) -> io::Result<FileType> {
#[cfg(not(any(
target_os = "solaris",
target_os = "illumos",
target_os = "haiku",
target_os = "vxworks",
target_os = "aix",
target_os = "nto",
target_os = "vita",
)))]
if let Some(file_type) = self.file_type {
return Ok(file_type);
}
let std = self.std.clone();
asyncify(move || std.file_type()).await
}
/// Returns a reference to the underlying `std::fs::DirEntry`.
#[cfg(unix)]
pub(super) fn as_inner(&self) -> &std::fs::DirEntry {
&self.std
}
}

12
vendor/tokio/src/fs/read_link.rs vendored Normal file
View File

@@ -0,0 +1,12 @@
use crate::fs::asyncify;
use std::io;
use std::path::{Path, PathBuf};
/// Reads a symbolic link, returning the file that the link points to.
///
/// This is an async version of [`std::fs::read_link`].
pub async fn read_link(path: impl AsRef<Path>) -> io::Result<PathBuf> {
    // Resolve the link target with std on the blocking thread pool.
    let link = path.as_ref().to_owned();
    asyncify(move || std::fs::read_link(link)).await
}

30
vendor/tokio/src/fs/read_to_string.rs vendored Normal file
View File

@@ -0,0 +1,30 @@
use crate::fs::asyncify;
use std::{io, path::Path};
/// Creates a future which will open a file for reading and read the entire
/// contents into a string and return said string.
///
/// This is the async equivalent of [`std::fs::read_to_string`][std].
///
/// This operation is implemented by running the equivalent blocking operation
/// on a separate thread pool using [`spawn_blocking`].
///
/// [`spawn_blocking`]: crate::task::spawn_blocking
/// [std]: fn@std::fs::read_to_string
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// let contents = fs::read_to_string("foo.txt").await?;
/// println!("foo.txt contains {} bytes", contents.len());
/// # Ok(())
/// # }
/// ```
pub async fn read_to_string(path: impl AsRef<Path>) -> io::Result<String> {
    // Run the blocking read with std on the blocking thread pool.
    let source = path.as_ref().to_owned();
    asyncify(move || std::fs::read_to_string(source)).await
}

134
vendor/tokio/src/fs/read_uring.rs vendored Normal file
View File

@@ -0,0 +1,134 @@
use crate::fs::OpenOptions;
use crate::runtime::driver::op::Op;
use std::io;
use std::io::ErrorKind;
use std::os::fd::OwnedFd;
use std::path::Path;
// this algorithm is inspired from rust std lib version 1.90.0
// https://doc.rust-lang.org/1.90.0/src/std/io/mod.rs.html#409
const PROBE_SIZE: usize = 32;
const PROBE_SIZE_U32: u32 = PROBE_SIZE as u32;
// Max bytes we can read using io uring submission at a time
// SAFETY: cannot be higher than u32::MAX for safe cast
// Set to read max 64 MiB at time
const MAX_READ_SIZE: usize = 64 * 1024 * 1024;
/// io_uring implementation of `fs::read`: opens `path` read-only and reads
/// its entire contents into a `Vec<u8>`.
pub(crate) async fn read_uring(path: &Path) -> io::Result<Vec<u8>> {
    let file = OpenOptions::new().read(true).open(path).await?;

    // TODO: use io uring in the future to obtain metadata
    // Pre-size the buffer from the file length when metadata is available;
    // the read loop still grows the buffer as needed when it is not.
    let size_hint: Option<usize> = file.metadata().await.map(|m| m.len() as usize).ok();

    let fd: OwnedFd = file
        .try_into_std()
        .expect("unexpected in-flight operation detected")
        .into();

    let mut buf = Vec::new();
    if let Some(size_hint) = size_hint {
        // `try_reserve` reports allocation failure as an error instead of
        // aborting on a huge file size.
        buf.try_reserve(size_hint)?;
    }

    read_to_end_uring(fd, buf).await
}
/// Reads from `fd` into `buf` until EOF, growing the buffer as needed.
/// Returns the filled buffer.
async fn read_to_end_uring(mut fd: OwnedFd, mut buf: Vec<u8>) -> io::Result<Vec<u8>> {
    // Absolute file offset of the next read; advanced by `op_read`.
    let mut offset = 0;
    let start_cap = buf.capacity();

    loop {
        if buf.len() == buf.capacity() && buf.capacity() == start_cap && buf.len() >= PROBE_SIZE {
            // The buffer might be an exact fit. Let's read into a probe buffer
            // and see if it returns `Ok(0)`. If so, we've avoided an
            // unnecessary increasing of the capacity. But if not, append the
            // probe buffer to the primary buffer and let its capacity grow.
            let (r_fd, r_buf, is_eof) = small_probe_read(fd, buf, &mut offset).await?;

            if is_eof {
                return Ok(r_buf);
            }

            buf = r_buf;
            fd = r_fd;
        }

        // buf is full, need more capacity
        if buf.len() == buf.capacity() {
            buf.try_reserve(PROBE_SIZE)?;
        }

        // prepare the spare capacity to be read into
        let buf_len = usize::min(buf.spare_capacity_mut().len(), MAX_READ_SIZE);

        // buf_len cannot be greater than u32::MAX because MAX_READ_SIZE
        // is less than u32::MAX
        let read_len = u32::try_from(buf_len).expect("buf_len must always fit in u32");

        // read into spare capacity
        let (r_fd, r_buf, is_eof) = op_read(fd, buf, &mut offset, read_len).await?;

        if is_eof {
            return Ok(r_buf);
        }

        fd = r_fd;
        buf = r_buf;
    }
}
/// Performs a `PROBE_SIZE` read without growing `buf`'s capacity, by
/// temporarily reusing the last `PROBE_SIZE` bytes of `buf` as scratch space
/// and restoring them afterwards.
///
/// Caller invariant: `buf.len() >= PROBE_SIZE`.
async fn small_probe_read(
    fd: OwnedFd,
    mut buf: Vec<u8>,
    offset: &mut u64,
) -> io::Result<(OwnedFd, Vec<u8>, bool)> {
    let read_len = PROBE_SIZE_U32;
    let mut temp_arr = [0; PROBE_SIZE];

    // we don't call this function if the buffer's length < PROBE_SIZE
    let back_bytes_len = buf.len() - PROBE_SIZE;

    // Save the tail bytes that the probe read will overwrite.
    temp_arr.copy_from_slice(&buf[back_bytes_len..]);

    // We're decreasing the length of the buffer and len is greater
    // than PROBE_SIZE. So we can read into the discarded length
    buf.truncate(back_bytes_len);

    let (r_fd, mut r_buf, is_eof) = op_read(fd, buf, offset, read_len).await?;

    // If `size_read` returns zero due to reasons such as the buffer's exact fit,
    // then this `try_reserve` does not perform allocation.
    r_buf.try_reserve(PROBE_SIZE)?;

    // Re-insert the saved tail bytes ahead of any newly read data.
    r_buf.splice(back_bytes_len..back_bytes_len, temp_arr);

    Ok((r_fd, r_buf, is_eof))
}
// Takes a length to read and returns a single read in the buffer
//
// Returns the file descriptor, buffer and EOF reached or not
async fn op_read(
    mut fd: OwnedFd,
    mut buf: Vec<u8>,
    offset: &mut u64,
    read_len: u32,
) -> io::Result<(OwnedFd, Vec<u8>, bool)> {
    loop {
        let (res, r_fd, r_buf) = Op::read(fd, buf, read_len, *offset).await;

        match res {
            // EINTR: retry the same read, reclaiming the returned fd/buffer.
            Err(e) if e.kind() == ErrorKind::Interrupted => {
                buf = r_buf;
                fd = r_fd;
            }
            Err(e) => return Err(e),
            Ok(size_read) => {
                // Advance the file offset; a zero-byte read signals EOF.
                *offset += size_read as u64;
                return Ok((r_fd, r_buf, size_read == 0));
            }
        }
    }
}

12
vendor/tokio/src/fs/remove_dir.rs vendored Normal file
View File

@@ -0,0 +1,12 @@
use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Removes an existing, empty directory.
///
/// This is an async version of [`std::fs::remove_dir`].
pub async fn remove_dir(path: impl AsRef<Path>) -> io::Result<()> {
    // Run the blocking removal with std on the blocking thread pool.
    let dir = path.as_ref().to_owned();
    asyncify(move || std::fs::remove_dir(dir)).await
}

14
vendor/tokio/src/fs/remove_dir_all.rs vendored Normal file
View File

@@ -0,0 +1,14 @@
use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Removes a directory at this path, after removing all its contents. Use carefully!
///
/// This is an async version of [`std::fs::remove_dir_all`][std]
///
/// [std]: fn@std::fs::remove_dir_all
pub async fn remove_dir_all(path: impl AsRef<Path>) -> io::Result<()> {
    // Run the blocking recursive removal with std on the blocking thread pool.
    let root = path.as_ref().to_owned();
    asyncify(move || std::fs::remove_dir_all(root)).await
}

16
vendor/tokio/src/fs/remove_file.rs vendored Normal file
View File

@@ -0,0 +1,16 @@
use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Removes a file from the filesystem.
///
/// Note that there is no guarantee that the file is immediately deleted (e.g.
/// depending on platform, other open file descriptors may prevent immediate
/// removal).
///
/// This is an async version of [`std::fs::remove_file`].
pub async fn remove_file(path: impl AsRef<Path>) -> io::Result<()> {
    // Run the blocking unlink with std on the blocking thread pool.
    let target = path.as_ref().to_owned();
    asyncify(move || std::fs::remove_file(target)).await
}

17
vendor/tokio/src/fs/rename.rs vendored Normal file
View File

@@ -0,0 +1,17 @@
use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Renames a file or directory to a new name, replacing the original file if
/// `to` already exists.
///
/// This will not work if the new name is on a different mount point.
///
/// This is an async version of [`std::fs::rename`].
pub async fn rename(from: impl AsRef<Path>, to: impl AsRef<Path>) -> io::Result<()> {
    // Run the blocking rename with std on the blocking thread pool.
    let (src, dst) = (from.as_ref().to_owned(), to.as_ref().to_owned());
    asyncify(move || std::fs::rename(src, dst)).await
}

15
vendor/tokio/src/fs/set_permissions.rs vendored Normal file
View File

@@ -0,0 +1,15 @@
use crate::fs::asyncify;
use std::fs::Permissions;
use std::io;
use std::path::Path;
/// Changes the permissions found on a file or a directory.
///
/// This is an async version of [`std::fs::set_permissions`][std]
///
/// [std]: fn@std::fs::set_permissions
pub async fn set_permissions(path: impl AsRef<Path>, perm: Permissions) -> io::Result<()> {
    // Run the blocking chmod-equivalent with std on the blocking thread pool.
    let target = path.as_ref().to_owned();
    asyncify(move || std::fs::set_permissions(target, perm)).await
}

16
vendor/tokio/src/fs/symlink.rs vendored Normal file
View File

@@ -0,0 +1,16 @@
use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Creates a new symbolic link on the filesystem.
///
/// The `link` path will be a symbolic link pointing to the `original` path.
///
/// This is an async version of [`std::os::unix::fs::symlink`].
pub async fn symlink(original: impl AsRef<Path>, link: impl AsRef<Path>) -> io::Result<()> {
    // Run the blocking symlink creation with std on the blocking thread pool.
    let (target, location) = (original.as_ref().to_owned(), link.as_ref().to_owned());
    asyncify(move || std::os::unix::fs::symlink(target, location)).await
}

19
vendor/tokio/src/fs/symlink_dir.rs vendored Normal file
View File

@@ -0,0 +1,19 @@
use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Creates a new directory symlink on the filesystem.
///
/// The `link` path will be a directory symbolic link pointing to the `original`
/// path.
///
/// This is an async version of [`std::os::windows::fs::symlink_dir`][std]
///
/// [std]: https://doc.rust-lang.org/std/os/windows/fs/fn.symlink_dir.html
pub async fn symlink_dir(original: impl AsRef<Path>, link: impl AsRef<Path>) -> io::Result<()> {
    // Run the blocking symlink creation with std on the blocking thread pool.
    let (target, location) = (original.as_ref().to_owned(), link.as_ref().to_owned());
    asyncify(move || std::os::windows::fs::symlink_dir(target, location)).await
}

19
vendor/tokio/src/fs/symlink_file.rs vendored Normal file
View File

@@ -0,0 +1,19 @@
use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Creates a new file symbolic link on the filesystem.
///
/// The `link` path will be a file symbolic link pointing to the `original`
/// path.
///
/// This is an async version of [`std::os::windows::fs::symlink_file`][std]
///
/// [std]: https://doc.rust-lang.org/std/os/windows/fs/fn.symlink_file.html
pub async fn symlink_file(original: impl AsRef<Path>, link: impl AsRef<Path>) -> io::Result<()> {
    // Run the blocking symlink creation with std on the blocking thread pool.
    let (target, location) = (original.as_ref().to_owned(), link.as_ref().to_owned());
    asyncify(move || std::os::windows::fs::symlink_file(target, location)).await
}

15
vendor/tokio/src/fs/symlink_metadata.rs vendored Normal file
View File

@@ -0,0 +1,15 @@
use crate::fs::asyncify;
use std::fs::Metadata;
use std::io;
use std::path::Path;
/// Queries the file system metadata for a path.
///
/// This is an async version of [`std::fs::symlink_metadata`][std]
///
/// [std]: fn@std::fs::symlink_metadata
pub async fn symlink_metadata(path: impl AsRef<Path>) -> io::Result<Metadata> {
    // Run the blocking lstat-equivalent with std on the blocking thread pool.
    let target = path.as_ref().to_owned();
    asyncify(move || std::fs::symlink_metadata(target)).await
}

28
vendor/tokio/src/fs/try_exists.rs vendored Normal file
View File

@@ -0,0 +1,28 @@
use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Returns `Ok(true)` if the path points at an existing entity.
///
/// This function will traverse symbolic links to query information about the
/// destination file. In case of broken symbolic links this will return `Ok(false)`.
///
/// This is the async equivalent of [`std::path::Path::try_exists`][std].
///
/// [std]: fn@std::path::Path::try_exists
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// fs::try_exists("foo.txt").await?;
/// # Ok(())
/// # }
/// ```
pub async fn try_exists(path: impl AsRef<Path>) -> io::Result<bool> {
    // `Path::try_exists` follows symlinks; broken links report `Ok(false)`.
    let target = path.as_ref().to_owned();
    asyncify(move || target.try_exists()).await
}

99
vendor/tokio/src/fs/write.rs vendored Normal file
View File

@@ -0,0 +1,99 @@
use crate::{fs::asyncify, util::as_ref::OwnedBuf};
use std::{io, path::Path};
/// Creates a future that will open a file for writing and write the entire
/// contents of `contents` to it.
///
/// This is the async equivalent of [`std::fs::write`][std].
///
/// This operation is implemented by running the equivalent blocking operation
/// on a separate thread pool using [`spawn_blocking`].
///
/// [`spawn_blocking`]: crate::task::spawn_blocking
/// [std]: fn@std::fs::write
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// fs::write("foo.txt", b"Hello world!").await?;
/// # Ok(())
/// # }
/// ```
pub async fn write(path: impl AsRef<Path>, contents: impl AsRef<[u8]>) -> io::Result<()> {
    let path = path.as_ref();
    // Upgrade the borrowed bytes to an owned buffer that can be moved into a
    // 'static closure/future.
    let contents = crate::util::as_ref::upgrade(contents);

    #[cfg(all(
        tokio_unstable,
        feature = "io-uring",
        feature = "rt",
        feature = "fs",
        target_os = "linux"
    ))]
    {
        let handle = crate::runtime::Handle::current();
        let driver_handle = handle.inner.driver().io();

        // Prefer io_uring when the driver reports the `Write` opcode as
        // available; otherwise fall through to the blocking-pool path.
        if driver_handle
            .check_and_init(io_uring::opcode::Write::CODE)
            .await?
        {
            return write_uring(path, contents).await;
        }
    }

    write_spawn_blocking(path, contents).await
}
#[cfg(all(
    tokio_unstable,
    feature = "io-uring",
    feature = "rt",
    feature = "fs",
    target_os = "linux"
))]
/// io_uring implementation of `write`: creates/truncates the file at `path`
/// and writes the whole buffer, retrying on short writes and `EINTR`.
async fn write_uring(path: &Path, mut buf: OwnedBuf) -> io::Result<()> {
    use crate::{fs::OpenOptions, runtime::driver::op::Op};
    use std::os::fd::OwnedFd;

    let file = OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open(path)
        .await?;

    let mut fd: OwnedFd = file
        .try_into_std()
        .expect("unexpected in-flight operation detected")
        .into();

    let total: usize = buf.as_ref().len();
    let mut buf_offset: usize = 0;
    let mut file_offset: u64 = 0;

    // Loop until every byte has been written, advancing both the buffer
    // cursor and the file offset by each write's returned length.
    while buf_offset < total {
        let (res, _buf, _fd) = Op::write_at(fd, buf, buf_offset, file_offset)?.await;

        let n = match res {
            // A successful zero-length write would loop forever; report it.
            Ok(0) => return Err(io::ErrorKind::WriteZero.into()),
            Ok(n) => n,
            // EINTR: treat as zero bytes written and retry.
            Err(e) if e.kind() == io::ErrorKind::Interrupted => 0,
            Err(e) => return Err(e),
        };

        buf = _buf;
        fd = _fd;
        buf_offset += n as usize;
        file_offset += n as u64;
    }

    Ok(())
}
/// Fallback path: runs the whole `std::fs::write` on the blocking thread pool.
async fn write_spawn_blocking(path: &Path, contents: OwnedBuf) -> io::Result<()> {
    // Owned path so the closure is `'static`.
    let owned_path = path.to_owned();
    asyncify(move || std::fs::write(owned_path, contents)).await
}

22
vendor/tokio/src/future/block_on.rs vendored Normal file
View File

@@ -0,0 +1,22 @@
use std::future::Future;
cfg_rt! {
    // With the `rt` feature, blocking is only legal from a thread that is not
    // currently driving a runtime; entering a "blocking region" enforces that.
    #[track_caller]
    pub(crate) fn block_on<F: Future>(f: F) -> F::Output {
        let mut e = crate::runtime::context::try_enter_blocking_region().expect(
            "Cannot block the current thread from within a runtime. This \
            happens because a function attempted to block the current \
            thread while the thread is being used to drive asynchronous \
            tasks."
        );
        // `block_on` on the region only fails on internal invariant
        // violations, hence the unwrap.
        e.block_on(f).unwrap()
    }
}
cfg_not_rt! {
    /// Without the `rt` feature there is no runtime to conflict with, so the
    /// future is simply driven on a thread-local parker.
    #[track_caller]
    pub(crate) fn block_on<F: Future>(f: F) -> F::Output {
        let mut parker = crate::runtime::park::CachedParkThread::new();
        parker.block_on(f).unwrap()
    }
}

123
vendor/tokio/src/future/maybe_done.rs vendored Normal file
View File

@@ -0,0 +1,123 @@
//! Definition of the [`MaybeDone`] combinator.
use pin_project_lite::pin_project;
use std::future::{Future, IntoFuture};
use std::pin::Pin;
use std::task::{ready, Context, Poll};
pin_project! {
    /// A future that may have completed.
    #[derive(Debug)]
    #[project = MaybeDoneProj]
    #[project_replace = MaybeDoneProjReplace]
    #[repr(C)] // https://github.com/rust-lang/miri/issues/3780
    pub enum MaybeDone<Fut: Future> {
        /// A not-yet-completed future.
        Future { #[pin] future: Fut },
        /// The output of the completed future.
        Done { output: Fut::Output },
        /// The empty variant after the result of a [`MaybeDone`] has been
        /// taken using the [`take_output`](MaybeDone::take_output) method.
        Gone,
    }
}
/// Wraps a future into a `MaybeDone`, starting in the not-yet-completed state.
pub fn maybe_done<F: IntoFuture>(future: F) -> MaybeDone<F::IntoFuture> {
    let inner = future.into_future();
    MaybeDone::Future { future: inner }
}
impl<Fut: Future> MaybeDone<Fut> {
    /// Returns an [`Option`] containing a mutable reference to the output of the future.
    /// The output of this method will be [`Some`] if and only if the inner
    /// future has been completed and [`take_output`](MaybeDone::take_output)
    /// has not yet been called.
    pub fn output_mut(self: Pin<&mut Self>) -> Option<&mut Fut::Output> {
        match self.project() {
            // `output` is structurally not pinned, so a plain `&mut` is fine.
            MaybeDoneProj::Done { output } => Some(output),
            _ => None,
        }
    }
    /// Attempts to take the output of a `MaybeDone` without driving it
    /// towards completion.
    #[inline]
    pub fn take_output(self: Pin<&mut Self>) -> Option<Fut::Output> {
        // Inspecting the discriminant through `*self` needs no projection.
        match *self {
            MaybeDone::Done { .. } => {}
            MaybeDone::Future { .. } | MaybeDone::Gone => return None,
        };
        // Replace with `Gone` and move the stored output out.
        if let MaybeDoneProjReplace::Done { output } = self.project_replace(MaybeDone::Gone) {
            Some(output)
        } else {
            // We just checked the variant is `Done` above.
            unreachable!()
        }
    }
}
impl<Fut: Future> Future for MaybeDone<Fut> {
    // Completion is signalled with `()`; the real output is stored in the
    // `Done` variant and retrieved via `take_output`/`output_mut`.
    type Output = ();
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let output = match self.as_mut().project() {
            MaybeDoneProj::Future { future } => ready!(future.poll(cx)),
            // Already finished: polling again is a no-op, not an error.
            MaybeDoneProj::Done { .. } => return Poll::Ready(()),
            MaybeDoneProj::Gone => panic!("MaybeDone polled after value taken"),
        };
        // Store the output, dropping the completed future in place.
        self.set(MaybeDone::Done { output });
        Poll::Ready(())
    }
}
// Test for https://github.com/tokio-rs/tokio/issues/6729
#[cfg(test)]
mod miri_tests {
    use super::maybe_done;
    use std::{
        future::Future,
        pin::Pin,
        sync::Arc,
        task::{Context, Poll, Wake},
    };
    // A future that mutates borrowed data on every poll and never completes;
    // polling it twice exercises the aliasing pattern miri flagged.
    struct ThingAdder<'a> {
        thing: &'a mut String,
    }
    impl Future for ThingAdder<'_> {
        type Output = ();
        fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
            // SAFETY: test-only; the future is never moved after being pinned.
            unsafe {
                *self.get_unchecked_mut().thing += ", world";
            }
            Poll::Pending
        }
    }
    #[test]
    fn maybe_done_miri() {
        let mut thing = "hello".to_owned();
        // The async block is necessary to trigger the miri failure.
        #[allow(clippy::redundant_async_block)]
        let fut = async move { ThingAdder { thing: &mut thing }.await };
        let mut fut = maybe_done(fut);
        // SAFETY: `fut` is a local that is not moved after this point.
        let mut fut = unsafe { Pin::new_unchecked(&mut fut) };
        let waker = Arc::new(DummyWaker).into();
        let mut ctx = Context::from_waker(&waker);
        assert_eq!(fut.as_mut().poll(&mut ctx), Poll::Pending);
        assert_eq!(fut.as_mut().poll(&mut ctx), Poll::Pending);
    }
    // Waker that ignores wakeups; the test polls manually.
    struct DummyWaker;
    impl Wake for DummyWaker {
        fn wake(self: Arc<Self>) {}
    }
}

28
vendor/tokio/src/future/mod.rs vendored Normal file
View File

@@ -0,0 +1,28 @@
#![cfg_attr(not(feature = "macros"), allow(unreachable_pub))]
//! Asynchronous values.
// `maybe_done` is shared by the join/try_join macros and process handling.
#[cfg(any(feature = "macros", feature = "process"))]
pub(crate) mod maybe_done;
cfg_process! {
    mod try_join;
    pub(crate) use try_join::try_join3;
}
cfg_sync! {
    mod block_on;
    pub(crate) use block_on::block_on;
}
// With tracing enabled, `Future` within the crate resolves to the
// instrumented wrapper; otherwise it is plain `std::future::Future`.
cfg_trace! {
    mod trace;
    #[allow(unused_imports)]
    pub(crate) use trace::InstrumentedFuture as Future;
}
cfg_not_trace! {
    cfg_rt! {
        pub(crate) use std::future::Future;
    }
}

11
vendor/tokio/src/future/trace.rs vendored Normal file
View File

@@ -0,0 +1,11 @@
use std::future::Future;
// Extension of `Future` that exposes the tracing span id of the task, so the
// runtime can correlate a polled future with its span.
pub(crate) trait InstrumentedFuture: Future {
    fn id(&self) -> Option<tracing::Id>;
}
impl<F: Future> InstrumentedFuture for tracing::instrument::Instrumented<F> {
    fn id(&self) -> Option<tracing::Id> {
        // The span may be disabled, in which case it has no id.
        self.span().id()
    }
}

82
vendor/tokio/src/future/try_join.rs vendored Normal file
View File

@@ -0,0 +1,82 @@
use crate::future::maybe_done::{maybe_done, MaybeDone};
use pin_project_lite::pin_project;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Builds a future that concurrently drives three fallible futures and
/// resolves to a tuple of their outputs, or the first error.
pub(crate) fn try_join3<T1, F1, T2, F2, T3, F3, E>(
    future1: F1,
    future2: F2,
    future3: F3,
) -> TryJoin3<F1, F2, F3>
where
    F1: Future<Output = Result<T1, E>>,
    F2: Future<Output = Result<T2, E>>,
    F3: Future<Output = Result<T3, E>>,
{
    // Wrap each future so its output can be stored until all are complete.
    let future1 = maybe_done(future1);
    let future2 = maybe_done(future2);
    let future3 = maybe_done(future3);
    TryJoin3 {
        future1,
        future2,
        future3,
    }
}
pin_project! {
    /// Future combinator holding the three sub-futures of [`try_join3`],
    /// each wrapped in `MaybeDone` so completed outputs can be buffered.
    pub(crate) struct TryJoin3<F1, F2, F3>
    where
        F1: Future,
        F2: Future,
        F3: Future,
    {
        #[pin]
        future1: MaybeDone<F1>,
        #[pin]
        future2: MaybeDone<F2>,
        #[pin]
        future3: MaybeDone<F3>,
    }
}
impl<T1, F1, T2, F2, T3, F3, E> Future for TryJoin3<F1, F2, F3>
where
    F1: Future<Output = Result<T1, E>>,
    F2: Future<Output = Result<T2, E>>,
    F3: Future<Output = Result<T3, E>>,
{
    type Output = Result<(T1, T2, T3), E>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Poll every sub-future on each call; return the first error
        // immediately, and produce `Ok` only once all three have completed.
        let mut all_done = true;
        let mut me = self.project();
        if me.future1.as_mut().poll(cx).is_pending() {
            all_done = false;
        } else if me.future1.as_mut().output_mut().unwrap().is_err() {
            // Completed with an error: take it and short-circuit.
            return Poll::Ready(Err(me.future1.take_output().unwrap().err().unwrap()));
        }
        if me.future2.as_mut().poll(cx).is_pending() {
            all_done = false;
        } else if me.future2.as_mut().output_mut().unwrap().is_err() {
            return Poll::Ready(Err(me.future2.take_output().unwrap().err().unwrap()));
        }
        if me.future3.as_mut().poll(cx).is_pending() {
            all_done = false;
        } else if me.future3.as_mut().output_mut().unwrap().is_err() {
            return Poll::Ready(Err(me.future3.take_output().unwrap().err().unwrap()));
        }
        if all_done {
            // All sub-futures are `Done` with `Ok` values; the unwraps
            // cannot fail because errors were returned above.
            Poll::Ready(Ok((
                me.future1.take_output().unwrap().ok().unwrap(),
                me.future2.take_output().unwrap().ok().unwrap(),
                me.future3.take_output().unwrap().ok().unwrap(),
            )))
        } else {
            Poll::Pending
        }
    }
}

1
vendor/tokio/src/fuzz.rs vendored Normal file
View File

@@ -0,0 +1 @@
pub use crate::util::linked_list::tests::fuzz_linked_list;

117
vendor/tokio/src/io/async_buf_read.rs vendored Normal file
View File

@@ -0,0 +1,117 @@
use crate::io::AsyncRead;
use std::io;
use std::ops::DerefMut;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Reads bytes asynchronously.
///
/// This trait is analogous to [`std::io::BufRead`], but integrates with
/// the asynchronous task system. In particular, the [`poll_fill_buf`] method,
/// unlike [`BufRead::fill_buf`], will automatically queue the current task for wakeup
/// and return if data is not yet available, rather than blocking the calling
/// thread.
///
/// Utilities for working with `AsyncBufRead` values are provided by
/// [`AsyncBufReadExt`].
///
/// [`std::io::BufRead`]: std::io::BufRead
/// [`poll_fill_buf`]: AsyncBufRead::poll_fill_buf
/// [`BufRead::fill_buf`]: std::io::BufRead::fill_buf
/// [`AsyncBufReadExt`]: crate::io::AsyncBufReadExt
pub trait AsyncBufRead: AsyncRead {
    /// Attempts to return the contents of the internal buffer, filling it with more data
    /// from the inner reader if it is empty.
    ///
    /// On success, returns `Poll::Ready(Ok(buf))`.
    ///
    /// If no data is available for reading, the method returns
    /// `Poll::Pending` and arranges for the current task (via
    /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes
    /// readable or is closed.
    ///
    /// This function is a lower-level call. It needs to be paired with the
    /// [`consume`] method to function properly. When calling this
    /// method, none of the contents will be "read" in the sense that later
    /// calling [`poll_read`] may return the same contents. As such, [`consume`] must
    /// be called with the number of bytes that are consumed from this buffer to
    /// ensure that the bytes are never returned twice.
    ///
    /// An empty buffer returned indicates that the stream has reached EOF.
    ///
    /// [`poll_read`]: AsyncRead::poll_read
    /// [`consume`]: AsyncBufRead::consume
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>>;
    /// Tells this buffer that `amt` bytes have been consumed from the buffer,
    /// so they should no longer be returned in calls to [`poll_read`].
    ///
    /// This function is a lower-level call. It needs to be paired with the
    /// [`poll_fill_buf`] method to function properly. This function does
    /// not perform any I/O, it simply informs this object that some amount of
    /// its buffer, returned from [`poll_fill_buf`], has been consumed and should
    /// no longer be returned. As such, this function may do odd things if
    /// [`poll_fill_buf`] isn't called before calling it.
    ///
    /// The `amt` must be `<=` the number of bytes in the buffer returned by
    /// [`poll_fill_buf`].
    ///
    /// [`poll_read`]: AsyncRead::poll_read
    /// [`poll_fill_buf`]: AsyncBufRead::poll_fill_buf
    fn consume(self: Pin<&mut Self>, amt: usize);
}
// Shared method bodies for the `Box<T>` and `&mut T` forwarding impls below:
// both simply delegate to the inner `T` through `Pin::new` (sound because the
// impls require `T: Unpin`).
macro_rules! deref_async_buf_read {
    () => {
        fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
            Pin::new(&mut **self.get_mut()).poll_fill_buf(cx)
        }
        fn consume(mut self: Pin<&mut Self>, amt: usize) {
            Pin::new(&mut **self).consume(amt)
        }
    };
}
// Forward through a box to the inner reader.
impl<T: ?Sized + AsyncBufRead + Unpin> AsyncBufRead for Box<T> {
    deref_async_buf_read!();
}
// Forward through a mutable reference to the inner reader.
impl<T: ?Sized + AsyncBufRead + Unpin> AsyncBufRead for &mut T {
    deref_async_buf_read!();
}
impl<P> AsyncBufRead for Pin<P>
where
    P: DerefMut,
    P::Target: AsyncBufRead,
{
    // `pin_as_deref_mut` turns `Pin<&mut Pin<P>>` into `Pin<&mut P::Target>`
    // without requiring `Unpin`.
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        crate::util::pin_as_deref_mut(self).poll_fill_buf(cx)
    }
    fn consume(self: Pin<&mut Self>, amt: usize) {
        crate::util::pin_as_deref_mut(self).consume(amt);
    }
}
/// A byte slice is its own buffer: filling is free and consuming just
/// advances the slice.
impl AsyncBufRead for &[u8] {
    fn poll_fill_buf(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        // The entire remaining slice is the buffer; empty means EOF.
        Poll::Ready(Ok(*self))
    }
    fn consume(mut self: Pin<&mut Self>, amt: usize) {
        // Copy the slice reference out (it is `Copy`) before reslicing, so
        // the new subslice keeps the original lifetime.
        let remaining: &[u8] = *self;
        *self = &remaining[amt..];
    }
}
/// In-memory cursors never block, so the async methods delegate directly to
/// the synchronous `std::io::BufRead` implementation.
impl<T: AsRef<[u8]> + Unpin> AsyncBufRead for io::Cursor<T> {
    fn poll_fill_buf(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        let cursor = self.get_mut();
        Poll::Ready(io::BufRead::fill_buf(cursor))
    }
    fn consume(self: Pin<&mut Self>, amt: usize) {
        let cursor = self.get_mut();
        io::BufRead::consume(cursor, amt);
    }
}

1439
vendor/tokio/src/io/async_fd.rs vendored Normal file

File diff suppressed because it is too large Load Diff

133
vendor/tokio/src/io/async_read.rs vendored Normal file
View File

@@ -0,0 +1,133 @@
use super::ReadBuf;
use std::io;
use std::ops::DerefMut;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Reads bytes from a source.
///
/// This trait is analogous to the [`std::io::Read`] trait, but integrates with
/// the asynchronous task system. In particular, the [`poll_read`] method,
/// unlike [`Read::read`], will automatically queue the current task for wakeup
/// and return if data is not yet available, rather than blocking the calling
/// thread.
///
/// Specifically, this means that the `poll_read` function will return one of
/// the following:
///
/// * `Poll::Ready(Ok(()))` means that data was immediately read and placed into
/// the output buffer. The amount of data read can be determined by the
/// increase in the length of the slice returned by `ReadBuf::filled`. If the
/// difference is 0, either EOF has been reached, or the output buffer had zero
/// capacity (i.e. `buf.remaining()` == 0).
///
/// * `Poll::Pending` means that no data was read into the buffer
/// provided. The I/O object is not currently readable but may become readable
/// in the future. Most importantly, **the current future's task is scheduled
/// to get unparked when the object is readable**. This means that like
/// `Future::poll` you'll receive a notification when the I/O object is
/// readable again.
///
/// * `Poll::Ready(Err(e))` for other errors are standard I/O errors coming from the
/// underlying object.
///
/// This trait importantly means that the `read` method only works in the
/// context of a future's task. The object may panic if used outside of a task.
///
/// Utilities for working with `AsyncRead` values are provided by
/// [`AsyncReadExt`].
///
/// [`poll_read`]: AsyncRead::poll_read
/// [`std::io::Read`]: std::io::Read
/// [`Read::read`]: std::io::Read::read
/// [`AsyncReadExt`]: crate::io::AsyncReadExt
pub trait AsyncRead {
    /// Attempts to read from the `AsyncRead` into `buf`.
    ///
    /// On success, returns `Poll::Ready(Ok(()))` and places data in the
    /// unfilled portion of `buf`. If no data was read (`buf.filled().len()` is
    /// unchanged), it implies that EOF has been reached, or the output buffer
    /// had zero capacity (i.e. `buf.remaining()` == 0).
    ///
    /// If no data is available for reading, the method returns `Poll::Pending`
    /// and arranges for the current task (via `cx.waker()`) to receive a
    /// notification when the object becomes readable or is closed.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>>;
}
// Shared body for the `Box<T>` / `&mut T` forwarding impls: delegate to the
// inner `T` via `Pin::new` (sound because those impls require `T: Unpin`).
macro_rules! deref_async_read {
    () => {
        fn poll_read(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<io::Result<()>> {
            Pin::new(&mut **self).poll_read(cx, buf)
        }
    };
}
// Forward through a box to the inner reader.
impl<T: ?Sized + AsyncRead + Unpin> AsyncRead for Box<T> {
    deref_async_read!();
}
// Forward through a mutable reference to the inner reader.
impl<T: ?Sized + AsyncRead + Unpin> AsyncRead for &mut T {
    deref_async_read!();
}
impl<P> AsyncRead for Pin<P>
where
    P: DerefMut,
    P::Target: AsyncRead,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Flatten `Pin<&mut Pin<P>>` into `Pin<&mut P::Target>` without
        // requiring `Unpin`.
        crate::util::pin_as_deref_mut(self).poll_read(cx, buf)
    }
}
/// Reading from a byte slice copies as much as fits and advances the slice;
/// it never returns `Pending`.
impl AsyncRead for &[u8] {
    fn poll_read(
        mut self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Copy the slice reference out (it is `Copy`) so the split below is
        // not tied to a borrow of `self`.
        let remaining: &[u8] = *self;
        let len = std::cmp::min(remaining.len(), buf.remaining());
        let (head, tail) = remaining.split_at(len);
        buf.put_slice(head);
        *self = tail;
        Poll::Ready(Ok(()))
    }
}
// Reading from an in-memory cursor never blocks: copy from the current
// position and advance it.
impl<T: AsRef<[u8]> + Unpin> AsyncRead for io::Cursor<T> {
    fn poll_read(
        mut self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        let pos = self.position();
        let slice: &[u8] = (*self).get_ref().as_ref();
        // The position could technically be out of bounds, so don't panic...
        if pos > slice.len() as u64 {
            // Treat an out-of-range position as EOF (nothing read).
            return Poll::Ready(Ok(()));
        }
        let start = pos as usize;
        let amt = std::cmp::min(slice.len() - start, buf.remaining());
        // Add won't overflow because of pos check above.
        let end = start + amt;
        buf.put_slice(&slice[start..end]);
        self.set_position(end as u64);
        Poll::Ready(Ok(()))
    }
}

96
vendor/tokio/src/io/async_seek.rs vendored Normal file
View File

@@ -0,0 +1,96 @@
use std::io::{self, SeekFrom};
use std::ops::DerefMut;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Seek bytes asynchronously.
///
/// This trait is analogous to the [`std::io::Seek`] trait, but integrates
/// with the asynchronous task system. In particular, the `start_seek`
/// method, unlike [`Seek::seek`], will not block the calling thread.
///
/// Utilities for working with `AsyncSeek` values are provided by
/// [`AsyncSeekExt`].
///
/// [`std::io::Seek`]: std::io::Seek
/// [`Seek::seek`]: std::io::Seek::seek()
/// [`AsyncSeekExt`]: crate::io::AsyncSeekExt
pub trait AsyncSeek {
    /// Attempts to seek to an offset, in bytes, in a stream.
    ///
    /// A seek beyond the end of a stream is allowed, but behavior is defined
    /// by the implementation.
    ///
    /// If this function returns successfully, then the job has been submitted.
    /// To find out when it completes, call `poll_complete`.
    ///
    /// # Errors
    ///
    /// This function can return [`io::ErrorKind::Other`] in case there is
    /// another seek in progress. To avoid this, it is advisable that any call
    /// to `start_seek` is preceded by a call to `poll_complete` to ensure all
    /// pending seeks have completed.
    fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> io::Result<()>;
    /// Waits for a seek operation to complete.
    ///
    /// If the seek operation completed successfully, this method returns the
    /// new position from the start of the stream. That position can be used
    /// later with [`SeekFrom::Start`].
    ///
    /// The position returned by calling this method can only be relied on right
    /// after `start_seek`. If you have changed the position by e.g. reading or
    /// writing since calling `start_seek`, then it is unspecified whether the
    /// returned position takes that position change into account. Similarly, if
    /// `start_seek` has never been called, then it is unspecified whether
    /// `poll_complete` returns the actual position or some other placeholder
    /// value (such as 0).
    ///
    /// # Errors
    ///
    /// Seeking to a negative offset is considered an error.
    fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>>;
}
// Shared body for the `Box<T>` / `&mut T` forwarding impls: delegate to the
// inner `T` via `Pin::new` (sound because those impls require `T: Unpin`).
macro_rules! deref_async_seek {
    () => {
        fn start_seek(mut self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> {
            Pin::new(&mut **self).start_seek(pos)
        }
        fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
            Pin::new(&mut **self).poll_complete(cx)
        }
    };
}
// Forward through a box to the inner seeker.
impl<T: ?Sized + AsyncSeek + Unpin> AsyncSeek for Box<T> {
    deref_async_seek!();
}
// Forward through a mutable reference to the inner seeker.
impl<T: ?Sized + AsyncSeek + Unpin> AsyncSeek for &mut T {
    deref_async_seek!();
}
impl<P> AsyncSeek for Pin<P>
where
    P: DerefMut,
    P::Target: AsyncSeek,
{
    // Flatten `Pin<&mut Pin<P>>` into `Pin<&mut P::Target>` without
    // requiring `Unpin`.
    fn start_seek(self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> {
        crate::util::pin_as_deref_mut(self).start_seek(pos)
    }
    fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
        crate::util::pin_as_deref_mut(self).poll_complete(cx)
    }
}
/// In-memory cursors seek synchronously, so `start_seek` performs the whole
/// seek and `poll_complete` just reports the resulting position.
impl<T: AsRef<[u8]> + Unpin> AsyncSeek for io::Cursor<T> {
    fn start_seek(mut self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> {
        // Perform the seek eagerly; the returned offset is re-read later.
        io::Seek::seek(&mut *self, pos)?;
        Ok(())
    }
    fn poll_complete(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<u64>> {
        let position = self.get_mut().position();
        Poll::Ready(Ok(position))
    }
}

402
vendor/tokio/src/io/async_write.rs vendored Normal file
View File

@@ -0,0 +1,402 @@
use std::io::{self, IoSlice};
use std::ops::DerefMut;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Writes bytes asynchronously.
///
/// This trait is analogous to the [`std::io::Write`] trait, but integrates with
/// the asynchronous task system. In particular, the [`poll_write`] method,
/// unlike [`Write::write`], will automatically queue the current task for wakeup
/// and return if data is not yet available, rather than blocking the calling
/// thread.
///
/// Specifically, this means that the [`poll_write`] function will return one of
/// the following:
///
/// * `Poll::Ready(Ok(n))` means that `n` bytes of data was immediately
/// written.
///
/// * `Poll::Pending` means that no data was written from the buffer
/// provided. The I/O object is not currently writable but may become writable
/// in the future. Most importantly, **the current future's task is scheduled
/// to get unparked when the object is writable**. This means that like
/// `Future::poll` you'll receive a notification when the I/O object is
/// writable again.
///
/// * `Poll::Ready(Err(e))` for other errors are standard I/O errors coming from the
/// underlying object.
///
/// Utilities for working with `AsyncWrite` values are provided by
/// [`AsyncWriteExt`]. Most users will interact with `AsyncWrite` types through
/// these extension methods, which provide ergonomic async functions such as
/// `write_all` and `flush`.
///
/// [`std::io::Write`]: std::io::Write
/// [`Write::write`]: std::io::Write::write()
/// [`poll_write`]: AsyncWrite::poll_write()
/// [`AsyncWriteExt`]: crate::io::AsyncWriteExt
pub trait AsyncWrite {
    /// Attempt to write bytes from `buf` into the object.
    ///
    /// On success, returns `Poll::Ready(Ok(num_bytes_written))`. If successful,
    /// then it must be guaranteed that `n <= buf.len()`. A return value of `0`
    /// typically means that the underlying object is no longer able to accept
    /// bytes and will likely not be able to in the future as well, or that the
    /// buffer provided is empty.
    ///
    /// If the object is not ready for writing, the method returns
    /// `Poll::Pending` and arranges for the current task (via
    /// `cx.waker()`) to receive a notification when the object becomes
    /// writable or is closed.
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>>;
    /// Attempts to flush the object, ensuring that any buffered data reach
    /// their destination.
    ///
    /// On success, returns `Poll::Ready(Ok(()))`.
    ///
    /// If flushing cannot immediately complete, this method returns
    /// `Poll::Pending` and arranges for the current task (via
    /// `cx.waker()`) to receive a notification when the object can make
    /// progress towards flushing.
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>>;
    /// Initiates or attempts to shut down this writer, returning success when
    /// the I/O connection has completely shut down.
    ///
    /// This method is intended to be used for asynchronous shutdown of I/O
    /// connections. For example this is suitable for implementing shutdown of a
    /// TLS connection or calling `TcpStream::shutdown` on a proxied connection.
    /// Protocols sometimes need to flush out final pieces of data or otherwise
    /// perform a graceful shutdown handshake, reading/writing more data as
    /// appropriate. This method is the hook for such protocols to implement the
    /// graceful shutdown logic.
    ///
    /// This `shutdown` method is required by implementers of the
    /// `AsyncWrite` trait. Wrappers typically just want to proxy this call
    /// through to the wrapped type, and base types will typically implement
    /// shutdown logic here or just return `Ok(().into())`. Note that if you're
    /// wrapping an underlying `AsyncWrite` a call to `shutdown` implies that
    /// transitively the entire stream has been shut down. After your wrapper's
    /// shutdown logic has been executed you should shut down the underlying
    /// stream.
    ///
    /// Invocation of a `shutdown` implies an invocation of `flush`. Once this
    /// method returns `Ready` it implies that a flush successfully happened
    /// before the shutdown happened. That is, callers don't need to call
    /// `flush` before calling `shutdown`. They can rely that by calling
    /// `shutdown` any pending buffered data will be written out.
    ///
    /// # Return value
    ///
    /// This function returns a `Poll<io::Result<()>>` classified as such:
    ///
    /// * `Poll::Ready(Ok(()))` - indicates that the connection was
    ///   successfully shut down and is now safe to deallocate/drop/close
    ///   resources associated with it. This method means that the current task
    ///   will no longer receive any notifications due to this method and the
    ///   I/O object itself is likely no longer usable.
    ///
    /// * `Poll::Pending` - indicates that shutdown is initiated but could
    ///   not complete just yet. This may mean that more I/O needs to happen to
    ///   continue this shutdown operation. The current task is scheduled to
    ///   receive a notification when it's otherwise ready to continue the
    ///   shutdown operation. When woken up this method should be called again.
    ///
    /// * `Poll::Ready(Err(e))` - indicates a fatal error has happened with shutdown,
    ///   indicating that the shutdown operation did not complete successfully.
    ///   This typically means that the I/O object is no longer usable.
    ///
    /// # Errors
    ///
    /// This function can return normal I/O errors through `Err`, described
    /// above. Additionally this method may also render the underlying
    /// `Write::write` method no longer usable (e.g. will return errors in the
    /// future). It's recommended that once `shutdown` is called the
    /// `write` method is no longer called.
    ///
    /// # Panics
    ///
    /// This function will panic if not called within the context of a future's
    /// task.
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>>;
    /// Like [`poll_write`], except that it writes from a slice of buffers.
    ///
    /// Data is copied from each buffer in order, with the final buffer
    /// read from possibly being only partially consumed. This method must
    /// behave as a call to [`write`] with the buffers concatenated would.
    ///
    /// The default implementation calls [`poll_write`] with either the first nonempty
    /// buffer provided, or an empty one if none exists.
    ///
    /// On success, returns `Poll::Ready(Ok(num_bytes_written))`.
    ///
    /// If the object is not ready for writing, the method returns
    /// `Poll::Pending` and arranges for the current task (via
    /// `cx.waker()`) to receive a notification when the object becomes
    /// writable or is closed.
    ///
    /// # Note
    ///
    /// This should be implemented as a single "atomic" write action. If any
    /// data has been partially written, it is wrong to return an error or
    /// pending.
    ///
    /// [`poll_write`]: AsyncWrite::poll_write
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        // Default: forward the first non-empty buffer (or an empty slice) to
        // the scalar `poll_write`.
        let buf = bufs
            .iter()
            .find(|b| !b.is_empty())
            .map_or(&[][..], |b| &**b);
        self.poll_write(cx, buf)
    }
    /// Determines if this writer has an efficient [`poll_write_vectored`]
    /// implementation.
    ///
    /// If a writer does not override the default [`poll_write_vectored`]
    /// implementation, code using it may want to avoid the method all together
    /// and coalesce writes into a single buffer for higher performance.
    ///
    /// The default implementation returns `false`.
    ///
    /// [`poll_write_vectored`]: AsyncWrite::poll_write_vectored
    fn is_write_vectored(&self) -> bool {
        false
    }
}
// Shared bodies for the `Box<T>` / `&mut T` forwarding impls below: every
// method delegates to the inner `T` via `Pin::new` (sound because those impls
// require `T: Unpin`).
macro_rules! deref_async_write {
    () => {
        fn poll_write(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            Pin::new(&mut **self).poll_write(cx, buf)
        }
        fn poll_write_vectored(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            bufs: &[IoSlice<'_>],
        ) -> Poll<io::Result<usize>> {
            Pin::new(&mut **self).poll_write_vectored(cx, bufs)
        }
        fn is_write_vectored(&self) -> bool {
            (**self).is_write_vectored()
        }
        fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Pin::new(&mut **self).poll_flush(cx)
        }
        fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Pin::new(&mut **self).poll_shutdown(cx)
        }
    };
}
// Forward through a box to the inner writer.
impl<T: ?Sized + AsyncWrite + Unpin> AsyncWrite for Box<T> {
    deref_async_write!();
}
// Forward through a mutable reference to the inner writer.
impl<T: ?Sized + AsyncWrite + Unpin> AsyncWrite for &mut T {
    deref_async_write!();
}
impl<P> AsyncWrite for Pin<P>
where
    P: DerefMut,
    P::Target: AsyncWrite,
{
    // `pin_as_deref_mut` flattens `Pin<&mut Pin<P>>` into
    // `Pin<&mut P::Target>` without requiring `Unpin`.
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        crate::util::pin_as_deref_mut(self).poll_write(cx, buf)
    }
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        crate::util::pin_as_deref_mut(self).poll_write_vectored(cx, bufs)
    }
    fn is_write_vectored(&self) -> bool {
        (**self).is_write_vectored()
    }
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        crate::util::pin_as_deref_mut(self).poll_flush(cx)
    }
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        crate::util::pin_as_deref_mut(self).poll_shutdown(cx)
    }
}
/// Writing to a `Vec<u8>` appends in memory, so every operation completes
/// immediately.
impl AsyncWrite for Vec<u8> {
    fn poll_write(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        let me = self.get_mut();
        me.extend_from_slice(buf);
        // Appending never fails and always accepts the full buffer.
        Poll::Ready(Ok(buf.len()))
    }
    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        let written = io::Write::write_vectored(&mut *self, bufs);
        Poll::Ready(written)
    }
    fn is_write_vectored(&self) -> bool {
        true
    }
    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        // Data already lives in the `Vec`; there is nothing to flush.
        Poll::Ready(Ok(()))
    }
    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(Ok(()))
    }
}
/// A cursor over a fixed byte buffer writes synchronously through
/// `std::io::Write`, so every operation resolves immediately.
impl AsyncWrite for io::Cursor<&mut [u8]> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        let written = io::Write::write(&mut *self, buf);
        Poll::Ready(written)
    }
    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        let written = io::Write::write_vectored(&mut *self, bufs);
        Poll::Ready(written)
    }
    fn is_write_vectored(&self) -> bool {
        true
    }
    fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
        let flushed = io::Write::flush(&mut *self);
        Poll::Ready(flushed)
    }
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        // For an in-memory writer, shutting down is just flushing.
        self.poll_flush(cx)
    }
}
// Cursor over a borrowed `Vec` delegates to the synchronous `std::io::Write`
// impl; all operations complete immediately.
impl AsyncWrite for io::Cursor<&mut Vec<u8>> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write(&mut *self, buf))
    }
    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write_vectored(&mut *self, bufs))
    }
    fn is_write_vectored(&self) -> bool {
        true
    }
    fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(io::Write::flush(&mut *self))
    }
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        // Shutdown of an in-memory writer reduces to a flush.
        self.poll_flush(cx)
    }
}
// Cursor over an owned `Vec` delegates to the synchronous `std::io::Write`
// impl; all operations complete immediately.
impl AsyncWrite for io::Cursor<Vec<u8>> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write(&mut *self, buf))
    }
    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write_vectored(&mut *self, bufs))
    }
    fn is_write_vectored(&self) -> bool {
        true
    }
    fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(io::Write::flush(&mut *self))
    }
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        // Shutdown of an in-memory writer reduces to a flush.
        self.poll_flush(cx)
    }
}
// Cursor over a boxed slice delegates to the synchronous `std::io::Write`
// impl; all operations complete immediately.
impl AsyncWrite for io::Cursor<Box<[u8]>> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write(&mut *self, buf))
    }
    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        _: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        Poll::Ready(io::Write::write_vectored(&mut *self, bufs))
    }
    fn is_write_vectored(&self) -> bool {
        true
    }
    fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
        Poll::Ready(io::Write::flush(&mut *self))
    }
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        // Shutdown of an in-memory writer reduces to a flush.
        self.poll_flush(cx)
    }
}

306
vendor/tokio/src/io/blocking.rs vendored Normal file
View File

@@ -0,0 +1,306 @@
use crate::io::sys;
use crate::io::{AsyncRead, AsyncWrite, ReadBuf};
use std::cmp;
use std::future::Future;
use std::io;
use std::io::prelude::*;
use std::mem::MaybeUninit;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
/// Adapter that runs a blocking `Read`/`Write` value on the blocking thread
/// pool so it can be driven through `AsyncRead`/`AsyncWrite`.
///
/// `T` should not implement _both_ Read and Write.
#[derive(Debug)]
pub(crate) struct Blocking<T> {
    /// The wrapped blocking IO value. `None` while a spawned blocking
    /// operation owns it (see `State::Busy`).
    inner: Option<T>,
    /// Whether we are idle or waiting on an in-flight blocking operation.
    state: State<T>,
    /// `true` if the lower IO layer needs flushing.
    need_flush: bool,
}

/// Byte buffer shuttled between the async caller and the blocking pool.
#[derive(Debug)]
pub(crate) struct Buf {
    /// Backing storage.
    buf: Vec<u8>,
    /// Read cursor; `buf[pos..]` is the unconsumed portion.
    pos: usize,
}

/// Upper bound (2 MiB) on how many bytes a single blocking read or write
/// moves through `Buf`.
pub(crate) const DEFAULT_MAX_BUF_SIZE: usize = 2 * 1024 * 1024;

/// State machine for `Blocking`.
#[derive(Debug)]
enum State<T> {
    /// No operation in flight; holds the reusable buffer.
    Idle(Option<Buf>),
    /// A blocking task owns the buffer and the IO value; it resolves to the
    /// operation result, the buffer, and the IO value so both can be
    /// reclaimed.
    Busy(sys::Blocking<(io::Result<usize>, Buf, T)>),
}
cfg_io_blocking! {
    impl<T> Blocking<T> {
        /// Wraps `inner` in a `Blocking` adapter with an empty buffer and a
        /// clean flush state.
        ///
        /// # Safety
        ///
        /// The `Read` implementation of `inner` must never read from the buffer
        /// it is borrowing and must correctly report the length of the data
        /// written into the buffer.
        #[cfg_attr(feature = "fs", allow(dead_code))]
        pub(crate) unsafe fn new(inner: T) -> Blocking<T> {
            Blocking {
                inner: Some(inner),
                // Zero-capacity buffer: it only grows on first use.
                state: State::Idle(Some(Buf::with_capacity(0))),
                need_flush: false,
            }
        }
    }
}
impl<T> AsyncRead for Blocking<T>
where
    T: Read + Unpin + Send + 'static,
{
    /// Reads by spawning blocking `read` calls on the blocking pool and
    /// buffering the result in `Buf`.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        dst: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        loop {
            match self.state {
                State::Idle(ref mut buf_cell) => {
                    let mut buf = buf_cell.take().unwrap();

                    // Serve bytes left over from a previous blocking read
                    // before spawning another operation.
                    if !buf.is_empty() {
                        buf.copy_to(dst);
                        *buf_cell = Some(buf);
                        return Poll::Ready(Ok(()));
                    }

                    let mut inner = self.inner.take().unwrap();

                    // Never read more than the caller can accept or the
                    // global cap, whichever is smaller.
                    let max_buf_size = cmp::min(dst.remaining(), DEFAULT_MAX_BUF_SIZE);

                    self.state = State::Busy(sys::run(move || {
                        // SAFETY: the requirements are satisfied by `Blocking::new`.
                        let res = unsafe { buf.read_from(&mut inner, max_buf_size) };
                        (res, buf, inner)
                    }));
                }
                State::Busy(ref mut rx) => {
                    // Wait for the blocking read to finish, then reclaim the
                    // buffer and the IO value; loop back to `Idle` handling.
                    let (res, mut buf, inner) = ready!(Pin::new(rx).poll(cx))?;
                    self.inner = Some(inner);

                    match res {
                        Ok(_) => {
                            buf.copy_to(dst);
                            self.state = State::Idle(Some(buf));
                            return Poll::Ready(Ok(()));
                        }
                        Err(e) => {
                            // `read_from` clears the buffer on failure.
                            assert!(buf.is_empty());

                            self.state = State::Idle(Some(buf));
                            return Poll::Ready(Err(e));
                        }
                    }
                }
            }
        }
    }
}
impl<T> AsyncWrite for Blocking<T>
where
    T: Write + Unpin + Send + 'static,
{
    /// Copies `src` into the buffer and spawns the actual `write_all` on the
    /// blocking pool; the copied length is reported immediately, so callers
    /// must flush to learn about write failures.
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        src: &[u8],
    ) -> Poll<io::Result<usize>> {
        loop {
            match self.state {
                State::Idle(ref mut buf_cell) => {
                    let mut buf = buf_cell.take().unwrap();

                    // The previous operation must have fully drained the
                    // buffer before it is reused.
                    assert!(buf.is_empty());

                    let n = buf.copy_from(src, DEFAULT_MAX_BUF_SIZE);
                    let mut inner = self.inner.take().unwrap();

                    self.state = State::Busy(sys::run(move || {
                        let n = buf.len();
                        // Report the buffered length on success.
                        let res = buf.write_to(&mut inner).map(|()| n);

                        (res, buf, inner)
                    }));
                    // A write is now outstanding; flush must push it down.
                    self.need_flush = true;

                    return Poll::Ready(Ok(n));
                }
                State::Busy(ref mut rx) => {
                    let (res, buf, inner) = ready!(Pin::new(rx).poll(cx))?;
                    self.state = State::Idle(Some(buf));
                    self.inner = Some(inner);

                    // If error, return
                    res?;
                }
            }
        }
    }

    /// Flushes the inner writer on the blocking pool when a prior write set
    /// `need_flush`; otherwise completes immediately.
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        loop {
            let need_flush = self.need_flush;
            match self.state {
                // The buffer is not used here
                State::Idle(ref mut buf_cell) => {
                    if need_flush {
                        let buf = buf_cell.take().unwrap();
                        let mut inner = self.inner.take().unwrap();

                        self.state = State::Busy(sys::run(move || {
                            // Flush acts on the inner writer directly; the
                            // buffer is only carried through the state tuple.
                            let res = inner.flush().map(|()| 0);
                            (res, buf, inner)
                        }));
                        self.need_flush = false;
                    } else {
                        return Poll::Ready(Ok(()));
                    }
                }
                State::Busy(ref mut rx) => {
                    let (res, buf, inner) = ready!(Pin::new(rx).poll(cx))?;
                    self.state = State::Idle(Some(buf));
                    self.inner = Some(inner);

                    // If error, return
                    res?;
                }
            }
        }
    }

    /// No shutdown protocol for plain blocking writers; always ready.
    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Poll::Ready(Ok(()))
    }
}
/// Repeats operations that are interrupted.
///
/// Retries `$e` as long as it fails with `ErrorKind::Interrupted` (EINTR);
/// evaluates to the first non-interrupted result.
macro_rules! uninterruptibly {
    ($e:expr) => {{
        loop {
            match $e {
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
                res => break res,
            }
        }
    }};
}
impl Buf {
    /// Creates an empty buffer with `n` bytes of capacity.
    pub(crate) fn with_capacity(n: usize) -> Buf {
        Buf {
            buf: Vec::with_capacity(n),
            pos: 0,
        }
    }

    /// Returns `true` when every buffered byte has been consumed.
    pub(crate) fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Number of unconsumed bytes remaining in the buffer.
    pub(crate) fn len(&self) -> usize {
        self.buf.len() - self.pos
    }

    /// Copies as many unconsumed bytes as fit into `dst`, advancing the
    /// cursor, and returns the number copied. Resets the buffer (keeping its
    /// capacity) once fully consumed.
    pub(crate) fn copy_to(&mut self, dst: &mut ReadBuf<'_>) -> usize {
        let n = cmp::min(self.len(), dst.remaining());
        dst.put_slice(&self.bytes()[..n]);
        self.pos += n;

        if self.pos == self.buf.len() {
            self.buf.truncate(0);
            self.pos = 0;
        }

        n
    }

    /// Fills the (empty) buffer with up to `max_buf_size` bytes from `src`,
    /// returning the number of bytes copied.
    pub(crate) fn copy_from(&mut self, src: &[u8], max_buf_size: usize) -> usize {
        assert!(self.is_empty());

        let n = cmp::min(src.len(), max_buf_size);

        self.buf.extend_from_slice(&src[..n]);
        n
    }

    /// Unconsumed bytes currently held by the buffer.
    pub(crate) fn bytes(&self) -> &[u8] {
        &self.buf[self.pos..]
    }

    /// Reads up to `max_buf_size` bytes from `rd` into the (empty) buffer,
    /// returning the raw `read` result. The buffer is cleared on error.
    ///
    /// # Safety
    ///
    /// `rd` must not read from the buffer `read` is borrowing and must correctly
    /// report the length of the data written into the buffer.
    pub(crate) unsafe fn read_from<T: Read>(
        &mut self,
        rd: &mut T,
        max_buf_size: usize,
    ) -> io::Result<usize> {
        assert!(self.is_empty());
        self.buf.reserve(max_buf_size);

        let buf = &mut self.buf.spare_capacity_mut()[..max_buf_size];
        // SAFETY: The memory may be uninitialized, but `rd.read` will only write to the buffer.
        let buf = unsafe { &mut *(buf as *mut [MaybeUninit<u8>] as *mut [u8]) };
        let res = uninterruptibly!(rd.read(buf));

        if let Ok(n) = res {
            // SAFETY: the caller promises that `rd.read` initializes
            // a section of `buf` and correctly reports that length.
            // The `self.is_empty()` assertion verifies that `n`
            // equals the length of the `buf` capacity that was written
            // to (and that `buf` isn't being shrunk).
            unsafe { self.buf.set_len(n) }
        } else {
            self.buf.clear();
        }

        assert_eq!(self.pos, 0);

        res
    }

    /// Writes the entire buffer to `wr`, clearing it afterwards whether or
    /// not the write succeeded.
    pub(crate) fn write_to<T: Write>(&mut self, wr: &mut T) -> io::Result<()> {
        assert_eq!(self.pos, 0);

        // `write_all` already ignores interrupts
        let res = wr.write_all(&self.buf);
        self.buf.clear();
        res
    }
}
cfg_fs! {
    impl Buf {
        /// Drops all unconsumed bytes and returns the negative of their
        /// count. NOTE(review): the negative offset is presumably used by the
        /// fs caller to rewind the underlying seek position — confirm against
        /// the call sites in `fs`.
        pub(crate) fn discard_read(&mut self) -> i64 {
            let ret = -(self.bytes().len() as i64);
            self.pos = 0;
            self.buf.truncate(0);
            ret
        }

        /// Fills the (empty) buffer from a sequence of IO slices, copying at
        /// most `max_buf_size` bytes in total; returns the number copied.
        pub(crate) fn copy_from_bufs(&mut self, bufs: &[io::IoSlice<'_>], max_buf_size: usize) -> usize {
            assert!(self.is_empty());

            let mut rem = max_buf_size;
            for buf in bufs {
                if rem == 0 {
                    break
                }

                let len = buf.len().min(rem);
                self.buf.extend_from_slice(&buf[..len]);
                rem -= len;
            }

            max_buf_size - rem
        }
    }
}

197
vendor/tokio/src/io/bsd/poll_aio.rs vendored Normal file
View File

@@ -0,0 +1,197 @@
//! Use POSIX AIO futures with Tokio.
use crate::io::interest::Interest;
use crate::runtime::io::{ReadyEvent, Registration};
use crate::runtime::scheduler;
use mio::event::Source;
use mio::Registry;
use mio::Token;
use std::fmt;
use std::io;
use std::ops::{Deref, DerefMut};
use std::os::unix::io::AsRawFd;
use std::os::unix::prelude::RawFd;
use std::task::{ready, Context, Poll};
/// Like [`mio::event::Source`], but for POSIX AIO only.
///
/// Tokio's consumer must pass an implementor of this trait to create a
/// [`Aio`] object.
pub trait AioSource {
    /// Registers this AIO event source with Tokio's reactor.
    ///
    /// `kq` is the raw file descriptor of the reactor's kqueue; `token`
    /// identifies this source in readiness events.
    fn register(&mut self, kq: RawFd, token: usize);

    /// Deregisters this AIO event source with Tokio's reactor.
    fn deregister(&mut self);
}

/// Wraps the user's AioSource in order to implement mio::event::Source, which
/// is what the rest of the crate wants.
struct MioSource<T>(T);
impl<T: AioSource> Source for MioSource<T> {
fn register(
&mut self,
registry: &Registry,
token: Token,
interests: mio::Interest,
) -> io::Result<()> {
assert!(interests.is_aio() || interests.is_lio());
self.0.register(registry.as_raw_fd(), usize::from(token));
Ok(())
}
fn deregister(&mut self, _registry: &Registry) -> io::Result<()> {
self.0.deregister();
Ok(())
}
fn reregister(
&mut self,
registry: &Registry,
token: Token,
interests: mio::Interest,
) -> io::Result<()> {
assert!(interests.is_aio() || interests.is_lio());
self.0.register(registry.as_raw_fd(), usize::from(token));
Ok(())
}
}
/// Associates a POSIX AIO control block with the reactor that drives it.
///
/// `Aio`'s wrapped type must implement [`AioSource`] to be driven
/// by the reactor.
///
/// The wrapped source may be accessed through the `Aio` via the `Deref` and
/// `DerefMut` traits.
///
/// ## Clearing readiness
///
/// If [`Aio::poll_ready`] returns ready, but the consumer determines that the
/// Source is not completely ready and must return to the Pending state,
/// [`Aio::clear_ready`] may be used. This can be useful with
/// [`lio_listio`], which may generate a kevent when only a portion of the
/// operations have completed.
///
/// ## Platforms
///
/// Only FreeBSD implements POSIX AIO with kqueue notification, so
/// `Aio` is only available for that operating system.
///
/// [`lio_listio`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html
// Note: Unlike every other kqueue event source, POSIX AIO registers events not
// via kevent(2) but when the aiocb is submitted to the kernel via aio_read,
// aio_write, etc. It needs the kqueue's file descriptor to do that. So
// AsyncFd can't be used for POSIX AIO.
//
// Note that Aio doesn't implement Drop. There's no need. Unlike other
// kqueue sources, simply dropping the object effectively deregisters it.
pub struct Aio<E> {
    /// The user's AIO source, wrapped so it satisfies `mio::event::Source`.
    io: MioSource<E>,
    /// Reactor registration that drives readiness for `io`.
    registration: Registration,
}
// ===== impl Aio =====

impl<E: AioSource> Aio<E> {
    /// Creates a new `Aio` suitable for use with POSIX AIO functions.
    ///
    /// It will be associated with the default reactor. The runtime is usually
    /// set implicitly when this function is called from a future driven by a
    /// Tokio runtime, otherwise runtime can be set explicitly with
    /// [`Runtime::enter`](crate::runtime::Runtime::enter) function.
    pub fn new_for_aio(io: E) -> io::Result<Self> {
        Self::new_with_interest(io, Interest::AIO)
    }

    /// Creates a new `Aio` suitable for use with [`lio_listio`].
    ///
    /// It will be associated with the default reactor. The runtime is usually
    /// set implicitly when this function is called from a future driven by a
    /// Tokio runtime, otherwise runtime can be set explicitly with
    /// [`Runtime::enter`](crate::runtime::Runtime::enter) function.
    ///
    /// [`lio_listio`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html
    pub fn new_for_lio(io: E) -> io::Result<Self> {
        Self::new_with_interest(io, Interest::LIO)
    }

    /// Wraps `io` and registers it with the current runtime's I/O driver
    /// under the given interest.
    fn new_with_interest(io: E, interest: Interest) -> io::Result<Self> {
        let mut io = MioSource(io);
        let handle = scheduler::Handle::current();
        let registration = Registration::new_with_interest_and_handle(&mut io, interest, handle)?;
        Ok(Self { io, registration })
    }

    /// Indicates to Tokio that the source is no longer ready. The internal
    /// readiness flag will be cleared, and tokio will wait for the next
    /// edge-triggered readiness notification from the OS.
    ///
    /// It is critical that this method not be called unless your code
    /// _actually observes_ that the source is _not_ ready. The OS must
    /// deliver a subsequent notification, or this source will block
    /// forever. It is equally critical that you *do* call this method if you
    /// resubmit the same structure to the kernel and poll it again.
    ///
    /// This method is not very useful with AIO readiness, since each `aiocb`
    /// structure is typically only used once. Its main use is with
    /// [`lio_listio`], which will sometimes send notification when only a
    /// portion of its elements are complete. In that case, the caller must
    /// call `clear_ready` before resubmitting it.
    ///
    /// [`lio_listio`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html
    pub fn clear_ready(&self, ev: AioEvent) {
        self.registration.clear_readiness(ev.0)
    }

    /// Destroy the [`Aio`] and return its inner source.
    pub fn into_inner(self) -> E {
        self.io.0
    }

    /// Polls for readiness. Either AIO or LIO counts.
    ///
    /// This method returns:
    /// * `Poll::Pending` if the underlying operation is not complete, whether
    ///   or not it completed successfully. This will be true if the OS is
    ///   still processing it, or if it has not yet been submitted to the OS.
    /// * `Poll::Ready(Ok(_))` if the underlying operation is complete.
    /// * `Poll::Ready(Err(_))` if the reactor has been shutdown. This does
    ///   _not_ indicate that the underlying operation encountered an error.
    ///
    /// When the method returns `Poll::Pending`, the `Waker` in the provided `Context`
    /// is scheduled to receive a wakeup when the underlying operation
    /// completes. Note that on multiple calls to `poll_ready`, only the `Waker` from the
    /// `Context` passed to the most recent call is scheduled to receive a wakeup.
    pub fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<AioEvent>> {
        let ev = ready!(self.registration.poll_read_ready(cx))?;
        Poll::Ready(Ok(AioEvent(ev)))
    }
}
impl<E: AioSource> Deref for Aio<E> {
type Target = E;
fn deref(&self) -> &E {
&self.io.0
}
}
impl<E: AioSource> DerefMut for Aio<E> {
fn deref_mut(&mut self) -> &mut E {
&mut self.io.0
}
}
impl<E: AioSource + fmt::Debug> fmt::Debug for Aio<E> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Only the user's source is shown; `registration` is omitted.
        let mut dbg = f.debug_struct("Aio");
        dbg.field("io", &self.io.0);
        dbg.finish()
    }
}
/// Opaque data returned by [`Aio::poll_ready`].
///
/// It can be fed back to [`Aio::clear_ready`].
///
// Wraps the reactor's `ReadyEvent` so consumers never see reactor internals.
#[derive(Debug)]
pub struct AioEvent(ReadyEvent);

345
vendor/tokio/src/io/interest.rs vendored Normal file
View File

@@ -0,0 +1,345 @@
#![cfg_attr(not(feature = "net"), allow(dead_code, unreachable_pub))]
use crate::io::ready::Ready;
use std::fmt;
use std::ops;
// Bit flags backing `Interest`.
// These must be unique.
// same as mio
const READABLE: usize = 0b0001;
const WRITABLE: usize = 0b0010;

// The following are not available on all platforms.
#[cfg(target_os = "freebsd")]
const AIO: usize = 0b0100;
#[cfg(target_os = "freebsd")]
const LIO: usize = 0b1000;
#[cfg(any(target_os = "linux", target_os = "android"))]
const PRIORITY: usize = 0b0001_0000;

// error is available on all platforms, but behavior is platform-specific
// mio does not have this interest
const ERROR: usize = 0b0010_0000;
/// Readiness event interest.
///
/// Specifies the readiness events the caller is interested in when awaiting on
/// I/O resource readiness states.
///
/// Internally a bit set built from the flag constants above.
#[cfg_attr(docsrs, doc(cfg(feature = "net")))]
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct Interest(usize);
impl Interest {
    // The non-FreeBSD definitions in this block are active only when
    // building documentation.
    cfg_aio! {
        /// Interest for POSIX AIO.
        #[cfg(target_os = "freebsd")]
        pub const AIO: Interest = Interest(AIO);

        /// Interest for POSIX AIO.
        #[cfg(not(target_os = "freebsd"))]
        pub const AIO: Interest = Interest(READABLE);

        /// Interest for POSIX AIO `lio_listio` events.
        #[cfg(target_os = "freebsd")]
        pub const LIO: Interest = Interest(LIO);

        /// Interest for POSIX AIO `lio_listio` events.
        #[cfg(not(target_os = "freebsd"))]
        pub const LIO: Interest = Interest(READABLE);
    }

    /// Interest in all readable events.
    ///
    /// Readable interest includes read-closed events.
    pub const READABLE: Interest = Interest(READABLE);

    /// Interest in all writable events.
    ///
    /// Writable interest includes write-closed events.
    pub const WRITABLE: Interest = Interest(WRITABLE);

    /// Interest in error events.
    ///
    /// Passes error interest to the underlying OS selector.
    /// Behavior is platform-specific, read your platform's documentation.
    pub const ERROR: Interest = Interest(ERROR);

    /// Returns a `Interest` set representing priority completion interests.
    #[cfg(any(target_os = "linux", target_os = "android"))]
    #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))]
    pub const PRIORITY: Interest = Interest(PRIORITY);

    /// Returns true if the value includes readable interest.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::Interest;
    ///
    /// assert!(Interest::READABLE.is_readable());
    /// assert!(!Interest::WRITABLE.is_readable());
    ///
    /// let both = Interest::READABLE | Interest::WRITABLE;
    /// assert!(both.is_readable());
    /// ```
    pub const fn is_readable(self) -> bool {
        self.0 & READABLE != 0
    }

    /// Returns true if the value includes writable interest.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::Interest;
    ///
    /// assert!(!Interest::READABLE.is_writable());
    /// assert!(Interest::WRITABLE.is_writable());
    ///
    /// let both = Interest::READABLE | Interest::WRITABLE;
    /// assert!(both.is_writable());
    /// ```
    pub const fn is_writable(self) -> bool {
        self.0 & WRITABLE != 0
    }

    /// Returns true if the value includes error interest.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::Interest;
    ///
    /// assert!(Interest::ERROR.is_error());
    /// assert!(!Interest::WRITABLE.is_error());
    ///
    /// let combined = Interest::READABLE | Interest::ERROR;
    /// assert!(combined.is_error());
    /// ```
    pub const fn is_error(self) -> bool {
        self.0 & ERROR != 0
    }

    /// Returns true if the value includes POSIX AIO interest (FreeBSD only).
    #[cfg(target_os = "freebsd")]
    const fn is_aio(self) -> bool {
        self.0 & AIO != 0
    }

    /// Returns true if the value includes `lio_listio` interest (FreeBSD only).
    #[cfg(target_os = "freebsd")]
    const fn is_lio(self) -> bool {
        self.0 & LIO != 0
    }

    /// Returns true if the value includes priority interest.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::Interest;
    ///
    /// assert!(!Interest::READABLE.is_priority());
    /// assert!(Interest::PRIORITY.is_priority());
    ///
    /// let both = Interest::READABLE | Interest::PRIORITY;
    /// assert!(both.is_priority());
    /// ```
    #[cfg(any(target_os = "linux", target_os = "android"))]
    #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))]
    pub const fn is_priority(self) -> bool {
        self.0 & PRIORITY != 0
    }

    /// Add together two `Interest` values.
    ///
    /// This function works from a `const` context.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::Interest;
    ///
    /// const BOTH: Interest = Interest::READABLE.add(Interest::WRITABLE);
    ///
    /// assert!(BOTH.is_readable());
    /// assert!(BOTH.is_writable());
    /// ```
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn add(self, other: Interest) -> Interest {
        Self(self.0 | other.0)
    }

    /// Remove `Interest` from `self`.
    ///
    /// Interests present in `other` but *not* in `self` are ignored.
    ///
    /// Returns `None` if the set would be empty after removing `Interest`.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::Interest;
    ///
    /// const RW_INTEREST: Interest = Interest::READABLE.add(Interest::WRITABLE);
    ///
    /// let w_interest = RW_INTEREST.remove(Interest::READABLE).unwrap();
    /// assert!(!w_interest.is_readable());
    /// assert!(w_interest.is_writable());
    ///
    /// // Removing all interests from the set returns `None`.
    /// assert_eq!(w_interest.remove(Interest::WRITABLE), None);
    ///
    /// // Remove all interests at once.
    /// assert_eq!(RW_INTEREST.remove(RW_INTEREST), None);
    /// ```
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub fn remove(self, other: Interest) -> Option<Interest> {
        let value = self.0 & !other.0;

        if value != 0 {
            Some(Self(value))
        } else {
            None
        }
    }

    // This function must be crate-private to avoid exposing a `mio` dependency.
    pub(crate) fn to_mio(self) -> mio::Interest {
        // Accumulates mio interests, starting from "none selected yet".
        fn mio_add(wrapped: &mut Option<mio::Interest>, add: mio::Interest) {
            match wrapped {
                Some(inner) => *inner |= add,
                None => *wrapped = Some(add),
            }
        }

        // mio does not allow an empty interest, so use None for empty
        let mut mio = None;

        if self.is_readable() {
            mio_add(&mut mio, mio::Interest::READABLE);
        }

        if self.is_writable() {
            mio_add(&mut mio, mio::Interest::WRITABLE);
        }

        #[cfg(any(target_os = "linux", target_os = "android"))]
        if self.is_priority() {
            mio_add(&mut mio, mio::Interest::PRIORITY);
        }

        #[cfg(target_os = "freebsd")]
        if self.is_aio() {
            mio_add(&mut mio, mio::Interest::AIO);
        }

        #[cfg(target_os = "freebsd")]
        if self.is_lio() {
            mio_add(&mut mio, mio::Interest::LIO);
        }

        if self.is_error() {
            // There is no error interest in mio, because error events are always reported.
            // But mio interests cannot be empty and an interest is needed just for the registration.
            //
            // read readiness is filtered out in `Interest::mask` or `Ready::from_interest` if
            // the read interest was not specified by the user.
            mio_add(&mut mio, mio::Interest::READABLE);
        }

        // the default `mio::Interest::READABLE` should never be used in practice. Either
        //
        // - at least one tokio interest with a mio counterpart was used
        // - only the error tokio interest was specified
        //
        // in both cases, `mio` is Some already
        mio.unwrap_or(mio::Interest::READABLE)
    }

    /// Maps a *single* interest to the `Ready` bits that may be reported for it.
    pub(crate) fn mask(self) -> Ready {
        match self {
            Interest::READABLE => Ready::READABLE | Ready::READ_CLOSED,
            Interest::WRITABLE => Ready::WRITABLE | Ready::WRITE_CLOSED,
            #[cfg(any(target_os = "linux", target_os = "android"))]
            Interest::PRIORITY => Ready::PRIORITY | Ready::READ_CLOSED,
            Interest::ERROR => Ready::ERROR,
            // NOTE(review): combined interest sets fall through to an empty
            // mask — presumably handled elsewhere (e.g. `Ready::from_interest`);
            // confirm before relying on `mask` for combined sets.
            _ => Ready::EMPTY,
        }
    }
}
impl ops::BitOr for Interest {
type Output = Self;
#[inline]
fn bitor(self, other: Self) -> Self {
self.add(other)
}
}
impl ops::BitOrAssign for Interest {
    /// In-place union of two interest sets.
    #[inline]
    fn bitor_assign(&mut self, other: Self) {
        self.0 |= other.0;
    }
}
impl fmt::Debug for Interest {
    /// Formats the set as `A | B | …` in a fixed order; an empty set
    /// produces no output. Identical to emitting each flag manually with
    /// `" | "` separators between present flags.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut flags = Vec::new();

        if self.is_readable() {
            flags.push("READABLE");
        }
        if self.is_writable() {
            flags.push("WRITABLE");
        }
        #[cfg(any(target_os = "linux", target_os = "android"))]
        if self.is_priority() {
            flags.push("PRIORITY");
        }
        #[cfg(target_os = "freebsd")]
        if self.is_aio() {
            flags.push("AIO");
        }
        #[cfg(target_os = "freebsd")]
        if self.is_lio() {
            flags.push("LIO");
        }
        if self.is_error() {
            flags.push("ERROR");
        }

        write!(fmt, "{}", flags.join(" | "))
    }
}

130
vendor/tokio/src/io/join.rs vendored Normal file
View File

@@ -0,0 +1,130 @@
//! Join two values implementing `AsyncRead` and `AsyncWrite` into a single one.
use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite, ReadBuf};
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Join two values implementing `AsyncRead` and `AsyncWrite` into a
/// single handle.
///
/// Reads are delegated to `reader` and writes to `writer`.
pub fn join<R, W>(reader: R, writer: W) -> Join<R, W>
where
    R: AsyncRead,
    W: AsyncWrite,
{
    Join { reader, writer }
}
pin_project_lite::pin_project! {
    /// Joins two values implementing `AsyncRead` and `AsyncWrite` into a
    /// single handle.
    ///
    /// Created by the [`join`] function.
    #[derive(Debug)]
    pub struct Join<R, W> {
        // Read half; structurally pinned.
        #[pin]
        reader: R,
        // Write half; structurally pinned.
        #[pin]
        writer: W,
    }
}
impl<R, W> Join<R, W>
where
    R: AsyncRead,
    W: AsyncWrite,
{
    /// Splits this `Join` back into its `AsyncRead` and `AsyncWrite`
    /// components.
    pub fn into_inner(self) -> (R, W) {
        (self.reader, self.writer)
    }

    /// Returns a reference to the inner reader.
    pub fn reader(&self) -> &R {
        &self.reader
    }

    /// Returns a reference to the inner writer.
    pub fn writer(&self) -> &W {
        &self.writer
    }

    /// Returns a mutable reference to the inner reader.
    pub fn reader_mut(&mut self) -> &mut R {
        &mut self.reader
    }

    /// Returns a mutable reference to the inner writer.
    pub fn writer_mut(&mut self) -> &mut W {
        &mut self.writer
    }

    /// Returns a pinned mutable reference to the inner reader.
    pub fn reader_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> {
        self.project().reader
    }

    /// Returns a pinned mutable reference to the inner writer.
    pub fn writer_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> {
        self.project().writer
    }
}
impl<R, W> AsyncRead for Join<R, W>
where
    R: AsyncRead,
{
    /// Reads are served exclusively by the `reader` half.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<Result<(), io::Error>> {
        let me = self.project();
        me.reader.poll_read(cx, buf)
    }
}
impl<R, W> AsyncWrite for Join<R, W>
where
    W: AsyncWrite,
{
    /// All write-side operations are forwarded verbatim to the `writer` half.
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        let me = self.project();
        me.writer.poll_write(cx, buf)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        let me = self.project();
        me.writer.poll_flush(cx)
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        let me = self.project();
        me.writer.poll_shutdown(cx)
    }

    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> Poll<Result<usize, io::Error>> {
        let me = self.project();
        me.writer.poll_write_vectored(cx, bufs)
    }

    fn is_write_vectored(&self) -> bool {
        // A shared borrow needs no pin projection.
        self.writer.is_write_vectored()
    }
}
impl<R, W> AsyncBufRead for Join<R, W>
where
    R: AsyncBufRead,
{
    /// Buffered-read operations are forwarded to the `reader` half.
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        let me = self.project();
        me.reader.poll_fill_buf(cx)
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        let me = self.project();
        me.reader.consume(amt)
    }
}

308
vendor/tokio/src/io/mod.rs vendored Normal file
View File

@@ -0,0 +1,308 @@
//! Traits, helpers, and type definitions for asynchronous I/O functionality.
//!
//! This module is the asynchronous version of `std::io`. Primarily, it
//! defines two traits, [`AsyncRead`] and [`AsyncWrite`], which are asynchronous
//! versions of the [`Read`] and [`Write`] traits in the standard library.
//!
//! # `AsyncRead` and `AsyncWrite`
//!
//! Like the standard library's [`Read`] and [`Write`] traits, [`AsyncRead`] and
//! [`AsyncWrite`] provide the most general interface for reading and writing
//! input and output. Unlike the standard library's traits, however, they are
//! _asynchronous_ &mdash; meaning that reading from or writing to a `tokio::io`
//! type will _yield_ to the Tokio scheduler when IO is not ready, rather than
//! blocking. This allows other tasks to run while waiting on IO.
//!
//! Another difference is that `AsyncRead` and `AsyncWrite` only contain
//! core methods needed to provide asynchronous reading and writing
//! functionality. Instead, utility methods are defined in the [`AsyncReadExt`]
//! and [`AsyncWriteExt`] extension traits. These traits are automatically
//! implemented for all values that implement `AsyncRead` and `AsyncWrite`
//! respectively.
//!
//! End users will rarely interact directly with `AsyncRead` and
//! `AsyncWrite`. Instead, they will use the async functions defined in the
//! extension traits. Library authors are expected to implement `AsyncRead`
//! and `AsyncWrite` in order to provide types that behave like byte streams.
//!
//! Even with these differences, Tokio's `AsyncRead` and `AsyncWrite` traits
//! can be used in almost exactly the same manner as the standard library's
//! `Read` and `Write`. Most types in the standard library that implement `Read`
//! and `Write` have asynchronous equivalents in `tokio` that implement
//! `AsyncRead` and `AsyncWrite`, such as [`File`] and [`TcpStream`].
//!
//! For example, the standard library documentation introduces `Read` by
//! [demonstrating][std_example] reading some bytes from a [`std::fs::File`]. We
//! can do the same with [`tokio::fs::File`][`File`]:
//!
//! ```no_run
//! # #[cfg(not(target_family = "wasm"))]
//! # {
//! use tokio::io::{self, AsyncReadExt};
//! use tokio::fs::File;
//!
//! #[tokio::main]
//! async fn main() -> io::Result<()> {
//! let mut f = File::open("foo.txt").await?;
//! let mut buffer = [0; 10];
//!
//! // read up to 10 bytes
//! let n = f.read(&mut buffer).await?;
//!
//! println!("The bytes: {:?}", &buffer[..n]);
//! Ok(())
//! }
//! # }
//! ```
//!
//! [`File`]: crate::fs::File
//! [`TcpStream`]: crate::net::TcpStream
//! [`std::fs::File`]: std::fs::File
//! [std_example]: std::io#read-and-write
//!
//! ## Buffered Readers and Writers
//!
//! Byte-based interfaces are unwieldy and can be inefficient, as we'd need to be
//! making near-constant calls to the operating system. To help with this,
//! `std::io` comes with [support for _buffered_ readers and writers][stdbuf],
//! and therefore, `tokio::io` does as well.
//!
//! Tokio provides an async version of the [`std::io::BufRead`] trait,
//! [`AsyncBufRead`]; and async [`BufReader`] and [`BufWriter`] structs, which
//! wrap readers and writers. These wrappers use a buffer, reducing the number
//! of calls and providing nicer methods for accessing exactly what you want.
//!
//! For example, [`BufReader`] works with the [`AsyncBufRead`] trait to add
//! extra methods to any async reader:
//!
//! ```no_run
//! # #[cfg(not(target_family = "wasm"))]
//! # {
//! use tokio::io::{self, BufReader, AsyncBufReadExt};
//! use tokio::fs::File;
//!
//! #[tokio::main]
//! async fn main() -> io::Result<()> {
//! let f = File::open("foo.txt").await?;
//! let mut reader = BufReader::new(f);
//! let mut buffer = String::new();
//!
//! // read a line into buffer
//! reader.read_line(&mut buffer).await?;
//!
//! println!("{}", buffer);
//! Ok(())
//! }
//! # }
//! ```
//!
//! [`BufWriter`] doesn't add any new ways of writing; it just buffers every call
//! to [`write`](crate::io::AsyncWriteExt::write). However, you **must** flush
//! [`BufWriter`] to ensure that any buffered data is written.
//!
//! ```no_run
//! # #[cfg(not(target_family = "wasm"))]
//! # {
//! use tokio::io::{self, BufWriter, AsyncWriteExt};
//! use tokio::fs::File;
//!
//! #[tokio::main]
//! async fn main() -> io::Result<()> {
//! let f = File::create("foo.txt").await?;
//! {
//! let mut writer = BufWriter::new(f);
//!
//! // Write a byte to the buffer.
//! writer.write(&[42u8]).await?;
//!
//! // Flush the buffer before it goes out of scope.
//! writer.flush().await?;
//!
//! } // Unless flushed or shut down, the contents of the buffer are discarded on drop.
//!
//! Ok(())
//! }
//! # }
//! ```
//!
//! [stdbuf]: std::io#bufreader-and-bufwriter
//! [`std::io::BufRead`]: std::io::BufRead
//! [`AsyncBufRead`]: crate::io::AsyncBufRead
//! [`BufReader`]: crate::io::BufReader
//! [`BufWriter`]: crate::io::BufWriter
//!
//! ## Implementing `AsyncRead` and `AsyncWrite`
//!
//! Because they are traits, we can implement [`AsyncRead`] and [`AsyncWrite`] for
//! our own types, as well. Note that these traits must only be implemented for
//! non-blocking I/O types that integrate with the futures type system. In
//! other words, these types must never block the thread, and instead the
//! current task is notified when the I/O resource is ready.
//!
//! ## Conversion to and from Stream/Sink
//!
//! It is often convenient to encapsulate the reading and writing of bytes in a
//! [`Stream`] or [`Sink`] of data.
//!
//! Tokio provides simple wrappers for converting [`AsyncRead`] to [`Stream`]
//! and vice-versa in the [tokio-util] crate, see [`ReaderStream`] and
//! [`StreamReader`].
//!
//! There are also utility traits that abstract the asynchronous buffering
//! necessary to write your own adaptors for encoding and decoding bytes to/from
//! your structured data, allowing to transform something that implements
//! [`AsyncRead`]/[`AsyncWrite`] into a [`Stream`]/[`Sink`], see [`Decoder`] and
//! [`Encoder`] in the [tokio-util::codec] module.
//!
//! [tokio-util]: https://docs.rs/tokio-util
//! [tokio-util::codec]: https://docs.rs/tokio-util/latest/tokio_util/codec/index.html
//!
//! # Standard input and output
//!
//! Tokio provides asynchronous APIs to standard [input], [output], and [error].
//! These APIs are very similar to the ones provided by `std`, but they also
//! implement [`AsyncRead`] and [`AsyncWrite`].
//!
//! Note that the standard input / output APIs **must** be used from the
//! context of the Tokio runtime, as they require Tokio-specific features to
//! function. Calling these functions outside of a Tokio runtime will panic.
//!
//! [input]: fn@stdin
//! [output]: fn@stdout
//! [error]: fn@stderr
//!
//! # `std` re-exports
//!
//! Additionally, [`Error`], [`ErrorKind`], [`Result`], and [`SeekFrom`] are
//! re-exported from `std::io` for ease of use.
//!
//! [`AsyncRead`]: trait@AsyncRead
//! [`AsyncWrite`]: trait@AsyncWrite
//! [`AsyncReadExt`]: trait@AsyncReadExt
//! [`AsyncWriteExt`]: trait@AsyncWriteExt
//! ["codec"]: https://docs.rs/tokio-util/latest/tokio_util/codec/index.html
//! [`Encoder`]: https://docs.rs/tokio-util/latest/tokio_util/codec/trait.Encoder.html
//! [`Decoder`]: https://docs.rs/tokio-util/latest/tokio_util/codec/trait.Decoder.html
//! [`ReaderStream`]: https://docs.rs/tokio-util/latest/tokio_util/io/struct.ReaderStream.html
//! [`StreamReader`]: https://docs.rs/tokio-util/latest/tokio_util/io/struct.StreamReader.html
//! [`Error`]: struct@Error
//! [`ErrorKind`]: enum@ErrorKind
//! [`Result`]: type@Result
//! [`Read`]: std::io::Read
//! [`SeekFrom`]: enum@SeekFrom
//! [`Sink`]: https://docs.rs/futures/0.3/futures/sink/trait.Sink.html
//! [`Stream`]: https://docs.rs/futures/0.3/futures/stream/trait.Stream.html
//! [`Write`]: std::io::Write
// Silence dead-code/unused-import warnings for feature combinations that
// compile only a subset of the items declared below.
#![cfg_attr(
    not(all(feature = "rt", feature = "net")),
    allow(dead_code, unused_imports)
)]

// Support for running blocking I/O on the runtime; re-exported as `sys` below.
cfg_io_blocking! {
    pub(crate) mod blocking;
}

// Core async I/O traits and the `ReadBuf` cursor type.
mod async_buf_read;
pub use self::async_buf_read::AsyncBufRead;

mod async_read;
pub use self::async_read::AsyncRead;

mod async_seek;
pub use self::async_seek::AsyncSeek;

mod async_write;
pub use self::async_write::AsyncWrite;

mod read_buf;
pub use self::read_buf::ReadBuf;

// Re-export some types from `std::io` so that users don't have to deal
// with conflicts when `use`ing `tokio::io` and `std::io`.
#[doc(no_inline)]
pub use std::io::{Error, ErrorKind, Result, SeekFrom};

// Internals backing the I/O driver integration (`Interest`, `Ready`,
// `PollEvented`).
cfg_io_driver_impl! {
    pub(crate) mod interest;
    pub(crate) mod ready;

    cfg_net_or_uring! {
        pub use interest::Interest;
        pub use ready::Ready;
    }

    #[cfg_attr(target_os = "wasi", allow(unused_imports))]
    mod poll_evented;

    #[cfg(not(loom))]
    #[cfg_attr(target_os = "wasi", allow(unused_imports))]
    pub(crate) use poll_evented::PollEvented;
}

// The bsd module can't be built on Windows, so we completely ignore it, even
// when building documentation.
#[cfg(unix)]
cfg_aio! {
    /// BSD-specific I/O types.
    pub mod bsd {
        mod poll_aio;

        pub use poll_aio::{Aio, AioEvent, AioSource};
    }
}

cfg_net_unix! {
    mod async_fd;

    pub mod unix {
        //! Asynchronous IO structures specific to Unix-like operating systems.
        pub use super::async_fd::{AsyncFd, AsyncFdTryNewError, AsyncFdReadyGuard, AsyncFdReadyMutGuard, TryIoError};
    }
}

// Async wrappers around the process's standard input/output/error streams.
cfg_io_std! {
    mod stdio_common;

    mod stderr;
    pub use stderr::{stderr, Stderr};

    mod stdin;
    pub use stdin::{stdin, Stdin};

    mod stdout;
    pub use stdout::{stdout, Stdout};
}

// Utility combinators and helper types (copy, buffering, splitting, ...).
cfg_io_util! {
    mod split;
    pub use split::{split, ReadHalf, WriteHalf};

    mod join;
    pub use join::{join, Join};

    pub(crate) mod seek;
    pub(crate) mod util;
    pub use util::{
        copy, copy_bidirectional, copy_bidirectional_with_sizes, copy_buf, duplex, empty, repeat, sink, simplex, AsyncBufReadExt, AsyncReadExt, AsyncSeekExt, AsyncWriteExt,
        BufReader, BufStream, BufWriter, Chain, DuplexStream, Empty, Lines, Repeat, Sink, Split, Take, SimplexStream,
    };
}

// `process` still needs a few helpers from `util` when `io-util` is disabled.
cfg_not_io_util! {
    cfg_process! {
        pub(crate) mod util;
    }
}

cfg_io_blocking! {
    /// Types in this module can be mocked out in tests.
    mod sys {
        // TODO: don't rename
        pub(crate) use crate::blocking::spawn_blocking as run;
        pub(crate) use crate::blocking::JoinHandle as Blocking;
    }
}

cfg_io_uring! {
    pub(crate) mod uring;
}

319
vendor/tokio/src/io/poll_evented.rs vendored Normal file
View File

@@ -0,0 +1,319 @@
use crate::io::interest::Interest;
use crate::runtime::io::Registration;
use crate::runtime::scheduler;
use mio::event::Source;
use std::fmt;
use std::io;
use std::ops::Deref;
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::task::ready;
cfg_io_driver! {
    /// Associates an I/O resource that implements the [`std::io::Read`] and/or
    /// [`std::io::Write`] traits with the reactor that drives it.
    ///
    /// `PollEvented` uses [`Registration`] internally to take a type that
    /// implements [`mio::event::Source`] as well as [`std::io::Read`] and/or
    /// [`std::io::Write`] and associate it with a reactor that will drive it.
    ///
    /// Once the [`mio::event::Source`] type is wrapped by `PollEvented`, it can be
    /// used from within the future's execution model. As such, the
    /// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`]
    /// implementations using the underlying I/O resource as well as readiness
    /// events provided by the reactor.
    ///
    /// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is
    /// `Sync`), the caller must ensure that there are at most two tasks that
    /// use a `PollEvented` instance concurrently. One for reading and one for
    /// writing. While violating this requirement is "safe" from a Rust memory
    /// model point of view, it will result in unexpected behavior in the form
    /// of lost notifications and tasks hanging.
    ///
    /// ## Readiness events
    ///
    /// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations,
    /// this type also supports access to the underlying readiness event stream.
    /// While similar in function to what [`Registration`] provides, the
    /// semantics are a bit different.
    ///
    /// Two functions are provided to access the readiness events:
    /// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the
    /// current readiness state of the `PollEvented` instance. If
    /// [`poll_read_ready`] indicates read readiness, immediately calling
    /// [`poll_read_ready`] again will also indicate read readiness.
    ///
    /// When the operation is attempted and is unable to succeed due to the I/O
    /// resource not being ready, the caller must call [`clear_readiness`].
    /// This clears the readiness state until a new readiness event is received.
    ///
    /// This allows the caller to implement additional functions. For example,
    /// [`TcpListener`] implements `poll_accept` by using [`poll_read_ready`] and
    /// [`clear_readiness`].
    ///
    /// ## Platform-specific events
    ///
    /// `PollEvented` also allows receiving platform-specific `mio::Ready` events.
    /// These events are included as part of the read readiness event stream. The
    /// write readiness event stream is only for `Ready::writable()` events.
    ///
    /// [`AsyncRead`]: crate::io::AsyncRead
    /// [`AsyncWrite`]: crate::io::AsyncWrite
    /// [`TcpListener`]: crate::net::TcpListener
    /// [`clear_readiness`]: Registration::clear_readiness
    /// [`poll_read_ready`]: Registration::poll_read_ready
    /// [`poll_write_ready`]: Registration::poll_write_ready
    pub(crate) struct PollEvented<E: Source> {
        // The wrapped I/O resource. `None` only after `into_inner` has taken
        // ownership; both `into_inner` and `Drop` use `Option::take` so the
        // resource is deregistered at most once.
        io: Option<E>,
        // The resource's registration with the runtime's I/O driver.
        registration: Registration,
    }
}
// ===== impl PollEvented =====
impl<E: Source> PollEvented<E> {
    /// Creates a new `PollEvented` associated with the default reactor.
    ///
    /// The returned `PollEvented` has readable and writable interests. For more control, use
    /// [`Self::new_with_interest`].
    ///
    /// # Panics
    ///
    /// This function panics if thread-local runtime is not set.
    ///
    /// The runtime is usually set implicitly when this function is called
    /// from a future driven by a tokio runtime, otherwise runtime can be set
    /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function.
    #[track_caller]
    #[cfg_attr(feature = "signal", allow(unused))]
    pub(crate) fn new(io: E) -> io::Result<Self> {
        PollEvented::new_with_interest(io, Interest::READABLE | Interest::WRITABLE)
    }

    /// Creates a new `PollEvented` associated with the default reactor, for
    /// specific `Interest` state. `new_with_interest` should be used over `new`
    /// when you need control over the readiness state, such as when a file
    /// descriptor only allows reads. This does not add `hup` or `error` so if
    /// you are interested in those states, you will need to add them to the
    /// readiness state passed to this function.
    ///
    /// # Panics
    ///
    /// This function panics if thread-local runtime is not set.
    ///
    /// The runtime is usually set implicitly when this function is called from
    /// a future driven by a tokio runtime, otherwise runtime can be set
    /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter)
    /// function.
    #[track_caller]
    #[cfg_attr(feature = "signal", allow(unused))]
    pub(crate) fn new_with_interest(io: E, interest: Interest) -> io::Result<Self> {
        Self::new_with_interest_and_handle(io, interest, scheduler::Handle::current())
    }

    /// Like [`Self::new_with_interest`], but registers with an explicitly
    /// provided scheduler handle instead of the current one.
    #[track_caller]
    pub(crate) fn new_with_interest_and_handle(
        mut io: E,
        interest: Interest,
        handle: scheduler::Handle,
    ) -> io::Result<Self> {
        // Register the resource with the I/O driver before wrapping it; a
        // registration failure propagates without leaking a registration.
        let registration = Registration::new_with_interest_and_handle(&mut io, interest, handle)?;
        Ok(Self {
            io: Some(io),
            registration,
        })
    }

    /// Returns a reference to the registration.
    #[cfg(any(feature = "net", all(feature = "process", target_os = "linux")))]
    pub(crate) fn registration(&self) -> &Registration {
        &self.registration
    }

    /// Deregisters the inner io from the registration and returns a Result containing the inner io.
    #[cfg(any(feature = "net", feature = "process"))]
    pub(crate) fn into_inner(mut self) -> io::Result<E> {
        // Taking `io` out of the `Option` also keeps `Drop` from attempting
        // a second deregistration when `self` goes out of scope.
        let mut inner = self.io.take().unwrap(); // As io shouldn't ever be None, just unwrap here.
        self.registration.deregister(&mut inner)?;
        Ok(inner)
    }

    /// Re-register under new runtime with `interest`.
    #[cfg(all(feature = "process", target_os = "linux"))]
    pub(crate) fn reregister(&mut self, interest: Interest) -> io::Result<()> {
        let io = self.io.as_mut().unwrap(); // As io shouldn't ever be None, just unwrap here.
        // A failed deregistration is deliberately ignored so that
        // registration with the current runtime can still proceed.
        let _ = self.registration.deregister(io);
        self.registration =
            Registration::new_with_interest_and_handle(io, interest, scheduler::Handle::current())?;
        Ok(())
    }
}
feature! {
    #![any(feature = "net", all(unix, feature = "process"))]

    use crate::io::ReadBuf;
    use std::task::{Context, Poll};

    impl<E: Source> PollEvented<E> {
        // Safety: The caller must ensure that `E` can read into uninitialized memory
        pub(crate) unsafe fn poll_read<'a>(
            &'a self,
            cx: &mut Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<io::Result<()>>
        where
            &'a E: io::Read + 'a,
        {
            use std::io::Read;

            // Readiness loop: wait for read readiness, attempt the read, and
            // retry whenever the attempt reports `WouldBlock`.
            loop {
                let evt = ready!(self.registration.poll_read_ready(cx))?;

                // Expose the unfilled (possibly uninitialized) tail of `buf`
                // as `&mut [u8]`; sound per this function's safety contract.
                let b = unsafe { &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit<u8>] as *mut [u8]) };

                // used only when the cfgs below apply
                #[allow(unused_variables)]
                let len = b.len();

                match self.io.as_ref().unwrap().read(b) {
                    Ok(n) => {
                        // When mio is using the epoll or kqueue selector, reading a partially full
                        // buffer is sufficient to show that the socket buffer has been drained.
                        //
                        // This optimization does not work for level-triggered selectors such as
                        // windows or when poll is used.
                        //
                        // Read more:
                        // https://github.com/tokio-rs/tokio/issues/5866
                        #[cfg(all(
                            // keep in sync with poll_write
                            not(mio_unsupported_force_poll_poll),
                            any(
                                // epoll
                                target_os = "android",
                                target_os = "illumos",
                                target_os = "linux",
                                target_os = "redox",
                                // kqueue
                                target_os = "dragonfly",
                                target_os = "freebsd",
                                target_os = "ios",
                                target_os = "macos",
                                target_os = "netbsd",
                                target_os = "openbsd",
                                target_os = "tvos",
                                target_os = "visionos",
                                target_os = "watchos",
                            )
                        ))]
                        if 0 < n && n < len {
                            self.registration.clear_readiness(evt);
                        }

                        // Safety: We trust `TcpStream::read` to have filled up `n` bytes in the
                        // buffer.
                        unsafe { buf.assume_init(n) };
                        buf.advance(n);
                        return Poll::Ready(Ok(()));
                    },
                    Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
                        // Spurious readiness: clear it and wait for the next event.
                        self.registration.clear_readiness(evt);
                    }
                    Err(e) => return Poll::Ready(Err(e)),
                }
            }
        }

        pub(crate) fn poll_write<'a>(&'a self, cx: &mut Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>>
        where
            &'a E: io::Write + 'a,
        {
            use std::io::Write;

            // Same readiness/retry loop as `poll_read`, for the write side.
            loop {
                let evt = ready!(self.registration.poll_write_ready(cx))?;

                match self.io.as_ref().unwrap().write(buf) {
                    Ok(n) => {
                        // if we write only part of our buffer, this is sufficient on unix to show
                        // that the socket buffer is full. Unfortunately this assumption
                        // fails for level-triggered selectors (like on Windows or poll even for
                        // UNIX): https://github.com/tokio-rs/tokio/issues/5866
                        #[cfg(all(
                            // keep in sync with poll_read
                            not(mio_unsupported_force_poll_poll),
                            any(
                                // epoll
                                target_os = "android",
                                target_os = "illumos",
                                target_os = "linux",
                                target_os = "redox",
                                // kqueue
                                target_os = "dragonfly",
                                target_os = "freebsd",
                                target_os = "ios",
                                target_os = "macos",
                                target_os = "netbsd",
                                target_os = "openbsd",
                                target_os = "tvos",
                                target_os = "visionos",
                                target_os = "watchos",
                            )
                        ))]
                        if 0 < n && n < buf.len() {
                            self.registration.clear_readiness(evt);
                        }

                        return Poll::Ready(Ok(n));
                    },
                    Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
                        self.registration.clear_readiness(evt);
                    }
                    Err(e) => return Poll::Ready(Err(e)),
                }
            }
        }

        #[cfg(any(feature = "net", feature = "process"))]
        pub(crate) fn poll_write_vectored<'a>(
            &'a self,
            cx: &mut Context<'_>,
            bufs: &[io::IoSlice<'_>],
        ) -> Poll<io::Result<usize>>
        where
            &'a E: io::Write + 'a,
        {
            use std::io::Write;

            // The readiness/retry loop is handled by `poll_write_io`.
            self.registration.poll_write_io(cx, || self.io.as_ref().unwrap().write_vectored(bufs))
        }
    }
}
// NOTE(review): these blanket impls assert (ref-)unwind safety for any
// `E: Source`, regardless of whether `E` itself is unwind safe — confirm
// this is intentional before relying on it.
impl<E: Source> UnwindSafe for PollEvented<E> {}

impl<E: Source> RefUnwindSafe for PollEvented<E> {}

// Dereferences to the wrapped I/O resource. `io` is only ever `None` after
// `into_inner`, which consumes `self`, so the `unwrap` here cannot be
// reached through that path.
impl<E: Source> Deref for PollEvented<E> {
    type Target = E;

    fn deref(&self) -> &E {
        self.io.as_ref().unwrap()
    }
}
// Debug output shows only the wrapped I/O resource; the registration is
// intentionally omitted.
impl<E: Source + fmt::Debug> fmt::Debug for PollEvented<E> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("PollEvented");
        dbg.field("io", &self.io);
        dbg.finish()
    }
}
impl<E: Source> Drop for PollEvented<E> {
    fn drop(&mut self) {
        // `io` is `None` if `into_inner` already deregistered and returned
        // the resource; in that case there is nothing left to do here.
        if let Some(mut io) = self.io.take() {
            // Ignore errors
            let _ = self.registration.deregister(&mut io);
        }
    }
}

344
vendor/tokio/src/io/read_buf.rs vendored Normal file
View File

@@ -0,0 +1,344 @@
use std::fmt;
use std::mem::MaybeUninit;
/// A wrapper around a byte buffer that is incrementally filled and initialized.
///
/// This type is a sort of "double cursor". It tracks three regions in the
/// buffer: a region at the beginning of the buffer that has been logically
/// filled with data, a region that has been initialized at some point but not
/// yet logically filled, and a region at the end that may be uninitialized.
/// The filled region is guaranteed to be a subset of the initialized region.
///
/// In summary, the contents of the buffer can be visualized as:
///
/// ```not_rust
/// [             capacity              ]
/// [ filled |         unfilled         ]
/// [    initialized    | uninitialized ]
/// ```
///
/// It is undefined behavior to de-initialize any bytes from the uninitialized
/// region, since it is merely unknown whether this region is uninitialized or
/// not, and if part of it turns out to be initialized, it must stay initialized.
pub struct ReadBuf<'a> {
    // Underlying storage; may be partially or fully uninitialized.
    buf: &'a mut [MaybeUninit<u8>],
    // Number of bytes, from the start, logically filled with data.
    // Invariant (enforced by `set_filled`): `filled <= initialized`.
    filled: usize,
    // Number of bytes, from the start, known to be initialized.
    // Invariant: `initialized <= buf.len()`.
    initialized: usize,
}
impl<'a> ReadBuf<'a> {
    /// Creates a new `ReadBuf` from a fully initialized buffer.
    #[inline]
    pub fn new(buf: &'a mut [u8]) -> ReadBuf<'a> {
        // The whole buffer is initialized, so the `initialized` cursor covers
        // all of it; nothing is filled yet.
        let initialized = buf.len();
        let buf = unsafe { slice_to_uninit_mut(buf) };
        ReadBuf {
            buf,
            filled: 0,
            initialized,
        }
    }

    /// Creates a new `ReadBuf` from a buffer that may be uninitialized.
    ///
    /// The internal cursor will mark the entire buffer as uninitialized. If
    /// the buffer is known to be partially initialized, then use `assume_init`
    /// to move the internal cursor.
    #[inline]
    pub fn uninit(buf: &'a mut [MaybeUninit<u8>]) -> ReadBuf<'a> {
        ReadBuf {
            buf,
            filled: 0,
            initialized: 0,
        }
    }

    /// Returns the total capacity of the buffer.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.buf.len()
    }

    /// Returns a shared reference to the filled portion of the buffer.
    #[inline]
    pub fn filled(&self) -> &[u8] {
        let slice = &self.buf[..self.filled];
        // safety: filled describes how far into the buffer that the
        // user has filled with bytes, so it's been initialized.
        unsafe { slice_assume_init(slice) }
    }

    /// Returns a mutable reference to the filled portion of the buffer.
    #[inline]
    pub fn filled_mut(&mut self) -> &mut [u8] {
        let slice = &mut self.buf[..self.filled];
        // safety: filled describes how far into the buffer that the
        // user has filled with bytes, so it's been initialized.
        unsafe { slice_assume_init_mut(slice) }
    }

    /// Returns a new `ReadBuf` comprised of the unfilled section up to `n`.
    ///
    /// Note: the returned `ReadBuf` has fresh cursors (its own view starts
    /// with `filled == initialized == 0`) over the same underlying memory.
    #[inline]
    pub fn take(&mut self, n: usize) -> ReadBuf<'_> {
        let max = std::cmp::min(self.remaining(), n);
        // Safety: We don't set any of the `unfilled_mut` with `MaybeUninit::uninit`.
        unsafe { ReadBuf::uninit(&mut self.unfilled_mut()[..max]) }
    }

    /// Returns a shared reference to the initialized portion of the buffer.
    ///
    /// This includes the filled portion.
    #[inline]
    pub fn initialized(&self) -> &[u8] {
        let slice = &self.buf[..self.initialized];
        // safety: initialized describes how far into the buffer that the
        // user has at some point initialized with bytes.
        unsafe { slice_assume_init(slice) }
    }

    /// Returns a mutable reference to the initialized portion of the buffer.
    ///
    /// This includes the filled portion.
    #[inline]
    pub fn initialized_mut(&mut self) -> &mut [u8] {
        let slice = &mut self.buf[..self.initialized];
        // safety: initialized describes how far into the buffer that the
        // user has at some point initialized with bytes.
        unsafe { slice_assume_init_mut(slice) }
    }

    /// Returns a mutable reference to the entire buffer, without ensuring that it has been fully
    /// initialized.
    ///
    /// The elements between 0 and `self.filled().len()` are filled, and those between 0 and
    /// `self.initialized().len()` are initialized (and so can be converted to a `&mut [u8]`).
    ///
    /// The caller of this method must ensure that these invariants are upheld. For example, if the
    /// caller initializes some of the uninitialized section of the buffer, it must call
    /// [`assume_init`](Self::assume_init) with the number of bytes initialized.
    ///
    /// # Safety
    ///
    /// The caller must not de-initialize portions of the buffer that have already been initialized.
    /// This includes any bytes in the region marked as uninitialized by `ReadBuf`.
    #[inline]
    pub unsafe fn inner_mut(&mut self) -> &mut [MaybeUninit<u8>] {
        self.buf
    }

    /// Returns a mutable reference to the unfilled part of the buffer without ensuring that it has been fully
    /// initialized.
    ///
    /// # Safety
    ///
    /// The caller must not de-initialize portions of the buffer that have already been initialized.
    /// This includes any bytes in the region marked as uninitialized by `ReadBuf`.
    #[inline]
    pub unsafe fn unfilled_mut(&mut self) -> &mut [MaybeUninit<u8>] {
        &mut self.buf[self.filled..]
    }

    /// Returns a mutable reference to the unfilled part of the buffer, ensuring it is fully initialized.
    ///
    /// Since `ReadBuf` tracks the region of the buffer that has been initialized, this is effectively "free" after
    /// the first use.
    #[inline]
    pub fn initialize_unfilled(&mut self) -> &mut [u8] {
        self.initialize_unfilled_to(self.remaining())
    }

    /// Returns a mutable reference to the first `n` bytes of the unfilled part of the buffer, ensuring it is
    /// fully initialized.
    ///
    /// # Panics
    ///
    /// Panics if `self.remaining()` is less than `n`.
    #[inline]
    #[track_caller]
    pub fn initialize_unfilled_to(&mut self, n: usize) -> &mut [u8] {
        assert!(self.remaining() >= n, "n overflows remaining");

        // This can't overflow, otherwise the assert above would have failed.
        let end = self.filled + n;

        // Zero-fill only the part that is not yet known to be initialized.
        if self.initialized < end {
            unsafe {
                self.buf[self.initialized..end]
                    .as_mut_ptr()
                    .write_bytes(0, end - self.initialized);
            }
            self.initialized = end;
        }

        let slice = &mut self.buf[self.filled..end];
        // safety: just above, we checked that the end of the buf has
        // been initialized to some value.
        unsafe { slice_assume_init_mut(slice) }
    }

    /// Returns the number of bytes at the end of the slice that have not yet been filled.
    #[inline]
    pub fn remaining(&self) -> usize {
        self.capacity() - self.filled
    }

    /// Clears the buffer, resetting the filled region to empty.
    ///
    /// The number of initialized bytes is not changed, and the contents of the buffer are not modified.
    #[inline]
    pub fn clear(&mut self) {
        self.filled = 0;
    }

    /// Advances the size of the filled region of the buffer.
    ///
    /// The number of initialized bytes is not changed.
    ///
    /// # Panics
    ///
    /// Panics if the filled region of the buffer would become larger than the initialized region.
    #[inline]
    #[track_caller]
    pub fn advance(&mut self, n: usize) {
        let new = self.filled.checked_add(n).expect("filled overflow");
        self.set_filled(new);
    }

    /// Sets the size of the filled region of the buffer.
    ///
    /// The number of initialized bytes is not changed.
    ///
    /// Note that this can be used to *shrink* the filled region of the buffer in addition to growing it (for
    /// example, by a `AsyncRead` implementation that compresses data in-place).
    ///
    /// # Panics
    ///
    /// Panics if the filled region of the buffer would become larger than the initialized region.
    #[inline]
    #[track_caller]
    pub fn set_filled(&mut self, n: usize) {
        assert!(
            n <= self.initialized,
            "filled must not become larger than initialized"
        );
        self.filled = n;
    }

    /// Asserts that the first `n` unfilled bytes of the buffer are initialized.
    ///
    /// `ReadBuf` assumes that bytes are never de-initialized, so this method does nothing when called with fewer
    /// bytes than are already known to be initialized.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `n` unfilled bytes of the buffer have already been initialized.
    #[inline]
    pub unsafe fn assume_init(&mut self, n: usize) {
        // The initialized cursor only ever moves forward.
        let new = self.filled + n;
        if new > self.initialized {
            self.initialized = new;
        }
    }

    /// Appends data to the buffer, advancing the written position and possibly also the initialized position.
    ///
    /// # Panics
    ///
    /// Panics if `self.remaining()` is less than `buf.len()`.
    #[inline]
    #[track_caller]
    pub fn put_slice(&mut self, buf: &[u8]) {
        assert!(
            self.remaining() >= buf.len(),
            "buf.len() must fit in remaining(); buf.len() = {}, remaining() = {}",
            buf.len(),
            self.remaining()
        );

        let amt = buf.len();
        // Cannot overflow, asserted above
        let end = self.filled + amt;

        // Safety: the length is asserted above
        unsafe {
            self.buf[self.filled..end]
                .as_mut_ptr()
                .cast::<u8>()
                .copy_from_nonoverlapping(buf.as_ptr(), amt);
        }

        // The copied bytes are now both initialized and filled.
        if self.initialized < end {
            self.initialized = end;
        }
        self.filled = end;
    }
}
// Bridges `ReadBuf` into the `bytes` ecosystem: the `bytes::BufMut`
// "remaining writable" region maps directly onto the unfilled region.
#[cfg(feature = "io-util")]
#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
unsafe impl<'a> bytes::BufMut for ReadBuf<'a> {
    fn remaining_mut(&self) -> usize {
        self.remaining()
    }

    // SAFETY: The caller guarantees that at least `cnt` unfilled bytes have been initialized.
    unsafe fn advance_mut(&mut self, cnt: usize) {
        // Record the newly initialized bytes before moving the filled cursor;
        // otherwise `advance` would panic on `filled > initialized`.
        unsafe {
            self.assume_init(cnt);
        }
        self.advance(cnt);
    }

    fn chunk_mut(&mut self) -> &mut bytes::buf::UninitSlice {
        // SAFETY: No region of `unfilled` will be deinitialized because it is
        // exposed as an `UninitSlice`, whose API guarantees that the memory is
        // never deinitialized.
        let unfilled = unsafe { self.unfilled_mut() };
        let len = unfilled.len();
        let ptr = unfilled.as_mut_ptr() as *mut u8;

        // SAFETY: The pointer is valid for `len` bytes because it comes from a
        // slice of that length.
        unsafe { bytes::buf::UninitSlice::from_raw_parts_mut(ptr, len) }
    }
}
// Debug output reports the cursor positions rather than the (possibly
// uninitialized) buffer contents.
impl fmt::Debug for ReadBuf<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("ReadBuf");
        dbg.field("filled", &self.filled);
        dbg.field("initialized", &self.initialized);
        dbg.field("capacity", &self.capacity());
        dbg.finish()
    }
}
/// Reinterprets an initialized byte slice as a `MaybeUninit<u8>` slice.
///
/// # Safety
///
/// The caller must ensure that `slice` is fully initialized
/// and never writes uninitialized bytes to the returned slice.
unsafe fn slice_to_uninit_mut(slice: &mut [u8]) -> &mut [MaybeUninit<u8>] {
    let len = slice.len();
    let ptr = slice.as_mut_ptr().cast::<MaybeUninit<u8>>();
    // SAFETY: `MaybeUninit<u8>` has the same memory layout as `u8`, the
    // pointer and length come from a valid `&mut [u8]`, and the caller
    // promises not to write uninitialized bytes through the result.
    unsafe { std::slice::from_raw_parts_mut(ptr, len) }
}
/// Reinterprets a fully initialized `MaybeUninit<u8>` slice as `&[u8]`.
///
/// # Safety
///
/// The caller must ensure that `slice` is fully initialized.
// TODO: This could use `MaybeUninit::slice_assume_init` when it is stable.
unsafe fn slice_assume_init(slice: &[MaybeUninit<u8>]) -> &[u8] {
    let ptr = slice.as_ptr().cast::<u8>();
    // SAFETY: `MaybeUninit<u8>` has the same memory layout as `u8`, and the
    // caller promises that every element of `slice` is initialized.
    unsafe { std::slice::from_raw_parts(ptr, slice.len()) }
}
/// Reinterprets a fully initialized `MaybeUninit<u8>` slice as `&mut [u8]`.
///
/// # Safety
///
/// The caller must ensure that `slice` is fully initialized.
// TODO: This could use `MaybeUninit::slice_assume_init_mut` when it is stable.
unsafe fn slice_assume_init_mut(slice: &mut [MaybeUninit<u8>]) -> &mut [u8] {
    let len = slice.len();
    let ptr = slice.as_mut_ptr().cast::<u8>();
    // SAFETY: `MaybeUninit<u8>` has the same memory layout as `u8`, and the
    // caller promises that every element of `slice` is initialized.
    unsafe { std::slice::from_raw_parts_mut(ptr, len) }
}

322
vendor/tokio/src/io/ready.rs vendored Normal file
View File

@@ -0,0 +1,322 @@
#![cfg_attr(not(feature = "net"), allow(unreachable_pub))]
use crate::io::interest::Interest;
use std::fmt;
use std::ops;
// Bit flags backing `Ready`; each flag occupies a distinct bit so that
// readiness states can be combined and tested with bitwise operators.
const READABLE: usize = 0b0_01;
const WRITABLE: usize = 0b0_10;
const READ_CLOSED: usize = 0b0_0100;
const WRITE_CLOSED: usize = 0b0_1000;
// Priority readiness is only defined on Linux/Android (see `Ready::PRIORITY`).
#[cfg(any(target_os = "linux", target_os = "android"))]
const PRIORITY: usize = 0b1_0000;
const ERROR: usize = 0b10_0000;
/// Describes the readiness state of an I/O resource.
///
/// `Ready` tracks which operation an I/O resource is ready to perform.
// Newtype over the `usize` bit flags declared above; converted to/from the
// raw representation via `as_usize`/`from_usize`.
#[cfg_attr(docsrs, doc(cfg(feature = "net")))]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Ready(usize);
impl Ready {
    /// Returns the empty `Ready` set.
    pub const EMPTY: Ready = Ready(0);

    /// Returns a `Ready` representing readable readiness.
    pub const READABLE: Ready = Ready(READABLE);

    /// Returns a `Ready` representing writable readiness.
    pub const WRITABLE: Ready = Ready(WRITABLE);

    /// Returns a `Ready` representing read closed readiness.
    pub const READ_CLOSED: Ready = Ready(READ_CLOSED);

    /// Returns a `Ready` representing write closed readiness.
    pub const WRITE_CLOSED: Ready = Ready(WRITE_CLOSED);

    /// Returns a `Ready` representing priority readiness.
    #[cfg(any(target_os = "linux", target_os = "android"))]
    #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))]
    pub const PRIORITY: Ready = Ready(PRIORITY);

    /// Returns a `Ready` representing error readiness.
    pub const ERROR: Ready = Ready(ERROR);

    /// Returns a `Ready` representing readiness for all operations.
    #[cfg(any(target_os = "linux", target_os = "android"))]
    pub const ALL: Ready =
        Ready(READABLE | WRITABLE | READ_CLOSED | WRITE_CLOSED | ERROR | PRIORITY);

    /// Returns a `Ready` representing readiness for all operations.
    #[cfg(not(any(target_os = "linux", target_os = "android")))]
    pub const ALL: Ready = Ready(READABLE | WRITABLE | READ_CLOSED | WRITE_CLOSED | ERROR);

    // Must remain crate-private to avoid adding a public dependency on Mio.
    pub(crate) fn from_mio(event: &mio::event::Event) -> Ready {
        let mut ready = Ready::EMPTY;

        // On FreeBSD, aio/lio completion events are surfaced as read readiness.
        #[cfg(all(target_os = "freebsd", feature = "net"))]
        {
            if event.is_aio() {
                ready |= Ready::READABLE;
            }

            if event.is_lio() {
                ready |= Ready::READABLE;
            }
        }

        if event.is_readable() {
            ready |= Ready::READABLE;
        }

        if event.is_writable() {
            ready |= Ready::WRITABLE;
        }

        if event.is_read_closed() {
            ready |= Ready::READ_CLOSED;
        }

        if event.is_write_closed() {
            ready |= Ready::WRITE_CLOSED;
        }

        if event.is_error() {
            ready |= Ready::ERROR;
        }

        #[cfg(any(target_os = "linux", target_os = "android"))]
        {
            if event.is_priority() {
                ready |= Ready::PRIORITY;
            }
        }

        ready
    }

    /// Returns true if `Ready` is the empty set.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::Ready;
    ///
    /// assert!(Ready::EMPTY.is_empty());
    /// assert!(!Ready::READABLE.is_empty());
    /// ```
    pub fn is_empty(self) -> bool {
        self == Ready::EMPTY
    }

    /// Returns `true` if the value includes `readable`.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::Ready;
    ///
    /// assert!(!Ready::EMPTY.is_readable());
    /// assert!(Ready::READABLE.is_readable());
    /// assert!(Ready::READ_CLOSED.is_readable());
    /// assert!(!Ready::WRITABLE.is_readable());
    /// ```
    pub fn is_readable(self) -> bool {
        // Read-closed also counts as readable (a read will return promptly).
        self.contains(Ready::READABLE) || self.is_read_closed()
    }

    /// Returns `true` if the value includes writable `readiness`.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::Ready;
    ///
    /// assert!(!Ready::EMPTY.is_writable());
    /// assert!(!Ready::READABLE.is_writable());
    /// assert!(Ready::WRITABLE.is_writable());
    /// assert!(Ready::WRITE_CLOSED.is_writable());
    /// ```
    pub fn is_writable(self) -> bool {
        // Write-closed also counts as writable, mirroring `is_readable`.
        self.contains(Ready::WRITABLE) || self.is_write_closed()
    }

    /// Returns `true` if the value includes read-closed `readiness`.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::Ready;
    ///
    /// assert!(!Ready::EMPTY.is_read_closed());
    /// assert!(!Ready::READABLE.is_read_closed());
    /// assert!(Ready::READ_CLOSED.is_read_closed());
    /// ```
    pub fn is_read_closed(self) -> bool {
        self.contains(Ready::READ_CLOSED)
    }

    /// Returns `true` if the value includes write-closed `readiness`.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::Ready;
    ///
    /// assert!(!Ready::EMPTY.is_write_closed());
    /// assert!(!Ready::WRITABLE.is_write_closed());
    /// assert!(Ready::WRITE_CLOSED.is_write_closed());
    /// ```
    pub fn is_write_closed(self) -> bool {
        self.contains(Ready::WRITE_CLOSED)
    }

    /// Returns `true` if the value includes priority `readiness`.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::Ready;
    ///
    /// assert!(!Ready::EMPTY.is_priority());
    /// assert!(!Ready::WRITABLE.is_priority());
    /// assert!(Ready::PRIORITY.is_priority());
    /// ```
    #[cfg(any(target_os = "linux", target_os = "android"))]
    #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))]
    pub fn is_priority(self) -> bool {
        self.contains(Ready::PRIORITY)
    }

    /// Returns `true` if the value includes error `readiness`.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::Ready;
    ///
    /// assert!(!Ready::EMPTY.is_error());
    /// assert!(!Ready::WRITABLE.is_error());
    /// assert!(Ready::ERROR.is_error());
    /// ```
    pub fn is_error(self) -> bool {
        self.contains(Ready::ERROR)
    }

    /// Returns true if `self` is a superset of `other`.
    ///
    /// `other` may represent more than one readiness operations, in which case
    /// the function only returns true if `self` contains all readiness
    /// specified in `other`.
    pub(crate) fn contains<T: Into<Self>>(self, other: T) -> bool {
        let other = other.into();
        (self & other) == other
    }

    /// Creates a `Ready` instance using the given `usize` representation.
    ///
    /// The `usize` representation must have been obtained from a call to
    /// `Readiness::as_usize`.
    ///
    /// This function is mainly provided to allow the caller to get a
    /// readiness value from an `AtomicUsize`.
    pub(crate) fn from_usize(val: usize) -> Ready {
        // Mask out any bits that do not correspond to a known readiness flag.
        Ready(val & Ready::ALL.as_usize())
    }

    /// Returns a `usize` representation of the `Ready` value.
    ///
    /// This function is mainly provided to allow the caller to store a
    /// readiness value in an `AtomicUsize`.
    pub(crate) fn as_usize(self) -> usize {
        self.0
    }

    /// Translates an `Interest` into the set of readiness states that can
    /// satisfy it.
    pub(crate) fn from_interest(interest: Interest) -> Ready {
        let mut ready = Ready::EMPTY;

        // A closed stream also satisfies the corresponding interest, so the
        // *_CLOSED flags are included alongside the base readiness.
        if interest.is_readable() {
            ready |= Ready::READABLE;
            ready |= Ready::READ_CLOSED;
        }

        if interest.is_writable() {
            ready |= Ready::WRITABLE;
            ready |= Ready::WRITE_CLOSED;
        }

        #[cfg(any(target_os = "linux", target_os = "android"))]
        if interest.is_priority() {
            ready |= Ready::PRIORITY;
            ready |= Ready::READ_CLOSED;
        }

        if interest.is_error() {
            ready |= Ready::ERROR;
        }

        ready
    }

    /// Returns the subset of `self` that is relevant to `interest`.
    pub(crate) fn intersection(self, interest: Interest) -> Ready {
        Ready(self.0 & Ready::from_interest(interest).0)
    }

    /// Returns `true` if any readiness in `self` satisfies `interest`
    /// (non-empty intersection).
    pub(crate) fn satisfies(self, interest: Interest) -> bool {
        self.0 & Ready::from_interest(interest).0 != 0
    }
}
impl ops::BitOr<Ready> for Ready {
type Output = Ready;
#[inline]
fn bitor(self, other: Ready) -> Ready {
Ready(self.0 | other.0)
}
}
impl ops::BitOrAssign<Ready> for Ready {
#[inline]
fn bitor_assign(&mut self, other: Ready) {
self.0 |= other.0;
}
}
impl ops::BitAnd<Ready> for Ready {
type Output = Ready;
#[inline]
fn bitand(self, other: Ready) -> Ready {
Ready(self.0 & other.0)
}
}
impl ops::Sub<Ready> for Ready {
type Output = Ready;
#[inline]
fn sub(self, other: Ready) -> Ready {
Ready(self.0 & !other.0)
}
}
// Debug output renders the decoded readiness flags rather than the raw bits.
impl fmt::Debug for Ready {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut fmt = fmt.debug_struct("Ready");

        fmt.field("is_readable", &self.is_readable())
            .field("is_writable", &self.is_writable())
            .field("is_read_closed", &self.is_read_closed())
            .field("is_write_closed", &self.is_write_closed())
            .field("is_error", &self.is_error());

        // Priority readiness only exists on Linux/Android.
        #[cfg(any(target_os = "linux", target_os = "android"))]
        fmt.field("is_priority", &self.is_priority());

        fmt.finish()
    }
}

57
vendor/tokio/src/io/seek.rs vendored Normal file
View File

@@ -0,0 +1,57 @@
use crate::io::AsyncSeek;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io::{self, SeekFrom};
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
pin_project! {
    /// Future for the [`seek`](crate::io::AsyncSeekExt::seek) method.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct Seek<'a, S: ?Sized> {
        // The seekable object being driven.
        seek: &'a mut S,
        // Target position; `Some` until `start_seek` succeeds during polling,
        // then `None` while awaiting `poll_complete` (see the Future impl).
        pos: Option<SeekFrom>,
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Creates a [`Seek`] future that moves the position of `seek` to `pos`.
pub(crate) fn seek<S>(seek: &mut S, pos: SeekFrom) -> Seek<'_, S>
where
    S: AsyncSeek + ?Sized + Unpin,
{
    Seek {
        seek,
        // Held until the seek is actually started on first poll.
        pos: Some(pos),
        _pin: PhantomPinned,
    }
}
impl<S> Future for Seek<'_, S>
where
    S: AsyncSeek + ?Sized + Unpin,
{
    type Output = io::Result<u64>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let me = self.project();
        match me.pos {
            // The seek has not been started yet.
            Some(pos) => {
                // ensure no seek in progress
                ready!(Pin::new(&mut *me.seek).poll_complete(cx))?;
                match Pin::new(&mut *me.seek).start_seek(*pos) {
                    Ok(()) => {
                        // Mark the seek as started so a later poll does not
                        // issue it again, then drive it to completion.
                        *me.pos = None;
                        Pin::new(&mut *me.seek).poll_complete(cx)
                    }
                    Err(e) => Poll::Ready(Err(e)),
                }
            }
            // Seek already started; just wait for completion.
            None => Pin::new(&mut *me.seek).poll_complete(cx),
        }
    }
}

166
vendor/tokio/src/io/split.rs vendored Normal file
View File

@@ -0,0 +1,166 @@
//! Split a single value implementing `AsyncRead + AsyncWrite` into separate
//! `AsyncRead` and `AsyncWrite` handles.
//!
//! To restore this read/write object from its `split::ReadHalf` and
//! `split::WriteHalf` use `unsplit`.
use crate::io::{AsyncRead, AsyncWrite, ReadBuf};
use std::fmt;
use std::io;
use std::pin::Pin;
use std::sync::Arc;
use std::sync::Mutex;
use std::task::{Context, Poll};
cfg_io_util! {
    /// The readable half of a value returned from [`split`](split()).
    pub struct ReadHalf<T> {
        inner: Arc<Inner<T>>,
    }
    /// The writable half of a value returned from [`split`](split()).
    pub struct WriteHalf<T> {
        inner: Arc<Inner<T>>,
    }
    /// Splits a single value implementing `AsyncRead + AsyncWrite` into separate
    /// `AsyncRead` and `AsyncWrite` handles.
    ///
    /// To restore this read/write object from its `ReadHalf` and
    /// `WriteHalf` use [`unsplit`](ReadHalf::unsplit()).
    pub fn split<T>(stream: T) -> (ReadHalf<T>, WriteHalf<T>)
    where
        T: AsyncRead + AsyncWrite,
    {
        // Captured before the stream moves behind the mutex so the write
        // half can answer `is_write_vectored` without locking.
        let is_write_vectored = stream.is_write_vectored();
        let inner = Arc::new(Inner {
            stream: Mutex::new(stream),
            is_write_vectored,
        });
        // Both halves share ownership of the same `Inner` allocation; that
        // shared pointer is also what `is_pair_of` compares.
        let rd = ReadHalf {
            inner: inner.clone(),
        };
        let wr = WriteHalf { inner };
        (rd, wr)
    }
}
// State shared by both halves: the stream itself behind a mutex, plus a
// cached `is_write_vectored` answer so it can be queried without locking.
struct Inner<T> {
    stream: Mutex<T>,
    is_write_vectored: bool,
}
impl<T> Inner<T> {
    /// Locks the shared stream and invokes `f` with a pinned reference to it.
    fn with_lock<R>(&self, f: impl FnOnce(Pin<&mut T>) -> R) -> R {
        let mut guard = self.stream.lock().unwrap();
        // safety: we do not move the stream.
        let stream = unsafe { Pin::new_unchecked(&mut *guard) };
        f(stream)
    }
}
impl<T> ReadHalf<T> {
    /// Checks if this `ReadHalf` and some `WriteHalf` were split from the same
    /// stream.
    pub fn is_pair_of(&self, other: &WriteHalf<T>) -> bool {
        other.is_pair_of(self)
    }
    /// Reunites with a previously split `WriteHalf`.
    ///
    /// # Panics
    ///
    /// If this `ReadHalf` and the given `WriteHalf` do not originate from the
    /// same `split` operation this method will panic.
    /// This can be checked ahead of time by calling [`is_pair_of()`](Self::is_pair_of).
    #[track_caller]
    pub fn unsplit(self, wr: WriteHalf<T>) -> T
    where
        T: Unpin,
    {
        if self.is_pair_of(&wr) {
            // Drop the write half first so this half holds the only remaining
            // reference and `Arc::try_unwrap` below can succeed.
            drop(wr);
            let inner = Arc::try_unwrap(self.inner)
                .ok()
                .expect("`Arc::try_unwrap` failed");
            inner.stream.into_inner().unwrap()
        } else {
            panic!("Unrelated `split::Write` passed to `split::Read::unsplit`.")
        }
    }
}
impl<T> WriteHalf<T> {
    /// Checks if this `WriteHalf` and some `ReadHalf` were split from the same
    /// stream.
    pub fn is_pair_of(&self, other: &ReadHalf<T>) -> bool {
        // Halves are a pair iff they share the same `Inner` allocation.
        Arc::ptr_eq(&self.inner, &other.inner)
    }
}
impl<T: AsyncRead> AsyncRead for ReadHalf<T> {
    // Each read locks the shared stream for the duration of the call.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        self.inner.with_lock(|stream| stream.poll_read(cx, buf))
    }
}
impl<T: AsyncWrite> AsyncWrite for WriteHalf<T> {
    // Each operation locks the shared stream for the duration of the call.
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        self.inner.with_lock(|stream| stream.poll_write(cx, buf))
    }
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        self.inner.with_lock(|stream| stream.poll_flush(cx))
    }
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        self.inner.with_lock(|stream| stream.poll_shutdown(cx))
    }
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> Poll<Result<usize, io::Error>> {
        self.inner
            .with_lock(|stream| stream.poll_write_vectored(cx, bufs))
    }
    fn is_write_vectored(&self) -> bool {
        // Answered from the value cached at `split` time; no locking needed.
        self.inner.is_write_vectored
    }
}
// SAFETY: the stream inside `Inner` is only ever accessed through the
// `Mutex` (see `with_lock`), and `Inner` itself is shared via `Arc`, so a
// half may be sent/shared across threads whenever `T` itself permits it.
unsafe impl<T: Send> Send for ReadHalf<T> {}
unsafe impl<T: Send> Send for WriteHalf<T> {}
unsafe impl<T: Sync> Sync for ReadHalf<T> {}
unsafe impl<T: Sync> Sync for WriteHalf<T> {}
impl<T: fmt::Debug> fmt::Debug for ReadHalf<T> {
    // NOTE(review): the inner stream is intentionally omitted from the
    // output; presumably to avoid locking the shared mutex just to format.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("split::ReadHalf").finish()
    }
}
impl<T: fmt::Debug> fmt::Debug for WriteHalf<T> {
    // NOTE(review): the inner stream is intentionally omitted from the
    // output; presumably to avoid locking the shared mutex just to format.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("split::WriteHalf").finish()
    }
}

148
vendor/tokio/src/io/stderr.rs vendored Normal file
View File

@@ -0,0 +1,148 @@
use crate::io::blocking::Blocking;
use crate::io::stdio_common::SplitByUtf8BoundaryIfWindows;
use crate::io::AsyncWrite;
use std::io;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
cfg_io_std! {
    /// A handle to the standard error stream of a process.
    ///
    /// Concurrent writes to stderr must be executed with care: Only individual
    /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
    /// you should be aware that writes using [`write_all`] are not guaranteed
    /// to occur as a single write, so multiple threads writing data with
    /// [`write_all`] may result in interleaved output.
    ///
    /// Created by the [`stderr`] function.
    ///
    /// [`stderr`]: stderr()
    /// [`AsyncWrite`]: AsyncWrite
    /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// #[tokio::main]
    /// async fn main() -> io::Result<()> {
    ///     let mut stderr = io::stderr();
    ///     stderr.write_all(b"Print some error here.").await?;
    ///     Ok(())
    /// }
    /// ```
    #[derive(Debug)]
    pub struct Stderr {
        // NOTE(review): `Blocking` appears to run the actual write off the
        // async context (see `crate::io::blocking`); the outer wrapper trims
        // writes to UTF-8 boundaries on Windows (see `stdio_common.rs`).
        std: SplitByUtf8BoundaryIfWindows<Blocking<std::io::Stderr>>,
    }
    /// Constructs a new handle to the standard error of the current process.
    ///
    /// The returned handle allows writing to standard error from the within the
    /// Tokio runtime.
    ///
    /// Concurrent writes to stderr must be executed with care: Only individual
    /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
    /// you should be aware that writes using [`write_all`] are not guaranteed
    /// to occur as a single write, so multiple threads writing data with
    /// [`write_all`] may result in interleaved output.
    ///
    /// Note that unlike [`std::io::stderr`], each call to this `stderr()`
    /// produces a new writer, so for example, this program does **not** flush stderr:
    ///
    /// ```no_run
    /// # use tokio::io::AsyncWriteExt;
    /// # #[tokio::main]
    /// # async fn main() -> std::io::Result<()> {
    /// tokio::io::stderr().write_all(b"aa").await?;
    /// tokio::io::stderr().flush().await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [`std::io::stderr`]: std::io::stderr
    /// [`AsyncWrite`]: AsyncWrite
    /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// #[tokio::main]
    /// async fn main() -> io::Result<()> {
    ///     let mut stderr = io::stderr();
    ///     stderr.write_all(b"Print some error here.").await?;
    ///     Ok(())
    /// }
    /// ```
    pub fn stderr() -> Stderr {
        let std = io::stderr();
        // SAFETY: `Blocking::new` requires that the wrapped type's `Read`
        // implementation not read from the buffer it is lent and report the
        // written length honestly. `std::io::Stderr` is only ever used for
        // writing here, so that contract is upheld.
        let blocking = unsafe { Blocking::new(std) };
        Stderr {
            std: SplitByUtf8BoundaryIfWindows::new(blocking),
        }
    }
}
#[cfg(unix)]
mod sys {
    use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, RawFd};
    use super::Stderr;
    impl AsRawFd for Stderr {
        fn as_raw_fd(&self) -> RawFd {
            // Delegates to the process-wide stderr handle; every `Stderr`
            // instance reports the same underlying descriptor.
            std::io::stderr().as_raw_fd()
        }
    }
    impl AsFd for Stderr {
        fn as_fd(&self) -> BorrowedFd<'_> {
            // SAFETY: the descriptor above is the process's standard error,
            // which is expected to remain open for the life of the process.
            unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) }
        }
    }
}
cfg_windows! {
    use crate::os::windows::io::{AsHandle, BorrowedHandle, AsRawHandle, RawHandle};
    impl AsRawHandle for Stderr {
        fn as_raw_handle(&self) -> RawHandle {
            // Delegates to the process-wide stderr handle.
            std::io::stderr().as_raw_handle()
        }
    }
    impl AsHandle for Stderr {
        fn as_handle(&self) -> BorrowedHandle<'_> {
            // SAFETY: the handle above is the process's standard error, which
            // is expected to remain open for the life of the process.
            unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
        }
    }
}
impl AsyncWrite for Stderr {
    // All methods forward to the wrapped
    // `SplitByUtf8BoundaryIfWindows<Blocking<_>>` adapter stack.
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Pin::new(&mut self.std).poll_write(cx, buf)
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Pin::new(&mut self.std).poll_flush(cx)
    }
    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), io::Error>> {
        Pin::new(&mut self.std).poll_shutdown(cx)
    }
}

98
vendor/tokio/src/io/stdin.rs vendored Normal file
View File

@@ -0,0 +1,98 @@
use crate::io::blocking::Blocking;
use crate::io::{AsyncRead, ReadBuf};
use std::io;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
cfg_io_std! {
    /// A handle to the standard input stream of a process.
    ///
    /// The handle implements the [`AsyncRead`] trait, but beware that concurrent
    /// reads of `Stdin` must be executed with care.
    ///
    /// This handle is best used for non-interactive uses, such as when a file
    /// is piped into the application. For technical reasons, `stdin` is
    /// implemented by using an ordinary blocking read on a separate thread, and
    /// it is impossible to cancel that read. This can make shutdown of the
    /// runtime hang until the user presses enter.
    ///
    /// For interactive uses, it is recommended to spawn a thread dedicated to
    /// user input and use blocking IO directly in that thread.
    ///
    /// Created by the [`stdin`] function.
    ///
    /// [`stdin`]: fn@stdin
    /// [`AsyncRead`]: trait@AsyncRead
    #[derive(Debug)]
    pub struct Stdin {
        // Reads are performed through the blocking adapter described above.
        std: Blocking<std::io::Stdin>,
    }
    /// Constructs a new handle to the standard input of the current process.
    ///
    /// This handle is best used for non-interactive uses, such as when a file
    /// is piped into the application. For technical reasons, `stdin` is
    /// implemented by using an ordinary blocking read on a separate thread, and
    /// it is impossible to cancel that read. This can make shutdown of the
    /// runtime hang until the user presses enter.
    ///
    /// For interactive uses, it is recommended to spawn a thread dedicated to
    /// user input and use blocking IO directly in that thread.
    pub fn stdin() -> Stdin {
        let std = io::stdin();
        // SAFETY: The `Read` implementation of `std` does not read from the
        // buffer it is borrowing and correctly reports the length of the data
        // written into the buffer.
        let std = unsafe { Blocking::new(std) };
        Stdin {
            std,
        }
    }
}
#[cfg(unix)]
mod sys {
    use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, RawFd};
    use super::Stdin;
    impl AsRawFd for Stdin {
        fn as_raw_fd(&self) -> RawFd {
            // Delegates to the process-wide stdin handle; every `Stdin`
            // instance reports the same underlying descriptor.
            std::io::stdin().as_raw_fd()
        }
    }
    impl AsFd for Stdin {
        fn as_fd(&self) -> BorrowedFd<'_> {
            // SAFETY: the descriptor above is the process's standard input,
            // which is expected to remain open for the life of the process.
            unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) }
        }
    }
}
cfg_windows! {
    use crate::os::windows::io::{AsHandle, BorrowedHandle, AsRawHandle, RawHandle};
    impl AsRawHandle for Stdin {
        fn as_raw_handle(&self) -> RawHandle {
            // Delegates to the process-wide stdin handle.
            std::io::stdin().as_raw_handle()
        }
    }
    impl AsHandle for Stdin {
        fn as_handle(&self) -> BorrowedHandle<'_> {
            // SAFETY: the handle above is the process's standard input, which
            // is expected to remain open for the life of the process.
            unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
        }
    }
}
impl AsyncRead for Stdin {
    // Forwards to the wrapped `Blocking<std::io::Stdin>` adapter.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        Pin::new(&mut self.std).poll_read(cx, buf)
    }
}

222
vendor/tokio/src/io/stdio_common.rs vendored Normal file
View File

@@ -0,0 +1,222 @@
//! Contains utilities for stdout and stderr.
use crate::io::AsyncWrite;
use std::pin::Pin;
use std::task::{Context, Poll};
/// # Windows
/// [`AsyncWrite`] adapter that finds the last char boundary in the given buffer
/// and does not write the rest, if the buffer contents seem to be `utf8`.
/// Otherwise it only trims the buffer down to `DEFAULT_MAX_BUF_SIZE`.
/// That way, the wrapped writer will always receive well-formed utf-8 bytes.
/// # Other platforms
/// Passes data to `inner` as is.
#[derive(Debug)]
pub(crate) struct SplitByUtf8BoundaryIfWindows<W> {
    inner: W,
}
impl<W> SplitByUtf8BoundaryIfWindows<W> {
    /// Wraps `inner` in the boundary-splitting adapter.
    pub(crate) fn new(inner: W) -> Self {
        Self { inner }
    }
}
// Maximum length in bytes of a UTF-8 encoded code point; this constant is
// defined by the Unicode standard.
const MAX_BYTES_PER_CHAR: usize = 4;
// Number of code-point-sized chunks probed when guessing whether a buffer is
// UTF-8 text (see `poll_write` below). Subject for tweaking here.
const MAGIC_CONST: usize = 8;
impl<W> crate::io::AsyncWrite for SplitByUtf8BoundaryIfWindows<W>
where
    W: AsyncWrite + Unpin,
{
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        mut buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        // just a closure to avoid repetitive code
        let mut call_inner = move |buf| Pin::new(&mut self.inner).poll_write(cx, buf);
        // 1. Only windows stdio can suffer from non-utf8.
        // We also check for `test` so that we can write some tests
        // for further code. Since `AsyncWrite` can always shrink
        // buffer at its discretion, excessive (i.e. in tests) shrinking
        // does not break correctness.
        // 2. If buffer is small, it will not be shrunk.
        // That's why its "textness" will not change, so we don't have
        // to fix it up.
        if cfg!(not(any(target_os = "windows", test)))
            || buf.len() <= crate::io::blocking::DEFAULT_MAX_BUF_SIZE
        {
            return call_inner(buf);
        }
        buf = &buf[..crate::io::blocking::DEFAULT_MAX_BUF_SIZE];
        // Now there are two possibilities.
        // If the caller gave us a binary buffer, we **should not** shrink it
        // anymore, because excessive shrinking hits performance.
        // If the caller gave us a textual buffer, we **must** additionally
        // shrink it to strip the incomplete char at the end of the buffer.
        // That's why the check we perform now is allowed to have
        // false positives.
        // Look at the first MAX_BYTES_PER_CHAR * MAGIC_CONST bytes:
        // if they are (possibly incomplete) utf8, then we can be quite sure
        // that the input buffer was utf8.
        let have_to_fix_up = match std::str::from_utf8(&buf[..MAX_BYTES_PER_CHAR * MAGIC_CONST]) {
            Ok(_) => true,
            Err(err) => {
                // A decode error only within the last (possibly truncated)
                // code point still looks like text.
                let incomplete_bytes = MAX_BYTES_PER_CHAR * MAGIC_CONST - err.valid_up_to();
                incomplete_bytes < MAX_BYTES_PER_CHAR
            }
        };
        if have_to_fix_up {
            // We must pop several bytes at the end which form incomplete
            // character. To achieve it, we exploit UTF8 encoding:
            // for any code point, all bytes except first start with 0b10 prefix.
            // see https://en.wikipedia.org/wiki/UTF-8#Encoding for details
            let trailing_incomplete_char_size = buf
                .iter()
                .rev()
                .take(MAX_BYTES_PER_CHAR)
                .position(|byte| *byte < 0b1000_0000 || *byte >= 0b1100_0000)
                .unwrap_or(0)
                + 1;
            buf = &buf[..buf.len() - trailing_incomplete_char_size];
        }
        call_inner(buf)
    }
    fn poll_flush(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        // Flushing needs no boundary handling; forward as-is.
        Pin::new(&mut self.inner).poll_flush(cx)
    }
    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        Pin::new(&mut self.inner).poll_shutdown(cx)
    }
}
#[cfg(test)]
#[cfg(not(loom))]
mod tests {
    use crate::io::blocking::DEFAULT_MAX_BUF_SIZE;
    use crate::io::AsyncWriteExt;
    use std::io;
    use std::pin::Pin;
    use std::task::Context;
    use std::task::Poll;
    /// Mock writer that asserts every chunk it receives is valid UTF-8 and no
    /// larger than `DEFAULT_MAX_BUF_SIZE`.
    struct TextMockWriter;
    impl crate::io::AsyncWrite for TextMockWriter {
        fn poll_write(
            self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<Result<usize, io::Error>> {
            assert!(buf.len() <= DEFAULT_MAX_BUF_SIZE);
            assert!(std::str::from_utf8(buf).is_ok());
            Poll::Ready(Ok(buf.len()))
        }
        fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
            Poll::Ready(Ok(()))
        }
        fn poll_shutdown(
            self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
        ) -> Poll<Result<(), io::Error>> {
            Poll::Ready(Ok(()))
        }
    }
    /// Mock writer that records the size of every accepted write.
    struct LoggingMockWriter {
        write_history: Vec<usize>,
    }
    impl LoggingMockWriter {
        fn new() -> Self {
            LoggingMockWriter {
                write_history: Vec::new(),
            }
        }
    }
    impl crate::io::AsyncWrite for LoggingMockWriter {
        fn poll_write(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<Result<usize, io::Error>> {
            assert!(buf.len() <= DEFAULT_MAX_BUF_SIZE);
            self.write_history.push(buf.len());
            Poll::Ready(Ok(buf.len()))
        }
        fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
            Poll::Ready(Ok(()))
        }
        fn poll_shutdown(
            self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
        ) -> Poll<Result<(), io::Error>> {
            Poll::Ready(Ok(()))
        }
    }
    #[test]
    #[cfg_attr(miri, ignore)] // takes a really long time with miri
    fn test_splitter() {
        // Use a multi-byte (3-byte UTF-8) character so a naive split could
        // cut a code point in half; `TextMockWriter` then verifies that every
        // chunk it receives is still well-formed UTF-8. (Previously this
        // repeated the empty string, producing zero bytes of data and making
        // the test vacuous.)
        let data = str::repeat("█", DEFAULT_MAX_BUF_SIZE);
        let mut wr = super::SplitByUtf8BoundaryIfWindows::new(TextMockWriter);
        let fut = async move {
            wr.write_all(data.as_bytes()).await.unwrap();
        };
        crate::runtime::Builder::new_current_thread()
            .build()
            .unwrap()
            .block_on(fut);
    }
    #[test]
    #[cfg_attr(miri, ignore)] // takes a really long time with miri
    fn test_pseudo_text() {
        // In this test we write a piece of binary data, whose beginning is
        // text though. We then validate that even in this corner case buffer
        // was not shrunk too much.
        let checked_count = super::MAGIC_CONST * super::MAX_BYTES_PER_CHAR;
        let mut data: Vec<u8> = str::repeat("a", checked_count).into();
        data.extend(std::iter::repeat(0b1010_1010).take(DEFAULT_MAX_BUF_SIZE - checked_count + 1));
        let mut writer = LoggingMockWriter::new();
        let mut splitter = super::SplitByUtf8BoundaryIfWindows::new(&mut writer);
        crate::runtime::Builder::new_current_thread()
            .build()
            .unwrap()
            .block_on(async {
                splitter.write_all(&data).await.unwrap();
            });
        // Check that at most two writes were performed
        assert!(writer.write_history.len() <= 2);
        // Check that all has been written
        assert_eq!(
            writer.write_history.iter().copied().sum::<usize>(),
            data.len()
        );
        // Check that at most MAX_BYTES_PER_CHAR + 1 (i.e. 5) bytes were shrunk
        // from the buffer: one because it was outside of DEFAULT_MAX_BUF_SIZE boundary, and
        // up to one "utf8 code point".
        assert!(data.len() - writer.write_history[0] <= super::MAX_BYTES_PER_CHAR + 1);
    }
}

197
vendor/tokio/src/io/stdout.rs vendored Normal file
View File

@@ -0,0 +1,197 @@
use crate::io::blocking::Blocking;
use crate::io::stdio_common::SplitByUtf8BoundaryIfWindows;
use crate::io::AsyncWrite;
use std::io;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
cfg_io_std! {
    /// A handle to the standard output stream of a process.
    ///
    /// Concurrent writes to stdout must be executed with care: Only individual
    /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
    /// you should be aware that writes using [`write_all`] are not guaranteed
    /// to occur as a single write, so multiple threads writing data with
    /// [`write_all`] may result in interleaved output.
    ///
    /// Created by the [`stdout`] function.
    ///
    /// [`stdout`]: stdout()
    /// [`AsyncWrite`]: AsyncWrite
    /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// #[tokio::main]
    /// async fn main() -> io::Result<()> {
    ///     let mut stdout = io::stdout();
    ///     stdout.write_all(b"Hello world!").await?;
    ///     Ok(())
    /// }
    /// ```
    ///
    /// The following is an example of using `stdio` with loop.
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let messages = vec!["hello", " world\n"];
    ///
    ///     // When you use `stdio` in a loop, it is recommended to create
    ///     // a single `stdio` instance outside the loop and call a write
    ///     // operation against that instance on each loop.
    ///     //
    ///     // Repeatedly creating `stdout` instances inside the loop and
    ///     // writing to that handle could result in mangled output since
    ///     // each write operation is handled by a different blocking thread.
    ///     let mut stdout = io::stdout();
    ///
    ///     for message in &messages {
    ///         stdout.write_all(message.as_bytes()).await.unwrap();
    ///         stdout.flush().await.unwrap();
    ///     }
    /// }
    /// ```
    #[derive(Debug)]
    pub struct Stdout {
        // NOTE(review): `Blocking` appears to run the actual write off the
        // async context (see `crate::io::blocking`); the outer wrapper trims
        // writes to UTF-8 boundaries on Windows (see `stdio_common.rs`).
        std: SplitByUtf8BoundaryIfWindows<Blocking<std::io::Stdout>>,
    }
    /// Constructs a new handle to the standard output of the current process.
    ///
    /// The returned handle allows writing to standard out from the within the
    /// Tokio runtime.
    ///
    /// Concurrent writes to stdout must be executed with care: Only individual
    /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
    /// you should be aware that writes using [`write_all`] are not guaranteed
    /// to occur as a single write, so multiple threads writing data with
    /// [`write_all`] may result in interleaved output.
    ///
    /// Note that unlike [`std::io::stdout`], each call to this `stdout()`
    /// produces a new writer, so for example, this program does **not** flush stdout:
    ///
    /// ```no_run
    /// # use tokio::io::AsyncWriteExt;
    /// # #[tokio::main]
    /// # async fn main() -> std::io::Result<()> {
    /// tokio::io::stdout().write_all(b"aa").await?;
    /// tokio::io::stdout().flush().await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [`std::io::stdout`]: std::io::stdout
    /// [`AsyncWrite`]: AsyncWrite
    /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// #[tokio::main]
    /// async fn main() -> io::Result<()> {
    ///     let mut stdout = io::stdout();
    ///     stdout.write_all(b"Hello world!").await?;
    ///     Ok(())
    /// }
    /// ```
    ///
    /// The following is an example of using `stdio` with loop.
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let messages = vec!["hello", " world\n"];
    ///
    ///     // When you use `stdio` in a loop, it is recommended to create
    ///     // a single `stdio` instance outside the loop and call a write
    ///     // operation against that instance on each loop.
    ///     //
    ///     // Repeatedly creating `stdout` instances inside the loop and
    ///     // writing to that handle could result in mangled output since
    ///     // each write operation is handled by a different blocking thread.
    ///     let mut stdout = io::stdout();
    ///
    ///     for message in &messages {
    ///         stdout.write_all(message.as_bytes()).await.unwrap();
    ///         stdout.flush().await.unwrap();
    ///     }
    /// }
    /// ```
    pub fn stdout() -> Stdout {
        let std = io::stdout();
        // SAFETY: `Blocking::new` requires that the wrapped type's `Read`
        // implementation not read from the buffer it is lent and report the
        // written length honestly. `std::io::Stdout` is only ever used for
        // writing here, so that contract is upheld.
        let blocking = unsafe { Blocking::new(std) };
        Stdout {
            std: SplitByUtf8BoundaryIfWindows::new(blocking),
        }
    }
}
#[cfg(unix)]
mod sys {
    use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, RawFd};
    use super::Stdout;
    impl AsRawFd for Stdout {
        fn as_raw_fd(&self) -> RawFd {
            // Delegates to the process-wide stdout handle; every `Stdout`
            // instance reports the same underlying descriptor.
            std::io::stdout().as_raw_fd()
        }
    }
    impl AsFd for Stdout {
        fn as_fd(&self) -> BorrowedFd<'_> {
            // SAFETY: the descriptor above is the process's standard output,
            // which is expected to remain open for the life of the process.
            unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) }
        }
    }
}
cfg_windows! {
    use crate::os::windows::io::{AsHandle, BorrowedHandle, AsRawHandle, RawHandle};
    impl AsRawHandle for Stdout {
        fn as_raw_handle(&self) -> RawHandle {
            // Delegates to the process-wide stdout handle.
            std::io::stdout().as_raw_handle()
        }
    }
    impl AsHandle for Stdout {
        fn as_handle(&self) -> BorrowedHandle<'_> {
            // SAFETY: the handle above is the process's standard output, which
            // is expected to remain open for the life of the process.
            unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) }
        }
    }
}
impl AsyncWrite for Stdout {
    // All methods forward to the wrapped
    // `SplitByUtf8BoundaryIfWindows<Blocking<_>>` adapter stack.
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Pin::new(&mut self.std).poll_write(cx, buf)
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Pin::new(&mut self.std).poll_flush(cx)
    }
    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), io::Error>> {
        Pin::new(&mut self.std).poll_shutdown(cx)
    }
}

4
vendor/tokio/src/io/uring/mod.rs vendored Normal file
View File

@@ -0,0 +1,4 @@
pub(crate) mod open;
pub(crate) mod read;
pub(crate) mod utils;
pub(crate) mod write;

59
vendor/tokio/src/io/uring/open.rs vendored Normal file
View File

@@ -0,0 +1,59 @@
use super::utils::cstr;
use crate::fs::UringOpenOptions;
use crate::runtime::driver::op::{CancelData, Cancellable, Completable, CqeResult, Op};
use io_uring::{opcode, types};
use std::ffi::CString;
use std::io::{self, Error};
use std::os::fd::FromRawFd;
use std::path::Path;
/// State for an in-flight `openat` io_uring operation.
#[derive(Debug)]
pub(crate) struct Open {
    /// This field will be read by the kernel during the operation, so we
    /// need to ensure it is valid for the entire duration of the operation.
    #[allow(dead_code)]
    path: CString,
}
impl Completable for Open {
    type Output = io::Result<crate::fs::File>;
    fn complete(self, cqe: CqeResult) -> Self::Output {
        // On success the CQE result is the newly opened raw file descriptor;
        // wrap it in a `File` that takes ownership of it.
        cqe.result
            .map(|fd| unsafe { crate::fs::File::from_raw_fd(fd as i32) })
    }
    fn complete_with_error(self, err: Error) -> Self::Output {
        Err(err)
    }
}
impl Cancellable for Open {
    // Moves this operation's state into `CancelData` when the op is
    // cancelled, keeping the `path` bytes alive for the kernel.
    fn cancel(self) -> CancelData {
        CancelData::Open(self)
    }
}
impl Op<Open> {
    /// Submit a request to open a file.
    pub(crate) fn open(path: &Path, options: &UringOpenOptions) -> io::Result<Op<Open>> {
        let inner_opt = options;
        let path = cstr(path)?;
        let custom_flags = inner_opt.custom_flags;
        // Compose the final open(2) flags: always close-on-exec, plus the
        // access/creation modes derived from `options`, plus caller-supplied
        // custom flags with the access-mode bits masked out so they cannot
        // conflict with `access_mode()`.
        let flags = libc::O_CLOEXEC
            | options.access_mode()?
            | options.creation_mode()?
            | (custom_flags & !libc::O_ACCMODE);
        // `AT_FDCWD` resolves relative paths against the current working
        // directory, matching plain `open(2)` semantics.
        let open_op = opcode::OpenAt::new(types::Fd(libc::AT_FDCWD), path.as_ptr())
            .flags(flags)
            .mode(inner_opt.mode)
            .build();
        // SAFETY: Parameters are valid for the entire duration of the operation
        let op = unsafe { Op::new(open_op, Open { path }) };
        Ok(op)
    }
}

61
vendor/tokio/src/io/uring/read.rs vendored Normal file
View File

@@ -0,0 +1,61 @@
use crate::runtime::driver::op::{CancelData, Cancellable, Completable, CqeResult, Op};
use io_uring::{opcode, types};
use std::io::{self, Error};
use std::os::fd::{AsRawFd, OwnedFd};
/// State for an in-flight io_uring read: owns the fd and the destination
/// buffer so both stay valid while the kernel uses them.
#[derive(Debug)]
pub(crate) struct Read {
    fd: OwnedFd,
    buf: Vec<u8>,
}
impl Completable for Read {
    type Output = (io::Result<u32>, OwnedFd, Vec<u8>);
    fn complete(self, cqe: CqeResult) -> Self::Output {
        let mut buf = self.buf;
        // On success the CQE result is the number of bytes the kernel wrote
        // into the vector's spare capacity; extend the length to cover them.
        if let Ok(len) = cqe.result {
            let new_len = buf.len() + len as usize;
            // SAFETY: Kernel read len bytes
            unsafe { buf.set_len(new_len) };
        }
        (cqe.result, self.fd, buf)
    }
    fn complete_with_error(self, err: Error) -> Self::Output {
        // Hand the fd and buffer back so the caller can reuse them.
        (Err(err), self.fd, self.buf)
    }
}
impl Cancellable for Read {
    // Moves this operation's state into `CancelData` when the op is
    // cancelled, keeping the fd and buffer alive for the kernel.
    fn cancel(self) -> CancelData {
        CancelData::Read(self)
    }
}
impl Op<Read> {
    // Submit a request to read a FD at given length and offset into a
    // dynamic buffer with uninitialized memory. The read happens on uninitialized
    // buffer and no overwriting happens.
    // SAFETY: The `len` of the amount to be read and the buffer that is passed
    // should have capacity > len.
    //
    // If `len` read is higher than vector capacity then setting its length by
    // the caller in terms of size_read can be unsound.
    pub(crate) fn read(fd: OwnedFd, mut buf: Vec<u8>, len: u32, offset: u64) -> Self {
        // don't overwrite on already written part
        assert!(buf.spare_capacity_mut().len() >= len as usize);
        // Kernel writes start at the first uninitialized byte, past any
        // already-initialized contents.
        let buf_mut_ptr = buf.spare_capacity_mut().as_mut_ptr().cast();
        let read_op = opcode::Read::new(types::Fd(fd.as_raw_fd()), buf_mut_ptr, len)
            .offset(offset)
            .build();
        // SAFETY: Parameters are valid for the entire duration of the operation
        unsafe { Op::new(read_op, Read { fd, buf }) }
    }
}

6
vendor/tokio/src/io/uring/utils.rs vendored Normal file
View File

@@ -0,0 +1,6 @@
use std::os::unix::ffi::OsStrExt;
use std::{ffi::CString, io, path::Path};
/// Converts a `Path` into a NUL-terminated `CString` for passing to the
/// kernel.
///
/// Returns an `io::Error` (converted from `NulError`) if the path contains an
/// interior NUL byte.
pub(crate) fn cstr(p: &Path) -> io::Result<CString> {
    let bytes = p.as_os_str().as_bytes();
    CString::new(bytes).map_err(io::Error::from)
}

55
vendor/tokio/src/io/uring/write.rs vendored Normal file
View File

@@ -0,0 +1,55 @@
use crate::runtime::driver::op::{CancelData, Cancellable, Completable, CqeResult, Op};
use crate::util::as_ref::OwnedBuf;
use io_uring::{opcode, types};
use std::io::{self, Error};
use std::os::fd::{AsRawFd, OwnedFd};
/// State for an in-flight io_uring write: owns the source buffer and the fd
/// so both stay valid while the kernel uses them.
#[derive(Debug)]
pub(crate) struct Write {
    buf: OwnedBuf,
    fd: OwnedFd,
}
impl Completable for Write {
    type Output = (io::Result<u32>, OwnedBuf, OwnedFd);
    fn complete(self, cqe: CqeResult) -> Self::Output {
        // On success the CQE result is the number of bytes written; the
        // buffer and fd are always returned for reuse.
        (cqe.result, self.buf, self.fd)
    }
    fn complete_with_error(self, err: Error) -> Self::Output {
        (Err(err), self.buf, self.fd)
    }
}
impl Cancellable for Write {
    // Moves this operation's state into `CancelData` when the op is
    // cancelled, keeping the buffer and fd alive for the kernel.
    fn cancel(self) -> CancelData {
        CancelData::Write(self)
    }
}
impl Op<Write> {
    /// Issue a write that starts at `buf_offset` within `buf` and writes some bytes
    /// into `file` at `file_offset`.
    ///
    /// NOTE(review): assumes `buf_offset <= buf.as_ref().len()`; otherwise the
    /// subtraction below underflows (panicking in debug builds). Callers must
    /// uphold this — confirm at call sites.
    pub(crate) fn write_at(
        fd: OwnedFd,
        buf: OwnedBuf,
        buf_offset: usize,
        file_offset: u64,
    ) -> io::Result<Self> {
        // There is a cap on how many bytes we can write in a single uring write operation.
        // ref: https://github.com/axboe/liburing/discussions/497
        let len = u32::try_from(buf.as_ref().len() - buf_offset).unwrap_or(u32::MAX);
        let ptr = buf.as_ref()[buf_offset..buf_offset + len as usize].as_ptr();
        let sqe = opcode::Write::new(types::Fd(fd.as_raw_fd()), ptr, len)
            .offset(file_offset)
            .build();
        // SAFETY: parameters of the entry, such as `fd` and `buf`, are valid
        // until this operation completes.
        let op = unsafe { Op::new(sqe, Write { buf, fd }) };
        Ok(op)
    }
}

View File

@@ -0,0 +1,357 @@
use crate::io::util::fill_buf::{fill_buf, FillBuf};
use crate::io::util::lines::{lines, Lines};
use crate::io::util::read_line::{read_line, ReadLine};
use crate::io::util::read_until::{read_until, ReadUntil};
use crate::io::util::split::{split, Split};
use crate::io::AsyncBufRead;
cfg_io_util! {
/// An extension trait which adds utility methods to [`AsyncBufRead`] types.
///
/// [`AsyncBufRead`]: crate::io::AsyncBufRead
pub trait AsyncBufReadExt: AsyncBufRead {
/// Reads all bytes into `buf` until the delimiter `byte` or EOF is reached.
///
/// Equivalent to:
///
/// ```ignore
/// async fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> io::Result<usize>;
/// ```
///
/// This function will read bytes from the underlying stream until the
/// delimiter or EOF is found. Once found, all bytes up to, and including,
/// the delimiter (if found) will be appended to `buf`.
///
/// If successful, this function will return the total number of bytes read.
///
/// If this function returns `Ok(0)`, the stream has reached EOF.
///
/// # Errors
///
/// This function will ignore all instances of [`ErrorKind::Interrupted`] and
/// will otherwise return any errors returned by [`fill_buf`].
///
/// If an I/O error is encountered then all bytes read so far will be
/// present in `buf` and its length will have been adjusted appropriately.
///
/// [`fill_buf`]: AsyncBufRead::poll_fill_buf
/// [`ErrorKind::Interrupted`]: std::io::ErrorKind::Interrupted
///
/// # Cancel safety
///
/// If the method is used as the event in a
/// [`tokio::select!`](crate::select) statement and some other branch
/// completes first, then some data may have been partially read. Any
/// partially read bytes are appended to `buf`, and the method can be
/// called again to continue reading until `byte`.
///
/// This method returns the total number of bytes read. If you cancel
/// the call to `read_until` and then call it again to continue reading,
/// the counter is reset.
///
/// # Examples
///
/// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In
/// this example, we use [`Cursor`] to read all the bytes in a byte slice
/// in hyphen delimited segments:
///
/// [`Cursor`]: std::io::Cursor
///
/// ```
/// use tokio::io::AsyncBufReadExt;
///
/// use std::io::Cursor;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let mut cursor = Cursor::new(b"lorem-ipsum");
/// let mut buf = vec![];
///
/// // cursor is at 'l'
/// let num_bytes = cursor.read_until(b'-', &mut buf)
/// .await
/// .expect("reading from cursor won't fail");
///
/// assert_eq!(num_bytes, 6);
/// assert_eq!(buf, b"lorem-");
/// buf.clear();
///
/// // cursor is at 'i'
/// let num_bytes = cursor.read_until(b'-', &mut buf)
/// .await
/// .expect("reading from cursor won't fail");
///
/// assert_eq!(num_bytes, 5);
/// assert_eq!(buf, b"ipsum");
/// buf.clear();
///
/// // cursor is at EOF
/// let num_bytes = cursor.read_until(b'-', &mut buf)
/// .await
/// .expect("reading from cursor won't fail");
/// assert_eq!(num_bytes, 0);
/// assert_eq!(buf, b"");
/// # }
/// ```
    fn read_until<'a>(&'a mut self, byte: u8, buf: &'a mut Vec<u8>) -> ReadUntil<'a, Self>
    where
        Self: Unpin,
    {
        // Delegates to the module-level `read_until` future constructor;
        // the returned future drives the actual buffered read loop.
        read_until(self, byte, buf)
    }
/// Reads all bytes until a newline (the 0xA byte) is reached, and append
/// them to the provided buffer.
///
/// Equivalent to:
///
/// ```ignore
/// async fn read_line(&mut self, buf: &mut String) -> io::Result<usize>;
/// ```
///
/// This function will read bytes from the underlying stream until the
/// newline delimiter (the 0xA byte) or EOF is found. Once found, all bytes
/// up to, and including, the delimiter (if found) will be appended to
/// `buf`.
///
/// If successful, this function will return the total number of bytes read.
///
/// If this function returns `Ok(0)`, the stream has reached EOF.
///
/// # Errors
///
/// This function has the same error semantics as [`read_until`] and will
/// also return an error if the read bytes are not valid UTF-8. If an I/O
/// error is encountered then `buf` may contain some bytes already read in
/// the event that all data read so far was valid UTF-8.
///
/// [`read_until`]: AsyncBufReadExt::read_until
///
/// # Cancel safety
///
/// This method is not cancellation safe. If the method is used as the
/// event in a [`tokio::select!`](crate::select) statement and some
/// other branch completes first, then some data may have been partially
/// read, and this data is lost. There are no guarantees regarding the
/// contents of `buf` when the call is cancelled. The current
/// implementation replaces `buf` with the empty string, but this may
/// change in the future.
///
/// This function does not behave like [`read_until`] because of the
/// requirement that a string contains only valid utf-8. If you need a
/// cancellation safe `read_line`, there are three options:
///
/// * Call [`read_until`] with a newline character and manually perform the utf-8 check.
/// * The stream returned by [`lines`] has a cancellation safe
/// [`next_line`] method.
/// * Use [`tokio_util::codec::LinesCodec`][LinesCodec].
///
/// [LinesCodec]: https://docs.rs/tokio-util/latest/tokio_util/codec/struct.LinesCodec.html
/// [`read_until`]: Self::read_until
/// [`lines`]: Self::lines
/// [`next_line`]: crate::io::Lines::next_line
///
/// # Examples
///
/// [`std::io::Cursor`][`Cursor`] is a type that implements
/// `AsyncBufRead`. In this example, we use [`Cursor`] to read all the
/// lines in a byte slice:
///
/// [`Cursor`]: std::io::Cursor
///
/// ```
/// use tokio::io::AsyncBufReadExt;
///
/// use std::io::Cursor;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let mut cursor = Cursor::new(b"foo\nbar");
/// let mut buf = String::new();
///
/// // cursor is at 'f'
/// let num_bytes = cursor.read_line(&mut buf)
/// .await
/// .expect("reading from cursor won't fail");
///
/// assert_eq!(num_bytes, 4);
/// assert_eq!(buf, "foo\n");
/// buf.clear();
///
/// // cursor is at 'b'
/// let num_bytes = cursor.read_line(&mut buf)
/// .await
/// .expect("reading from cursor won't fail");
///
/// assert_eq!(num_bytes, 3);
/// assert_eq!(buf, "bar");
/// buf.clear();
///
/// // cursor is at EOF
/// let num_bytes = cursor.read_line(&mut buf)
/// .await
/// .expect("reading from cursor won't fail");
///
/// assert_eq!(num_bytes, 0);
/// assert_eq!(buf, "");
/// # }
/// ```
    fn read_line<'a>(&'a mut self, buf: &'a mut String) -> ReadLine<'a, Self>
    where
        Self: Unpin,
    {
        // Delegates to the module-level `read_line` future constructor,
        // which layers UTF-8 validation on top of `read_until(b'\n', ..)`.
        read_line(self, buf)
    }
/// Returns a stream of the contents of this reader split on the byte
/// `byte`.
///
/// This method is the asynchronous equivalent to
/// [`BufRead::split`](std::io::BufRead::split).
///
/// The stream returned from this function will yield instances of
/// [`io::Result`]`<`[`Option`]`<`[`Vec<u8>`]`>>`. Each vector returned will *not* have
/// the delimiter byte at the end.
///
/// [`io::Result`]: std::io::Result
/// [`Option`]: core::option::Option
/// [`Vec<u8>`]: std::vec::Vec
///
/// # Errors
///
/// Each item of the stream has the same error semantics as
/// [`AsyncBufReadExt::read_until`](AsyncBufReadExt::read_until).
///
/// # Examples
///
/// ```
/// # use tokio::io::AsyncBufRead;
/// use tokio::io::AsyncBufReadExt;
///
/// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> {
/// let mut segments = my_buf_read.split(b'f');
///
/// while let Some(segment) = segments.next_segment().await? {
/// println!("length = {}", segment.len())
/// }
/// # Ok(())
/// # }
/// ```
    fn split(self, byte: u8) -> Split<Self>
    where
        Self: Sized + Unpin,
    {
        // Consumes `self` and wraps it in the `Split` stream adapter
        // constructed by the module-level `split` helper.
        split(self, byte)
    }
/// Returns the contents of the internal buffer, filling it with more
/// data from the inner reader if it is empty.
///
/// This function is a lower-level call. It needs to be paired with the
/// [`consume`] method to function properly. When calling this method,
/// none of the contents will be "read" in the sense that later calling
/// `read` may return the same contents. As such, [`consume`] must be
/// called with the number of bytes that are consumed from this buffer
/// to ensure that the bytes are never returned twice.
///
/// An empty buffer returned indicates that the stream has reached EOF.
///
/// Equivalent to:
///
/// ```ignore
/// async fn fill_buf(&mut self) -> io::Result<&[u8]>;
/// ```
///
/// # Errors
///
/// This function will return an I/O error if the underlying reader was
/// read, but returned an error.
///
/// # Cancel safety
///
/// This method is cancel safe. If you use it as the event in a
/// [`tokio::select!`](crate::select) statement and some other branch
/// completes first, then it is guaranteed that no data was read.
///
/// [`consume`]: crate::io::AsyncBufReadExt::consume
    fn fill_buf(&mut self) -> FillBuf<'_, Self>
    where
        Self: Unpin,
    {
        // Delegates to the module-level `fill_buf` future constructor; the
        // future resolves to the internal buffer via `poll_fill_buf`.
        fill_buf(self)
    }
/// Tells this buffer that `amt` bytes have been consumed from the
/// buffer, so they should no longer be returned in calls to [`read`].
///
/// This function is a lower-level call. It needs to be paired with the
/// [`fill_buf`] method to function properly. This function does not
/// perform any I/O, it simply informs this object that some amount of
/// its buffer, returned from [`fill_buf`], has been consumed and should
/// no longer be returned. As such, this function may do odd things if
/// [`fill_buf`] isn't called before calling it.
///
/// The `amt` must be less than the number of bytes in the buffer
/// returned by [`fill_buf`].
///
/// [`read`]: crate::io::AsyncReadExt::read
/// [`fill_buf`]: crate::io::AsyncBufReadExt::fill_buf
    fn consume(&mut self, amt: usize)
    where
        Self: Unpin,
    {
        // `AsyncBufRead::consume` takes `Pin<&mut Self>`; since `Self: Unpin`
        // we can pin the plain `&mut self` on the spot and forward.
        std::pin::Pin::new(self).consume(amt);
    }
/// Returns a stream over the lines of this reader.
/// This method is the async equivalent to [`BufRead::lines`](std::io::BufRead::lines).
///
/// The stream returned from this function will yield instances of
/// [`io::Result`]`<`[`Option`]`<`[`String`]`>>`. Each string returned will *not* have a newline
/// byte (the 0xA byte) or `CRLF` (0xD, 0xA bytes) at the end.
///
/// [`io::Result`]: std::io::Result
/// [`Option`]: core::option::Option
/// [`String`]: String
///
/// # Errors
///
/// Each line of the stream has the same error semantics as [`AsyncBufReadExt::read_line`].
///
/// # Examples
///
/// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In
/// this example, we use [`Cursor`] to iterate over all the lines in a byte
/// slice.
///
/// [`Cursor`]: std::io::Cursor
///
/// ```
/// use tokio::io::AsyncBufReadExt;
///
/// use std::io::Cursor;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let cursor = Cursor::new(b"lorem\nipsum\r\ndolor");
///
/// let mut lines = cursor.lines();
///
/// assert_eq!(lines.next_line().await.unwrap(), Some(String::from("lorem")));
/// assert_eq!(lines.next_line().await.unwrap(), Some(String::from("ipsum")));
/// assert_eq!(lines.next_line().await.unwrap(), Some(String::from("dolor")));
/// assert_eq!(lines.next_line().await.unwrap(), None);
/// # }
/// ```
///
/// [`AsyncBufReadExt::read_line`]: AsyncBufReadExt::read_line
    fn lines(self) -> Lines<Self>
    where
        Self: Sized,
    {
        // Consumes `self` and wraps it in the `Lines` stream adapter
        // constructed by the module-level `lines` helper.
        lines(self)
    }
}
}
impl<R: AsyncBufRead + ?Sized> AsyncBufReadExt for R {}

1454
vendor/tokio/src/io/util/async_read_ext.rs vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,96 @@
use crate::io::seek::{seek, Seek};
use crate::io::AsyncSeek;
use std::io::SeekFrom;
cfg_io_util! {
    /// An extension trait that adds utility methods to [`AsyncSeek`] types.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::io::{self, Cursor, SeekFrom};
    /// use tokio::io::{AsyncSeekExt, AsyncReadExt};
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() -> io::Result<()> {
    /// let mut cursor = Cursor::new(b"abcdefg");
    ///
    /// // the `seek` method is defined by this trait
    /// cursor.seek(SeekFrom::Start(3)).await?;
    ///
    /// let mut buf = [0; 1];
    /// let n = cursor.read(&mut buf).await?;
    /// assert_eq!(n, 1);
    /// assert_eq!(buf, [b'd']);
    ///
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// See [module][crate::io] documentation for more details.
    ///
    /// [`AsyncSeek`]: AsyncSeek
    pub trait AsyncSeekExt: AsyncSeek {
        /// Creates a future which will seek an IO object, and then yield the
        /// new position in the object and the object itself.
        ///
        /// Equivalent to:
        ///
        /// ```ignore
        /// async fn seek(&mut self, pos: SeekFrom) -> io::Result<u64>;
        /// ```
        ///
        /// In the case of an error the buffer and the object will be discarded, with
        /// the error yielded.
        ///
        /// # Examples
        ///
        /// ```no_run
        /// # #[cfg(not(target_family = "wasm"))]
        /// # {
        /// use tokio::fs::File;
        /// use tokio::io::{AsyncSeekExt, AsyncReadExt};
        ///
        /// use std::io::SeekFrom;
        ///
        /// # async fn dox() -> std::io::Result<()> {
        /// let mut file = File::open("foo.txt").await?;
        /// file.seek(SeekFrom::Start(6)).await?;
        ///
        /// let mut contents = vec![0u8; 10];
        /// file.read_exact(&mut contents).await?;
        /// # Ok(())
        /// # }
        /// # }
        /// ```
        fn seek(&mut self, pos: SeekFrom) -> Seek<'_, Self>
        where
            Self: Unpin,
        {
            // Delegates to the module-level `seek` future constructor.
            seek(self, pos)
        }

        /// Creates a future which will rewind to the beginning of the stream.
        ///
        /// This is convenience method, equivalent to `self.seek(SeekFrom::Start(0))`.
        fn rewind(&mut self) -> Seek<'_, Self>
        where
            Self: Unpin,
        {
            self.seek(SeekFrom::Start(0))
        }

        /// Creates a future which will return the current seek position from the
        /// start of the stream.
        ///
        /// This is equivalent to `self.seek(SeekFrom::Current(0))`.
        fn stream_position(&mut self) -> Seek<'_, Self>
        where
            Self: Unpin,
        {
            self.seek(SeekFrom::Current(0))
        }
    }
}
impl<S: AsyncSeek + ?Sized> AsyncSeekExt for S {}

File diff suppressed because it is too large Load Diff

311
vendor/tokio/src/io/util/buf_reader.rs vendored Normal file
View File

@@ -0,0 +1,311 @@
use crate::io::util::DEFAULT_BUF_SIZE;
use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf};
use pin_project_lite::pin_project;
use std::io::{self, IoSlice, SeekFrom};
use std::pin::Pin;
use std::task::{ready, Context, Poll};
use std::{cmp, fmt, mem};
pin_project! {
    /// The `BufReader` struct adds buffering to any reader.
    ///
    /// It can be excessively inefficient to work directly with a [`AsyncRead`]
    /// instance. A `BufReader` performs large, infrequent reads on the underlying
    /// [`AsyncRead`] and maintains an in-memory buffer of the results.
    ///
    /// `BufReader` can improve the speed of programs that make *small* and
    /// *repeated* read calls to the same file or network socket. It does not
    /// help when reading very large amounts at once, or reading just one or a few
    /// times. It also provides no advantage when reading from a source that is
    /// already in memory, like a `Vec<u8>`.
    ///
    /// When the `BufReader` is dropped, the contents of its buffer will be
    /// discarded. Creating multiple instances of a `BufReader` on the same
    /// stream can cause data loss.
    #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
    pub struct BufReader<R> {
        // The wrapped reader; pinned because `R` may be `!Unpin`.
        #[pin]
        pub(super) inner: R,
        // Fixed-size storage; the valid, unread bytes live in `buf[pos..cap]`.
        pub(super) buf: Box<[u8]>,
        // Index of the next unread byte in `buf`.
        pub(super) pos: usize,
        // Number of valid bytes in `buf` (`pos <= cap <= buf.len()`).
        pub(super) cap: usize,
        // Progress of an in-flight seek (see the `AsyncSeek` impl below).
        pub(super) seek_state: SeekState,
    }
}
impl<R: AsyncRead> BufReader<R> {
    /// Creates a new `BufReader` with a default buffer capacity. The default is currently 8 KB,
    /// but may change in the future.
    pub fn new(inner: R) -> Self {
        Self::with_capacity(DEFAULT_BUF_SIZE, inner)
    }

    /// Creates a new `BufReader` with the specified buffer capacity.
    pub fn with_capacity(capacity: usize, inner: R) -> Self {
        // The buffer starts zero-filled and logically empty (`pos == cap == 0`).
        Self {
            inner,
            buf: vec![0; capacity].into_boxed_slice(),
            pos: 0,
            cap: 0,
            seek_state: SeekState::Init,
        }
    }

    /// Gets a reference to the underlying reader.
    ///
    /// It is inadvisable to directly read from the underlying reader.
    pub fn get_ref(&self) -> &R {
        &self.inner
    }

    /// Gets a mutable reference to the underlying reader.
    ///
    /// It is inadvisable to directly read from the underlying reader.
    pub fn get_mut(&mut self) -> &mut R {
        &mut self.inner
    }

    /// Gets a pinned mutable reference to the underlying reader.
    ///
    /// It is inadvisable to directly read from the underlying reader.
    pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> {
        self.project().inner
    }

    /// Consumes this `BufReader`, returning the underlying reader.
    ///
    /// Note that any leftover data in the internal buffer is lost.
    pub fn into_inner(self) -> R {
        self.inner
    }

    /// Returns a reference to the internally buffered data.
    ///
    /// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty.
    pub fn buffer(&self) -> &[u8] {
        &self.buf[self.pos..self.cap]
    }

    /// Invalidates all data in the internal buffer.
    #[inline]
    fn discard_buffer(self: Pin<&mut Self>) {
        // Resetting both cursors makes `buf[pos..cap]` the empty slice.
        let this = self.project();
        *this.pos = 0;
        *this.cap = 0;
    }
}
impl<R: AsyncRead> AsyncRead for BufReader<R> {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // If we don't have any buffered data and we're doing a massive read
        // (larger than our internal buffer), bypass our internal buffer
        // entirely.
        if self.pos == self.cap && buf.remaining() >= self.buf.len() {
            let res = ready!(self.as_mut().get_pin_mut().poll_read(cx, buf));
            self.discard_buffer();
            return Poll::Ready(res);
        }
        // Otherwise refill (if needed) and serve the read from the buffer,
        // copying at most `buf.remaining()` bytes.
        let rem = ready!(self.as_mut().poll_fill_buf(cx))?;
        let amt = std::cmp::min(rem.len(), buf.remaining());
        buf.put_slice(&rem[..amt]);
        self.consume(amt);
        Poll::Ready(Ok(()))
    }
}
impl<R: AsyncRead> AsyncBufRead for BufReader<R> {
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        let me = self.project();

        // If we've reached the end of our internal buffer then we need to fetch
        // some more data from the underlying reader.
        // Branch using `>=` instead of the more correct `==`
        // to tell the compiler that the pos..cap slice is always valid.
        if *me.pos >= *me.cap {
            debug_assert!(*me.pos == *me.cap);
            let mut buf = ReadBuf::new(me.buf);
            ready!(me.inner.poll_read(cx, &mut buf))?;
            *me.cap = buf.filled().len();
            *me.pos = 0;
        }
        Poll::Ready(Ok(&me.buf[*me.pos..*me.cap]))
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        // Clamp to `cap` so a caller passing too large an `amt` cannot move
        // `pos` past the valid region.
        let me = self.project();
        *me.pos = cmp::min(*me.pos + amt, *me.cap);
    }
}
/// Tracks the progress of a (possibly multi-step) seek on `BufReader`.
#[derive(Debug, Clone, Copy)]
pub(super) enum SeekState {
    /// `start_seek` has not been called.
    Init,
    /// `start_seek` has been called, but `poll_complete` has not yet been called.
    Start(SeekFrom),
    /// Waiting for completion of the first `poll_complete` in the `n.checked_sub(remainder).is_none()` branch.
    PendingOverflowed(i64),
    /// Waiting for completion of `poll_complete`.
    Pending,
}
/// Seeks to an offset, in bytes, in the underlying reader.
///
/// The position used for seeking with `SeekFrom::Current(_)` is the
/// position the underlying reader would be at if the `BufReader` had no
/// internal buffer.
///
/// Seeking always discards the internal buffer, even if the seek position
/// would otherwise fall within it. This guarantees that calling
/// `.into_inner()` immediately after a seek yields the underlying reader
/// at the same position.
///
/// See [`AsyncSeek`] for more details.
///
/// Note: In the edge case where you're seeking with `SeekFrom::Current(n)`
/// where `n` minus the internal buffer length overflows an `i64`, two
/// seeks will be performed instead of one. If the second seek returns
/// `Err`, the underlying reader will be left at the same position it would
/// have if you called `seek` with `SeekFrom::Current(0)`.
impl<R: AsyncRead + AsyncSeek> AsyncSeek for BufReader<R> {
    fn start_seek(self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> {
        // We need to call the seek operation multiple times.
        // And we should always call both start_seek and poll_complete,
        // as start_seek alone cannot guarantee that the operation will be completed.
        // poll_complete receives a Context and returns a Poll, so it cannot be called
        // inside start_seek.
        *self.project().seek_state = SeekState::Start(pos);
        Ok(())
    }

    fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
        // Take the current state, leaving `Init` behind; the state is
        // re-stored below whenever the operation has to continue later.
        let res = match mem::replace(self.as_mut().project().seek_state, SeekState::Init) {
            SeekState::Init => {
                // 1.x AsyncSeek recommends calling poll_complete before start_seek.
                // We don't have to guarantee that the value returned by
                // poll_complete called without start_seek is correct,
                // so we'll return 0.
                return Poll::Ready(Ok(0));
            }
            SeekState::Start(SeekFrom::Current(n)) => {
                // The caller's offset is relative to the *logical* position,
                // which trails the inner reader by the unread buffered bytes.
                let remainder = (self.cap - self.pos) as i64;
                // it should be safe to assume that remainder fits within an i64 as the alternative
                // means we managed to allocate 8 exbibytes and that's absurd.
                // But it's not out of the realm of possibility for some weird underlying reader to
                // support seeking by i64::MIN so we need to handle underflow when subtracting
                // remainder.
                if let Some(offset) = n.checked_sub(remainder) {
                    self.as_mut()
                        .get_pin_mut()
                        .start_seek(SeekFrom::Current(offset))?;
                } else {
                    // seek backwards by our remainder, and then by the offset
                    self.as_mut()
                        .get_pin_mut()
                        .start_seek(SeekFrom::Current(-remainder))?;
                    if self.as_mut().get_pin_mut().poll_complete(cx)?.is_pending() {
                        *self.as_mut().project().seek_state = SeekState::PendingOverflowed(n);
                        return Poll::Pending;
                    }

                    // https://github.com/rust-lang/rust/pull/61157#issuecomment-495932676
                    self.as_mut().discard_buffer();

                    self.as_mut()
                        .get_pin_mut()
                        .start_seek(SeekFrom::Current(n))?;
                }
                self.as_mut().get_pin_mut().poll_complete(cx)?
            }
            SeekState::PendingOverflowed(n) => {
                // Resume the second half of the two-seek sequence started in
                // the overflow branch above.
                if self.as_mut().get_pin_mut().poll_complete(cx)?.is_pending() {
                    *self.as_mut().project().seek_state = SeekState::PendingOverflowed(n);
                    return Poll::Pending;
                }

                // https://github.com/rust-lang/rust/pull/61157#issuecomment-495932676
                self.as_mut().discard_buffer();

                self.as_mut()
                    .get_pin_mut()
                    .start_seek(SeekFrom::Current(n))?;
                self.as_mut().get_pin_mut().poll_complete(cx)?
            }
            SeekState::Start(pos) => {
                // Seeking with Start/End doesn't care about our buffer length.
                self.as_mut().get_pin_mut().start_seek(pos)?;
                self.as_mut().get_pin_mut().poll_complete(cx)?
            }
            SeekState::Pending => self.as_mut().get_pin_mut().poll_complete(cx)?,
        };

        match res {
            Poll::Ready(res) => {
                // The seek finished: buffered bytes no longer correspond to
                // the reader's new position, so drop them.
                self.discard_buffer();
                Poll::Ready(Ok(res))
            }
            Poll::Pending => {
                *self.as_mut().project().seek_state = SeekState::Pending;
                Poll::Pending
            }
        }
    }
}
// Write operations pass straight through to the wrapped I/O object; the
// read buffer is not involved in writing.
impl<R: AsyncRead + AsyncWrite> AsyncWrite for BufReader<R> {
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        self.get_pin_mut().poll_write(cx, buf)
    }

    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        self.get_pin_mut().poll_write_vectored(cx, bufs)
    }

    fn is_write_vectored(&self) -> bool {
        self.get_ref().is_write_vectored()
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.get_pin_mut().poll_flush(cx)
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.get_pin_mut().poll_shutdown(cx)
    }
}
impl<R: fmt::Debug> fmt::Debug for BufReader<R> {
    // Renders as `BufReader { reader: .., buffer: <unread>/<capacity> }`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let unread = self.cap - self.pos;
        let mut s = f.debug_struct("BufReader");
        s.field("reader", &self.inner);
        s.field("buffer", &format_args!("{}/{}", unread, self.buf.len()));
        s.finish()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Compile-time check that `BufReader<()>` is `Unpin`, so users can move
    // it freely before pinning.
    #[test]
    fn assert_unpin() {
        crate::is_unpin::<BufReader<()>>();
    }
}

207
vendor/tokio/src/io/util/buf_stream.rs vendored Normal file
View File

@@ -0,0 +1,207 @@
use crate::io::util::{BufReader, BufWriter};
use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf};
use pin_project_lite::pin_project;
use std::io::{self, IoSlice, SeekFrom};
use std::pin::Pin;
use std::task::{Context, Poll};
pin_project! {
    /// Wraps a type that is [`AsyncWrite`] and [`AsyncRead`], and buffers its input and output.
    ///
    /// It can be excessively inefficient to work directly with something that implements [`AsyncWrite`]
    /// and [`AsyncRead`]. For example, every `write`, however small, has to traverse the syscall
    /// interface, and similarly, every read has to do the same. The [`BufWriter`] and [`BufReader`]
    /// types aid with these problems respectively, but do so in only one direction. `BufStream` wraps
    /// one in the other so that both directions are buffered. See their documentation for details.
    #[derive(Debug)]
    #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
    pub struct BufStream<RW> {
        // Reads are buffered by the outer `BufReader`, writes by the inner
        // `BufWriter`; both layers wrap the same underlying stream.
        #[pin]
        inner: BufReader<BufWriter<RW>>,
    }
}
impl<RW: AsyncRead + AsyncWrite> BufStream<RW> {
    /// Wraps a type in both [`BufWriter`] and [`BufReader`].
    ///
    /// See the documentation for those types and [`BufStream`] for details.
    pub fn new(stream: RW) -> BufStream<RW> {
        BufStream {
            inner: BufReader::new(BufWriter::new(stream)),
        }
    }

    /// Creates a `BufStream` with the specified [`BufReader`] capacity and [`BufWriter`]
    /// capacity.
    ///
    /// See the documentation for those types and [`BufStream`] for details.
    pub fn with_capacity(
        reader_capacity: usize,
        writer_capacity: usize,
        stream: RW,
    ) -> BufStream<RW> {
        BufStream {
            inner: BufReader::with_capacity(
                reader_capacity,
                BufWriter::with_capacity(writer_capacity, stream),
            ),
        }
    }

    /// Gets a reference to the underlying I/O object.
    ///
    /// It is inadvisable to directly read from the underlying I/O object.
    pub fn get_ref(&self) -> &RW {
        // Unwrap both buffering layers.
        self.inner.get_ref().get_ref()
    }

    /// Gets a mutable reference to the underlying I/O object.
    ///
    /// It is inadvisable to directly read from the underlying I/O object.
    pub fn get_mut(&mut self) -> &mut RW {
        self.inner.get_mut().get_mut()
    }

    /// Gets a pinned mutable reference to the underlying I/O object.
    ///
    /// It is inadvisable to directly read from the underlying I/O object.
    pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut RW> {
        self.project().inner.get_pin_mut().get_pin_mut()
    }

    /// Consumes this `BufStream`, returning the underlying I/O object.
    ///
    /// Note that any leftover data in the internal buffer is lost.
    pub fn into_inner(self) -> RW {
        self.inner.into_inner().into_inner()
    }
}
impl<RW> From<BufReader<BufWriter<RW>>> for BufStream<RW> {
fn from(b: BufReader<BufWriter<RW>>) -> Self {
BufStream { inner: b }
}
}
impl<RW> From<BufWriter<BufReader<RW>>> for BufStream<RW> {
    fn from(b: BufWriter<BufReader<RW>>) -> Self {
        // we need to "invert" the reader and writer:
        // destructure both layers and rebuild them in the canonical
        // `BufReader<BufWriter<RW>>` order, carrying over every buffer,
        // cursor, and seek-state field so no buffered data is lost.
        let BufWriter {
            inner:
                BufReader {
                    inner,
                    buf: rbuf,
                    pos,
                    cap,
                    seek_state: rseek_state,
                },
            buf: wbuf,
            written,
            seek_state: wseek_state,
        } = b;

        BufStream {
            inner: BufReader {
                inner: BufWriter {
                    inner,
                    buf: wbuf,
                    written,
                    seek_state: wseek_state,
                },
                buf: rbuf,
                pos,
                cap,
                seek_state: rseek_state,
            },
        }
    }
}
// All write operations are forwarded through the `BufReader` layer (which
// passes writes straight through) to the buffering `BufWriter`.
impl<RW: AsyncRead + AsyncWrite> AsyncWrite for BufStream<RW> {
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        self.project().inner.poll_write(cx, buf)
    }

    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        self.project().inner.poll_write_vectored(cx, bufs)
    }

    fn is_write_vectored(&self) -> bool {
        self.inner.is_write_vectored()
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.project().inner.poll_flush(cx)
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.project().inner.poll_shutdown(cx)
    }
}
// Reads are served by the outer `BufReader` layer.
impl<RW: AsyncRead + AsyncWrite> AsyncRead for BufStream<RW> {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        self.project().inner.poll_read(cx, buf)
    }
}
/// Seek to an offset, in bytes, in the underlying stream.
///
/// The position used for seeking with `SeekFrom::Current(_)` is the
/// position the underlying stream would be at if the `BufStream` had no
/// internal buffer.
///
/// Seeking always discards the internal buffer, even if the seek position
/// would otherwise fall within it. This guarantees that calling
/// `.into_inner()` immediately after a seek yields the underlying reader
/// at the same position.
///
/// See [`AsyncSeek`] for more details.
///
/// Note: In the edge case where you're seeking with `SeekFrom::Current(n)`
/// where `n` minus the internal buffer length overflows an `i64`, two
/// seeks will be performed instead of one. If the second seek returns
/// `Err`, the underlying reader will be left at the same position it would
/// have if you called `seek` with `SeekFrom::Current(0)`.
impl<RW: AsyncRead + AsyncWrite + AsyncSeek> AsyncSeek for BufStream<RW> {
    fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> io::Result<()> {
        // Delegation runs through both buffering layers, each of which
        // implements `AsyncSeek` (see `BufReader` and `BufWriter`).
        self.project().inner.start_seek(position)
    }

    fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
        self.project().inner.poll_complete(cx)
    }
}
// Buffered-read operations are served by the outer `BufReader` layer.
impl<RW: AsyncRead + AsyncWrite> AsyncBufRead for BufStream<RW> {
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        self.project().inner.poll_fill_buf(cx)
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        self.project().inner.consume(amt);
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Compile-time check that `BufStream<()>` is `Unpin`, so users can move
    // it freely before pinning.
    #[test]
    fn assert_unpin() {
        crate::is_unpin::<BufStream<()>>();
    }
}

310
vendor/tokio/src/io/util/buf_writer.rs vendored Normal file
View File

@@ -0,0 +1,310 @@
use crate::io::util::DEFAULT_BUF_SIZE;
use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf};
use pin_project_lite::pin_project;
use std::fmt;
use std::io::{self, IoSlice, SeekFrom, Write};
use std::pin::Pin;
use std::task::{ready, Context, Poll};
pin_project! {
    /// Wraps a writer and buffers its output.
    ///
    /// It can be excessively inefficient to work directly with something that
    /// implements [`AsyncWrite`]. A `BufWriter` keeps an in-memory buffer of data and
    /// writes it to an underlying writer in large, infrequent batches.
    ///
    /// `BufWriter` can improve the speed of programs that make *small* and
    /// *repeated* write calls to the same file or network socket. It does not
    /// help when writing very large amounts at once, or writing just one or a few
    /// times. It also provides no advantage when writing to a destination that is
    /// in memory, like a `Vec<u8>`.
    ///
    /// When the `BufWriter` is dropped, the contents of its buffer will be
    /// discarded. Creating multiple instances of a `BufWriter` on the same
    /// stream can cause data loss. If you need to write out the contents of its
    /// buffer, you must manually call flush before the writer is dropped.
    ///
    /// [`AsyncWrite`]: AsyncWrite
    /// [`flush`]: super::AsyncWriteExt::flush
    ///
    #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
    pub struct BufWriter<W> {
        // The wrapped writer; pinned because `W` may be `!Unpin`.
        #[pin]
        pub(super) inner: W,
        // Bytes accepted from callers but not yet written to `inner`.
        pub(super) buf: Vec<u8>,
        // How many bytes of `buf` a partially-completed flush has written.
        pub(super) written: usize,
        // Progress of an in-flight seek (see the `AsyncSeek` impl below).
        pub(super) seek_state: SeekState,
    }
}
impl<W: AsyncWrite> BufWriter<W> {
    /// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB,
    /// but may change in the future.
    pub fn new(inner: W) -> Self {
        Self::with_capacity(DEFAULT_BUF_SIZE, inner)
    }

    /// Creates a new `BufWriter` with the specified buffer capacity.
    pub fn with_capacity(cap: usize, inner: W) -> Self {
        Self {
            inner,
            buf: Vec::with_capacity(cap),
            written: 0,
            seek_state: SeekState::Init,
        }
    }

    /// Writes the buffered data to the underlying writer, looping until all
    /// of it is written, an error occurs, or the writer returns `Pending`.
    fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        let mut me = self.project();

        let len = me.buf.len();
        let mut ret = Ok(());
        // `written` is a struct field, so a flush interrupted by `Pending`
        // (via `ready!`) resumes where it left off instead of re-writing.
        while *me.written < len {
            match ready!(me.inner.as_mut().poll_write(cx, &me.buf[*me.written..])) {
                Ok(0) => {
                    // A zero-length write means no progress is possible;
                    // report `WriteZero` instead of looping forever.
                    ret = Err(io::Error::new(
                        io::ErrorKind::WriteZero,
                        "failed to write the buffered data",
                    ));
                    break;
                }
                Ok(n) => *me.written += n,
                Err(e) => {
                    ret = Err(e);
                    break;
                }
            }
        }
        // Even on error, drop whatever was successfully written so the
        // buffer only retains bytes that still need to go out.
        if *me.written > 0 {
            me.buf.drain(..*me.written);
        }
        *me.written = 0;
        Poll::Ready(ret)
    }

    /// Gets a reference to the underlying writer.
    pub fn get_ref(&self) -> &W {
        &self.inner
    }

    /// Gets a mutable reference to the underlying writer.
    ///
    /// It is inadvisable to directly write to the underlying writer.
    pub fn get_mut(&mut self) -> &mut W {
        &mut self.inner
    }

    /// Gets a pinned mutable reference to the underlying writer.
    ///
    /// It is inadvisable to directly write to the underlying writer.
    pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> {
        self.project().inner
    }

    /// Consumes this `BufWriter`, returning the underlying writer.
    ///
    /// Note that any leftover data in the internal buffer is lost.
    pub fn into_inner(self) -> W {
        self.inner
    }

    /// Returns a reference to the internally buffered data.
    pub fn buffer(&self) -> &[u8] {
        &self.buf
    }
}
impl<W: AsyncWrite> AsyncWrite for BufWriter<W> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        // Flush first if the incoming bytes would not fit in the remaining
        // buffer capacity.
        if self.buf.len() + buf.len() > self.buf.capacity() {
            ready!(self.as_mut().flush_buf(cx))?;
        }

        let me = self.project();
        if buf.len() >= me.buf.capacity() {
            // Writes at least as large as the buffer bypass it entirely.
            me.inner.poll_write(cx, buf)
        } else {
            Poll::Ready(me.buf.write(buf))
        }
    }

    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        mut bufs: &[IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        if self.inner.is_write_vectored() {
            let total_len = bufs
                .iter()
                .fold(0usize, |acc, b| acc.saturating_add(b.len()));
            if total_len > self.buf.capacity() - self.buf.len() {
                ready!(self.as_mut().flush_buf(cx))?;
            }
            let me = self.as_mut().project();
            if total_len >= me.buf.capacity() {
                // It's more efficient to pass the slices directly to the
                // underlying writer than to buffer them.
                // The case when the total_len calculation saturates at
                // usize::MAX is also handled here.
                me.inner.poll_write_vectored(cx, bufs)
            } else {
                // Everything fits: coalesce all slices into the buffer.
                bufs.iter().for_each(|b| me.buf.extend_from_slice(b));
                Poll::Ready(Ok(total_len))
            }
        } else {
            // Remove empty buffers at the beginning of bufs.
            while bufs.first().map(|buf| buf.len()) == Some(0) {
                bufs = &bufs[1..];
            }
            if bufs.is_empty() {
                return Poll::Ready(Ok(0));
            }
            // Flush if the first buffer doesn't fit.
            let first_len = bufs[0].len();
            if first_len > self.buf.capacity() - self.buf.len() {
                ready!(self.as_mut().flush_buf(cx))?;
                debug_assert!(self.buf.is_empty());
            }
            let me = self.as_mut().project();
            if first_len >= me.buf.capacity() {
                // The slice is at least as large as the buffering capacity,
                // so it's better to write it directly, bypassing the buffer.
                debug_assert!(me.buf.is_empty());
                return me.inner.poll_write(cx, &bufs[0]);
            } else {
                me.buf.extend_from_slice(&bufs[0]);
                bufs = &bufs[1..];
            }
            let mut total_written = first_len;
            debug_assert!(total_written != 0);
            // Append the buffers that fit in the internal buffer.
            for buf in bufs {
                if buf.len() > me.buf.capacity() - me.buf.len() {
                    break;
                } else {
                    me.buf.extend_from_slice(buf);
                    total_written += buf.len();
                }
            }
            Poll::Ready(Ok(total_written))
        }
    }

    fn is_write_vectored(&self) -> bool {
        // Vectored writes are always "supported": they are either forwarded
        // to the inner writer or coalesced into the internal buffer above.
        true
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        // Drain our own buffer first, then flush the inner writer.
        ready!(self.as_mut().flush_buf(cx))?;
        self.get_pin_mut().poll_flush(cx)
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        // Drain our own buffer before shutting the inner writer down.
        ready!(self.as_mut().flush_buf(cx))?;
        self.get_pin_mut().poll_shutdown(cx)
    }
}
/// Tracks the progress of an in-flight seek on `BufWriter`.
#[derive(Debug, Clone, Copy)]
pub(super) enum SeekState {
    /// `start_seek` has not been called.
    Init,
    /// `start_seek` has been called, but `poll_complete` has not yet been called.
    Start(SeekFrom),
    /// Waiting for completion of `poll_complete`.
    Pending,
}
/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
impl<W: AsyncWrite + AsyncSeek> AsyncSeek for BufWriter<W> {
    fn start_seek(self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> {
        // We need to flush the internal buffer before seeking.
        // It receives a `Context` and returns a `Poll`, so it cannot be called
        // inside `start_seek`. Instead, record the target position and defer
        // both the flush and the inner seek to `poll_complete`.
        *self.project().seek_state = SeekState::Start(pos);
        Ok(())
    }

    fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
        let pos = match self.seek_state {
            // We did not start a seek ourselves; just forward to the inner
            // seeker's completion.
            SeekState::Init => {
                return self.project().inner.poll_complete(cx);
            }
            // `start_seek` recorded a new target position.
            SeekState::Start(pos) => Some(pos),
            // A seek we started on a previous poll is still in flight.
            SeekState::Pending => None,
        };

        // Flush the internal buffer before seeking.
        ready!(self.as_mut().flush_buf(cx))?;

        let mut me = self.project();
        if let Some(pos) = pos {
            // Ensure previous seeks have finished before starting a new one
            ready!(me.inner.as_mut().poll_complete(cx))?;

            if let Err(e) = me.inner.as_mut().start_seek(pos) {
                *me.seek_state = SeekState::Init;
                return Poll::Ready(Err(e));
            }
        }
        match me.inner.poll_complete(cx) {
            Poll::Ready(res) => {
                // The inner seek finished (successfully or not): reset.
                *me.seek_state = SeekState::Init;
                Poll::Ready(res)
            }
            Poll::Pending => {
                // Remember that a seek is in flight so a later call resumes
                // it instead of flushing/starting again.
                *me.seek_state = SeekState::Pending;
                Poll::Pending
            }
        }
    }
}
impl<W: AsyncWrite + AsyncRead> AsyncRead for BufWriter<W> {
    /// Reads are not buffered here; they go straight to the inner I/O object.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        let inner = self.get_pin_mut();
        inner.poll_read(cx, buf)
    }
}
impl<W: AsyncWrite + AsyncBufRead> AsyncBufRead for BufWriter<W> {
    /// Buffered reads are forwarded untouched; only writes are buffered by
    /// this type.
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        let inner = self.get_pin_mut();
        inner.poll_fill_buf(cx)
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        let inner = self.get_pin_mut();
        inner.consume(amt);
    }
}
impl<W: fmt::Debug> fmt::Debug for BufWriter<W> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut s = f.debug_struct("BufWriter");
        s.field("writer", &self.inner);
        // Render buffer occupancy as "used/capacity" rather than dumping the
        // raw bytes.
        s.field(
            "buffer",
            &format_args!("{}/{}", self.buf.len(), self.buf.capacity()),
        );
        s.field("written", &self.written);
        s.finish()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Compile-time check that `BufWriter` does not accidentally become
    // `!Unpin`.
    #[test]
    fn assert_unpin() {
        crate::is_unpin::<BufWriter<()>>();
    }
}

144
vendor/tokio/src/io/util/chain.rs vendored Normal file
View File

@@ -0,0 +1,144 @@
use crate::io::{AsyncBufRead, AsyncRead, ReadBuf};
use pin_project_lite::pin_project;
use std::fmt;
use std::io;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
pin_project! {
    /// Stream for the [`chain`](super::AsyncReadExt::chain) method.
    #[must_use = "streams do nothing unless polled"]
    #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
    pub struct Chain<T, U> {
        // The reader that is drained first.
        #[pin]
        first: T,
        // The reader used once `first` reaches EOF.
        #[pin]
        second: U,
        // Set once `first` has reported EOF; all further reads go to `second`.
        done_first: bool,
    }
}
/// Builds a `Chain` that reads `first` to completion, then `second`.
pub(super) fn chain<T, U>(first: T, second: U) -> Chain<T, U>
where
    T: AsyncRead,
    U: AsyncRead,
{
    Chain {
        done_first: false,
        first,
        second,
    }
}
impl<T, U> Chain<T, U>
where
    T: AsyncRead,
    U: AsyncRead,
{
    /// Gets references to the underlying readers in this `Chain`.
    pub fn get_ref(&self) -> (&T, &U) {
        (&self.first, &self.second)
    }

    /// Gets mutable references to the underlying readers in this `Chain`.
    ///
    /// Care should be taken to avoid modifying the internal I/O state of the
    /// underlying readers as doing so may corrupt the internal state of this
    /// `Chain`.
    pub fn get_mut(&mut self) -> (&mut T, &mut U) {
        (&mut self.first, &mut self.second)
    }

    /// Gets pinned mutable references to the underlying readers in this `Chain`.
    ///
    /// Care should be taken to avoid modifying the internal I/O state of the
    /// underlying readers as doing so may corrupt the internal state of this
    /// `Chain`.
    pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut T>, Pin<&mut U>) {
        // The projection splits the struct-level pin into per-field pins.
        let me = self.project();
        (me.first, me.second)
    }

    /// Consumes the `Chain`, returning the wrapped readers.
    ///
    /// Note that the EOF flag for `first` is discarded along with the `Chain`.
    pub fn into_inner(self) -> (T, U) {
        (self.first, self.second)
    }
}
impl<T, U> fmt::Debug for Chain<T, U>
where
    T: fmt::Debug,
    U: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Field labels "t"/"u" mirror the type parameter names.
        let mut s = f.debug_struct("Chain");
        s.field("t", &self.first);
        s.field("u", &self.second);
        s.finish()
    }
}
impl<T, U> AsyncRead for Chain<T, U>
where
    T: AsyncRead,
    U: AsyncRead,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        let me = self.project();

        if !*me.done_first {
            // Detect EOF on `first` by checking whether the read advanced
            // the buffer at all.
            let before = buf.remaining();
            ready!(me.first.poll_read(cx, buf))?;
            if buf.remaining() != before {
                // Got bytes from `first`; report them immediately.
                return Poll::Ready(Ok(()));
            }
            // Zero bytes read: `first` hit EOF, switch to `second`.
            *me.done_first = true;
        }
        me.second.poll_read(cx, buf)
    }
}
impl<T, U> AsyncBufRead for Chain<T, U>
where
    T: AsyncBufRead,
    U: AsyncBufRead,
{
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        let me = self.project();

        if !*me.done_first {
            let first_buf = ready!(me.first.poll_fill_buf(cx))?;
            if !first_buf.is_empty() {
                return Poll::Ready(Ok(first_buf));
            }
            // An empty buffer signals EOF on `first`.
            *me.done_first = true;
        }
        me.second.poll_fill_buf(cx)
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        let me = self.project();
        // Bytes are consumed from whichever reader most recently produced a
        // buffer via `poll_fill_buf`.
        if *me.done_first {
            me.second.consume(amt)
        } else {
            me.first.consume(amt)
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Compile-time check that `Chain` stays `Unpin` for `Unpin` inner types.
    #[test]
    fn assert_unpin() {
        crate::is_unpin::<Chain<(), ()>>();
    }
}

303
vendor/tokio/src/io/util/copy.rs vendored Normal file
View File

@@ -0,0 +1,303 @@
use crate::io::{AsyncRead, AsyncWrite, ReadBuf};
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
/// Intermediate buffer used to shuttle bytes from a reader to a writer.
#[derive(Debug)]
pub(super) struct CopyBuffer {
    // The reader has reported EOF; no more data will be read.
    read_done: bool,
    // Data was written since the last flush; flush before going idle.
    need_flush: bool,
    // Start of the not-yet-written region within `buf`.
    pos: usize,
    // End of the valid (read but not fully written) region within `buf`.
    cap: usize,
    // Total number of bytes copied so far.
    amt: u64,
    // Fixed-size backing storage.
    buf: Box<[u8]>,
}
impl CopyBuffer {
    /// Creates an empty copy buffer backed by `buf_size` zeroed bytes.
    pub(super) fn new(buf_size: usize) -> Self {
        Self {
            read_done: false,
            need_flush: false,
            pos: 0,
            cap: 0,
            amt: 0,
            buf: vec![0; buf_size].into_boxed_slice(),
        }
    }

    /// Reads from `reader` into the unused tail of the internal buffer.
    ///
    /// On a successful read, advances `cap` to the new filled length and sets
    /// `read_done` when no new bytes arrived (EOF).
    fn poll_fill_buf<R>(
        &mut self,
        cx: &mut Context<'_>,
        reader: Pin<&mut R>,
    ) -> Poll<io::Result<()>>
    where
        R: AsyncRead + ?Sized,
    {
        let me = &mut *self;
        let mut buf = ReadBuf::new(&mut me.buf);
        // Mark the already-filled prefix so the reader appends after it.
        buf.set_filled(me.cap);

        let res = reader.poll_read(cx, &mut buf);
        if let Poll::Ready(Ok(())) = res {
            let filled_len = buf.filled().len();
            // A read that adds zero bytes means the reader is at EOF.
            me.read_done = me.cap == filled_len;
            me.cap = filled_len;
        }
        res
    }

    /// Writes the buffered region `[pos, cap)` to `writer`.
    ///
    /// If the write is pending, opportunistically tops up the buffer from
    /// `reader` so the next write can be larger.
    fn poll_write_buf<R, W>(
        &mut self,
        cx: &mut Context<'_>,
        mut reader: Pin<&mut R>,
        mut writer: Pin<&mut W>,
    ) -> Poll<io::Result<usize>>
    where
        R: AsyncRead + ?Sized,
        W: AsyncWrite + ?Sized,
    {
        let me = &mut *self;
        match writer.as_mut().poll_write(cx, &me.buf[me.pos..me.cap]) {
            Poll::Pending => {
                // Top up the buffer towards full if we can read a bit more
                // data - this should improve the chances of a large write
                if !me.read_done && me.cap < me.buf.len() {
                    ready!(me.poll_fill_buf(cx, reader.as_mut()))?;
                }
                Poll::Pending
            }
            res => res,
        }
    }

    /// Drives the copy loop: fill from `reader`, drain to `writer`, until
    /// EOF is reached and all buffered bytes are flushed.
    ///
    /// Returns the total number of bytes copied.
    pub(super) fn poll_copy<R, W>(
        &mut self,
        cx: &mut Context<'_>,
        mut reader: Pin<&mut R>,
        mut writer: Pin<&mut W>,
    ) -> Poll<io::Result<u64>>
    where
        R: AsyncRead + ?Sized,
        W: AsyncWrite + ?Sized,
    {
        ready!(crate::trace::trace_leaf(cx));
        #[cfg(any(
            feature = "fs",
            feature = "io-std",
            feature = "net",
            feature = "process",
            feature = "rt",
            feature = "signal",
            feature = "sync",
            feature = "time",
        ))]
        // Keep track of task budget
        let coop = ready!(crate::task::coop::poll_proceed(cx));
        loop {
            // If there is some space left in our buffer, then we try to read some
            // data to continue, thus maximizing the chances of a large write.
            if self.cap < self.buf.len() && !self.read_done {
                match self.poll_fill_buf(cx, reader.as_mut()) {
                    Poll::Ready(Ok(())) => {
                        #[cfg(any(
                            feature = "fs",
                            feature = "io-std",
                            feature = "net",
                            feature = "process",
                            feature = "rt",
                            feature = "signal",
                            feature = "sync",
                            feature = "time",
                        ))]
                        coop.made_progress();
                    }
                    Poll::Ready(Err(err)) => {
                        #[cfg(any(
                            feature = "fs",
                            feature = "io-std",
                            feature = "net",
                            feature = "process",
                            feature = "rt",
                            feature = "signal",
                            feature = "sync",
                            feature = "time",
                        ))]
                        coop.made_progress();
                        return Poll::Ready(Err(err));
                    }
                    Poll::Pending => {
                        // Ignore pending reads when our buffer is not empty, because
                        // we can try to write data immediately.
                        if self.pos == self.cap {
                            // Try flushing when the reader has no progress to avoid deadlock
                            // when the reader depends on buffered writer.
                            if self.need_flush {
                                ready!(writer.as_mut().poll_flush(cx))?;
                                #[cfg(any(
                                    feature = "fs",
                                    feature = "io-std",
                                    feature = "net",
                                    feature = "process",
                                    feature = "rt",
                                    feature = "signal",
                                    feature = "sync",
                                    feature = "time",
                                ))]
                                coop.made_progress();
                                self.need_flush = false;
                            }

                            return Poll::Pending;
                        }
                    }
                }
            }

            // If our buffer has some data, let's write it out!
            while self.pos < self.cap {
                let i = ready!(self.poll_write_buf(cx, reader.as_mut(), writer.as_mut()))?;
                #[cfg(any(
                    feature = "fs",
                    feature = "io-std",
                    feature = "net",
                    feature = "process",
                    feature = "rt",
                    feature = "signal",
                    feature = "sync",
                    feature = "time",
                ))]
                coop.made_progress();
                if i == 0 {
                    return Poll::Ready(Err(io::Error::new(
                        io::ErrorKind::WriteZero,
                        "write zero byte into writer",
                    )));
                } else {
                    self.pos += i;
                    self.amt += i as u64;
                    self.need_flush = true;
                }
            }

            // If pos larger than cap, this loop will never stop.
            // In particular, user's wrong poll_write implementation returning
            // incorrect written length may lead to thread blocking.
            debug_assert!(
                self.pos <= self.cap,
                "writer returned length larger than input slice"
            );

            // All data has been written, the buffer can be considered empty again
            self.pos = 0;
            self.cap = 0;

            // If we've written all the data and we've seen EOF, flush out the
            // data and finish the transfer.
            if self.read_done {
                ready!(writer.as_mut().poll_flush(cx))?;
                #[cfg(any(
                    feature = "fs",
                    feature = "io-std",
                    feature = "net",
                    feature = "process",
                    feature = "rt",
                    feature = "signal",
                    feature = "sync",
                    feature = "time",
                ))]
                coop.made_progress();
                return Poll::Ready(Ok(self.amt));
            }
        }
    }
}
/// A future that asynchronously copies the entire contents of a reader into a
/// writer.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
struct Copy<'a, R: ?Sized, W: ?Sized> {
    // Source of bytes.
    reader: &'a mut R,
    // Destination for bytes.
    writer: &'a mut W,
    // Intermediate storage plus copy-loop state.
    buf: CopyBuffer,
}
cfg_io_util! {
    /// Asynchronously copies the entire contents of a reader into a writer.
    ///
    /// This function returns a future that will continuously read data from
    /// `reader` and then write it into `writer` in a streaming fashion until
    /// `reader` returns EOF or fails.
    ///
    /// On success, the total number of bytes that were copied from `reader` to
    /// `writer` is returned.
    ///
    /// This is an asynchronous version of [`std::io::copy`][std].
    ///
    /// A heap-allocated copy buffer with 8 KB is created to take data from the
    /// reader to the writer, check [`copy_buf`] if you want an alternative for
    /// [`AsyncBufRead`]. You can use `copy_buf` with [`BufReader`] to change the
    /// buffer capacity.
    ///
    /// # When to use async alternatives instead of `SyncIoBridge`
    ///
    /// If you are looking to use [`std::io::copy`] with a synchronous consumer
    /// (like a `hasher` or compressor), consider using async alternatives instead of
    /// wrapping the reader with [`SyncIoBridge`].
    /// See the [`SyncIoBridge`] documentation for detailed examples and guidance.
    ///
    /// [std]: std::io::copy
    /// [`copy_buf`]: crate::io::copy_buf
    /// [`AsyncBufRead`]: crate::io::AsyncBufRead
    /// [`BufReader`]: crate::io::BufReader
    /// [`SyncIoBridge`]: https://docs.rs/tokio-util/latest/tokio_util/io/struct.SyncIoBridge.html
    ///
    /// # Errors
    ///
    /// The returned future will return an error immediately if any call to
    /// `poll_read` or `poll_write` returns an error.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io;
    ///
    /// # async fn dox() -> std::io::Result<()> {
    /// let mut reader: &[u8] = b"hello";
    /// let mut writer: Vec<u8> = vec![];
    ///
    /// io::copy(&mut reader, &mut writer).await?;
    ///
    /// assert_eq!(&b"hello"[..], &writer[..]);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn copy<'a, R, W>(reader: &'a mut R, writer: &'a mut W) -> io::Result<u64>
    where
        R: AsyncRead + Unpin + ?Sized,
        W: AsyncWrite + Unpin + ?Sized,
    {
        // Drive the `Copy` future with a default-sized intermediate buffer.
        Copy {
            reader,
            writer,
            buf: CopyBuffer::new(super::DEFAULT_BUF_SIZE)
        }.await
    }
}
impl<R, W> Future for Copy<'_, R, W>
where
R: AsyncRead + Unpin + ?Sized,
W: AsyncWrite + Unpin + ?Sized,
{
type Output = io::Result<u64>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
let me = &mut *self;
me.buf
.poll_copy(cx, Pin::new(&mut *me.reader), Pin::new(&mut *me.writer))
}
}

View File

@@ -0,0 +1,137 @@
use super::copy::CopyBuffer;
use crate::io::{AsyncRead, AsyncWrite};
use std::future::poll_fn;
use std::io;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
/// Progress of one direction of a bidirectional copy.
enum TransferState {
    /// Actively copying bytes using the contained buffer.
    Running(CopyBuffer),
    /// EOF was seen; shutting down the write half. Holds the byte count.
    ShuttingDown(u64),
    /// This direction is complete; holds the total bytes copied.
    Done(u64),
}
/// Advances one direction of the copy: run the copy to EOF, then shut down
/// the writer, then keep reporting the final byte count on later polls.
fn transfer_one_direction<A, B>(
    cx: &mut Context<'_>,
    state: &mut TransferState,
    r: &mut A,
    w: &mut B,
) -> Poll<io::Result<u64>>
where
    A: AsyncRead + AsyncWrite + Unpin + ?Sized,
    B: AsyncRead + AsyncWrite + Unpin + ?Sized,
{
    let mut src = Pin::new(r);
    let mut dst = Pin::new(w);

    loop {
        *state = match state {
            TransferState::Running(buf) => {
                let copied = ready!(buf.poll_copy(cx, src.as_mut(), dst.as_mut()))?;
                TransferState::ShuttingDown(copied)
            }
            TransferState::ShuttingDown(copied) => {
                ready!(dst.as_mut().poll_shutdown(cx))?;
                TransferState::Done(*copied)
            }
            TransferState::Done(copied) => return Poll::Ready(Ok(*copied)),
        };
    }
}
/// Copies data in both directions between `a` and `b`.
///
/// This function returns a future that will read from both streams,
/// writing any data read to the opposing stream.
/// This happens in both directions concurrently.
///
/// If an EOF is observed on one stream, [`shutdown()`] will be invoked on
/// the other, and reading from that stream will stop. Copying of data in
/// the other direction will continue.
///
/// The future will complete successfully once both directions of communication have been shut down.
/// A direction is shut down when the reader reports EOF,
/// at which point [`shutdown()`] is called on the corresponding writer. When finished,
/// it will return a tuple of the number of bytes copied from a to b
/// and the number of bytes copied from b to a, in that order.
///
/// It uses two 8 KB buffers for transferring bytes between `a` and `b` by default.
/// To set your own buffer sizes use [`copy_bidirectional_with_sizes()`].
///
/// [`shutdown()`]: crate::io::AsyncWriteExt::shutdown
///
/// # Errors
///
/// The future will immediately return an error if any IO operation on `a`
/// or `b` returns an error. Some data read from either stream may be lost (not
/// written to the other stream) in this case.
///
/// # Return value
///
/// Returns a tuple of bytes copied `a` to `b` and bytes copied `b` to `a`.
#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
pub async fn copy_bidirectional<A, B>(a: &mut A, b: &mut B) -> io::Result<(u64, u64)>
where
    A: AsyncRead + AsyncWrite + Unpin + ?Sized,
    B: AsyncRead + AsyncWrite + Unpin + ?Sized,
{
    copy_bidirectional_impl(
        a,
        b,
        CopyBuffer::new(super::DEFAULT_BUF_SIZE),
        CopyBuffer::new(super::DEFAULT_BUF_SIZE),
    )
    .await
}
/// Copies data in both directions between `a` and `b` using buffers of the specified size.
///
/// This method is the same as [`copy_bidirectional()`], except that it allows you to set the
/// size of the internal buffers used when copying data.
#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
pub async fn copy_bidirectional_with_sizes<A, B>(
    a: &mut A,
    b: &mut B,
    a_to_b_buf_size: usize,
    b_to_a_buf_size: usize,
) -> io::Result<(u64, u64)>
where
    A: AsyncRead + AsyncWrite + Unpin + ?Sized,
    B: AsyncRead + AsyncWrite + Unpin + ?Sized,
{
    copy_bidirectional_impl(
        a,
        b,
        CopyBuffer::new(a_to_b_buf_size),
        CopyBuffer::new(b_to_a_buf_size),
    )
    .await
}
/// Shared driver for the two public `copy_bidirectional*` entry points.
async fn copy_bidirectional_impl<A, B>(
    a: &mut A,
    b: &mut B,
    a_to_b_buffer: CopyBuffer,
    b_to_a_buffer: CopyBuffer,
) -> io::Result<(u64, u64)>
where
    A: AsyncRead + AsyncWrite + Unpin + ?Sized,
    B: AsyncRead + AsyncWrite + Unpin + ?Sized,
{
    let mut a_to_b = TransferState::Running(a_to_b_buffer);
    let mut b_to_a = TransferState::Running(b_to_a_buffer);

    poll_fn(|cx| {
        // Poll both directions every time; a direction that has finished
        // keeps returning `Done(count)` on subsequent polls, so revisiting
        // it is harmless.
        let a_to_b_poll = transfer_one_direction(cx, &mut a_to_b, a, b)?;
        let b_to_a_poll = transfer_one_direction(cx, &mut b_to_a, b, a)?;

        match (a_to_b_poll, b_to_a_poll) {
            (Poll::Ready(a_to_b_count), Poll::Ready(b_to_a_count)) => {
                Poll::Ready(Ok((a_to_b_count, b_to_a_count)))
            }
            _ => Poll::Pending,
        }
    })
    .await
}

117
vendor/tokio/src/io/util/copy_buf.rs vendored Normal file
View File

@@ -0,0 +1,117 @@
use crate::io::{AsyncBufRead, AsyncWrite};
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
cfg_io_util! {
    /// A future that asynchronously copies the entire contents of a reader into a
    /// writer.
    ///
    /// This struct is generally created by calling [`copy_buf`][copy_buf]. Please
    /// see the documentation of `copy_buf()` for more details.
    ///
    /// [copy_buf]: copy_buf()
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    struct CopyBuf<'a, R: ?Sized, W: ?Sized> {
        // Buffered source; its internal buffer is borrowed directly.
        reader: &'a mut R,
        // Destination for bytes.
        writer: &'a mut W,
        // Total number of bytes copied so far.
        amt: u64,
    }

    /// Asynchronously copies the entire contents of a reader into a writer.
    ///
    /// This function returns a future that will continuously read data from
    /// `reader` and then write it into `writer` in a streaming fashion until
    /// `reader` returns EOF or fails.
    ///
    /// On success, the total number of bytes that were copied from `reader` to
    /// `writer` is returned.
    ///
    /// This is a [`tokio::io::copy`] alternative for [`AsyncBufRead`] readers
    /// with no extra buffer allocation, since [`AsyncBufRead`] allow access
    /// to the reader's inner buffer.
    ///
    /// # When to use async alternatives instead of `SyncIoBridge`
    ///
    /// If you are looking to use [`std::io::copy`] with a synchronous consumer
    /// (like a `hasher` or compressor), consider using async alternatives instead of
    /// wrapping the reader with [`SyncIoBridge`]. See the [`SyncIoBridge`]
    /// documentation for detailed examples and guidance on hashing, compression,
    /// and data parsing.
    ///
    /// [`tokio::io::copy`]: crate::io::copy
    /// [`AsyncBufRead`]: crate::io::AsyncBufRead
    /// [`SyncIoBridge`]: https://docs.rs/tokio-util/latest/tokio_util/io/struct.SyncIoBridge.html
    ///
    /// # Errors
    ///
    /// The returned future will return an error immediately if any call to
    /// `poll_fill_buf` or `poll_write` returns an error.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io;
    ///
    /// # async fn dox() -> std::io::Result<()> {
    /// let mut reader: &[u8] = b"hello";
    /// let mut writer: Vec<u8> = vec![];
    ///
    /// io::copy_buf(&mut reader, &mut writer).await?;
    ///
    /// assert_eq!(b"hello", &writer[..]);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn copy_buf<'a, R, W>(reader: &'a mut R, writer: &'a mut W) -> io::Result<u64>
    where
        R: AsyncBufRead + Unpin + ?Sized,
        W: AsyncWrite + Unpin + ?Sized,
    {
        CopyBuf {
            reader,
            writer,
            amt: 0,
        }.await
    }
}
impl<R, W> Future for CopyBuf<'_, R, W>
where
    R: AsyncBufRead + Unpin + ?Sized,
    W: AsyncWrite + Unpin + ?Sized,
{
    type Output = io::Result<u64>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        loop {
            let me = &mut *self;
            // Borrow the reader's internal buffer directly; an empty slice
            // signals EOF.
            let buffer = ready!(Pin::new(&mut *me.reader).poll_fill_buf(cx))?;
            if buffer.is_empty() {
                // EOF: flush the writer and report the total byte count.
                ready!(Pin::new(&mut self.writer).poll_flush(cx))?;
                return Poll::Ready(Ok(self.amt));
            }

            let i = ready!(Pin::new(&mut *me.writer).poll_write(cx, buffer))?;
            if i == 0 {
                // A zero-length write cannot make progress; surface an error
                // instead of spinning forever.
                return Poll::Ready(Err(std::io::ErrorKind::WriteZero.into()));
            }
            self.amt += i as u64;
            // Mark the written bytes as consumed in the reader's buffer.
            Pin::new(&mut *self.reader).consume(i);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Compile-time check: `CopyBuf` must stay `Unpin` even when the inner
    // reader/writer types are `!Unpin` (they are held by `&mut`).
    #[test]
    fn assert_unpin() {
        use std::marker::PhantomPinned;
        crate::is_unpin::<CopyBuf<'_, PhantomPinned, PhantomPinned>>();
    }
}

164
vendor/tokio/src/io/util/empty.rs vendored Normal file
View File

@@ -0,0 +1,164 @@
use crate::io::util::poll_proceed_and_make_progress;
use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf};
use std::fmt;
use std::io::{self, SeekFrom};
use std::pin::Pin;
use std::task::{ready, Context, Poll};
cfg_io_util! {
    /// `Empty` ignores any data written via [`AsyncWrite`], and will always be empty
    /// (returning zero bytes) when read via [`AsyncRead`].
    ///
    /// This struct is generally created by calling [`empty`]. Please see
    /// the documentation of [`empty()`][`empty`] for more details.
    ///
    /// This is an asynchronous version of [`std::io::empty`][std].
    ///
    /// [`empty`]: fn@empty
    /// [std]: std::io::empty
    pub struct Empty {
        // Private field prevents construction outside this module; use
        // `empty()` instead.
        _p: (),
    }

    /// Creates a value that is always at EOF for reads, and ignores all data written.
    ///
    /// All writes on the returned instance will return `Poll::Ready(Ok(buf.len()))`
    /// and the contents of the buffer will not be inspected.
    ///
    /// All reads from the returned instance will return `Poll::Ready(Ok(0))`.
    ///
    /// This is an asynchronous version of [`std::io::empty`][std].
    ///
    /// [std]: std::io::empty
    ///
    /// # Examples
    ///
    /// A slightly sad example of not reading anything into a buffer:
    ///
    /// ```
    /// use tokio::io::{self, AsyncReadExt};
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let mut buffer = String::new();
    /// io::empty().read_to_string(&mut buffer).await.unwrap();
    /// assert!(buffer.is_empty());
    /// # }
    /// ```
    ///
    /// A convoluted way of getting the length of a buffer:
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let buffer = vec![1, 2, 3, 5, 8];
    /// let num_bytes = io::empty().write(&buffer).await.unwrap();
    /// assert_eq!(num_bytes, 5);
    /// # }
    /// ```
    pub fn empty() -> Empty {
        Empty { _p: () }
    }
}
impl AsyncRead for Empty {
    /// Always reports EOF: the buffer is never filled, so the read returns
    /// zero bytes.
    #[inline]
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        _: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Participate in runtime tracing and the cooperative budget before
        // completing, like other leaf I/O futures in this module.
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        Poll::Ready(Ok(()))
    }
}
impl AsyncBufRead for Empty {
    /// Always yields an empty slice, which buffered readers treat as EOF.
    #[inline]
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        Poll::Ready(Ok(&[]))
    }

    /// Nothing is ever buffered, so there is nothing to consume.
    #[inline]
    fn consume(self: Pin<&mut Self>, _: usize) {}
}
impl AsyncWrite for Empty {
    /// Discards the data and reports the full length as written.
    #[inline]
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        Poll::Ready(Ok(buf.len()))
    }

    /// Flushing is a no-op; nothing is buffered.
    #[inline]
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        Poll::Ready(Ok(()))
    }

    /// Shutdown is a no-op; there is no underlying resource to close.
    #[inline]
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        Poll::Ready(Ok(()))
    }

    #[inline]
    fn is_write_vectored(&self) -> bool {
        true
    }

    /// Discards every slice and reports the summed lengths as written.
    #[inline]
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> Poll<Result<usize, io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        let num_bytes = bufs.iter().map(|b| b.len()).sum();
        Poll::Ready(Ok(num_bytes))
    }
}
impl AsyncSeek for Empty {
    /// Accepts any target position; there is no underlying cursor to move.
    #[inline]
    fn start_seek(self: Pin<&mut Self>, _position: SeekFrom) -> io::Result<()> {
        Ok(())
    }

    /// Always reports offset 0 — `Empty` has no contents to seek within.
    #[inline]
    fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        Poll::Ready(Ok(0))
    }
}
impl fmt::Debug for Empty {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `pad` (rather than `write_str`) honors width/alignment formatting
        // options supplied by the caller.
        f.pad("Empty { .. }")
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Compile-time check that `Empty` is `Unpin`.
    #[test]
    fn assert_unpin() {
        crate::is_unpin::<Empty>();
    }
}

59
vendor/tokio/src/io/util/fill_buf.rs vendored Normal file
View File

@@ -0,0 +1,59 @@
use crate::io::AsyncBufRead;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};
pin_project! {
    /// Future for the [`fill_buf`](crate::io::AsyncBufReadExt::fill_buf) method.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct FillBuf<'a, R: ?Sized> {
        // `Some` while the fill is still pending; taken (set to `None`) when
        // the future completes, so polling after completion panics.
        reader: Option<&'a mut R>,
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Creates a future that resolves to the reader's internal buffer once it
/// contains data (or EOF is reached).
pub(crate) fn fill_buf<R>(reader: &mut R) -> FillBuf<'_, R>
where
    R: AsyncBufRead + ?Sized + Unpin,
{
    FillBuf {
        reader: Some(reader),
        _pin: PhantomPinned,
    }
}
impl<'a, R: AsyncBufRead + ?Sized + Unpin> Future for FillBuf<'a, R> {
    type Output = io::Result<&'a [u8]>;

    /// # Panics
    ///
    /// Panics if polled again after having returned `Poll::Ready`.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let me = self.project();

        // Take the reader out; it is only put back on the `Pending` path.
        let reader = me.reader.take().expect("Polled after completion.");
        match Pin::new(&mut *reader).poll_fill_buf(cx) {
            Poll::Ready(Ok(slice)) => unsafe {
                // Safety: This is necessary only due to a limitation in the
                // borrow checker. Once Rust starts using the polonius borrow
                // checker, this can be simplified.
                //
                // The safety of this transmute relies on the fact that the
                // value of `reader` is `None` when we return in this branch.
                // Otherwise the caller could poll us again after
                // completion, and access the mutable reference while the
                // returned immutable reference still exists.
                let slice = std::mem::transmute::<&[u8], &'a [u8]>(slice);
                Poll::Ready(Ok(slice))
            },
            Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
            Poll::Pending => {
                // Not done yet: restore the reader for the next poll.
                *me.reader = Some(reader);
                Poll::Pending
            }
        }
    }
}

47
vendor/tokio/src/io/util/flush.rs vendored Normal file
View File

@@ -0,0 +1,47 @@
use crate::io::AsyncWrite;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};
pin_project! {
    /// A future used to fully flush an I/O object.
    ///
    /// Created by the [`AsyncWriteExt::flush`][flush] function.
    ///
    /// [flush]: crate::io::AsyncWriteExt::flush
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct Flush<'a, A: ?Sized> {
        // The writer being flushed.
        a: &'a mut A,
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Creates a future which will entirely flush an I/O object.
///
/// The future resolves once the writer's `poll_flush` reports completion.
pub(super) fn flush<A>(a: &mut A) -> Flush<'_, A>
where
    A: AsyncWrite + Unpin + ?Sized,
{
    Flush {
        a,
        _pin: PhantomPinned,
    }
}
impl<A> Future for Flush<'_, A>
where
    A: AsyncWrite + Unpin + ?Sized,
{
    type Output = io::Result<()>;

    /// Delegates directly to the writer's `poll_flush`.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let writer = self.project().a;
        Pin::new(&mut **writer).poll_flush(cx)
    }
}

145
vendor/tokio/src/io/util/lines.rs vendored Normal file
View File

@@ -0,0 +1,145 @@
use crate::io::util::read_line::read_line_internal;
use crate::io::AsyncBufRead;
use pin_project_lite::pin_project;
use std::io;
use std::mem;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
pin_project! {
    /// Reads lines from an [`AsyncBufRead`].
    ///
    /// A `Lines` can be turned into a `Stream` with [`LinesStream`].
    ///
    /// This type is usually created using the [`lines`] method.
    ///
    /// [`AsyncBufRead`]: crate::io::AsyncBufRead
    /// [`LinesStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.LinesStream.html
    /// [`lines`]: crate::io::AsyncBufReadExt::lines
    #[derive(Debug)]
    #[must_use = "streams do nothing unless polled"]
    #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
    pub struct Lines<R> {
        #[pin]
        reader: R,
        // Accumulates the current line; handed out via `mem::take`.
        buf: String,
        // Scratch buffer used by `read_line_internal` — presumably raw bytes
        // before UTF-8 validation; see read_line.rs.
        bytes: Vec<u8>,
        // Progress marker used by `read_line_internal`; asserted to be 0
        // after each completed line.
        read: usize,
    }
}
/// Creates a `Lines` with empty internal buffers wrapping `reader`.
pub(crate) fn lines<R>(reader: R) -> Lines<R>
where
    R: AsyncBufRead,
{
    Lines {
        reader,
        buf: String::new(),
        bytes: Vec::new(),
        read: 0,
    }
}
impl<R> Lines<R>
where
    R: AsyncBufRead + Unpin,
{
    /// Returns the next line in the stream.
    ///
    /// # Cancel safety
    ///
    /// This method is cancellation safe.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::AsyncBufRead;
    /// use tokio::io::AsyncBufReadExt;
    ///
    /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> {
    /// let mut lines = my_buf_read.lines();
    ///
    /// while let Some(line) = lines.next_line().await? {
    ///     println!("length = {}", line.len())
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub async fn next_line(&mut self) -> io::Result<Option<String>> {
        use std::future::poll_fn;

        poll_fn(|cx| Pin::new(&mut *self).poll_next_line(cx)).await
    }

    /// Obtains a mutable reference to the underlying reader.
    pub fn get_mut(&mut self) -> &mut R {
        &mut self.reader
    }

    /// Obtains a reference to the underlying reader.
    // NOTE(review): takes `&mut self` although it only reads — kept as-is
    // since changing it would alter the public signature.
    pub fn get_ref(&mut self) -> &R {
        &self.reader
    }

    /// Unwraps this `Lines<R>`, returning the underlying reader.
    ///
    /// Note that any leftover data in the internal buffer is lost.
    /// Therefore, a following read from the underlying reader may lead to data loss.
    pub fn into_inner(self) -> R {
        self.reader
    }
}
impl<R> Lines<R>
where
    R: AsyncBufRead,
{
    /// Polls for the next line in the stream.
    ///
    /// This method returns:
    ///
    /// * `Poll::Pending` if the next line is not yet available.
    /// * `Poll::Ready(Ok(Some(line)))` if the next line is available.
    /// * `Poll::Ready(Ok(None))` if there are no more lines in this stream.
    /// * `Poll::Ready(Err(err))` if an IO error occurred while reading the next line.
    ///
    /// When the method returns `Poll::Pending`, the `Waker` in the provided
    /// `Context` is scheduled to receive a wakeup when more bytes become
    /// available on the underlying IO resource. Note that on multiple calls to
    /// `poll_next_line`, only the `Waker` from the `Context` passed to the most
    /// recent call is scheduled to receive a wakeup.
    pub fn poll_next_line(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<io::Result<Option<String>>> {
        let me = self.project();

        let n = ready!(read_line_internal(me.reader, cx, me.buf, me.bytes, me.read))?;
        debug_assert_eq!(*me.read, 0);

        // Zero bytes read with an empty buffer means EOF with no final line.
        if n == 0 && me.buf.is_empty() {
            return Poll::Ready(Ok(None));
        }

        // Strip the trailing newline, handling both "\n" and "\r\n".
        if me.buf.ends_with('\n') {
            me.buf.pop();

            if me.buf.ends_with('\r') {
                me.buf.pop();
            }
        }

        // Hand the line out and leave an empty `String` behind for reuse.
        Poll::Ready(Ok(Some(mem::take(me.buf))))
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Compile-time check that `Lines` stays `Unpin` for an `Unpin` reader.
    #[test]
    fn assert_unpin() {
        crate::is_unpin::<Lines<()>>();
    }
}

429
vendor/tokio/src/io/util/mem.rs vendored Normal file
View File

@@ -0,0 +1,429 @@
//! In-process memory IO types.
use crate::io::{split, AsyncRead, AsyncWrite, ReadBuf, ReadHalf, WriteHalf};
use crate::loom::sync::Mutex;
use bytes::{Buf, BytesMut};
use std::{
pin::Pin,
sync::Arc,
task::{self, ready, Poll, Waker},
};
/// A bidirectional pipe to read and write bytes in memory.
///
/// A pair of `DuplexStream`s are created together, and they act as a "channel"
/// that can be used as in-memory IO types. Writing to one of the pairs will
/// allow that data to be read from the other, and vice versa.
///
/// # Closing a `DuplexStream`
///
/// If one end of the `DuplexStream` channel is dropped, any pending reads on
/// the other side will continue to read data until the buffer is drained, then
/// they will signal EOF by returning 0 bytes. Any writes to the other side,
/// including pending ones (that are waiting for free space in the buffer) will
/// return `Err(BrokenPipe)` immediately.
///
/// # Example
///
/// ```
/// # async fn ex() -> std::io::Result<()> {
/// # use tokio::io::{AsyncReadExt, AsyncWriteExt};
/// let (mut client, mut server) = tokio::io::duplex(64);
///
/// client.write_all(b"ping").await?;
///
/// let mut buf = [0u8; 4];
/// server.read_exact(&mut buf).await?;
/// assert_eq!(&buf, b"ping");
///
/// server.write_all(b"pong").await?;
///
/// client.read_exact(&mut buf).await?;
/// assert_eq!(&buf, b"pong");
/// # Ok(())
/// # }
/// ```
#[derive(Debug)]
#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
pub struct DuplexStream {
    // Simplex this end reads from (the peer's write side).
    read: Arc<Mutex<SimplexStream>>,
    // Simplex this end writes into (the peer's read side).
    write: Arc<Mutex<SimplexStream>>,
}
/// A unidirectional pipe to read and write bytes in memory.
///
/// It can be constructed by [`simplex`] function which will create a pair of
/// reader and writer or by calling [`SimplexStream::new_unsplit`] that will
/// create a handle for both reading and writing.
///
/// # Example
///
/// ```
/// # async fn ex() -> std::io::Result<()> {
/// # use tokio::io::{AsyncReadExt, AsyncWriteExt};
/// let (mut receiver, mut sender) = tokio::io::simplex(64);
///
/// sender.write_all(b"ping").await?;
///
/// let mut buf = [0u8; 4];
/// receiver.read_exact(&mut buf).await?;
/// assert_eq!(&buf, b"ping");
/// # Ok(())
/// # }
/// ```
#[derive(Debug)]
#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
pub struct SimplexStream {
    /// The buffer storing the bytes written, also read from.
    ///
    /// Using a `BytesMut` because it has efficient `Buf` and `BufMut`
    /// functionality already. Additionally, it can try to copy data in the
    /// same buffer if the read index has advanced far enough.
    buffer: BytesMut,
    /// Determines if the write side has been closed.
    is_closed: bool,
    /// The maximum amount of bytes that can be written before returning
    /// `Poll::Pending`.
    max_buf_size: usize,
    /// If the `read` side has been polled and is pending, this is the waker
    /// for that parked task.
    read_waker: Option<Waker>,
    /// If the `write` side has filled the `max_buf_size` and returned
    /// `Poll::Pending`, this is the waker for that parked task.
    write_waker: Option<Waker>,
}
// ===== impl DuplexStream =====
/// Creates two connected `DuplexStream` endpoints that behave like a pair of
/// connected in-memory sockets.
///
/// `max_buf_size` caps how many bytes may sit unread in either direction;
/// once a direction's buffer is full, writes to it return `Poll::Pending`
/// until the peer reads.
#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
pub fn duplex(max_buf_size: usize) -> (DuplexStream, DuplexStream) {
    // Two independent one-way pipes; each endpoint reads from one pipe and
    // writes into the other, which together yield full-duplex behavior.
    let a_to_b = Arc::new(Mutex::new(SimplexStream::new_unsplit(max_buf_size)));
    let b_to_a = Arc::new(Mutex::new(SimplexStream::new_unsplit(max_buf_size)));
    let first = DuplexStream {
        read: Arc::clone(&a_to_b),
        write: Arc::clone(&b_to_a),
    };
    let second = DuplexStream {
        read: b_to_a,
        write: a_to_b,
    };
    (first, second)
}
impl AsyncRead for DuplexStream {
    // Previous rustc required this `self` to be `mut`, even though newer
    // versions recognize it isn't needed to call `lock()`. So for
    // compatibility, we include the `mut` and `allow` the lint.
    //
    // See https://github.com/rust-lang/rust/issues/73592
    #[allow(unused_mut)]
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        // Delegate to the simplex buffer the peer writes into, under its lock.
        Pin::new(&mut *self.read.lock()).poll_read(cx, buf)
    }
}
impl AsyncWrite for DuplexStream {
    // Every method below forwards to the `SimplexStream` this endpoint
    // writes into, taking its lock for the duration of the call.
    #[allow(unused_mut)]
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        Pin::new(&mut *self.write.lock()).poll_write(cx, buf)
    }
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        Pin::new(&mut *self.write.lock()).poll_write_vectored(cx, bufs)
    }
    fn is_write_vectored(&self) -> bool {
        // The underlying `SimplexStream` implements vectored writes natively.
        true
    }
    #[allow(unused_mut)]
    fn poll_flush(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
    ) -> Poll<std::io::Result<()>> {
        Pin::new(&mut *self.write.lock()).poll_flush(cx)
    }
    #[allow(unused_mut)]
    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
    ) -> Poll<std::io::Result<()>> {
        Pin::new(&mut *self.write.lock()).poll_shutdown(cx)
    }
}
impl Drop for DuplexStream {
    fn drop(&mut self) {
        // notify the other side of the closure
        // Closing our write half lets the peer's reads drain and then see EOF;
        // closing our read half makes the peer's writes fail with `BrokenPipe`.
        // Both wake any task parked on the corresponding waker.
        self.write.lock().close_write();
        self.read.lock().close_read();
    }
}
// ===== impl SimplexStream =====
/// Creates unidirectional buffer that acts like in memory pipe.
///
/// The `max_buf_size` argument is the maximum amount of bytes that can be
/// written to a buffer before it returns `Poll::Pending`.
///
/// # Unify reader and writer
///
/// The reader and writer half can be unified into a single structure
/// of `SimplexStream` that supports both reading and writing or
/// the `SimplexStream` can be already created as unified structure
/// using [`SimplexStream::new_unsplit()`].
///
/// ```
/// # async fn ex() -> std::io::Result<()> {
/// # use tokio::io::{AsyncReadExt, AsyncWriteExt};
/// let (reader, writer) = tokio::io::simplex(64);
/// let mut simplex_stream = reader.unsplit(writer);
/// simplex_stream.write_all(b"hello").await?;
///
/// let mut buf = [0u8; 5];
/// simplex_stream.read_exact(&mut buf).await?;
/// assert_eq!(&buf, b"hello");
/// # Ok(())
/// # }
/// ```
#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
pub fn simplex(max_buf_size: usize) -> (ReadHalf<SimplexStream>, WriteHalf<SimplexStream>) {
    // One shared buffer, split into its read half and write half.
    split(SimplexStream::new_unsplit(max_buf_size))
}
impl SimplexStream {
    /// Creates unidirectional buffer that acts like in memory pipe. To create split
    /// version with separate reader and writer you can use [`simplex`] function.
    ///
    /// The `max_buf_size` argument is the maximum amount of bytes that can be
    /// written to a buffer before it returns `Poll::Pending`.
    #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
    pub fn new_unsplit(max_buf_size: usize) -> SimplexStream {
        SimplexStream {
            buffer: BytesMut::new(),
            is_closed: false,
            max_buf_size,
            read_waker: None,
            write_waker: None,
        }
    }
    /// Marks the stream closed from the write side.
    fn close_write(&mut self) {
        self.is_closed = true;
        // needs to notify any readers that no more data will come
        if let Some(waker) = self.read_waker.take() {
            waker.wake();
        }
    }
    /// Marks the stream closed from the read side.
    fn close_read(&mut self) {
        self.is_closed = true;
        // needs to notify any writers that they have to abort
        if let Some(waker) = self.write_waker.take() {
            waker.wake();
        }
    }
    /// Copies as many buffered bytes as fit into `buf`.
    ///
    /// Once the buffer is drained: returns `Ready(Ok(()))` with nothing
    /// filled (EOF) if the stream is closed, otherwise parks the reader's
    /// waker and returns `Pending`.
    fn poll_read_internal(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        if self.buffer.has_remaining() {
            let max = self.buffer.remaining().min(buf.remaining());
            buf.put_slice(&self.buffer[..max]);
            self.buffer.advance(max);
            if max > 0 {
                // The passed `buf` might have been empty, don't wake up if
                // no bytes have been moved.
                if let Some(waker) = self.write_waker.take() {
                    waker.wake();
                }
            }
            Poll::Ready(Ok(()))
        } else if self.is_closed {
            // Drained and closed: filling nothing signals EOF.
            Poll::Ready(Ok(()))
        } else {
            self.read_waker = Some(cx.waker().clone());
            Poll::Pending
        }
    }
    /// Appends up to `max_buf_size - buffered` bytes from `buf`.
    ///
    /// Fails with `BrokenPipe` once the stream is closed; parks the writer's
    /// waker and returns `Pending` while the buffer is full.
    fn poll_write_internal(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        if self.is_closed {
            return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into()));
        }
        let avail = self.max_buf_size - self.buffer.len();
        if avail == 0 {
            self.write_waker = Some(cx.waker().clone());
            return Poll::Pending;
        }
        let len = buf.len().min(avail);
        self.buffer.extend_from_slice(&buf[..len]);
        // New data arrived; wake a parked reader if there is one.
        if let Some(waker) = self.read_waker.take() {
            waker.wake();
        }
        Poll::Ready(Ok(len))
    }
    /// Vectored variant of `poll_write_internal`: fills the available space
    /// from `bufs` in order and reports the total number of bytes written.
    fn poll_write_vectored_internal(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        if self.is_closed {
            return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into()));
        }
        let avail = self.max_buf_size - self.buffer.len();
        if avail == 0 {
            self.write_waker = Some(cx.waker().clone());
            return Poll::Pending;
        }
        let mut rem = avail;
        for buf in bufs {
            if rem == 0 {
                break;
            }
            let len = buf.len().min(rem);
            self.buffer.extend_from_slice(&buf[..len]);
            rem -= len;
        }
        // New data arrived; wake a parked reader if there is one.
        if let Some(waker) = self.read_waker.take() {
            waker.wake();
        }
        Poll::Ready(Ok(avail - rem))
    }
}
impl AsyncRead for SimplexStream {
    cfg_coop! {
        fn poll_read(
            self: Pin<&mut Self>,
            cx: &mut task::Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<std::io::Result<()>> {
            ready!(crate::trace::trace_leaf(cx));
            // Charge this read against the task's cooperative budget so a
            // busy in-memory pipe cannot starve other tasks on the executor.
            let coop = ready!(crate::task::coop::poll_proceed(cx));
            let ret = self.poll_read_internal(cx, buf);
            if ret.is_ready() {
                coop.made_progress();
            }
            ret
        }
    }
    cfg_not_coop! {
        fn poll_read(
            self: Pin<&mut Self>,
            cx: &mut task::Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<std::io::Result<()>> {
            ready!(crate::trace::trace_leaf(cx));
            self.poll_read_internal(cx, buf)
        }
    }
}
impl AsyncWrite for SimplexStream {
    cfg_coop! {
        fn poll_write(
            self: Pin<&mut Self>,
            cx: &mut task::Context<'_>,
            buf: &[u8],
        ) -> Poll<std::io::Result<usize>> {
            ready!(crate::trace::trace_leaf(cx));
            // Charge the write against the task's cooperative budget.
            let coop = ready!(crate::task::coop::poll_proceed(cx));
            let ret = self.poll_write_internal(cx, buf);
            if ret.is_ready() {
                coop.made_progress();
            }
            ret
        }
    }
    cfg_not_coop! {
        fn poll_write(
            self: Pin<&mut Self>,
            cx: &mut task::Context<'_>,
            buf: &[u8],
        ) -> Poll<std::io::Result<usize>> {
            ready!(crate::trace::trace_leaf(cx));
            self.poll_write_internal(cx, buf)
        }
    }
    cfg_coop! {
        fn poll_write_vectored(
            self: Pin<&mut Self>,
            cx: &mut task::Context<'_>,
            bufs: &[std::io::IoSlice<'_>],
        ) -> Poll<Result<usize, std::io::Error>> {
            ready!(crate::trace::trace_leaf(cx));
            let coop = ready!(crate::task::coop::poll_proceed(cx));
            let ret = self.poll_write_vectored_internal(cx, bufs);
            if ret.is_ready() {
                coop.made_progress();
            }
            ret
        }
    }
    cfg_not_coop! {
        fn poll_write_vectored(
            self: Pin<&mut Self>,
            cx: &mut task::Context<'_>,
            bufs: &[std::io::IoSlice<'_>],
        ) -> Poll<Result<usize, std::io::Error>> {
            ready!(crate::trace::trace_leaf(cx));
            self.poll_write_vectored_internal(cx, bufs)
        }
    }
    fn is_write_vectored(&self) -> bool {
        // `poll_write_vectored` above is implemented natively.
        true
    }
    fn poll_flush(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<std::io::Result<()>> {
        // Writes land directly in the shared buffer; nothing to flush.
        Poll::Ready(Ok(()))
    }
    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        _: &mut task::Context<'_>,
    ) -> Poll<std::io::Result<()>> {
        // Shutting down the writer closes the stream and wakes a parked reader.
        self.close_write();
        Poll::Ready(Ok(()))
    }
}

112
vendor/tokio/src/io/util/mod.rs vendored Normal file
View File

@@ -0,0 +1,112 @@
#![allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
cfg_io_util! {
// Extension traits adding combinator methods to the core I/O traits.
mod async_buf_read_ext;
pub use async_buf_read_ext::AsyncBufReadExt;
mod async_read_ext;
pub use async_read_ext::AsyncReadExt;
mod async_seek_ext;
pub use async_seek_ext::AsyncSeekExt;
mod async_write_ext;
pub use async_write_ext::AsyncWriteExt;
// Public wrapper and in-memory I/O types.
mod buf_reader;
pub use buf_reader::BufReader;
mod buf_stream;
pub use buf_stream::BufStream;
mod buf_writer;
pub use buf_writer::BufWriter;
mod chain;
pub use chain::Chain;
mod copy;
pub use copy::copy;
mod copy_bidirectional;
pub use copy_bidirectional::{copy_bidirectional, copy_bidirectional_with_sizes};
mod copy_buf;
pub use copy_buf::copy_buf;
mod empty;
pub use empty::{empty, Empty};
mod flush;
mod lines;
pub use lines::Lines;
mod mem;
pub use mem::{duplex, simplex, DuplexStream, SimplexStream};
// Private future implementations backing the extension-trait methods.
mod read;
mod read_buf;
mod read_exact;
mod read_int;
mod read_line;
mod fill_buf;
mod read_to_end;
mod vec_with_initialized;
cfg_process! {
    pub(crate) use read_to_end::read_to_end;
}
mod read_to_string;
mod read_until;
mod repeat;
pub use repeat::{repeat, Repeat};
mod shutdown;
mod sink;
pub use sink::{sink, Sink};
mod split;
pub use split::Split;
mod take;
pub use take::Take;
mod write;
mod write_vectored;
mod write_all;
mod write_buf;
mod write_all_buf;
mod write_int;
// used by `BufReader` and `BufWriter`
// https://github.com/rust-lang/rust/blob/master/library/std/src/sys_common/io.rs#L1
const DEFAULT_BUF_SIZE: usize = 8 * 1024;
cfg_coop! {
    fn poll_proceed_and_make_progress(cx: &mut std::task::Context<'_>) -> std::task::Poll<()> {
        // Consume one unit of the task's cooperative budget and immediately
        // record it as progress, so hot loops eventually yield to the runtime.
        let coop = std::task::ready!(crate::task::coop::poll_proceed(cx));
        coop.made_progress();
        std::task::Poll::Ready(())
    }
}
cfg_not_coop! {
    fn poll_proceed_and_make_progress(_: &mut std::task::Context<'_>) -> std::task::Poll<()> {
        // Cooperative scheduling disabled: always ready.
        std::task::Poll::Ready(())
    }
}
}
cfg_not_io_util! {
    cfg_process! {
        mod vec_with_initialized;
        mod read_to_end;
        // Used by process
        pub(crate) use read_to_end::read_to_end;
    }
}

55
vendor/tokio/src/io/util/read.rs vendored Normal file
View File

@@ -0,0 +1,55 @@
use crate::io::{AsyncRead, ReadBuf};
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::marker::Unpin;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
/// Tries to read some bytes directly into the given `buf` in asynchronous
/// manner, returning a future type.
///
/// The returned future will resolve to both the I/O stream and the buffer
/// as well as the number of bytes read once the read operation is completed.
pub(crate) fn read<'a, R>(reader: &'a mut R, buf: &'a mut [u8]) -> Read<'a, R>
where
    R: AsyncRead + Unpin + ?Sized,
{
    Read {
        reader,
        buf,
        _pin: PhantomPinned,
    }
}
pin_project! {
    /// A future which can be used to easily read available number of bytes to fill
    /// a buffer.
    ///
    /// Created by the [`read`] function.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct Read<'a, R: ?Sized> {
        // Source to read from; the `Unpin` bound lets it be re-pinned per poll.
        reader: &'a mut R,
        // Destination slice for the bytes read.
        buf: &'a mut [u8],
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
impl<R> Future for Read<'_, R>
where
    R: AsyncRead + Unpin + ?Sized,
{
    type Output = io::Result<usize>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
        let me = self.project();
        // A single successful poll completes the future; the result is the
        // number of bytes placed into `buf` (zero meaning EOF).
        let mut buf = ReadBuf::new(me.buf);
        ready!(Pin::new(me.reader).poll_read(cx, &mut buf))?;
        Poll::Ready(Ok(buf.filled().len()))
    }
}

71
vendor/tokio/src/io/util/read_buf.rs vendored Normal file
View File

@@ -0,0 +1,71 @@
use crate::io::AsyncRead;
use bytes::BufMut;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
/// Returns a future that reads from `reader` into the spare capacity of
/// `buf`, advancing the buffer by the number of bytes read.
pub(crate) fn read_buf<'a, R, B>(reader: &'a mut R, buf: &'a mut B) -> ReadBuf<'a, R, B>
where
    R: AsyncRead + Unpin + ?Sized,
    B: BufMut + ?Sized,
{
    ReadBuf {
        reader,
        buf,
        _pin: PhantomPinned,
    }
}
pin_project! {
    /// Future returned by [`read_buf`](crate::io::AsyncReadExt::read_buf).
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct ReadBuf<'a, R: ?Sized, B: ?Sized> {
        reader: &'a mut R,
        buf: &'a mut B,
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
impl<R, B> Future for ReadBuf<'_, R, B>
where
    R: AsyncRead + Unpin + ?Sized,
    B: BufMut + ?Sized,
{
    type Output = io::Result<usize>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
        use crate::io::ReadBuf;
        let me = self.project();
        // A full buffer reads zero bytes by definition; don't poll the reader.
        if !me.buf.has_remaining_mut() {
            return Poll::Ready(Ok(0));
        }
        let n = {
            let dst = me.buf.chunk_mut();
            // SAFETY: the slice is only handed to `ReadBuf`, which tracks
            // initialization and never reads uninitialized bytes from it.
            let dst = unsafe { dst.as_uninit_slice_mut() };
            let mut buf = ReadBuf::uninit(dst);
            let ptr = buf.filled().as_ptr();
            ready!(Pin::new(me.reader).poll_read(cx, &mut buf)?);
            // Ensure the pointer does not change from under us
            assert_eq!(ptr, buf.filled().as_ptr());
            buf.filled().len()
        };
        // Safety: This is guaranteed to be the number of initialized (and read)
        // bytes due to the invariants provided by `ReadBuf::filled`.
        unsafe {
            me.buf.advance_mut(n);
        }
        Poll::Ready(Ok(n))
    }
}

69
vendor/tokio/src/io/util/read_exact.rs vendored Normal file
View File

@@ -0,0 +1,69 @@
use crate::io::{AsyncRead, ReadBuf};
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::marker::Unpin;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
/// A future which can be used to easily read exactly enough bytes to fill
/// a buffer.
///
/// Created by the [`AsyncReadExt::read_exact`][read_exact] method.
///
/// [read_exact]: crate::io::AsyncReadExt::read_exact
pub(crate) fn read_exact<'a, A>(reader: &'a mut A, buf: &'a mut [u8]) -> ReadExact<'a, A>
where
    A: AsyncRead + Unpin + ?Sized,
{
    ReadExact {
        reader,
        // Wrap the caller's slice so `ReadBuf` tracks fill progress across polls.
        buf: ReadBuf::new(buf),
        _pin: PhantomPinned,
    }
}
pin_project! {
    /// Creates a future which will read exactly enough bytes to fill `buf`,
    /// returning an error if EOF is hit sooner.
    ///
    /// On success the number of bytes is returned
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct ReadExact<'a, A: ?Sized> {
        reader: &'a mut A,
        buf: ReadBuf<'a>,
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Error returned when the source reaches EOF before `buf` has been filled.
fn eof() -> io::Error {
    let kind = io::ErrorKind::UnexpectedEof;
    io::Error::new(kind, "early eof")
}
impl<A> Future for ReadExact<'_, A>
where
    A: AsyncRead + Unpin + ?Sized,
{
    type Output = io::Result<usize>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
        let me = self.project();
        loop {
            // if our buffer is empty, then we need to read some data to continue.
            let rem = me.buf.remaining();
            if rem != 0 {
                ready!(Pin::new(&mut *me.reader).poll_read(cx, me.buf))?;
                // A successful read that made no progress means EOF before
                // the buffer could be filled.
                if me.buf.remaining() == rem {
                    return Err(eof()).into();
                }
            } else {
                // Buffer completely filled: resolve with its total size.
                return Poll::Ready(Ok(me.buf.capacity()));
            }
        }
    }
}

158
vendor/tokio/src/io/util/read_int.rs vendored Normal file
View File

@@ -0,0 +1,158 @@
use crate::io::{AsyncRead, ReadBuf};
use bytes::Buf;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::io::ErrorKind::UnexpectedEof;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Generates a future type that reads a fixed-width integer from an
/// `AsyncRead`.
///
/// `$name` is the generated future, `$ty` the value produced, `$reader` the
/// `bytes::Buf` method used to decode the bytes, and `$bytes` the encoded
/// width (defaults to `size_of::<$ty>()`).
macro_rules! reader {
    ($name:ident, $ty:ty, $reader:ident) => {
        reader!($name, $ty, $reader, std::mem::size_of::<$ty>());
    };
    ($name:ident, $ty:ty, $reader:ident, $bytes:expr) => {
        pin_project! {
            #[doc(hidden)]
            #[must_use = "futures do nothing unless you `.await` or poll them"]
            pub struct $name<R> {
                #[pin]
                src: R,
                // Accumulates the encoded bytes across potentially many polls.
                buf: [u8; $bytes],
                // Number of bytes of `buf` filled so far.
                read: u8,
                // Make this future `!Unpin` for compatibility with async trait methods.
                #[pin]
                _pin: PhantomPinned,
            }
        }
        impl<R> $name<R> {
            pub(crate) fn new(src: R) -> Self {
                $name {
                    src,
                    buf: [0; $bytes],
                    read: 0,
                    _pin: PhantomPinned,
                }
            }
        }
        impl<R> Future for $name<R>
        where
            R: AsyncRead,
        {
            type Output = io::Result<$ty>;
            fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                let mut me = self.project();
                // Already complete (e.g. polled again after Ready): decode again.
                if *me.read == $bytes as u8 {
                    return Poll::Ready(Ok(Buf::$reader(&mut &me.buf[..])));
                }
                // Keep reading into the unfilled tail of `buf`, resuming where
                // a previous poll left off.
                while *me.read < $bytes as u8 {
                    let mut buf = ReadBuf::new(&mut me.buf[*me.read as usize..]);
                    *me.read += match me.src.as_mut().poll_read(cx, &mut buf) {
                        Poll::Pending => return Poll::Pending,
                        Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())),
                        Poll::Ready(Ok(())) => {
                            let n = buf.filled().len();
                            if n == 0 {
                                // EOF mid-value.
                                return Poll::Ready(Err(UnexpectedEof.into()));
                            }
                            n as u8
                        }
                    };
                }
                let num = Buf::$reader(&mut &me.buf[..]);
                Poll::Ready(Ok(num))
            }
        }
    };
}
/// Generates a future type that reads a single byte and yields it as `$ty`
/// (`u8` or `i8`); no state is kept across polls.
macro_rules! reader8 {
    ($name:ident, $ty:ty) => {
        pin_project! {
            /// Future returned from `read_u8`
            #[doc(hidden)]
            #[must_use = "futures do nothing unless you `.await` or poll them"]
            pub struct $name<R> {
                #[pin]
                reader: R,
                // Make this future `!Unpin` for compatibility with async trait methods.
                #[pin]
                _pin: PhantomPinned,
            }
        }
        impl<R> $name<R> {
            pub(crate) fn new(reader: R) -> $name<R> {
                $name {
                    reader,
                    _pin: PhantomPinned,
                }
            }
        }
        impl<R> Future for $name<R>
        where
            R: AsyncRead,
        {
            type Output = io::Result<$ty>;
            fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                let me = self.project();
                // A one-byte stack buffer suffices: any successful non-empty
                // read completes the future in a single poll.
                let mut buf = [0; 1];
                let mut buf = ReadBuf::new(&mut buf);
                match me.reader.poll_read(cx, &mut buf) {
                    Poll::Pending => Poll::Pending,
                    Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())),
                    Poll::Ready(Ok(())) => {
                        if buf.filled().len() == 0 {
                            // Zero bytes on a successful read means EOF.
                            return Poll::Ready(Err(UnexpectedEof.into()));
                        }
                        Poll::Ready(Ok(buf.filled()[0] as $ty))
                    }
                }
            }
        }
    };
}
// Single-byte readers (no endianness involved).
reader8!(ReadU8, u8);
reader8!(ReadI8, i8);
// Multi-byte readers decoding big-endian values (the `bytes::Buf` default).
reader!(ReadU16, u16, get_u16);
reader!(ReadU32, u32, get_u32);
reader!(ReadU64, u64, get_u64);
reader!(ReadU128, u128, get_u128);
reader!(ReadI16, i16, get_i16);
reader!(ReadI32, i32, get_i32);
reader!(ReadI64, i64, get_i64);
reader!(ReadI128, i128, get_i128);
reader!(ReadF32, f32, get_f32);
reader!(ReadF64, f64, get_f64);
// Little-endian counterparts.
reader!(ReadU16Le, u16, get_u16_le);
reader!(ReadU32Le, u32, get_u32_le);
reader!(ReadU64Le, u64, get_u64_le);
reader!(ReadU128Le, u128, get_u128_le);
reader!(ReadI16Le, i16, get_i16_le);
reader!(ReadI32Le, i32, get_i32_le);
reader!(ReadI64Le, i64, get_i64_le);
reader!(ReadI128Le, i128, get_i128_le);
reader!(ReadF32Le, f32, get_f32_le);
reader!(ReadF64Le, f64, get_f64_le);

119
vendor/tokio/src/io/util/read_line.rs vendored Normal file
View File

@@ -0,0 +1,119 @@
use crate::io::util::read_until::read_until_internal;
use crate::io::AsyncBufRead;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::mem;
use std::pin::Pin;
use std::string::FromUtf8Error;
use std::task::{ready, Context, Poll};
pin_project! {
    /// Future for the [`read_line`](crate::io::AsyncBufReadExt::read_line) method.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct ReadLine<'a, R: ?Sized> {
        reader: &'a mut R,
        // This is the buffer we were provided. It will be replaced with an empty string
        // while reading to postpone utf-8 handling until after reading.
        output: &'a mut String,
        // The actual allocation of the string is moved into this vector instead.
        buf: Vec<u8>,
        // The number of bytes appended to buf. This can be less than buf.len() if
        // the buffer was not empty when the operation was started.
        read: usize,
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Returns a future that appends the next line from `reader` to `string`.
pub(crate) fn read_line<'a, R>(reader: &'a mut R, string: &'a mut String) -> ReadLine<'a, R>
where
    R: AsyncBufRead + ?Sized + Unpin,
{
    ReadLine {
        reader,
        // Steal the caller's allocation as raw bytes; UTF-8 validity is
        // re-checked once, after reading, in `finish_string_read`.
        buf: mem::take(string).into_bytes(),
        output: string,
        read: 0,
        _pin: PhantomPinned,
    }
}
/// Restores `output` to the bytes it held before the read began, by dropping
/// the `num_bytes_read` bytes this operation appended to `vector`.
fn put_back_original_data(output: &mut String, mut vector: Vec<u8>, num_bytes_read: usize) {
    // Everything before `keep` is the caller's original (valid UTF-8) content.
    let keep = vector.len() - num_bytes_read;
    vector.drain(keep..);
    *output = String::from_utf8(vector).expect("The original data must be valid utf-8.");
}
/// This handles the various failure cases and puts the string back into `output`.
///
/// The `truncate_on_io_error` `bool` is necessary because `read_to_string` and `read_line`
/// disagree on what should happen when an IO error occurs: `read_to_string`
/// drops the bytes read during the failed call, `read_line` keeps them.
pub(super) fn finish_string_read(
    io_res: io::Result<usize>,
    utf8_res: Result<String, FromUtf8Error>,
    read: usize,
    output: &mut String,
    truncate_on_io_error: bool,
) -> Poll<io::Result<usize>> {
    match (io_res, utf8_res) {
        // Clean success: hand the string straight back to the caller.
        (Ok(num_bytes), Ok(string)) => {
            // On success the read helpers reset the counter via `mem::replace`.
            debug_assert_eq!(read, 0);
            *output = string;
            Poll::Ready(Ok(num_bytes))
        }
        // Valid UTF-8 but the read failed: keep or drop the partial data per
        // the caller's policy, then surface the I/O error.
        (Err(io_err), Ok(string)) => {
            *output = string;
            if truncate_on_io_error {
                let original_len = output.len() - read;
                output.truncate(original_len);
            }
            Poll::Ready(Err(io_err))
        }
        // Read succeeded but the bytes are not UTF-8: restore the caller's
        // original content and report a data error.
        (Ok(num_bytes), Err(utf8_err)) => {
            debug_assert_eq!(read, 0);
            put_back_original_data(output, utf8_err.into_bytes(), num_bytes);
            Poll::Ready(Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "stream did not contain valid UTF-8",
            )))
        }
        // Both failed: restore the original content and prefer the I/O error.
        (Err(io_err), Err(utf8_err)) => {
            put_back_original_data(output, utf8_err.into_bytes(), read);
            Poll::Ready(Err(io_err))
        }
    }
}
/// Drives `read_until(b'\n')`, then converts the accumulated bytes back into
/// the caller's `String` via `finish_string_read` (with `read_line`'s policy
/// of keeping partial data on I/O error — the final `false` argument).
pub(super) fn read_line_internal<R: AsyncBufRead + ?Sized>(
    reader: Pin<&mut R>,
    cx: &mut Context<'_>,
    output: &mut String,
    buf: &mut Vec<u8>,
    read: &mut usize,
) -> Poll<io::Result<usize>> {
    let io_res = ready!(read_until_internal(reader, cx, b'\n', buf, read));
    let utf8_res = String::from_utf8(mem::take(buf));
    // At this point both buf and output are empty. The allocation is in utf8_res.
    debug_assert!(buf.is_empty());
    debug_assert!(output.is_empty());
    finish_string_read(io_res, utf8_res, *read, output, false)
}
impl<R: AsyncBufRead + ?Sized + Unpin> Future for ReadLine<'_, R> {
    type Output = io::Result<usize>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let me = self.project();
        read_line_internal(Pin::new(*me.reader), cx, me.output, me.buf, me.read)
    }
}

143
vendor/tokio/src/io/util/read_to_end.rs vendored Normal file
View File

@@ -0,0 +1,143 @@
use crate::io::util::vec_with_initialized::{into_read_buf_parts, VecU8, VecWithInitialized};
use crate::io::{AsyncRead, ReadBuf};
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::mem::{self, MaybeUninit};
use std::pin::Pin;
use std::task::{ready, Context, Poll};
pin_project! {
    /// Future that reads bytes from a source until EOF, appending them to a
    /// caller-supplied `Vec<u8>`.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct ReadToEnd<'a, R: ?Sized> {
        reader: &'a mut R,
        // Wrapper tracking the already-initialized portion of the vector's
        // spare capacity (see `vec_with_initialized`).
        buf: VecWithInitialized<&'a mut Vec<u8>>,
        // The number of bytes appended to buf. This can be less than buf.len() if
        // the buffer was not empty when the operation was started.
        read: usize,
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Returns a future that reads from `reader` until EOF, appending the data
/// to `buffer`.
pub(crate) fn read_to_end<'a, R>(reader: &'a mut R, buffer: &'a mut Vec<u8>) -> ReadToEnd<'a, R>
where
    R: AsyncRead + Unpin + ?Sized,
{
    ReadToEnd {
        reader,
        buf: VecWithInitialized::new(buffer),
        read: 0,
        _pin: PhantomPinned,
    }
}
/// Repeatedly reads from `reader` into `buf` until a zero-length read (EOF).
///
/// On EOF, resolves with the total accumulated in `*num_read`, resetting the
/// counter to zero for the next operation.
pub(super) fn read_to_end_internal<V: VecU8, R: AsyncRead + ?Sized>(
    buf: &mut VecWithInitialized<V>,
    mut reader: Pin<&mut R>,
    num_read: &mut usize,
    cx: &mut Context<'_>,
) -> Poll<io::Result<usize>> {
    loop {
        match ready!(poll_read_to_end(buf, reader.as_mut(), cx)) {
            // A zero-byte read signals EOF: report the grand total and reset.
            Ok(0) => return Poll::Ready(Ok(mem::replace(num_read, 0))),
            Ok(n) => *num_read += n,
            Err(err) => return Poll::Ready(Err(err)),
        }
    }
}
/// Tries to read from the provided [`AsyncRead`].
///
/// The length of the buffer is increased by the number of bytes read.
fn poll_read_to_end<V: VecU8, R: AsyncRead + ?Sized>(
    buf: &mut VecWithInitialized<V>,
    read: Pin<&mut R>,
    cx: &mut Context<'_>,
) -> Poll<io::Result<usize>> {
    // This uses an adaptive system to extend the vector when it fills. We want to
    // avoid paying to allocate and zero a huge chunk of memory if the reader only
    // has 4 bytes while still making large reads if the reader does have a ton
    // of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every
    // time is 4,500 times (!) slower than this if the reader has a very small
    // amount of data to return. When the vector is full with its starting
    // capacity, we first try to read into a small buffer to see if we reached
    // an EOF. This only happens when the starting capacity is >= NUM_BYTES, since
    // we allocate at least NUM_BYTES each time. This avoids the unnecessary
    // allocation that we attempt before reading into the vector.
    const NUM_BYTES: usize = 32;
    let try_small_read = buf.try_small_read_first(NUM_BYTES);
    // Get a ReadBuf into the vector.
    let mut read_buf;
    let poll_result;
    let n = if try_small_read {
        // Read some bytes using a small read.
        let mut small_buf: [MaybeUninit<u8>; NUM_BYTES] = [MaybeUninit::uninit(); NUM_BYTES];
        let mut small_read_buf = ReadBuf::uninit(&mut small_buf);
        poll_result = read.poll_read(cx, &mut small_read_buf);
        let to_write = small_read_buf.filled();
        // Ensure we have enough space to fill our vector with what we read.
        read_buf = buf.get_read_buf();
        if to_write.len() > read_buf.remaining() {
            buf.reserve(NUM_BYTES);
            read_buf = buf.get_read_buf();
        }
        read_buf.put_slice(to_write);
        to_write.len()
    } else {
        // Ensure we have enough space for reading.
        buf.reserve(NUM_BYTES);
        read_buf = buf.get_read_buf();
        // Read data directly into vector.
        let filled_before = read_buf.filled().len();
        poll_result = read.poll_read(cx, &mut read_buf);
        // Compute the number of bytes read.
        read_buf.filled().len() - filled_before
    };
    // Update the length of the vector using the result of poll_read.
    // This is done unconditionally (even on Pending/Err) so that any bytes
    // the reader initialized in the spare capacity are remembered.
    let read_buf_parts = into_read_buf_parts(read_buf);
    buf.apply_read_buf(read_buf_parts);
    match poll_result {
        Poll::Pending => {
            // In this case, nothing should have been read. However we still
            // update the vector in case the poll_read call initialized parts of
            // the vector's unused capacity.
            debug_assert_eq!(n, 0);
            Poll::Pending
        }
        Poll::Ready(Err(err)) => {
            debug_assert_eq!(n, 0);
            Poll::Ready(Err(err))
        }
        Poll::Ready(Ok(())) => Poll::Ready(Ok(n)),
    }
}
impl<A> Future for ReadToEnd<'_, A>
where
    A: AsyncRead + ?Sized + Unpin,
{
    type Output = io::Result<usize>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let me = self.project();
        // Delegates to the shared helper also used by `read_to_string`.
        read_to_end_internal(me.buf, Pin::new(*me.reader), me.read, cx)
    }
}

View File

@@ -0,0 +1,78 @@
use crate::io::util::read_line::finish_string_read;
use crate::io::util::read_to_end::read_to_end_internal;
use crate::io::util::vec_with_initialized::VecWithInitialized;
use crate::io::AsyncRead;
use pin_project_lite::pin_project;
use std::future::Future;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
use std::{io, mem};
pin_project! {
    /// Future for the [`read_to_string`](super::AsyncReadExt::read_to_string) method.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct ReadToString<'a, R: ?Sized> {
        reader: &'a mut R,
        // This is the buffer we were provided. It will be replaced with an empty string
        // while reading to postpone utf-8 handling until after reading.
        output: &'a mut String,
        // The actual allocation of the string is moved into this vector instead.
        buf: VecWithInitialized<Vec<u8>>,
        // The number of bytes appended to buf. This can be less than buf.len() if
        // the buffer was not empty when the operation was started.
        read: usize,
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Returns a future that reads from `reader` until EOF, appending the data
/// to `string` after validating it as UTF-8.
pub(crate) fn read_to_string<'a, R>(
    reader: &'a mut R,
    string: &'a mut String,
) -> ReadToString<'a, R>
where
    R: AsyncRead + ?Sized + Unpin,
{
    // Steal the caller's allocation as raw bytes; UTF-8 validity is
    // re-checked once, after reading, in `finish_string_read`.
    let buf = mem::take(string).into_bytes();
    ReadToString {
        reader,
        buf: VecWithInitialized::new(buf),
        output: string,
        read: 0,
        _pin: PhantomPinned,
    }
}
/// Reads until EOF, then validates the collected bytes as UTF-8 and moves
/// them into `output` (restoring the caller's original data on failure).
fn read_to_string_internal<R: AsyncRead + ?Sized>(
    reader: Pin<&mut R>,
    output: &mut String,
    buf: &mut VecWithInitialized<Vec<u8>>,
    read: &mut usize,
    cx: &mut Context<'_>,
) -> Poll<io::Result<usize>> {
    let io_res = ready!(read_to_end_internal(buf, reader, read, cx));
    let utf8_res = String::from_utf8(buf.take());
    // At this point both buf and output are empty. The allocation is in utf8_res.
    debug_assert!(buf.is_empty());
    debug_assert!(output.is_empty());
    // `true`: on I/O error, drop the bytes read during this operation.
    finish_string_read(io_res, utf8_res, *read, output, true)
}
impl<A> Future for ReadToString<'_, A>
where
    A: AsyncRead + ?Sized + Unpin,
{
    type Output = io::Result<usize>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let me = self.project();
        read_to_string_internal(Pin::new(*me.reader), me.output, me.buf, me.read, cx)
    }
}

80
vendor/tokio/src/io/util/read_until.rs vendored Normal file
View File

@@ -0,0 +1,80 @@
use crate::io::AsyncBufRead;
use crate::util::memchr;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::mem;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
pin_project! {
    /// Future for the [`read_until`](crate::io::AsyncBufReadExt::read_until) method.
    /// The delimiter is included in the resulting vector.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct ReadUntil<'a, R: ?Sized> {
        reader: &'a mut R,
        // Byte that terminates the read (kept in the output).
        delimiter: u8,
        buf: &'a mut Vec<u8>,
        // The number of bytes appended to buf. This can be less than buf.len() if
        // the buffer was not empty when the operation was started.
        read: usize,
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Returns a future that reads from `reader` into `buf` until `delimiter`
/// (inclusive) or EOF is reached.
pub(crate) fn read_until<'a, R>(
    reader: &'a mut R,
    delimiter: u8,
    buf: &'a mut Vec<u8>,
) -> ReadUntil<'a, R>
where
    R: AsyncBufRead + ?Sized + Unpin,
{
    ReadUntil {
        reader,
        delimiter,
        buf,
        read: 0,
        _pin: PhantomPinned,
    }
}
/// Shared poll loop for `read_until` and `read_line`.
///
/// Copies bytes from the reader's internal buffer into `buf` until the
/// delimiter (inclusive) or EOF. `*read` accumulates progress across polls
/// and is reset to zero when the operation completes.
pub(super) fn read_until_internal<R: AsyncBufRead + ?Sized>(
    mut reader: Pin<&mut R>,
    cx: &mut Context<'_>,
    delimiter: u8,
    buf: &mut Vec<u8>,
    read: &mut usize,
) -> Poll<io::Result<usize>> {
    loop {
        let (done, used) = {
            let available = ready!(reader.as_mut().poll_fill_buf(cx))?;
            if let Some(i) = memchr::memchr(delimiter, available) {
                // Copy up to and including the delimiter.
                buf.extend_from_slice(&available[..=i]);
                (true, i + 1)
            } else {
                // No delimiter yet: take everything and keep looping.
                buf.extend_from_slice(available);
                (false, available.len())
            }
        };
        reader.as_mut().consume(used);
        *read += used;
        // `used == 0` means `poll_fill_buf` returned an empty slice: EOF.
        if done || used == 0 {
            return Poll::Ready(Ok(mem::replace(read, 0)));
        }
    }
}
impl<R: AsyncBufRead + ?Sized + Unpin> Future for ReadUntil<'_, R> {
    type Output = io::Result<usize>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let me = self.project();
        // All state lives in the struct; the helper does the actual work.
        read_until_internal(Pin::new(*me.reader), cx, *me.delimiter, me.buf, me.read)
    }
}

74
vendor/tokio/src/io/util/repeat.rs vendored Normal file
View File

@@ -0,0 +1,74 @@
use bytes::BufMut;
use crate::io::util::poll_proceed_and_make_progress;
use crate::io::{AsyncRead, ReadBuf};
use std::io;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
cfg_io_util! {
    /// An async reader which yields one byte over and over and over and over and
    /// over and...
    ///
    /// This struct is generally created by calling [`repeat`][repeat]. Please
    /// see the documentation of `repeat()` for more details.
    ///
    /// This is an asynchronous version of [`std::io::Repeat`][std].
    ///
    /// [repeat]: fn@repeat
    /// [std]: std::io::Repeat
    #[derive(Debug)]
    pub struct Repeat {
        /// The byte produced by every read.
        byte: u8,
    }
    /// Creates an instance of an async reader that infinitely repeats one byte.
    ///
    /// All reads from this reader will succeed by filling the specified buffer with
    /// the given byte.
    ///
    /// This is an asynchronous version of [`std::io::repeat`][std].
    ///
    /// [std]: std::io::repeat
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::{self, AsyncReadExt};
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let mut buffer = [0; 3];
    /// io::repeat(0b101).read_exact(&mut buffer).await.unwrap();
    /// assert_eq!(buffer, [0b101, 0b101, 0b101]);
    /// # }
    /// ```
    pub fn repeat(byte: u8) -> Repeat {
        Repeat { byte }
    }
}
impl AsyncRead for Repeat {
    // Fills the entire remaining capacity of `buf` with the repeated byte.
    #[inline]
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // NOTE(review): these hooks appear to service task tracing and
        // cooperative scheduling and may yield `Pending` — confirm against
        // `crate::trace` and `poll_proceed_and_make_progress`.
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        buf.put_bytes(self.byte, buf.remaining());
        Poll::Ready(Ok(()))
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // `Repeat` only holds a `u8`, so it must stay `Unpin`.
    #[test]
    fn assert_unpin() {
        crate::is_unpin::<Repeat>();
    }
}

46
vendor/tokio/src/io/util/shutdown.rs vendored Normal file
View File

@@ -0,0 +1,46 @@
use crate::io::AsyncWrite;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};
pin_project! {
    /// A future used to shutdown an I/O object.
    ///
    /// Created by the [`AsyncWriteExt::shutdown`][shutdown] function.
    ///
    /// [shutdown]: crate::io::AsyncWriteExt::shutdown
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    #[derive(Debug)]
    pub struct Shutdown<'a, A: ?Sized> {
        // The writer whose `poll_shutdown` this future drives.
        a: &'a mut A,
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Creates a future which resolves once `poll_shutdown` on the given I/O
/// object completes.
pub(super) fn shutdown<A>(a: &mut A) -> Shutdown<'_, A>
where
    A: AsyncWrite + Unpin + ?Sized,
{
    Shutdown {
        _pin: PhantomPinned,
        a,
    }
}
impl<A> Future for Shutdown<'_, A>
where
    A: AsyncWrite + Unpin + ?Sized,
{
    type Output = io::Result<()>;

    // Forwards directly to the inner writer's `poll_shutdown`.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        let writer = Pin::new(this.a);
        writer.poll_shutdown(cx)
    }
}

94
vendor/tokio/src/io/util/sink.rs vendored Normal file
View File

@@ -0,0 +1,94 @@
use crate::io::util::poll_proceed_and_make_progress;
use crate::io::AsyncWrite;
use std::fmt;
use std::io;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
cfg_io_util! {
    /// An async writer which will move data into the void.
    ///
    /// This struct is generally created by calling [`sink`][sink]. Please
    /// see the documentation of `sink()` for more details.
    ///
    /// This is an asynchronous version of [`std::io::Sink`][std].
    ///
    /// [sink]: sink()
    /// [std]: std::io::Sink
    pub struct Sink {
        // Private zero-sized field: keeps `Sink` constructible only via `sink()`.
        _p: (),
    }

    /// Creates an instance of an async writer which will successfully consume all
    /// data.
    ///
    /// All calls to [`poll_write`] on the returned instance will return
    /// `Poll::Ready(Ok(buf.len()))` and the contents of the buffer will not be
    /// inspected.
    ///
    /// This is an asynchronous version of [`std::io::sink`][std].
    ///
    /// [`poll_write`]: crate::io::AsyncWrite::poll_write()
    /// [std]: std::io::sink
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::io::{self, AsyncWriteExt};
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() -> io::Result<()> {
    /// let buffer = vec![1, 2, 3, 5, 8];
    /// let num_bytes = io::sink().write(&buffer).await?;
    /// assert_eq!(num_bytes, 5);
    /// Ok(())
    /// # }
    /// ```
    pub fn sink() -> Sink {
        Sink { _p: () }
    }
}
impl AsyncWrite for Sink {
    #[inline]
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        // NOTE(review): tracing / cooperative-scheduling hooks; may yield
        // `Pending` — confirm semantics in `crate::trace` and
        // `poll_proceed_and_make_progress`.
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        // Claim the whole buffer was written without inspecting it.
        Poll::Ready(Ok(buf.len()))
    }

    #[inline]
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        // Nothing is buffered, so flushing is trivially successful.
        Poll::Ready(Ok(()))
    }

    #[inline]
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        ready!(poll_proceed_and_make_progress(cx));
        // No underlying resource to shut down.
        Poll::Ready(Ok(()))
    }
}
impl fmt::Debug for Sink {
    // Renders the opaque literal `Sink { .. }`; `pad` (rather than `write!`)
    // keeps width/alignment format flags working.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.pad("Sink { .. }")
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // `Sink` is a zero-sized struct, so it must stay `Unpin`.
    #[test]
    fn assert_unpin() {
        crate::is_unpin::<Sink>();
    }
}

121
vendor/tokio/src/io/util/split.rs vendored Normal file
View File

@@ -0,0 +1,121 @@
use crate::io::util::read_until::read_until_internal;
use crate::io::AsyncBufRead;
use pin_project_lite::pin_project;
use std::io;
use std::mem;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
pin_project! {
    /// Splitter for the [`split`](crate::io::AsyncBufReadExt::split) method.
    ///
    /// A `Split` can be turned into a `Stream` with [`SplitStream`].
    ///
    /// [`SplitStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.SplitStream.html
    #[derive(Debug)]
    #[must_use = "streams do nothing unless polled"]
    #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
    pub struct Split<R> {
        #[pin]
        reader: R,
        // Accumulates the current segment across partial reads.
        buf: Vec<u8>,
        // Byte that terminates each segment.
        delim: u8,
        // Progress counter for the in-flight `read_until_internal` call.
        read: usize,
    }
}
/// Builds the `Split` state machine backing `AsyncBufReadExt::split`,
/// starting with an empty segment buffer.
pub(crate) fn split<R>(reader: R, delim: u8) -> Split<R>
where
    R: AsyncBufRead,
{
    Split {
        read: 0,
        buf: Vec::new(),
        delim,
        reader,
    }
}
impl<R> Split<R>
where
    R: AsyncBufRead + Unpin,
{
    /// Returns the next segment in the stream.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::AsyncBufRead;
    /// use tokio::io::AsyncBufReadExt;
    ///
    /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> {
    /// let mut segments = my_buf_read.split(b'f');
    ///
    /// while let Some(segment) = segments.next_segment().await? {
    ///     println!("length = {}", segment.len())
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub async fn next_segment(&mut self) -> io::Result<Option<Vec<u8>>> {
        use std::future::poll_fn;

        // `R: Unpin` here makes the stack pin via `Pin::new` sound.
        poll_fn(|cx| Pin::new(&mut *self).poll_next_segment(cx)).await
    }
}
impl<R> Split<R>
where
    R: AsyncBufRead,
{
    /// Polls for the next segment in the stream.
    ///
    /// This method returns:
    ///
    ///  * `Poll::Pending` if the next segment is not yet available.
    ///  * `Poll::Ready(Ok(Some(segment)))` if the next segment is available.
    ///  * `Poll::Ready(Ok(None))` if there are no more segments in this stream.
    ///  * `Poll::Ready(Err(err))` if an IO error occurred while reading the
    ///    next segment.
    ///
    /// When the method returns `Poll::Pending`, the `Waker` in the provided
    /// `Context` is scheduled to receive a wakeup when more bytes become
    /// available on the underlying IO resource.
    ///
    /// Note that on multiple calls to `poll_next_segment`, only the `Waker`
    /// from the `Context` passed to the most recent call is scheduled to
    /// receive a wakeup.
    pub fn poll_next_segment(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<io::Result<Option<Vec<u8>>>> {
        let me = self.project();

        let n = ready!(read_until_internal(
            me.reader, cx, *me.delim, me.buf, me.read,
        ))?;
        // read_until_internal resets me.read to zero once it finds the delimiter
        debug_assert_eq!(*me.read, 0);

        // `n == 0` means EOF; with an empty buffer there is no final segment.
        if n == 0 && me.buf.is_empty() {
            return Poll::Ready(Ok(None));
        }

        // Strip the trailing delimiter (absent only for the final, EOF-cut
        // segment) so callers receive the segment contents alone.
        if me.buf.last() == Some(me.delim) {
            me.buf.pop();
        }

        // Hand the segment to the caller, leaving an empty buffer behind.
        Poll::Ready(Ok(Some(mem::take(me.buf))))
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // `Split` must stay `Unpin` when the inner reader is (here `()`).
    #[test]
    fn assert_unpin() {
        crate::is_unpin::<Split<()>>();
    }
}

138
vendor/tokio/src/io/util/take.rs vendored Normal file
View File

@@ -0,0 +1,138 @@
use crate::io::{AsyncBufRead, AsyncRead, ReadBuf};
use pin_project_lite::pin_project;
use std::convert::TryFrom;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
use std::{cmp, io};
pin_project! {
    /// Stream for the [`take`](super::AsyncReadExt::take) method.
    #[derive(Debug)]
    #[must_use = "streams do nothing unless you `.await` or poll them"]
    #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
    pub struct Take<R> {
        // The wrapped reader.
        #[pin]
        inner: R,
        // Remaining bytes allowed before this reader reports EOF.
        // Add '_' to avoid conflicts with `limit` method.
        limit_: u64,
    }
}
/// Wraps `inner`, capping total readable bytes at `limit`
/// (see `AsyncReadExt::take`).
pub(super) fn take<R: AsyncRead>(inner: R, limit: u64) -> Take<R> {
    Take {
        limit_: limit,
        inner,
    }
}
impl<R: AsyncRead> Take<R> {
    /// Returns the remaining number of bytes that can be
    /// read before this instance will return EOF.
    ///
    /// # Note
    ///
    /// This instance may reach `EOF` after reading fewer bytes than indicated by
    /// this method if the underlying [`AsyncRead`] instance reaches EOF.
    pub fn limit(&self) -> u64 {
        self.limit_
    }

    /// Sets the number of bytes that can be read before this instance will
    /// return EOF. This is the same as constructing a new `Take` instance, so
    /// the amount of bytes read and the previous limit value don't matter when
    /// calling this method.
    pub fn set_limit(&mut self, limit: u64) {
        self.limit_ = limit;
    }

    /// Gets a reference to the underlying reader.
    pub fn get_ref(&self) -> &R {
        &self.inner
    }

    /// Gets a mutable reference to the underlying reader.
    ///
    /// Care should be taken to avoid modifying the internal I/O state of the
    /// underlying reader as doing so may corrupt the internal limit of this
    /// `Take`.
    pub fn get_mut(&mut self) -> &mut R {
        &mut self.inner
    }

    /// Gets a pinned mutable reference to the underlying reader.
    ///
    /// Care should be taken to avoid modifying the internal I/O state of the
    /// underlying reader as doing so may corrupt the internal limit of this
    /// `Take`.
    pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> {
        // Projection keeps the pin guarantee for the `#[pin]`-ed inner reader.
        self.project().inner
    }

    /// Consumes the `Take`, returning the wrapped reader.
    pub fn into_inner(self) -> R {
        self.inner
    }
}
impl<R: AsyncRead> AsyncRead for Take<R> {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<Result<(), io::Error>> {
        // Limit exhausted: report EOF without touching the inner reader.
        if self.limit_ == 0 {
            return Poll::Ready(Ok(()));
        }

        let me = self.project();
        // Sub-buffer over `buf`'s unfilled region, capped at the remaining limit.
        let mut b = buf.take(usize::try_from(*me.limit_).unwrap_or(usize::MAX));

        let buf_ptr = b.filled().as_ptr();
        ready!(me.inner.poll_read(cx, &mut b))?;
        // A well-behaved reader only appends; a moved pointer would mean it
        // swapped the buffer out from under us, invalidating the bookkeeping below.
        assert_eq!(b.filled().as_ptr(), buf_ptr);

        let n = b.filled().len();

        // We need to update the original ReadBuf
        // SAFETY: `b` borrows `buf`'s own storage, so the `n` bytes the inner
        // reader filled (and thereby initialized) are exactly the first `n`
        // bytes of `buf`'s unfilled region.
        unsafe {
            buf.assume_init(n);
        }
        buf.advance(n);
        *me.limit_ -= n as u64;
        Poll::Ready(Ok(()))
    }
}
impl<R: AsyncBufRead> AsyncBufRead for Take<R> {
    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        let me = self.project();

        // Don't call into inner reader at all at EOF because it may still block
        if *me.limit_ == 0 {
            return Poll::Ready(Ok(&[]));
        }

        let buf = ready!(me.inner.poll_fill_buf(cx)?);
        // Expose no more of the inner buffer than the limit allows.
        let cap = cmp::min(buf.len() as u64, *me.limit_) as usize;
        Poll::Ready(Ok(&buf[..cap]))
    }

    fn consume(self: Pin<&mut Self>, amt: usize) {
        let me = self.project();
        // Don't let callers reset the limit by passing an overlarge value
        let amt = cmp::min(amt as u64, *me.limit_) as usize;
        *me.limit_ -= amt as u64;
        me.inner.consume(amt);
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // `Take` must stay `Unpin` when the inner reader is (here `()`).
    #[test]
    fn assert_unpin() {
        crate::is_unpin::<Take<()>>();
    }
}

View File

@@ -0,0 +1,142 @@
use crate::io::ReadBuf;
use std::mem::MaybeUninit;
/// Something that looks like a `Vec<u8>`.
///
/// # Safety
///
/// The implementor must guarantee that the vector returned by the
/// `as_ref` and `as_mut` methods does not change from one call to
/// another.
pub(crate) unsafe trait VecU8: AsRef<Vec<u8>> + AsMut<Vec<u8>> {}

unsafe impl VecU8 for Vec<u8> {}
unsafe impl VecU8 for &mut Vec<u8> {}
/// This struct wraps a `Vec<u8>` or `&mut Vec<u8>`, combining it with a
/// `num_initialized`, which keeps track of the number of initialized bytes
/// in the unused capacity.
///
/// The purpose of this struct is to remember how many bytes were initialized
/// through a `ReadBuf` from call to call.
///
/// This struct has the safety invariant that the first `num_initialized` of the
/// vector's allocation must be initialized at any time.
#[derive(Debug)]
pub(crate) struct VecWithInitialized<V> {
    vec: V,
    // The number of initialized bytes in the vector.
    // Always between `vec.len()` and `vec.capacity()`.
    num_initialized: usize,
    // Capacity at construction time; used by `try_small_read_first` to detect
    // that the vector has never been grown.
    starting_capacity: usize,
}
impl VecWithInitialized<Vec<u8>> {
    /// Takes the inner vector, leaving an empty one behind.
    ///
    /// `num_initialized` is reset first so the invariant ("first
    /// `num_initialized` bytes of the allocation are initialized") holds for
    /// the fresh, capacity-less vector left in place.
    #[cfg(feature = "io-util")]
    pub(crate) fn take(&mut self) -> Vec<u8> {
        self.num_initialized = 0;
        std::mem::take(&mut self.vec)
    }
}
impl<V> VecWithInitialized<V>
where
    V: VecU8,
{
    /// Wraps `vec`, recording its length as the initialized prefix and its
    /// current capacity as the starting capacity.
    pub(crate) fn new(mut vec: V) -> Self {
        // SAFETY: The safety invariants of vector guarantee that the bytes up
        // to its length are initialized.
        Self {
            num_initialized: vec.as_mut().len(),
            starting_capacity: vec.as_ref().capacity(),
            vec,
        }
    }

    /// Ensures at least `num_bytes` of spare capacity, reallocating if needed.
    pub(crate) fn reserve(&mut self, num_bytes: usize) {
        let vec = self.vec.as_mut();
        // Fast path: enough spare capacity already.
        if vec.capacity() - vec.len() >= num_bytes {
            return;
        }
        // SAFETY: Setting num_initialized to `vec.len()` is correct as
        // `reserve` does not change the length of the vector.
        // (A reallocation discards the initialized-but-unused tail, so the
        // initialized prefix shrinks back to the length.)
        self.num_initialized = vec.len();
        vec.reserve(num_bytes);
    }

    /// Whether the wrapped vector currently has length zero.
    #[cfg(feature = "io-util")]
    pub(crate) fn is_empty(&self) -> bool {
        self.vec.as_ref().is_empty()
    }

    /// Builds a `ReadBuf` over the vector's entire allocation, with the filled
    /// region set to `len` and the initialized region to `num_initialized`.
    pub(crate) fn get_read_buf<'a>(&'a mut self) -> ReadBuf<'a> {
        let num_initialized = self.num_initialized;

        // SAFETY: Creating the slice is safe because of the safety invariants
        // on Vec<u8>. The safety invariants of `ReadBuf` will further guarantee
        // that no bytes in the slice are de-initialized.
        let vec = self.vec.as_mut();
        let len = vec.len();
        let cap = vec.capacity();
        let ptr = vec.as_mut_ptr().cast::<MaybeUninit<u8>>();
        let slice = unsafe { std::slice::from_raw_parts_mut::<'a, MaybeUninit<u8>>(ptr, cap) };

        // SAFETY: This is safe because the safety invariants of
        // VecWithInitialized say that the first num_initialized bytes must be
        // initialized.
        let mut read_buf = ReadBuf::uninit(slice);
        unsafe {
            read_buf.assume_init(num_initialized);
        }
        read_buf.set_filled(len);

        read_buf
    }

    /// Writes back the state of a `ReadBuf` obtained from `get_read_buf`:
    /// sets the vector's length to the filled size and remembers the new
    /// initialized prefix.
    pub(crate) fn apply_read_buf(&mut self, parts: ReadBufParts) {
        let vec = self.vec.as_mut();
        // Guards against applying parts from a different buffer.
        assert_eq!(vec.as_ptr(), parts.ptr);

        // SAFETY:
        // The ReadBufParts really does point inside `self.vec` due to the above
        // check, and the safety invariants of `ReadBuf` guarantee that the
        // first `parts.initialized` bytes of `self.vec` really have been
        // initialized. Additionally, `ReadBuf` guarantees that `parts.len` is
        // at most `parts.initialized`, so the first `parts.len` bytes are also
        // initialized.
        //
        // Note that this relies on the fact that `V` is either `Vec<u8>` or
        // `&mut Vec<u8>`, so the vector returned by `self.vec.as_mut()` cannot
        // change from call to call.
        unsafe {
            self.num_initialized = parts.initialized;
            vec.set_len(parts.len);
        }
    }

    // Returns a boolean telling the caller to try reading into a small local buffer first if true.
    // Doing so would avoid overallocating when vec is filled to capacity and we reached EOF.
    pub(crate) fn try_small_read_first(&self, num_bytes: usize) -> bool {
        let vec = self.vec.as_ref();
        // True only when the vector is full, has never been grown, and the
        // requested read would not have fit at construction time either.
        vec.capacity() - vec.len() < num_bytes
            && self.starting_capacity == vec.capacity()
            && self.starting_capacity >= num_bytes
    }
}
/// Snapshot of a `ReadBuf`'s state (filled length and initialized length),
/// detached from the borrow so it can be applied back via `apply_read_buf`.
pub(crate) struct ReadBufParts {
    // Pointer is only used to check that the ReadBuf actually came from the
    // right VecWithInitialized.
    ptr: *const u8,
    // Length of the filled region.
    len: usize,
    // Length of the initialized region.
    initialized: usize,
}
// Snapshots the `ReadBuf`'s pointer, filled length, and initialized length.
// This is needed to release the borrow on `VecWithInitialized<V>`.
pub(crate) fn into_read_buf_parts(rb: ReadBuf<'_>) -> ReadBufParts {
    let filled = rb.filled();
    let ptr = filled.as_ptr();
    let len = filled.len();
    let initialized = rb.initialized().len();
    ReadBufParts {
        ptr,
        len,
        initialized,
    }
}

46
vendor/tokio/src/io/util/write.rs vendored Normal file
View File

@@ -0,0 +1,46 @@
use crate::io::AsyncWrite;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};
pin_project! {
    /// A future to write some of the buffer to an `AsyncWrite`.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct Write<'a, W: ?Sized> {
        // Destination writer.
        writer: &'a mut W,
        // Source bytes; a single (possibly partial) write is attempted.
        buf: &'a [u8],
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Tries to write some bytes from the given `buf` to the writer in an
/// asynchronous manner, returning a future that performs a single
/// (possibly partial) write.
pub(crate) fn write<'a, W>(writer: &'a mut W, buf: &'a [u8]) -> Write<'a, W>
where
    W: AsyncWrite + Unpin + ?Sized,
{
    Write {
        _pin: PhantomPinned,
        writer,
        buf,
    }
}
impl<W> Future for Write<'_, W>
where
    W: AsyncWrite + Unpin + ?Sized,
{
    type Output = io::Result<usize>;

    // A single write attempt: forwards straight to the writer's `poll_write`
    // and yields however many bytes it accepted.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
        let this = self.project();
        Pin::new(&mut *this.writer).poll_write(cx, this.buf)
    }
}

55
vendor/tokio/src/io/util/write_all.rs vendored Normal file
View File

@@ -0,0 +1,55 @@
use crate::io::AsyncWrite;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::mem;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
pin_project! {
    /// A future that writes the entirety of a buffer to an `AsyncWrite`,
    /// looping over partial writes until `buf` is empty.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct WriteAll<'a, W: ?Sized> {
        // Destination writer.
        writer: &'a mut W,
        // Remaining unwritten bytes; shrinks as partial writes complete.
        buf: &'a [u8],
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Builds the future that writes every byte of `buf` into `writer`,
/// retrying across partial writes.
pub(crate) fn write_all<'a, W>(writer: &'a mut W, buf: &'a [u8]) -> WriteAll<'a, W>
where
    W: AsyncWrite + Unpin + ?Sized,
{
    WriteAll {
        _pin: PhantomPinned,
        writer,
        buf,
    }
}
impl<W> Future for WriteAll<'_, W>
where
    W: AsyncWrite + Unpin + ?Sized,
{
    type Output = io::Result<()>;

    // Loops until every byte of `buf` has been accepted by the writer,
    // shrinking `buf` after each partial write.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        let me = self.project();
        while !me.buf.is_empty() {
            let n = ready!(Pin::new(&mut *me.writer).poll_write(cx, me.buf))?;
            {
                // `mem::take` sidesteps the borrow so the slice can be
                // re-sliced in place; the first `n` bytes are dropped.
                let (_, rest) = mem::take(&mut *me.buf).split_at(n);
                *me.buf = rest;
            }
            // A zero-length write with bytes still pending means the writer
            // can make no progress: surface `WriteZero` instead of spinning.
            if n == 0 {
                return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
            }
        }

        Poll::Ready(Ok(()))
    }
}

View File

@@ -0,0 +1,64 @@
use crate::io::AsyncWrite;
use bytes::Buf;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io::{self, IoSlice};
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
pin_project! {
    /// A future to write the entire contents of a `Buf` to an `AsyncWrite`,
    /// looping over partial writes until the buffer has no bytes remaining.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct WriteAllBuf<'a, W, B> {
        // Destination writer.
        writer: &'a mut W,
        // Source buffer; advanced as bytes are accepted.
        buf: &'a mut B,
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Writes all remaining bytes of the given `buf` to the writer in an
/// asynchronous manner, returning a future that completes once the buffer
/// has no bytes left (or fails with `WriteZero`).
pub(crate) fn write_all_buf<'a, W, B>(writer: &'a mut W, buf: &'a mut B) -> WriteAllBuf<'a, W, B>
where
    W: AsyncWrite + Unpin,
    B: Buf,
{
    WriteAllBuf {
        writer,
        buf,
        _pin: PhantomPinned,
    }
}
impl<W, B> Future for WriteAllBuf<'_, W, B>
where
    W: AsyncWrite + Unpin,
    B: Buf,
{
    type Output = io::Result<()>;

    // Drains `buf` into the writer, using vectored writes when supported.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        // Cap on IoSlices gathered per vectored write attempt.
        const MAX_VECTOR_ELEMENTS: usize = 64;

        let me = self.project();
        while me.buf.has_remaining() {
            let n = if me.writer.is_write_vectored() {
                // Gather up to MAX_VECTOR_ELEMENTS chunks and write them in one call.
                let mut slices = [IoSlice::new(&[]); MAX_VECTOR_ELEMENTS];
                let cnt = me.buf.chunks_vectored(&mut slices);
                ready!(Pin::new(&mut *me.writer).poll_write_vectored(cx, &slices[..cnt]))?
            } else {
                // Fall back to writing the first contiguous chunk.
                ready!(Pin::new(&mut *me.writer).poll_write(cx, me.buf.chunk())?)
            };
            me.buf.advance(n);
            // Zero bytes accepted while data remains: the writer cannot make
            // progress, so fail with `WriteZero` rather than loop forever.
            if n == 0 {
                return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
            }
        }

        Poll::Ready(Ok(()))
    }
}

64
vendor/tokio/src/io/util/write_buf.rs vendored Normal file
View File

@@ -0,0 +1,64 @@
use crate::io::AsyncWrite;
use bytes::Buf;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io::{self, IoSlice};
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
pin_project! {
    /// A future to write some of the buffer to an `AsyncWrite`.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct WriteBuf<'a, W, B> {
        // Destination writer.
        writer: &'a mut W,
        // Source buffer; advanced past whatever a single write accepts.
        buf: &'a mut B,
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Tries to write some bytes from the given `buf` to the writer in an
/// asynchronous manner, returning a future that performs a single
/// (possibly vectored, possibly partial) write.
pub(crate) fn write_buf<'a, W, B>(writer: &'a mut W, buf: &'a mut B) -> WriteBuf<'a, W, B>
where
    W: AsyncWrite + Unpin,
    B: Buf,
{
    WriteBuf {
        _pin: PhantomPinned,
        writer,
        buf,
    }
}
impl<W, B> Future for WriteBuf<'_, W, B>
where
    W: AsyncWrite + Unpin,
    B: Buf,
{
    type Output = io::Result<usize>;

    // Performs exactly one write attempt (vectored when supported), advances
    // `buf` by the accepted count, and yields that count.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
        // Cap on IoSlices gathered per vectored write attempt.
        const MAX_VECTOR_ELEMENTS: usize = 64;

        let me = self.project();
        // An exhausted buffer yields `Ok(0)` without touching the writer.
        if !me.buf.has_remaining() {
            return Poll::Ready(Ok(0));
        }

        let n = if me.writer.is_write_vectored() {
            // Gather up to MAX_VECTOR_ELEMENTS chunks and write them in one call.
            let mut slices = [IoSlice::new(&[]); MAX_VECTOR_ELEMENTS];
            let cnt = me.buf.chunks_vectored(&mut slices);
            ready!(Pin::new(&mut *me.writer).poll_write_vectored(cx, &slices[..cnt]))?
        } else {
            // Fall back to writing the first contiguous chunk.
            ready!(Pin::new(&mut *me.writer).poll_write(cx, me.buf.chunk()))?
        };
        me.buf.advance(n);
        Poll::Ready(Ok(n))
    }
}

151
vendor/tokio/src/io/util/write_int.rs vendored Normal file
View File

@@ -0,0 +1,151 @@
use crate::io::AsyncWrite;
use bytes::BufMut;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};
// Generates a `$bytes`-wide write future `$name` for integer/float type
// `$ty`: the value is pre-encoded into a fixed buffer via the `bytes`
// `BufMut` method `$writer`, then flushed to the destination across
// however many partial writes it takes.
macro_rules! writer {
    ($name:ident, $ty:ty, $writer:ident) => {
        // Default width: the in-memory size of `$ty`.
        writer!($name, $ty, $writer, std::mem::size_of::<$ty>());
    };
    ($name:ident, $ty:ty, $writer:ident, $bytes:expr) => {
        pin_project! {
            #[doc(hidden)]
            #[must_use = "futures do nothing unless you `.await` or poll them"]
            pub struct $name<W> {
                #[pin]
                dst: W,
                // Pre-encoded bytes of the value being written.
                buf: [u8; $bytes],
                // Cursor into `buf`: how many bytes have been flushed so far.
                written: u8,
                // Make this future `!Unpin` for compatibility with async trait methods.
                #[pin]
                _pin: PhantomPinned,
            }
        }

        impl<W> $name<W> {
            pub(crate) fn new(w: W, value: $ty) -> Self {
                let mut writer = Self {
                    buf: [0; $bytes],
                    written: 0,
                    dst: w,
                    _pin: PhantomPinned,
                };
                // Encode `value` into the fixed buffer up front; polling then
                // only has to move bytes.
                BufMut::$writer(&mut &mut writer.buf[..], value);
                writer
            }
        }

        impl<W> Future for $name<W>
        where
            W: AsyncWrite,
        {
            type Output = io::Result<()>;

            fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                let mut me = self.project();

                // Already fully flushed on a previous poll.
                if *me.written == $bytes as u8 {
                    return Poll::Ready(Ok(()));
                }

                while *me.written < $bytes as u8 {
                    // Write the remaining suffix of `buf`, advancing the cursor
                    // by however many bytes the destination accepted.
                    *me.written += match me
                        .dst
                        .as_mut()
                        .poll_write(cx, &me.buf[*me.written as usize..])
                    {
                        Poll::Pending => return Poll::Pending,
                        Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())),
                        Poll::Ready(Ok(0)) => {
                            // No progress possible: report `WriteZero`.
                            return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
                        }
                        Poll::Ready(Ok(n)) => n as u8,
                    };
                }

                Poll::Ready(Ok(()))
            }
        }
    };
}
// Generates a single-byte write future `$name` for `$ty` (`u8`/`i8`).
// Unlike `writer!`, no cursor is needed: a one-byte write either fully
// succeeds, makes no progress (`WriteZero`), or is pending.
macro_rules! writer8 {
    ($name:ident, $ty:ty) => {
        pin_project! {
            #[doc(hidden)]
            #[must_use = "futures do nothing unless you `.await` or poll them"]
            pub struct $name<W> {
                #[pin]
                dst: W,
                // The value to write, converted to its byte on each poll.
                byte: $ty,
                // Make this future `!Unpin` for compatibility with async trait methods.
                #[pin]
                _pin: PhantomPinned,
            }
        }

        impl<W> $name<W> {
            pub(crate) fn new(dst: W, byte: $ty) -> Self {
                Self {
                    dst,
                    byte,
                    _pin: PhantomPinned,
                }
            }
        }

        impl<W> Future for $name<W>
        where
            W: AsyncWrite,
        {
            type Output = io::Result<()>;

            fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                let me = self.project();
                let buf = [*me.byte as u8];

                match me.dst.poll_write(cx, &buf[..]) {
                    Poll::Pending => Poll::Pending,
                    Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())),
                    Poll::Ready(Ok(0)) => Poll::Ready(Err(io::ErrorKind::WriteZero.into())),
                    Poll::Ready(Ok(1)) => Poll::Ready(Ok(())),
                    // `poll_write` may not report more bytes than it was given.
                    Poll::Ready(Ok(_)) => unreachable!(),
                }
            }
        }
    };
}
// Concrete per-type write futures: single-byte, then multi-byte big-endian
// (the `bytes` `put_*` default), then `_le` little-endian variants.
writer8!(WriteU8, u8);
writer8!(WriteI8, i8);

writer!(WriteU16, u16, put_u16);
writer!(WriteU32, u32, put_u32);
writer!(WriteU64, u64, put_u64);
writer!(WriteU128, u128, put_u128);

writer!(WriteI16, i16, put_i16);
writer!(WriteI32, i32, put_i32);
writer!(WriteI64, i64, put_i64);
writer!(WriteI128, i128, put_i128);

writer!(WriteF32, f32, put_f32);
writer!(WriteF64, f64, put_f64);

writer!(WriteU16Le, u16, put_u16_le);
writer!(WriteU32Le, u32, put_u32_le);
writer!(WriteU64Le, u64, put_u64_le);
writer!(WriteU128Le, u128, put_u128_le);

writer!(WriteI16Le, i16, put_i16_le);
writer!(WriteI32Le, i32, put_i32_le);
writer!(WriteI64Le, i64, put_i64_le);
writer!(WriteI128Le, i128, put_i128_le);

writer!(WriteF32Le, f32, put_f32_le);
writer!(WriteF64Le, f64, put_f64_le);

View File

@@ -0,0 +1,47 @@
use crate::io::AsyncWrite;
use pin_project_lite::pin_project;
use std::io;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{future::Future, io::IoSlice};
pin_project! {
    /// A future to write a slice of buffers to an `AsyncWrite`.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct WriteVectored<'a, 'b, W: ?Sized> {
        // Destination writer.
        writer: &'a mut W,
        // The gathered slices for a single vectored write attempt.
        bufs: &'a [IoSlice<'b>],
        // Make this future `!Unpin` for compatibility with async trait methods.
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Builds a future performing a single vectored write of `bufs` into `writer`.
pub(crate) fn write_vectored<'a, 'b, W>(
    writer: &'a mut W,
    bufs: &'a [IoSlice<'b>],
) -> WriteVectored<'a, 'b, W>
where
    W: AsyncWrite + Unpin + ?Sized,
{
    WriteVectored {
        _pin: PhantomPinned,
        writer,
        bufs,
    }
}
impl<W> Future for WriteVectored<'_, '_, W>
where
    W: AsyncWrite + Unpin + ?Sized,
{
    type Output = io::Result<usize>;

    // A single vectored write attempt: forwards to `poll_write_vectored`
    // and yields the number of bytes the writer accepted.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
        let this = self.project();
        Pin::new(&mut *this.writer).poll_write_vectored(cx, this.bufs)
    }
}

Some files were not shown because too many files have changed in this diff Show More