chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

228
vendor/deadpool/tests/managed.rs vendored Normal file
View File

@@ -0,0 +1,228 @@
#![cfg(feature = "managed")]
use std::{convert::Infallible, time::Duration};
use tokio::time;
use deadpool::managed::{self, Metrics, Object, PoolError, RecycleResult, Timeouts};
type Pool = managed::Pool<Manager>;
/// Trivial pool manager used by these tests: hands out `usize` objects
/// initialized to 0 and always recycles successfully.
struct Manager {}
impl managed::Manager for Manager {
    type Type = usize;
    type Error = Infallible;
    // Creation never fails; every object starts at 0.
    async fn create(&self) -> Result<usize, Infallible> {
        Ok(0)
    }
    // Recycling always succeeds, so objects are reused indefinitely.
    async fn recycle(&self, _conn: &mut usize, _: &Metrics) -> RecycleResult<Infallible> {
        Ok(())
    }
}
#[tokio::test]
async fn basic() {
    // A fresh pool has created nothing yet.
    let pool = Pool::builder(Manager {}).max_size(16).build().unwrap();
    let status = pool.status();
    assert_eq!(status.size, 0);
    assert_eq!(status.available, 0);
    assert_eq!(status.waiting, 0);
    // Each `get` lazily creates one object; held objects are not available.
    let mut held = Vec::new();
    for expected_size in 1..=3 {
        held.push(pool.get().await.unwrap());
        let status = pool.status();
        assert_eq!(status.size, expected_size);
        assert_eq!(status.available, 0);
        assert_eq!(status.waiting, 0);
    }
    // Dropping an object returns it to the pool without shrinking it.
    for expected_available in 1..=3 {
        drop(held.remove(0));
        let status = pool.status();
        assert_eq!(status.size, 3);
        assert_eq!(status.available, expected_available);
        assert_eq!(status.waiting, 0);
    }
}
#[tokio::test]
async fn closing() {
    let mgr = Manager {};
    let pool = Pool::builder(mgr).max_size(1).build().unwrap();
    // fetch the only object from the pool
    let obj = pool.get().await;
    // Spawn a second getter; the pool is exhausted so it has to wait.
    let join_handle = {
        let pool = pool.clone();
        tokio::spawn(async move { pool.get().await })
    };
    // Yield so the spawned task gets polled and registers as a waiter.
    tokio::task::yield_now().await;
    assert_eq!(pool.status().available, 0);
    assert_eq!(pool.status().waiting, 1);
    pool.close();
    // Yield again so the waiter observes the close and is woken with an error.
    tokio::task::yield_now().await;
    assert_eq!(pool.status().available, 0);
    assert_eq!(pool.status().waiting, 0);
    // The woken waiter, plain gets and zero-wait timeout gets must all
    // report `PoolError::Closed`.
    assert!(matches!(join_handle.await.unwrap(), Err(PoolError::Closed)));
    assert!(matches!(pool.get().await, Err(PoolError::Closed)));
    assert!(matches!(
        pool.timeout_get(&Timeouts {
            wait: Some(Duration::ZERO),
            ..pool.timeouts()
        })
        .await,
        Err(PoolError::Closed)
    ));
    // Returning an object to a closed pool must not make it available again.
    drop(obj);
    tokio::task::yield_now().await;
    assert_eq!(pool.status().available, 0);
    assert_eq!(pool.status().waiting, 0);
}
#[tokio::test(flavor = "multi_thread")]
async fn concurrent() {
    let mgr = Manager {};
    let pool = Pool::builder(mgr).max_size(3).build().unwrap();
    // Spawn tasks
    // 100 tasks compete for 3 objects; each task increments whichever
    // object it receives before returning it.
    let futures = (0..100)
        .map(|_| {
            let pool = pool.clone();
            tokio::spawn(async move {
                let mut obj = pool.get().await.unwrap();
                *obj += 1;
                time::sleep(Duration::from_millis(1)).await;
            })
        })
        .collect::<Vec<_>>();
    // Await tasks to finish
    for future in futures {
        future.await.unwrap();
    }
    // Verify
    // The pool never grew past max_size and all objects were returned.
    let status = pool.status();
    assert_eq!(status.size, 3);
    assert_eq!(status.available, 3);
    assert_eq!(status.waiting, 0);
    // The increments were distributed over the three objects; their sum
    // must equal the number of tasks.
    let values = [
        pool.get().await.unwrap(),
        pool.get().await.unwrap(),
        pool.get().await.unwrap(),
    ];
    assert_eq!(values.iter().map(|obj| **obj).sum::<usize>(), 100);
}
#[tokio::test(flavor = "multi_thread")]
async fn object_take() {
    // `Object::take` detaches an object from the pool, shrinking its size.
    let pool = Pool::builder(Manager {}).max_size(2).build().unwrap();
    let first = pool.get().await.unwrap();
    let second = pool.get().await.unwrap();
    let s = pool.status();
    assert_eq!((s.size, s.available, s.waiting), (2, 0, 0));
    let _ = Object::take(first);
    let s = pool.status();
    assert_eq!((s.size, s.available, s.waiting), (1, 0, 0));
    let _ = Object::take(second);
    let s = pool.status();
    assert_eq!((s.size, s.available), (0, 0));
    // The pool can grow back to max_size after objects were taken.
    let first = pool.get().await.unwrap();
    let second = pool.get().await.unwrap();
    let s = pool.status();
    assert_eq!((s.size, s.available, s.waiting), (2, 0, 0));
    drop(first);
    drop(second);
    let s = pool.status();
    assert_eq!((s.size, s.available, s.waiting), (2, 2, 0));
}
#[tokio::test]
async fn retain() {
    let mgr = Manager {};
    let pool = Pool::builder(mgr).max_size(4).build().unwrap();
    // Create three objects with staggered ages: the first two end up
    // ~10ms old, the third ~5ms old when the scope ends.
    {
        let _a = pool.get().await;
        let _b = pool.get().await;
        tokio::time::sleep(Duration::from_millis(5)).await;
        let _c = pool.get().await;
        tokio::time::sleep(Duration::from_millis(5)).await;
    }
    assert_eq!(pool.status().size, 3);
    // Keep only objects no older than 10ms: the two oldest are removed.
    let retain_result = pool.retain(|_, metrics| metrics.age() <= Duration::from_millis(10));
    assert_eq!(retain_result.retained, 1);
    assert_eq!(retain_result.removed.len(), 2);
    assert_eq!(pool.status().size, 1);
    // After 5 more ms the remaining object also exceeds the age limit.
    tokio::time::sleep(Duration::from_millis(5)).await;
    let retain_result = pool.retain(|_, metrics| metrics.age() <= Duration::from_millis(10));
    assert_eq!(retain_result.retained, 0);
    assert_eq!(retain_result.removed.len(), 1);
    assert_eq!(pool.status().size, 0);
}
/// `retain` accepts an `FnMut` closure: the predicate may mutate captured
/// state. The original test incremented a counter but never asserted it;
/// the final `assert_eq!(calls, 4)` actually verifies the closure ran once
/// per pooled object.
#[tokio::test]
async fn retain_fnmut() {
    let mgr = Manager {};
    let pool = Pool::builder(mgr).max_size(4).build().unwrap();
    // Create four objects and return them all to the pool.
    {
        let _a = pool.get().await;
        let _b = pool.get().await;
        let _c = pool.get().await;
        let _d = pool.get().await;
    }
    let mut calls = 0;
    {
        // Discard everything while counting predicate invocations.
        let retain_result = pool.retain(|_, _| {
            calls += 1;
            false
        });
        assert_eq!(retain_result.retained, 0);
        assert_eq!(retain_result.removed.len(), 4);
    }
    // The predicate must have been called exactly once per object.
    assert_eq!(calls, 4);
    assert_eq!(pool.status().size, 0);
}

View File

@@ -0,0 +1,164 @@
use std::time::Duration;
use deadpool::managed::{Hook, HookError, Manager, Metrics, Pool, RecycleResult};
use itertools::Itertools;
use tokio::time::{sleep, timeout};
/// Behavior of one fallible/slow operation in the test matrix.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum Gate {
    Ok,    // succeeds immediately
    Err,   // fails immediately
    Slow,  // succeeds after a tiny delay
    Never, // pends forever; only cancellation gets past it
}
impl Gate {
async fn open(&self) -> Result<(), ()> {
match self {
Self::Ok => Ok(()),
Self::Err => Err(()),
Self::Never => {
sleep(Duration::MAX).await;
unreachable!();
}
Self::Slow => {
sleep(Duration::from_nanos(2)).await;
Ok(())
}
}
}
}
/// One gate per pool operation that can succeed, fail, stall or hang.
#[derive(Copy, Clone, Debug)]
struct Gates {
    create: Gate,
    recycle: Gate,
    post_create: Gate,
    pre_recycle: Gate,
    post_recycle: Gate,
}
/// Yields every combination of the four gate behaviors across the five
/// gated operations (4^5 = 1024 configurations).
fn configs() -> impl Iterator<Item = Gates> {
    (0..5)
        .map(|_| &[Gate::Ok, Gate::Err, Gate::Slow, Gate::Never])
        .multi_cartesian_product()
        .map(move |gates| Gates {
            create: *gates[0],
            recycle: *gates[1],
            post_create: *gates[2],
            pre_recycle: *gates[3],
            post_recycle: *gates[4],
        })
}
/// Builds one pool per gate configuration, wiring the `post_create`,
/// `pre_recycle` and `post_recycle` hooks to their respective gates.
fn pools(max_size: usize) -> impl Iterator<Item = Pool<GatedManager>> {
    configs().map(move |gates| {
        let manager = GatedManager { gates };
        Pool::builder(manager)
            .max_size(max_size)
            .post_create(Hook::async_fn(move |_, _| {
                Box::pin(async move {
                    gates
                        .post_create
                        .open()
                        .await
                        .map_err(|_| HookError::message("Fail"))?;
                    Ok(())
                })
            }))
            .pre_recycle(Hook::async_fn(move |_, _| {
                Box::pin(async move {
                    gates
                        .pre_recycle
                        .open()
                        .await
                        .map_err(|_| HookError::message("pre_recycle gate set to error"))?;
                    Ok(())
                })
            }))
            .post_recycle(Hook::async_fn(move |_, _| {
                Box::pin(async move {
                    gates
                        .post_recycle
                        .open()
                        .await
                        .map_err(|_| HookError::message("post_recycle gate set to error"))?;
                    Ok(())
                })
            }))
            .build()
            .unwrap()
    })
}
/// Manager whose `create`/`recycle` outcome is driven by its gates.
struct GatedManager {
    gates: Gates,
}
impl Manager for GatedManager {
    type Type = ();
    type Error = ();
    // Succeeds, fails, stalls or hangs depending on the `create` gate.
    async fn create(&self) -> Result<Self::Type, Self::Error> {
        self.gates.create.open().await?;
        Ok(())
    }
    // Succeeds, fails, stalls or hangs depending on the `recycle` gate.
    async fn recycle(&self, _conn: &mut Self::Type, _: &Metrics) -> RecycleResult<Self::Error> {
        self.gates.recycle.open().await?;
        Ok(())
    }
}
// This tests various combinations of configurations with
// succeeding, failing, slow and hanging managers and hooks.
// It currently tests 4^5 (=1024) possible combinations and
// therefore takes some time to complete. It is probably not
// necessary to test all combinations, but doing so doesn't
// hurt either and it is a good stress test of the pool.
#[tokio::test(flavor = "multi_thread", worker_threads = 16)]
async fn test_cancellations() {
    for pool in pools(2) {
        // Eight tasks hammer the pool with tiny timeouts, constantly
        // cancelling `get` futures mid-flight.
        let handles = (0..8)
            .map(|i| {
                let pool = pool.clone();
                tokio::spawn(async move {
                    loop {
                        let _obj = timeout(Duration::from_nanos(i), pool.get()).await;
                        sleep(Duration::from_nanos(i)).await;
                    }
                })
            })
            .collect::<Vec<_>>();
        // While the hammering runs, the size invariant must hold.
        for _ in 0..10 {
            tokio::time::sleep(Duration::from_millis(1)).await;
            let status = pool.status();
            assert!(
                status.size <= status.max_size,
                "size({}) > max_size({}), gates: {:?}",
                status.size,
                status.max_size,
                pool.manager().gates
            );
        }
        for handle in &handles {
            handle.abort();
        }
        for handle in handles {
            let _ = handle.await;
        }
        // After all tasks stopped, the invariants must still hold.
        let status = pool.status();
        assert!(
            status.size <= status.max_size,
            "size({}) > max_size({}), gates: {:?}",
            status.size,
            status.max_size,
            pool.manager().gates
        );
        assert!(
            status.available <= status.max_size,
            "available({}) > max_size({}), gates: {:?}",
            status.available,
            status.max_size,
            pool.manager().gates
        );
    }
}

64
vendor/deadpool/tests/managed_config.rs vendored Normal file
View File

@@ -0,0 +1,64 @@
#![cfg(all(feature = "managed", feature = "serde"))]
use std::{collections::HashMap, env, time::Duration};
use config::Config;
use serde::{Deserialize, Serialize};
use deadpool::managed::PoolConfig;
/// RAII guard for environment variables: remembers original values on
/// `set` and restores them when dropped.
struct Env {
    backup: HashMap<String, Option<String>>, // name -> original value (None if unset)
}
impl Env {
    /// Creates an empty guard with no recorded backups.
    pub fn new() -> Self {
        Self {
            backup: HashMap::new(),
        }
    }
    /// Sets `name` to `value`, remembering the variable's ORIGINAL value so
    /// `Drop` can restore it. Only the FIRST `set` for a given name records
    /// a backup; the original code unconditionally re-inserted, so a second
    /// `set` of the same variable would snapshot the intermediate test value
    /// and `Drop` would restore that instead of the pre-test state.
    pub fn set(&mut self, name: &str, value: &str) {
        self.backup
            .entry(name.to_string())
            .or_insert_with(|| env::var(name).ok());
        env::set_var(name, value);
    }
}
impl Drop for Env {
    /// Restores every variable touched via `set` to its recorded state:
    /// re-set variables that existed, remove ones that did not.
    fn drop(&mut self) {
        for (name, value) in &self.backup {
            if let Some(value) = value {
                env::set_var(name.as_str(), value);
            } else {
                env::remove_var(name.as_str());
            }
        }
    }
}
/// Minimal config wrapper so the `pool` key maps onto `PoolConfig`.
#[derive(Debug, Serialize, Deserialize)]
struct TestConfig {
    pool: PoolConfig,
}
/// `PoolConfig` can be deserialized from environment variables via the
/// `config` crate, using `__` as the nesting separator.
#[test]
fn from_env() {
    // `Env` restores the original variable values when it goes out of scope.
    let mut env = Env::new();
    env.set("POOL__MAX_SIZE", "42");
    env.set("POOL__TIMEOUTS__WAIT__SECS", "1");
    env.set("POOL__TIMEOUTS__WAIT__NANOS", "0");
    env.set("POOL__TIMEOUTS__CREATE__SECS", "2");
    env.set("POOL__TIMEOUTS__CREATE__NANOS", "0");
    env.set("POOL__TIMEOUTS__RECYCLE__SECS", "3");
    env.set("POOL__TIMEOUTS__RECYCLE__NANOS", "0");
    let cfg = Config::builder()
        .add_source(config::Environment::default().separator("__"))
        .build()
        .unwrap()
        .try_deserialize::<TestConfig>()
        .unwrap();
    // Durations are assembled from the SECS/NANOS pairs above.
    assert_eq!(cfg.pool.max_size, 42);
    assert_eq!(cfg.pool.timeouts.wait, Some(Duration::from_secs(1)));
    assert_eq!(cfg.pool.timeouts.create, Some(Duration::from_secs(2)));
    assert_eq!(cfg.pool.timeouts.recycle, Some(Duration::from_secs(3)));
}

View File

@@ -0,0 +1,120 @@
#![cfg(feature = "managed")]
use std::{sync::Arc, time::Duration};
use tokio::{
sync::{mpsc, Mutex},
task, time,
};
use deadpool::managed::{self, Metrics, RecycleError, RecycleResult};
type Pool = managed::Pool<Manager>;
/// Manager whose `create`/`recycle` results are fed in by the test through
/// channels, giving the test precise control over when and how they complete.
#[derive(Clone)]
struct Manager {
    create_rx: Arc<Mutex<mpsc::Receiver<Result<(), ()>>>>,
    recycle_rx: Arc<Mutex<mpsc::Receiver<Result<(), ()>>>>,
    remote_control: RemoteControl,
}
/// Sending halves of the channels; cloned by tests to trigger outcomes.
#[derive(Clone)]
struct RemoteControl {
    create_tx: mpsc::Sender<Result<(), ()>>,
    _recycle_tx: mpsc::Sender<Result<(), ()>>,
}
impl RemoteControl {
    /// Makes the next pending `create` call succeed.
    pub fn create_ok(&mut self) {
        self.create_tx.try_send(Ok(())).unwrap();
    }
    /// Makes the next pending `create` call fail.
    pub fn create_err(&mut self) {
        self.create_tx.try_send(Err(())).unwrap();
    }
    // Recycle control is not exercised by the current tests; kept for
    // future use.
    /*
    pub fn recycle_ok(&mut self) {
        self.recycle_tx.try_send(Ok(())).unwrap();
    }
    pub fn recycle_err(&mut self) {
        self.recycle_tx.try_send(Err(())).unwrap();
    }
    */
}
impl Manager {
    /// Creates a manager plus the channel endpoints the test uses to
    /// remote-control the outcome of `create` and `recycle`.
    pub fn new() -> Self {
        let (create_tx, create_rx) = mpsc::channel(16);
        let (recycle_tx, recycle_rx) = mpsc::channel(16);
        Self {
            create_rx: Arc::new(Mutex::new(create_rx)),
            recycle_rx: Arc::new(Mutex::new(recycle_rx)),
            remote_control: RemoteControl {
                create_tx,
                _recycle_tx: recycle_tx,
            },
        }
    }
}
impl managed::Manager for Manager {
    type Type = ();
    type Error = ();
    // Blocks until the test sends a result through the remote control.
    async fn create(&self) -> Result<(), ()> {
        self.create_rx.lock().await.recv().await.unwrap()
    }
    // Blocks until a recycle result is sent (currently unused by the tests).
    async fn recycle(&self, _conn: &mut (), _: &Metrics) -> RecycleResult<()> {
        match self.recycle_rx.lock().await.recv().await.unwrap() {
            Ok(()) => Ok(()),
            Err(e) => Err(RecycleError::Backend(e)),
        }
    }
}
// When the pool is drained, all connections fail to create.
#[tokio::test(flavor = "current_thread")]
async fn pool_drained() {
    let manager = Manager::new();
    let mut rc = manager.remote_control.clone();
    let pool = Pool::builder(manager).max_size(1).build().unwrap();
    let pool_clone = pool.clone();
    // let first task grab the only connection
    let get_1 = tokio::spawn(async move { pool_clone.get().await });
    task::yield_now().await;
    // `create` is blocked on the channel, so the task counts as waiting.
    assert_eq!(pool.status().size, 0);
    assert_eq!(pool.status().available, 0);
    assert_eq!(pool.status().waiting, 1);
    // let second task wait for the connection
    let pool_clone = pool.clone();
    let get_2 = tokio::spawn(async move { pool_clone.get().await });
    task::yield_now().await;
    assert_eq!(pool.status().size, 0);
    assert_eq!(pool.status().available, 0);
    assert_eq!(pool.status().waiting, 2);
    // first task receives an error
    rc.create_err();
    assert!(get_1.await.unwrap().is_err());
    // A failed create must not consume the slot needed by the second waiter.
    assert_eq!(pool.status().size, 0);
    assert_eq!(pool.status().available, 0);
    assert_eq!(pool.status().waiting, 1);
    // the second task should now be able to create an object
    rc.create_ok();
    let get_2_result = time::timeout(Duration::from_millis(10), get_2).await;
    assert!(get_2_result.is_ok(), "get_2 should not time out");
    assert_eq!(pool.status().size, 1);
    assert_eq!(pool.status().available, 0);
    assert_eq!(pool.status().waiting, 0);
    assert!(
        get_2_result.unwrap().unwrap().is_ok(),
        "get_2 should receive an object"
    );
    assert_eq!(pool.status().size, 1);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().waiting, 0);
}

184
vendor/deadpool/tests/managed_hooks.rs vendored Normal file
View File

@@ -0,0 +1,184 @@
#![cfg(feature = "managed")]
use std::sync::atomic::{AtomicUsize, Ordering};
use deadpool::managed::{Hook, HookError, Manager, Metrics, Pool, RecycleResult};
/// Manager that hands out sequential ids starting from a given value.
struct Computer {
    next_id: AtomicUsize, // id to assign to the next created object
}
impl Computer {
    /// Creates a manager whose first object will carry id `start`.
    pub fn new(start: usize) -> Self {
        Self {
            next_id: AtomicUsize::new(start),
        }
    }
}
impl Manager for Computer {
    type Type = usize;
    type Error = ();
    // Each created object receives the next sequential id.
    async fn create(&self) -> Result<Self::Type, Self::Error> {
        Ok(self.next_id.fetch_add(1, Ordering::Relaxed))
    }
    // Recycling always succeeds; the hooks under test decide the rest.
    async fn recycle(&self, _: &mut Self::Type, _: &Metrics) -> RecycleResult<Self::Error> {
        Ok(())
    }
}
#[tokio::test]
async fn post_create_ok() {
    // A synchronous post_create hook may mutate the freshly created object.
    let pool = Pool::<Computer>::builder(Computer::new(42))
        .max_size(1)
        .post_create(Hook::sync_fn(|obj, _| {
            *obj += 1;
            Ok(())
        }))
        .build()
        .unwrap();
    assert_eq!(*pool.get().await.unwrap(), 43);
}
#[tokio::test]
async fn post_create_ok_async() {
    // An asynchronous post_create hook may mutate the freshly created object.
    let pool = Pool::<Computer>::builder(Computer::new(42))
        .max_size(1)
        .post_create(Hook::async_fn(|obj, _| {
            Box::pin(async move {
                *obj += 1;
                Ok(())
            })
        }))
        .build()
        .unwrap();
    assert_eq!(*pool.get().await.unwrap(), 43);
}
/// A failing post_create hook aborts the `get`: the object is discarded
/// and the error is surfaced to the caller.
#[tokio::test]
async fn post_create_err_abort() {
    let manager = Computer::new(0);
    let pool = Pool::<Computer>::builder(manager)
        .max_size(3)
        .post_create(Hook::sync_fn(|obj, _| {
            // Reject objects with odd ids.
            (*obj % 2 == 0)
                .then_some(())
                .ok_or(HookError::message("odd creation"))
        }))
        .build()
        .unwrap();
    // Ids alternate: even ids are handed out, odd ids fail the hook.
    let obj1 = pool.get().await.unwrap();
    assert_eq!(*obj1, 0);
    assert!(pool.get().await.is_err());
    let obj2 = pool.get().await.unwrap();
    assert_eq!(*obj2, 2);
    assert!(pool.get().await.is_err());
    let obj2 = pool.get().await.unwrap();
    assert_eq!(*obj2, 4);
}
#[tokio::test]
async fn pre_recycle_ok() {
    // pre_recycle runs before every reuse, so the object's value grows by
    // one on each `get` after the first.
    let pool = Pool::<Computer>::builder(Computer::new(42))
        .max_size(1)
        .pre_recycle(Hook::sync_fn(|obj, _| {
            *obj += 1;
            Ok(())
        }))
        .build()
        .unwrap();
    for expected in 42..=45 {
        assert_eq!(*pool.get().await.unwrap(), expected);
    }
}
/// A failing pre_recycle hook does NOT fail the `get`: the stale object is
/// discarded and a fresh one is created in its place.
#[tokio::test]
async fn pre_recycle_err_continue() {
    let manager = Computer::new(0);
    let pool = Pool::<Computer>::builder(manager)
        .max_size(1)
        .pre_recycle(Hook::sync_fn(|_, metrics| {
            // Fail from the second recycle of an object onwards.
            if metrics.recycle_count > 0 {
                Err(HookError::message("Fail!"))
            } else {
                Ok(())
            }
        }))
        .build()
        .unwrap();
    // Each id is therefore handed out exactly twice: once after creation
    // and once after its single successful recycle.
    assert_eq!(*pool.get().await.unwrap(), 0);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().size, 1);
    assert_eq!(*pool.get().await.unwrap(), 0);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().size, 1);
    assert_eq!(*pool.get().await.unwrap(), 1);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().size, 1);
    assert_eq!(*pool.get().await.unwrap(), 1);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().size, 1);
    assert_eq!(*pool.get().await.unwrap(), 2);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().size, 1);
    assert_eq!(*pool.get().await.unwrap(), 2);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().size, 1);
}
#[tokio::test]
async fn post_recycle_ok() {
    // post_recycle runs after every successful recycle, so the object's
    // value grows by one on each `get` after the first.
    let pool = Pool::<Computer>::builder(Computer::new(42))
        .max_size(1)
        .post_recycle(Hook::sync_fn(|obj, _| {
            *obj += 1;
            Ok(())
        }))
        .build()
        .unwrap();
    for expected in 42..=45 {
        assert_eq!(*pool.get().await.unwrap(), expected);
    }
}
/// A failing post_recycle hook does NOT fail the `get`: the stale object
/// is discarded and a fresh one is created in its place.
#[tokio::test]
async fn post_recycle_err_continue() {
    let manager = Computer::new(0);
    let pool = Pool::<Computer>::builder(manager)
        .max_size(1)
        .post_recycle(Hook::sync_fn(|_, metrics| {
            // Fail from the second recycle of an object onwards.
            if metrics.recycle_count > 0 {
                Err(HookError::message("Fail!"))
            } else {
                Ok(())
            }
        }))
        .build()
        .unwrap();
    // Each id is therefore handed out exactly twice, mirroring the
    // pre_recycle variant of this test.
    assert_eq!(*pool.get().await.unwrap(), 0);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().size, 1);
    assert_eq!(*pool.get().await.unwrap(), 0);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().size, 1);
    assert_eq!(*pool.get().await.unwrap(), 1);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().size, 1);
    assert_eq!(*pool.get().await.unwrap(), 1);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().size, 1);
    assert_eq!(*pool.get().await.unwrap(), 2);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().size, 1);
    assert_eq!(*pool.get().await.unwrap(), 2);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().size, 1);
}

148
vendor/deadpool/tests/managed_resize.rs vendored Normal file
View File

@@ -0,0 +1,148 @@
#![cfg(feature = "managed")]
use std::convert::Infallible;
use deadpool::managed::{self, Metrics, Object, RecycleResult};
type Pool = managed::Pool<Manager, Object<Manager>>;
/// Trivial manager: creates unit objects and always recycles successfully.
struct Manager {}
impl managed::Manager for Manager {
    type Type = ();
    type Error = Infallible;
    // Creation never fails.
    async fn create(&self) -> Result<(), Infallible> {
        Ok(())
    }
    // Recycling always succeeds.
    async fn recycle(&self, _conn: &mut (), _: &Metrics) -> RecycleResult<Infallible> {
        Ok(())
    }
}
// Regression test for https://github.com/bikeshedder/deadpool/issues/380
#[tokio::test]
async fn test_grow_reuse_existing() {
    // Shrink doesn't discard objects currently borrowed from the pool but
    // keeps track of them so that repeatedly growing and shrinking will
    // not cause excessive object creation. This logic used to contain a bug
    // causing an overflow.
    let mgr = Manager {};
    let pool = Pool::builder(mgr).max_size(2).build().unwrap();
    let obj1 = pool.get().await.unwrap();
    let obj2 = pool.get().await.unwrap();
    assert!(pool.status().size == 2);
    assert!(pool.status().max_size == 2);
    pool.resize(0);
    // At this point the two objects are still tracked
    assert!(pool.status().size == 2);
    assert!(pool.status().max_size == 0);
    pool.resize(1);
    // Only one of the objects should be returned to the pool
    assert!(pool.status().size == 2);
    assert!(pool.status().max_size == 1);
    drop(obj1);
    // The first drop brings the size to 1.
    assert!(pool.status().size == 1);
    assert!(pool.status().max_size == 1);
    // The second drop discards the object since the pool is at capacity.
    drop(obj2);
    assert!(pool.status().size == 1);
    assert!(pool.status().max_size == 1);
}
#[tokio::test]
async fn resize_pool_shrink() {
    // Shrinking below the number of borrowed objects: max_size drops
    // immediately, size only drops as borrowed objects are returned.
    let pool = Pool::builder(Manager {}).max_size(2).build().unwrap();
    let first = pool.get().await.unwrap();
    let second = pool.get().await.unwrap();
    pool.resize(1);
    let s = pool.status();
    assert_eq!((s.max_size, s.size), (1, 2));
    drop(second);
    let s = pool.status();
    assert_eq!((s.max_size, s.size), (1, 1));
    drop(first);
    let s = pool.status();
    assert_eq!((s.max_size, s.size), (1, 1));
}
#[tokio::test]
async fn resize_pool_grow() {
    // Growing the pool raises max_size immediately; size only grows as
    // new objects are actually created.
    let pool = Pool::builder(Manager {}).max_size(1).build().unwrap();
    let first = pool.get().await.unwrap();
    pool.resize(2);
    let s = pool.status();
    assert_eq!((s.max_size, s.size), (2, 1));
    let second = pool.get().await.unwrap();
    let s = pool.status();
    assert_eq!((s.max_size, s.size), (2, 2));
    drop(second);
    let s = pool.status();
    assert_eq!((s.max_size, s.size), (2, 2));
    drop(first);
    let s = pool.status();
    assert_eq!((s.max_size, s.size), (2, 2));
}
#[tokio::test]
async fn resize_pool_shrink_grow() {
    // Shrink-then-grow while an object is borrowed: the final max_size
    // wins and the borrowed object survives.
    let pool = Pool::builder(Manager {}).max_size(1).build().unwrap();
    let held = pool.get().await.unwrap();
    for new_size in [2, 0, 5] {
        pool.resize(new_size);
    }
    let s = pool.status();
    assert_eq!((s.max_size, s.size), (5, 1));
    drop(held);
    let s = pool.status();
    assert_eq!((s.max_size, s.size), (5, 1));
}
/// A waiter blocked on a zero-sized pool must be served once the pool is
/// resized to make room.
#[tokio::test]
async fn resize_pool_grow_concurrent() {
    let mgr = Manager {};
    let pool = Pool::builder(mgr).max_size(0).build().unwrap();
    // This get cannot succeed yet: max_size is 0.
    let join_handle = {
        let pool = pool.clone();
        tokio::spawn(async move { pool.get().await })
    };
    tokio::task::yield_now().await;
    assert_eq!(pool.status().max_size, 0);
    assert_eq!(pool.status().size, 0);
    assert_eq!(pool.status().available, 0);
    assert_eq!(pool.status().waiting, 1);
    // Resizing alone doesn't create the object; the waiter has not been
    // polled again yet.
    pool.resize(1);
    assert_eq!(pool.status().max_size, 1);
    assert_eq!(pool.status().size, 0);
    assert_eq!(pool.status().available, 0);
    assert_eq!(pool.status().waiting, 1);
    // After a yield the waiter wakes up and creates the object.
    tokio::task::yield_now().await;
    assert_eq!(pool.status().max_size, 1);
    assert_eq!(pool.status().size, 1);
    assert_eq!(pool.status().available, 0);
    assert_eq!(pool.status().waiting, 0);
    let obj0 = join_handle.await.unwrap().unwrap();
    assert_eq!(pool.status().max_size, 1);
    assert_eq!(pool.status().size, 1);
    assert_eq!(pool.status().available, 0);
    assert_eq!(pool.status().waiting, 0);
    drop(obj0);
    assert_eq!(pool.status().max_size, 1);
    assert_eq!(pool.status().size, 1);
    assert_eq!(pool.status().available, 1);
    assert_eq!(pool.status().waiting, 0);
}
#[tokio::test]
async fn close_resize() {
    // Resizing a closed pool is a no-op: max_size stays at 0.
    let pool = Pool::builder(Manager {}).max_size(1).build().unwrap();
    pool.close();
    pool.resize(16);
    let status = pool.status();
    assert_eq!(status.size, 0);
    assert_eq!(status.max_size, 0);
}

View File

@@ -0,0 +1,72 @@
#![cfg(all(
feature = "managed",
any(feature = "rt_tokio_1", feature = "rt_async-std_1")
))]
use std::{convert::Infallible, future::Future, pin::Pin, task, time::Duration};
use deadpool::{
managed::{self, Metrics, Object, PoolConfig, PoolError, RecycleResult, Timeouts},
Runtime,
};
type Pool = managed::Pool<Manager, Object<Manager>>;
struct Manager {}
/// A future that never resolves; used to make create/recycle hang forever.
struct Never;
impl Future for Never {
    type Output = ();
    fn poll(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> task::Poll<Self::Output> {
        // Never registers a waker: stays `Pending` forever.
        task::Poll::Pending
    }
}
impl managed::Manager for Manager {
    type Type = usize;
    type Error = Infallible;
    // Hangs forever; only a create timeout can unblock the caller.
    async fn create(&self) -> Result<usize, Infallible> {
        Never.await;
        unreachable!();
    }
    // Hangs forever; only a recycle timeout can unblock the caller.
    async fn recycle(&self, _conn: &mut usize, _: &Metrics) -> RecycleResult<Infallible> {
        Never.await;
        unreachable!();
    }
}
/// Builds a pool whose manager never completes create/recycle and verifies
/// that zero-duration timeouts make `get` fail with `PoolError::Timeout`.
async fn test_managed_timeout(runtime: Runtime) {
    let config = PoolConfig {
        max_size: 16,
        timeouts: Timeouts {
            create: Some(Duration::from_millis(0)),
            wait: Some(Duration::from_millis(0)),
            recycle: Some(Duration::from_millis(0)),
        },
        ..Default::default()
    };
    let pool = Pool::builder(Manager {})
        .config(config)
        .runtime(runtime)
        .build()
        .unwrap();
    assert!(matches!(pool.get().await, Err(PoolError::Timeout(_))));
}
// Run the timeout test on each async runtime the crate was compiled with.
#[cfg(feature = "rt_tokio_1")]
#[tokio::test]
async fn rt_tokio_1() {
    test_managed_timeout(Runtime::Tokio1).await;
}
#[cfg(feature = "rt_async-std_1")]
#[async_std::test]
async fn rt_async_std_1() {
    test_managed_timeout(Runtime::AsyncStd1).await;
}

View File

@@ -0,0 +1,100 @@
#![cfg(feature = "managed")]
use std::{
sync::atomic::{AtomicUsize, Ordering},
time::Duration,
};
use tokio::time;
use deadpool::managed::{self, Metrics, RecycleError, RecycleResult};
type Pool = managed::Pool<Manager>;
/// Manager with switchable failure modes plus a counter of detached objects.
struct Manager {
    create_fail: bool,     // when true, `create` always fails
    recycle_fail: bool,    // when true, `recycle` always fails
    detached: AtomicUsize, // incremented by `detach` for every discarded object
}
impl managed::Manager for Manager {
    type Type = ();
    type Error = ();
    // Fails or succeeds according to the `create_fail` flag.
    async fn create(&self) -> Result<(), ()> {
        if self.create_fail {
            Err(())
        } else {
            Ok(())
        }
    }
    // Fails or succeeds according to the `recycle_fail` flag.
    async fn recycle(&self, _conn: &mut (), _: &Metrics) -> RecycleResult<()> {
        if self.recycle_fail {
            Err(RecycleError::Backend(()))
        } else {
            Ok(())
        }
    }
    // Counts every object the pool detaches so tests can assert on it.
    fn detach(&self, _obj: &mut Self::Type) {
        self.detached.fetch_add(1, Ordering::Relaxed);
    }
}
/// With a manager whose `create` always fails, the pool must never grow.
#[tokio::test]
async fn create() {
    let manager = Manager {
        create_fail: true,
        recycle_fail: false,
        detached: AtomicUsize::new(0),
    };
    let pool = Pool::builder(manager).max_size(16).build().unwrap();
    {
        assert!(pool.get().await.is_err());
    }
    let status = pool.status();
    assert_eq!(status.available, 0);
    assert_eq!(status.size, 0);
    {
        // A failing create returns promptly; it must not hang until the
        // wait timeout fires.
        assert!(time::timeout(Duration::from_millis(10), pool.get())
            .await
            .unwrap()
            .is_err());
    }
    // Bug fix: re-fetch the status. The original code re-asserted the stale
    // snapshot taken BEFORE the second `get`, making these checks vacuous.
    let status = pool.status();
    assert_eq!(status.available, 0);
    assert_eq!(status.size, 0);
}
/// When recycling fails, stale objects are detached and replaced by
/// freshly created ones.
#[tokio::test]
async fn recycle() {
    let manager = Manager {
        create_fail: false,
        recycle_fail: true,
        detached: AtomicUsize::new(0),
    };
    let pool = Pool::builder(manager).max_size(16).build().unwrap();
    // Create two objects and return them both to the pool.
    {
        let _a = pool.get().await.unwrap();
        let _b = pool.get().await.unwrap();
    }
    let status = pool.status();
    assert_eq!(status.available, 2);
    assert_eq!(status.size, 2);
    assert_eq!(pool.manager().detached.load(Ordering::Relaxed), 0);
    {
        let _a = pool.get().await.unwrap();
        // All connections fail to recycle. Thus reducing the
        // available counter to 0.
        let status = pool.status();
        assert_eq!(status.available, 0);
        assert_eq!(status.size, 1);
        // Both original objects were discarded via `detach`.
        assert_eq!(pool.manager().detached.load(Ordering::Relaxed), 2);
    }
    // The freshly created replacement is returned to the pool.
    let status = pool.status();
    assert_eq!(status.available, 1);
    assert_eq!(status.size, 1);
}

179
vendor/deadpool/tests/unmanaged.rs vendored Normal file
View File

@@ -0,0 +1,179 @@
#![cfg(feature = "unmanaged")]
use std::time::Duration;
use tokio::{task, time};
use deadpool::unmanaged::{Pool, PoolError};
#[tokio::test]
async fn basic() {
    // An unmanaged pool starts with every preloaded object available.
    let pool = Pool::from(vec![(), (), ()]);
    let status = pool.status();
    assert_eq!(status.size, 3);
    assert_eq!(status.available, 3);
    // Each `get` holds one object: size stays fixed, availability shrinks.
    let mut held = Vec::new();
    for remaining in (0..3).rev() {
        held.push(pool.get().await);
        let status = pool.status();
        assert_eq!(status.size, 3);
        assert_eq!(status.available, remaining);
    }
}
/// Closing the pool wakes waiters with `Closed` and rejects further gets.
#[tokio::test]
async fn closing() {
    let pool = Pool::<i64>::new(1);
    pool.try_add(42).unwrap();
    // Hold the only object so the spawned getter has to wait.
    let obj = pool.get().await.unwrap();
    let join_handle = {
        let pool = pool.clone();
        tokio::spawn(async move { pool.get().await })
    };
    assert!(!pool.is_closed());
    assert_eq!(pool.status().available, 0);
    // Let the spawned task register as a waiter before closing.
    task::yield_now().await;
    pool.close();
    assert!(pool.is_closed());
    task::yield_now().await;
    assert_eq!(pool.status().available, 0);
    // The waiter and every later get variant must report `Closed`.
    assert!(matches!(join_handle.await.unwrap(), Err(PoolError::Closed)));
    assert!(matches!(pool.get().await, Err(PoolError::Closed)));
    assert!(matches!(pool.try_get(), Err(PoolError::Closed)));
    // Returning the object does not reopen the pool.
    drop(obj);
    assert!(pool.is_closed());
    assert!(matches!(pool.try_get(), Err(PoolError::Closed)));
    assert_eq!(pool.status().available, 0);
}
#[tokio::test(flavor = "multi_thread")]
async fn concurrent() {
    let pool = Pool::from(vec![0usize, 0, 0]);
    // Spawn tasks
    // 100 tasks compete for 3 objects; each increments the object it gets.
    let futures = (0..100)
        .map(|_| {
            let pool = pool.clone();
            tokio::spawn(async move {
                *pool.get().await.unwrap() += 1;
            })
        })
        .collect::<Vec<_>>();
    // Await tasks to finish
    for future in futures {
        future.await.unwrap();
    }
    // Verify
    // All objects returned; the pool size never changed.
    let status = pool.status();
    assert_eq!(status.size, 3);
    assert_eq!(status.available, 3);
    // The increments are spread over the three objects; their sum must
    // equal the number of tasks.
    let values = [pool.get().await, pool.get().await, pool.get().await];
    assert_eq!(
        values
            .iter()
            .map(|obj| **obj.as_ref().unwrap())
            .sum::<usize>(),
        100,
    );
}
/// `add` blocks once max_size is reached and unblocks after `remove`.
#[tokio::test(flavor = "multi_thread")]
async fn test_unmanaged_add_remove() {
    let pool = Pool::new(2);
    pool.add(1).await.unwrap();
    assert_eq!(pool.status().size, 1);
    pool.add(2).await.unwrap();
    assert_eq!(pool.status().size, 2);
    // Pool is full: a third add must block (observed via a short timeout).
    assert!(
        time::timeout(Duration::from_millis(10), pool.add(3))
            .await
            .is_err(),
        "adding a third item should timeout"
    );
    pool.remove().await.unwrap();
    assert_eq!(pool.status().size, 1);
    // With a free slot the add completes promptly.
    assert!(
        time::timeout(Duration::from_millis(10), pool.add(3))
            .await
            .is_ok(),
        "adding a third item should not timeout"
    );
    pool.remove().await.unwrap();
    assert_eq!(pool.status().size, 1);
    pool.remove().await.unwrap();
    assert_eq!(pool.status().size, 0);
}
#[tokio::test(flavor = "multi_thread")]
async fn try_add_try_remove() {
    // Non-blocking add/remove: try_add fails once max_size is reached and
    // succeeds again after a removal frees a slot.
    let pool = Pool::new(2);
    assert!(pool.try_add(1).is_ok());
    assert_eq!(pool.status().size, 1);
    assert!(pool.try_add(2).is_ok());
    assert_eq!(pool.status().size, 2);
    assert!(pool.try_add(3).is_err());
    assert!(pool.try_remove().is_ok());
    assert_eq!(pool.status().size, 1);
    assert!(pool.try_add(3).is_ok());
    assert!(pool.try_remove().is_ok());
    assert_eq!(pool.status().size, 1);
    assert!(pool.try_remove().is_ok());
    assert_eq!(pool.status().size, 0);
}
/// A blocked `add` completes once an object is removed from the full pool.
#[tokio::test(flavor = "multi_thread")]
async fn add_timeout() {
    let pool = Pool::from(vec![1]);
    // Spawn a task that tries to add a second object; it has to wait
    // because the pool is already at capacity.
    let add = {
        let pool = pool.clone();
        tokio::spawn(async move {
            pool.add(2).await.unwrap();
        })
    };
    // Give the add task time to start waiting.
    let mut iv = time::interval(Duration::from_millis(10));
    iv.tick().await;
    iv.tick().await;
    // Removing the existing object frees a slot, unblocking the add.
    pool.try_remove().unwrap();
    assert!(
        time::timeout(Duration::from_millis(10), add).await.is_ok(),
        "add should not timeout"
    );
    assert_eq!(pool.status().size, 1);
    // The remaining object is the newly added one.
    assert_eq!(pool.try_remove().unwrap(), 2);
}

View File

@@ -0,0 +1,60 @@
#![cfg(feature = "unmanaged")]
use std::time::Duration;
use deadpool::{
unmanaged::{self, PoolConfig, PoolError},
Runtime,
};
type Pool = unmanaged::Pool<()>;
/// Without a runtime, a non-zero wait cannot be implemented
/// (`NoRuntimeSpecified`), while a zero-duration timeout still fails fast
/// with `Timeout`.
#[tokio::test]
async fn no_runtime() {
    let pool = Pool::default();
    assert!(matches!(
        pool.timeout_get(Some(Duration::from_millis(1))).await,
        Err(PoolError::NoRuntimeSpecified)
    ));
    assert!(matches!(
        pool.timeout_get(Some(Duration::from_millis(0))).await,
        Err(PoolError::Timeout)
    ));
}
/// Even with no pool-level timeout configured, an explicit duration passed
/// to `timeout_get` must produce `PoolError::Timeout` on an empty pool.
async fn _test_get(runtime: Runtime) {
    let pool = Pool::from_config(&PoolConfig {
        max_size: 16,
        timeout: None,
        runtime: Some(runtime),
    });
    assert!(matches!(
        pool.timeout_get(Some(Duration::from_millis(1))).await,
        Err(PoolError::Timeout),
    ));
}
/// A timeout configured on the pool itself applies to plain `get` calls.
async fn _test_config(runtime: Runtime) {
    let pool = Pool::from_config(&PoolConfig {
        max_size: 16,
        timeout: Some(Duration::from_millis(1)),
        runtime: Some(runtime),
    });
    assert!(matches!(pool.get().await, Err(PoolError::Timeout)));
}
// Run the timeout tests on each async runtime the crate was compiled with.
#[cfg(feature = "rt_tokio_1")]
#[tokio::test]
async fn rt_tokio_1() {
    _test_get(Runtime::Tokio1).await;
    _test_config(Runtime::Tokio1).await;
}
#[cfg(feature = "rt_async-std_1")]
#[async_std::test]
async fn rt_async_std_1() {
    _test_get(Runtime::AsyncStd1).await;
    _test_config(Runtime::AsyncStd1).await;
}