chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

1
vendor/deadpool/.cargo-checksum.json vendored Normal file
View File

@@ -0,0 +1 @@
{"files":{".cargo_vcs_info.json":"2cec8a063766b0862acf3a7b5a1b02596a8ecd7f489954314d2d2b19db59ffac","CHANGELOG.md":"c42c419af5a824ba242df5a69ebb6633d910a9b0df1e25df64321f528e6b111c","Cargo.lock":"b29351fc0570d72d20eca52a32fa5d78828ce2fbb0f3b63615291e6cc51ca8d2","Cargo.toml":"e4488e92bcc516f138eec63acc6fbe0d1e575021adcc03aeeb0c451a7976221f","Cargo.toml.orig":"259aa0c9c292cd1b1fe3cfe444f7519183beb1a50091115db53aae37491b903c","LICENSE-APACHE":"23fac4edada8cdd75ecd793864bfb3a60ef59da853425ed3e2b3d030dc730aa6","LICENSE-MIT":"8982e37af80786707d8b8c33fb4cfe98c8a17d9d34caaa2b86895f0e84eafcca","README.md":"d1d0fac4ce3acbf85af4953d4f79fee3db4cf64f40e0abc164e9fc1d3e75be51","benches/managed.rs":"fc5ee7bff8faca8a5f9f841e9fe5613ca4c35946e4e86441232952c71c55f791","benches/unmanaged.rs":"c62ed3cfa0b53b4a4f716c5f818bdfd8bb02ac860b118852b638078502fa1f13","ci.config.yml":"949e15a17c4d86870d1379091b9d01d7d64bfa9c54d3b7c43a6f83fb3a68ce9b","src/lib.rs":"df1bf82e2a4b703f780acbafae0820d359c9445b56932759ef9256f378dfa0c7","src/managed/builder.rs":"051db0a08b555457dffa9a2b8b8083be15b4fe1c457705dac8396430650c96f0","src/managed/config.rs":"530bcecf905dde6bbd20e2bb79008b115583be2e2da12c818daa19875fe5888e","src/managed/dropguard.rs":"3c11712e7fda28702957502e8978f6d6caf73acefeca120e862d529a2a94933b","src/managed/errors.rs":"464c7460044ca651465824f4f7441c17c78a02870a50fe9b80e1aeb32d1e0640","src/managed/hooks.rs":"75165fdd0c92e04e1ab3a6d00f9a36f31640e4be0ed8f6625beb511829997c2f","src/managed/manager.rs":"fd653119b2bc69619c9140e219c80c19ba8c44daf880f560eb0ba2701bf90672","src/managed/metrics.rs":"970fd31134b185aeb9fb5eca405de4044bcebb0734f3e0d27de4ac39041d3205","src/managed/mod.rs":"98795220b3ea3a1aae6b4ebe70c840eb866b9b8d70d1f78e492edb9d7231b394","src/managed/object.rs":"304e1084abf43c4f0a5d65209541c4a2811076d1ed6ed3597f9b325ca5519603","src/managed/pool.rs":"a22e17c8ce5d6120fbbec25701f7419c713c41a9155387b3a7042eb38f127194","src/managed/reexports.rs":"b9ab94f7f8780ed7a3392d96d0f584524769fac5809013699b
cfff25fa5db740","src/unmanaged/config.rs":"e7f1b6b74d74a225a7072382339298b5449d0e310a412f67c31e1425904a32f2","src/unmanaged/errors.rs":"c3c82becfc5e7cea332f3eb7a1aefd15ee04a42b7ac9c8789b7836516d995f3e","src/unmanaged/mod.rs":"1605715f32ef7b4554ee648d9af719f28bc16a0e97f80241faa148444bd4e78b","src/util.rs":"dede24a0bfc839fbb41bdfd8901c4390da5228eb408f37ef14d4959a8cf03718","tests/managed.rs":"5350ff39588e348f319720b4346458ce6c88615ce637b11007a9bd94843b8edd","tests/managed_cancellation.rs":"a610540621dafa9829c1bcee1e69a2d94647184e7509b8e4032d6dc371d1bba4","tests/managed_config.rs":"7e2a5ab85805f02713c52b9bcb6a751d76f7cec921f3f8b7cfe91dbbc4f18678","tests/managed_deadlock.rs":"d2b8033e2dbdecdf71c70a8a30b0c65a5d753b2fb4a5bf513e4433f88133b1bb","tests/managed_hooks.rs":"aa133641de044d8a5b89e908b117f1ffab42fb5af74d7ba770648f4bf499b1fb","tests/managed_resize.rs":"2d9dbdf45b42ce3a7cdd27822334309ec573cf012ba1ce1c22df678b341fdec7","tests/managed_timeout.rs":"724ebd5b2489d93d606e700742d0a836a6f6ff5c4fd84db3b714fb0d3de756e6","tests/managed_unreliable_manager.rs":"1002d74cd4793d6ac10e5ec6f2a2de1c940dfee007acd4c0cac9ebf6ebcc434c","tests/unmanaged.rs":"0e9bf25216379b0bd8bb1dc3afe78af556bb1c585d0c17cd492a2c2e28869e3b","tests/unmanaged_timeout.rs":"e69433f037e03271fc80eb8489fc0980d149686920bc1abbd88441c931e73914"},"package":"0be2b1d1d6ec8d846f05e137292d0b89133caf95ef33695424c09568bdd39b1b"}

6
vendor/deadpool/.cargo_vcs_info.json vendored Normal file
View File

@@ -0,0 +1,6 @@
{
"git": {
"sha1": "c24a947bbc0b780b5cfedfab8e4849934ecbb212"
},
"path_in_vcs": "crates/deadpool"
}

244
vendor/deadpool/CHANGELOG.md vendored Normal file
View File

@@ -0,0 +1,244 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
<!-- next-header -->
## [0.12.3] - 2025-08-19
- Add unique `id` to objects which can be read via the `Object::id` method
- Add `WeakPool<T>` and `Pool::weak()` for non-owning, upgradeable pool references.
- Use `num_cpus::get() * 2` as default pool size instead of `num_cpus::get_physical() * 4`
which should result in the same pool size for most systems with hyper threading but fixes
the default pool size in containerized environments.
- Cache `num_cpus::get()` result to improve `Pool` creation performance.
## [0.12.2] - 2025-02-02
- Update `itertools` dependency to version `0.13.0`
- Change predicate parameter of `Pool::retain` method to `FnMut`
- Add `RetainResult` as return value of `Pool::retain` method
- Fix panic in `Pool::resize` method caused by shrinking and
growing the pool in quick succession.
## [0.12.1] - 2024-05-07
- Add WASM support
## [0.12.0] - 2024-05-04
- Add `Send` to `Manager::Type` and `Manager::Error` associated types
- Add `Send` to `Manager::create` and `Manager::recycle` return types
## [0.11.2] - 2024-04-10
- Make `Timeouts::new` and `Timeouts::wait_millis` functions const fns
## [0.11.1] - 2024-04-06
- Remove unused `console` dependency
## [0.11.0] - 2024-04-01
- Remove `async_trait` dependency
- Bump up MSRV to `1.75`
## [0.10.0] - 2023-09-25
- Remove unreachable enum variant `BuildError::Backend`
- Split `Status.available` into `available` and `waiting`.
- Add `QueueMode` configuration option for choosing between
a `FIFO` (default) and `LIFO` queue.
- Remove `HookError::Continue` and `HookError::Abort` variants
replacing it with the contents of `HookErrorCause`. Returning
a `HookError` from a `post_create` hook causes the `Pool::get`
operation to fail while returning it from a `pre_recycle` or
`post_recycle` hook the operation continues.
- Add `metrics` argument to `Manager::recycle` method.
- Remove deprecated `managed::sync` module.
- Remove deprecated `managed::Pool::try_get` method.
- Bump up MSRV to `1.63` to match the one of `tokio`.
## [0.9.5] - 2022-05-20
- Fix bug causing the pool to exceed its `max_size` in the
case of a recycling error.
- Fix panic caused by an integer overflow in the case of
a failing `post_create` hook.
## [0.9.4] - 2022-04-27
- Fix `HookError` and `HookErrorCause` in re-exports
## [0.9.3] - 2022-04-12
- Add `Pool::retain` method
- Fix `Pool::get_timeouts` method
- Deprecate `managed::Pool::try_get`
- Add `Pool::timeouts` method
## [0.9.2] - 2021-11-15
- `PoolConfig` now implements `Serialize`
## [0.9.1] - 2021-10-26
- Deprecate `managed::sync` module in favor of `deadpool-sync` crate
- Extract `runtime` module as separate `deadpool-runtime` crate
## [0.9.0] - 2021-10-18
- __Breaking:__ Replace `config` feature with `serde` (opted out by default)
- Fix `std::error::Error::source` implementations for library errors
- Add `Runtime::spawn_blocking` method
- Add `Runtime::spawn_blocking_background` method
- Remove `Runtime::None` in favor of `Option<Runtime>`
- Remove `Pool::new` method
- Add `Pool::builder` method and `PoolBuilder` struct
- Add `Object::metrics` method and `Metrics` struct
- Update `tokio` dependency to version `1.5.0`
- Add `post_create`, `pre_recycle` and `post_recycle` hooks
- Add `Pool::resize` method
- Add `managed_reexports` macro
## [0.8.2] - 2021-07-16
- Add `deadpool-diesel` to README
- Add `Sync + Send` as supertrait to `Manager`
- Fix usage of `PhantomData` in `Pool` struct: `Pool` is now `Sync` regardless of the wrapper.
## [0.8.1] - 2021-07-04
- Add `Object::pool` method
## [0.8.0] - 2021-05-21
- Add support for closing pools
- Replace `crossbeam-queue` by `Mutex<VecDeque<_>>`
- Fix invalid `size` and `available` counts when recycling fails
- Update `config` dependency to version `0.11`
- Remove deprecated `from_env` methods
- Add support for wrappers returned by the pool
- Use associated types for traits
## [0.7.0] - 2020-12-26
- Update `tokio` dependency to version `1`
## [0.6.0] - 2020-11-04
- Update `tokio` dependency to version `0.3`
- Update `crossbeam-queue` dependency to version `0.3`
- Remove deprecated `deadpool::*` types
- Add `deadpool-memcached` to README
## [0.5.2] - 2020-07-14
- Deprecate `managed::Config::from_env`
- Deprecate `unmanaged::Config::from_env`
## [0.5.1] - 2020-01-18
- Add `managed::Object::take` method
## [0.5.0] - 2020-01-16
- Move current pool implementation into `managed` module
- Add unmanaged version of the `Pool` which does not use a `Manager`
to create and recycle objects.
- Add feature flags `"managed"` and `"unmanaged"` to enable only parts
of this crate.
- Add `max_size` to pool `Status`
- Add support for `config` crate
## [0.4.3] - 2019-12-23
- Add `std::error::Error` implementation for `PoolError` and `RecycleError`.
This makes it more convenient to use the `?` operator.
## [0.4.2] - 2019-12-23
- Replace `tokio::sync::mpsc::channel` by `crossbeam_queue::ArrayQueue`
which gets rid of the mutex when fetching an object from the pool.
## [0.4.1] - 2019-12-19
- Make `Pool::timeout_get` public
## [0.4.0] - 2019-12-19
- Add support for timeouts
- Make fields of pool status public
- Fix possible deadlock and make implementation a lot simpler by using
the new tokio `Semaphore` and `Receiver::try_recv`.
- Add `Pool::try_get` and `Pool::timeout_get` functions
## [0.3.0] - 2019-12-13
- Add `deadpool-lapin` to README
- Add `deadpool-redis` to README
- Fix possible stale state and deadlock if a future calling `Pool::get` is
aborted. This is related to <https://github.com/tokio-rs/tokio/issues/1898>
- Make recycling more robust by changing the `Manager::recycle` to a non
consuming API.
## [0.2.3] - 2019-12-02
- Add documentation for `docs.rs`
- Remove `PoolInner` and `PoolSize` struct from public interface
- Improve example in `README.md` and crate root
## [0.2.2] - 2019-12-02
- Update to `tokio 0.2`
## 0.2.1
- Version skipped; only `tokio-postgres` was updated.
## [0.2.0] - 2019-11-14
- Split `deadpool` and `deadpool-postgres` in separate crates instead of
one with feature flags.
## [0.1.0] - 2019-11-14
- First release
<!-- next-url -->
[Unreleased]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.12.3...HEAD
[0.12.3]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.12.2...deadpool-v0.12.3
[0.12.2]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.12.1...deadpool-v0.12.2
[0.12.1]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.12.0...deadpool-v0.12.1
[0.12.0]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.11.2...deadpool-v0.12.0
[0.11.2]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.11.1...deadpool-v0.11.2
[0.11.1]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.11.0...deadpool-v0.11.1
[0.11.0]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.10.0...deadpool-v0.11.0
[0.10.0]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.9.5...deadpool-v0.10.0
[0.9.5]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.9.4...deadpool-v0.9.5
[0.9.4]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.9.3...deadpool-v0.9.4
[0.9.3]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.9.2...deadpool-v0.9.3
[0.9.2]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.9.1...deadpool-v0.9.2
[0.9.1]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.9.0...deadpool-v0.9.1
[0.9.0]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.8.2...deadpool-v0.9.0
[0.8.2]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.8.1...deadpool-v0.8.2
[0.8.1]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.8.0...deadpool-v0.8.1
[0.8.0]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.7.0...deadpool-v0.8.0
[0.7.0]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.6.0...deadpool-v0.7.0
[0.6.0]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.5.2...deadpool-v0.6.0
[0.5.2]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.5.1...deadpool-v0.5.2
[0.5.1]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.5.0...deadpool-v0.5.1
[0.5.0]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.4.4...deadpool-v0.5.0
[0.4.4]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.4.3...deadpool-v0.4.4
[0.4.3]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.4.2...deadpool-v0.4.3
[0.4.2]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.4.1...deadpool-v0.4.2
[0.4.1]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.4.0...deadpool-v0.4.1
[0.4.0]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.3.0...deadpool-v0.4.0
[0.3.0]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.2.3...deadpool-v0.3.0
[0.2.3]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.2.2...deadpool-v0.2.3
[0.2.2]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.2.1...deadpool-v0.2.2
[0.2.0]: https://github.com/bikeshedder/deadpool/compare/deadpool-v0.1.0...deadpool-v0.2.0
[0.1.0]: https://github.com/bikeshedder/deadpool/releases/tag/deadpool-v0.1.0

1698
vendor/deadpool/Cargo.lock generated vendored Normal file

File diff suppressed because it is too large Load Diff

148
vendor/deadpool/Cargo.toml vendored Normal file
View File

@@ -0,0 +1,148 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.75"
name = "deadpool"
version = "0.12.3"
authors = ["Michael P. Jung <michael.jung@terreon.de>"]
build = false
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Dead simple async pool"
readme = "README.md"
keywords = [
"async",
"database",
"pool",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/bikeshedder/deadpool"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = [
"--cfg",
"docsrs",
]
[features]
default = [
"managed",
"unmanaged",
]
managed = []
rt_async-std_1 = ["deadpool-runtime/async-std_1"]
rt_tokio_1 = ["deadpool-runtime/tokio_1"]
unmanaged = []
[lib]
name = "deadpool"
path = "src/lib.rs"
[[test]]
name = "managed"
path = "tests/managed.rs"
[[test]]
name = "managed_cancellation"
path = "tests/managed_cancellation.rs"
[[test]]
name = "managed_config"
path = "tests/managed_config.rs"
[[test]]
name = "managed_deadlock"
path = "tests/managed_deadlock.rs"
[[test]]
name = "managed_hooks"
path = "tests/managed_hooks.rs"
[[test]]
name = "managed_resize"
path = "tests/managed_resize.rs"
[[test]]
name = "managed_timeout"
path = "tests/managed_timeout.rs"
[[test]]
name = "managed_unreliable_manager"
path = "tests/managed_unreliable_manager.rs"
[[test]]
name = "unmanaged"
path = "tests/unmanaged.rs"
[[test]]
name = "unmanaged_timeout"
path = "tests/unmanaged_timeout.rs"
[[bench]]
name = "managed"
path = "benches/managed.rs"
harness = false
[[bench]]
name = "unmanaged"
path = "benches/unmanaged.rs"
harness = false
[dependencies.deadpool-runtime]
version = "0.1"
[dependencies.lazy_static]
version = "1.5.0"
[dependencies.num_cpus]
version = "1.11.1"
[dependencies.serde]
version = "1.0.103"
features = ["derive"]
optional = true
[dependencies.tokio]
version = "1.5"
features = ["sync"]
[dev-dependencies.async-std]
version = "1.0"
features = ["attributes"]
[dev-dependencies.config]
version = "0.15"
features = ["json"]
[dev-dependencies.criterion]
version = "0.5"
features = [
"html_reports",
"async_tokio",
]
[dev-dependencies.itertools]
version = "0.14"
[dev-dependencies.tokio]
version = "1.5.0"
features = [
"macros",
"rt",
"rt-multi-thread",
"time",
]

202
vendor/deadpool/LICENSE-APACHE vendored Normal file
View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 Michael P. Jung
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

21
vendor/deadpool/LICENSE-MIT vendored Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2019 Michael P. Jung
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

204
vendor/deadpool/README.md vendored Normal file
View File

@@ -0,0 +1,204 @@
# Deadpool [![Latest Version](https://img.shields.io/crates/v/deadpool.svg)](https://crates.io/crates/deadpool) [![Build Status](https://img.shields.io/github/actions/workflow/status/bikeshedder/deadpool/ci.yml?branch=main)](https://github.com/bikeshedder/deadpool/actions?query=workflow%3ARust) ![Unsafe forbidden](https://img.shields.io/badge/unsafe-forbidden-success.svg "Unsafe forbidden") [![Rust 1.75+](https://img.shields.io/badge/rustc-1.75+-lightgray.svg "Rust 1.75+")](https://blog.rust-lang.org/2023/12/28/Rust-1.75.0.html)
Deadpool is a dead simple async pool for connections and objects
of any type.
This crate provides two implementations:
- Managed pool (`deadpool::managed::Pool`)
- Creates and recycles objects as needed
- Useful for [database connection pools](#database-connection-pools)
- Enabled via the `managed` feature in your `Cargo.toml`
- Unmanaged pool (`deadpool::unmanaged::Pool`)
- All objects either need to be created by the user and added to the
pool manually. It is also possible to create a pool from an existing
collection of objects.
- Enabled via the `unmanaged` feature in your `Cargo.toml`
## Features
| Feature | Description | Extra dependencies | Default |
| ------- | ----------- | ------------------ | ------- |
| `managed` | Enable managed pool implementation | - | yes |
| `unmanaged` | Enable unmanaged pool implementation | - | yes |
| `rt_tokio_1` | Enable support for [tokio](https://crates.io/crates/tokio) crate | `tokio/time` | no |
| `rt_async-std_1` | Enable support for [async-std](https://crates.io/crates/async-std) crate | `async-std` | no |
| `serde` | Enable support for deserializing pool config | `serde/derive` | no |
The runtime features (`rt_*`) are only needed if you need support for
timeouts. If you try to use timeouts without specifying a runtime at
pool creation the pool get methods will return a
`PoolError::NoRuntimeSpecified` error.
## Managed pool (aka. connection pool)
This is the obvious choice for connection pools of any kind. Deadpool already
comes with a couple of [database connection pools](#database-connection-pools)
which work out of the box.
### Example
```rust
use deadpool::managed;
#[derive(Debug)]
enum Error { Fail }
struct Computer {}
impl Computer {
async fn get_answer(&self) -> i32 {
42
}
}
struct Manager {}
impl managed::Manager for Manager {
type Type = Computer;
type Error = Error;
async fn create(&self) -> Result<Computer, Error> {
Ok(Computer {})
}
async fn recycle(&self, _: &mut Computer, _: &managed::Metrics) -> managed::RecycleResult<Error> {
Ok(())
}
}
type Pool = managed::Pool<Manager>;
#[tokio::main]
async fn main() {
let mgr = Manager {};
let pool = Pool::builder(mgr).build().unwrap();
let mut conn = pool.get().await.unwrap();
let answer = conn.get_answer().await;
assert_eq!(answer, 42);
}
```
### Database connection pools
Deadpool supports various database backends by implementing the
`deadpool::managed::Manager` trait. The following backends are
currently supported:
Database (Protocol) | Backend | Crate | Latest Version | Official [deadpool-rs](https://github.com/deadpool-rs/) crate¹ |
------------------- | ------- | ----- | -------------- | ------------------------------------------------------------- |
[PostgreSQL](https://www.postgresql.org/) | [tokio-postgres](https://crates.io/crates/tokio-postgres) | [deadpool-postgres](https://crates.io/crates/deadpool-postgres) | [![Latest Version](https://img.shields.io/crates/v/deadpool-postgres.svg)](https://crates.io/crates/deadpool-postgres) | ✔ |
[Valkey](https://valkey.io/), [Redis](https://redis.io/) | [redis](https://crates.io/crates/redis) | [deadpool-redis](https://crates.io/crates/deadpool-redis) | [![Latest Version](https://img.shields.io/crates/v/deadpool-redis.svg)](https://crates.io/crates/deadpool-redis) | ✔ |
[SQLite](https://sqlite.org) | [rusqlite](https://crates.io/crates/rusqlite) | [deadpool-sqlite](https://crates.io/crates/deadpool-sqlite) | [![Latest Version](https://img.shields.io/crates/v/deadpool-sqlite.svg)](https://crates.io/crates/deadpool-sqlite) | ✔ |
[RabbitMQ](https://www.rabbitmq.com/) ([AMQP](https://www.amqp.org/)) | [lapin](https://crates.io/crates/lapin) | [deadpool-lapin](https://crates.io/crates/deadpool-lapin) | [![Latest Version](https://img.shields.io/crates/v/deadpool-lapin.svg)](https://crates.io/crates/deadpool-lapin) | ✔ |
[Memcached](https://www.memcached.org/) | [async-memcached](https://crates.io/crates/async-memcached) | [deadpool-memcached](https://crates.io/crates/deadpool-memcached) | [![Latest Version](https://img.shields.io/crates/v/deadpool-memcached.svg)](https://crates.io/crates/deadpool-memcached) | ✔ |
— | [diesel](https://crates.io/crates/diesel) | [deadpool-diesel](https://crates.io/crates/deadpool-diesel) | [![Latest Version](https://img.shields.io/crates/v/deadpool-diesel.svg)](https://crates.io/crates/deadpool-diesel) | ✔ |
— | [r2d2](https://crates.io/crates/r2d2) | [deadpool-r2d2](https://crates.io/crates/deadpool-r2d2) | [![Latest Version](https://img.shields.io/crates/v/deadpool-r2d2.svg)](https://crates.io/crates/deadpool-r2d2) | ✔ |
[libSQL](https://docs.turso.tech/libsql) | [libsql](https://crates.io/crates/libsql) | [deadpool-libsql](https://crates.io/crates/deadpool-libsql) | [![Latest Version](https://img.shields.io/crates/v/deadpool-libsql.svg)](https://crates.io/crates/deadpool-libsql) | ✔ |
[Microsoft SQL Server](https://www.microsoft.com/sql-server/) | [tiberius](https://crates.io/crates/tiberius) | [deadpool-tiberius](https://crates.io/crates/deadpool-tiberius) | [![Latest Version](https://img.shields.io/crates/v/deadpool-tiberius.svg)](https://crates.io/crates/deadpool-tiberius) | |
[neo4j](https://neo4j.com/) ([Bolt](https://neo4j.com/docs/bolt/)) | [bolt-client](https://crates.io/crates/bolt-client) | [deadpool-bolt](https://crates.io/crates/deadpool-bolt) | [![Latest Version](https://img.shields.io/crates/v/deadpool-bolt.svg)](https://crates.io/crates/deadpool-bolt) | |
[rbatis](https://rbatis.github.io/rbatis.io/) | [rbatis](https://crates.io/crates/rbatis) | [rbatis](https://crates.io/crates/rbatis) | [![Latest Version](https://img.shields.io/crates/v/rbatis.svg)](https://crates.io/crates/rbatis) | |
[LDAP v3](https://www.rfc-editor.org/rfc/rfc4511.txt) | [ldap3](https://crates.io/crates/ldap3) | [deadpool-ldap3](https://crates.io/crates/deadpool-ldap3) | [![Latest Version](https://img.shields.io/crates/v/deadpool-ldap3.svg)](https://crates.io/crates/deadpool-ldap3) | |
[ClickHouse](https://clickhouse.com/) | [clickhouse](https://crates.io/crates/clickhouse) | [clickhouse-connection-pool](https://crates.io/crates/clickhouse-connection-pool) | [![Latest Version](https://img.shields.io/crates/v/clickhouse-connection-pool.svg)](https://crates.io/crates/clickhouse-connection-pool) | |
¹ "Official deadpool-rs crates" marks crates maintained by the [deadpool-rs](https://github.com/deadpool-rs/) project. This shows ownership only, not quality or support level. Third-party crates are welcome to join the deadpool-rs umbrella for centralized maintenance and collaboration.
### Reasons for yet another connection pool
Deadpool is by no means the only pool implementation available. It does
things a little different and that is the main reason for it to exist:
- **Deadpool is compatible with any executor.** Objects are returned to the
pool using the `Drop` trait. The health of those objects is checked upon
next retrieval and not when they are returned. Deadpool never performs any
actions in the background. This is the reason why deadpool does not need
to spawn futures and does not rely on a background thread or task of any
type.
- **Identical startup and runtime behaviour.** When writing a long-running
  application there usually should be no difference between startup and
  runtime if a database connection is temporarily unavailable. Nobody
would expect an application to crash if the database becomes unavailable
at runtime. So it should not crash on startup either. Creating the pool
never fails and errors are only ever returned when calling `Pool::get()`.
If you really want your application to crash on startup if objects can
not be created on startup simply call
`pool.get().await.expect("DB connection failed")` right after creating
the pool.
- **Deadpool is fast.** Whenever working with locking primitives they are
held for the shortest duration possible. When returning an object to the
pool a single mutex is locked and when retrieving objects from the pool
a Semaphore is used to make this Mutex as little contested as possible.
- **Deadpool is simple.** Dead simple. There is very little API surface.
The actual code is barely 100 lines of code and lives in the two functions
`Pool::get` and `Object::drop`.
- **Deadpool is extensible.** By using `post_create`, `pre_recycle` and
`post_recycle` hooks you can customize object creation and recycling
to fit your needs.
- **Deadpool provides insights.** All objects track `Metrics` and the pool
provides a `status` method that can be used to find out details about
the inner workings.
- **Deadpool is resizable.** You can grow and shrink the pool at runtime
without requiring an application restart.
## Unmanaged pool
An unmanaged pool is useful when you can't write a manager for the objects
you want to pool or simply don't want to. This pool implementation is slightly
faster than the managed pool because it does not use a `Manager` trait to
`create` and `recycle` objects but leaves it up to the user.
### Unmanaged pool example
```rust
use deadpool::unmanaged::Pool;
struct Computer {}
impl Computer {
async fn get_answer(&self) -> i32 {
42
}
}
#[tokio::main]
async fn main() {
let pool = Pool::from(vec![
Computer {},
Computer {},
]);
let s = pool.get().await.unwrap();
assert_eq!(s.get_answer().await, 42);
}
```
## FAQ
### Why does deadpool depend on `tokio`? I thought it was runtime agnostic...
Deadpool depends on `tokio::sync::Semaphore`. This does **not** mean that
the tokio runtime or anything else of tokio is being used or will be part
of your build. You can easily check this by running the following command
in your own code base:
```shell
cargo tree --format "{p} {f}"
```
## License
Licensed under either of
- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or <http://www.apache.org/licenses/LICENSE-2.0>)
- MIT license ([LICENSE-MIT](LICENSE-MIT) or <http://opensource.org/licenses/MIT>)
at your option.

98
vendor/deadpool/benches/managed.rs vendored Normal file
View File

@@ -0,0 +1,98 @@
use std::{convert::TryInto, fmt::Display};
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use deadpool::managed::Metrics;
use tokio::task::JoinHandle;
/// Total number of `Pool::get` operations performed per benchmark run.
const ITERATIONS: usize = 1 << 15;

/// One benchmark scenario: a pool capacity paired with a worker-task count.
#[derive(Copy, Clone, Debug)]
struct Config {
    pool_size: usize,
    workers: usize,
}

impl Display for Config {
    /// Renders the scenario as a short benchmark id, e.g. `w8s2`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let Self { pool_size, workers } = self;
        write!(f, "w{}s{}", workers, pool_size)
    }
}

impl Config {
    /// Splits the fixed iteration budget evenly across all workers.
    fn operations_per_worker(&self) -> usize {
        ITERATIONS / self.workers
    }
}
// Benchmark matrix: worker count x pool size. The pool size never exceeds
// the worker count, so contention on the pool is always possible.
#[rustfmt::skip]
const CONFIGS: &[Config] = &[
    // 8 workers
    Config { workers: 8, pool_size: 2 },
    Config { workers: 8, pool_size: 4 },
    Config { workers: 8, pool_size: 8 },
    // 16 workers
    Config { workers: 16, pool_size: 4 },
    Config { workers: 16, pool_size: 8 },
    Config { workers: 16, pool_size: 16 },
    // 32 workers
    Config { workers: 32, pool_size: 8 },
    Config { workers: 32, pool_size: 16 },
    Config { workers: 32, pool_size: 32 },
];
/// Trivial manager whose pooled objects are `()`: creation and recycling
/// always succeed immediately, so the benchmark measures pool overhead only.
struct Manager {}

impl deadpool::managed::Manager for Manager {
    type Type = ();
    type Error = ();

    async fn create(&self) -> Result<Self::Type, Self::Error> {
        Ok(())
    }

    async fn recycle(
        &self,
        _: &mut Self::Type,
        _: &Metrics,
    ) -> deadpool::managed::RecycleResult<Self::Error> {
        Ok(())
    }
}

/// Pool of no-op objects managed by the no-op `Manager` above.
type Pool = deadpool::managed::Pool<Manager>;
/// Runs one benchmark scenario: spawns `cfg.workers` tasks that each perform
/// `cfg.operations_per_worker()` `Pool::get` calls against a pool capped at
/// `cfg.pool_size` objects.
async fn bench_get(cfg: Config) {
    let pool = Pool::builder(Manager {})
        .max_size(cfg.pool_size)
        .build()
        .unwrap();
    let join_handles: Vec<JoinHandle<()>> = (0..cfg.workers)
        .map(|_| {
            let pool = pool.clone();
            tokio::spawn(async move {
                for _ in 0..cfg.operations_per_worker() {
                    // The guard is dropped immediately, returning the object
                    // to the pool for the next `get`.
                    let _ = pool.get().await;
                }
            })
        })
        .collect();
    // Wait for all workers; a panic in any task fails the benchmark.
    for join_handle in join_handles {
        join_handle.await.unwrap();
    }
}
/// Registers the `managed` benchmark group with Criterion.
///
/// Each configuration in `CONFIGS` is benchmarked as `managed/get/<id>`,
/// with throughput reported in elements (total `Pool::get` calls per run).
fn criterion_benchmark(c: &mut Criterion) {
    // One multi-threaded Tokio runtime is shared by all benchmark runs.
    let runtime = tokio::runtime::Runtime::new().unwrap();
    let mut group = c.benchmark_group("managed");
    group.throughput(criterion::Throughput::Elements(
        // Fixed: the panic message previously read "u64 to usize" although
        // this conversion goes from `usize` to `u64`.
        ITERATIONS.try_into().expect("Can't convert usize to u64"),
    ));
    for &config in CONFIGS {
        group.bench_with_input(BenchmarkId::new("get", config), &config, |b, &cfg| {
            b.to_async(&runtime).iter(|| bench_get(cfg))
        });
    }
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

21
vendor/deadpool/benches/unmanaged.rs vendored Normal file
View File

@@ -0,0 +1,21 @@
use criterion::{criterion_group, criterion_main, Criterion};
use deadpool::unmanaged::Pool;
const ITERATIONS: usize = 1_000_000;
/// Benchmark body: builds an unmanaged pool holding a single `()` object and
/// fetches it `ITERATIONS` times; each guard is dropped immediately,
/// returning the object to the pool.
///
/// NOTE(review): `#[tokio::main]` constructs a fresh Tokio runtime on every
/// invocation, so runtime startup time is included in the measurement —
/// confirm this is intended.
#[tokio::main]
async fn use_pool() {
    // Capacity 16, but only one object is ever added.
    let pool = Pool::new(16);
    pool.add(()).await.unwrap();
    for _ in 0..ITERATIONS {
        let _ = pool.get().await.unwrap();
    }
}
/// Registers the single `use_pool` benchmark with Criterion.
fn criterion_benchmark(c: &mut Criterion) {
    c.bench_function("use_pool", |b| b.iter(use_pool));
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

23
vendor/deadpool/ci.config.yml vendored Normal file
View File

@@ -0,0 +1,23 @@
jobs:
check-deadpool:
name: Check deadpool
strategy:
fail-fast: false
matrix:
feature1:
- managed
- unmanaged
feature2:
- rt_tokio_1
- rt_async-std_1
- serde
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- run: cargo check
--no-default-features
--features ${{ matrix.feature1 }},${{ matrix.feature2 }}

58
vendor/deadpool/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,58 @@
#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(
nonstandard_style,
rust_2018_idioms,
rustdoc::broken_intra_doc_links,
rustdoc::private_intra_doc_links
)]
#![forbid(non_ascii_idents, unsafe_code)]
#![warn(
deprecated_in_future,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
unreachable_pub,
unused_import_braces,
unused_labels,
unused_lifetimes,
unused_qualifications,
unused_results
)]
#![allow(clippy::uninlined_format_args)]
#[cfg(feature = "managed")]
#[cfg_attr(docsrs, doc(cfg(feature = "managed")))]
pub mod managed;
#[cfg(feature = "unmanaged")]
#[cfg_attr(docsrs, doc(cfg(feature = "unmanaged")))]
pub mod unmanaged;
pub use deadpool_runtime::{Runtime, SpawnBlockingError};
/// The current pool status.
///
/// **The status returned by the pool is not guaranteed to be consistent!**
///
/// While this feature provides [eventual consistency][1] the numbers will be
/// off when accessing the status of a pool under heavy load. These numbers
/// are meant for an overall insight.
///
/// [1]: https://en.wikipedia.org/wiki/Eventual_consistency
#[derive(Clone, Copy, Debug)]
pub struct Status {
    /// The maximum size of the pool.
    pub max_size: usize,
    /// The current size of the pool.
    pub size: usize,
    /// The number of available objects in the pool.
    pub available: usize,
    /// The number of futures waiting for an object.
    pub waiting: usize,
}
mod util;

187
vendor/deadpool/src/managed/builder.rs vendored Normal file
View File

@@ -0,0 +1,187 @@
use std::{fmt, marker::PhantomData, time::Duration};
use crate::Runtime;
use super::{
hooks::{Hook, Hooks},
Manager, Object, Pool, PoolConfig, QueueMode, Timeouts,
};
/// Possible errors returned when [`PoolBuilder::build()`] fails to build a
/// [`Pool`].
#[derive(Copy, Clone, Debug)]
pub enum BuildError {
    /// [`Runtime`] is required due to configured timeouts.
    NoRuntimeSpecified,
}

impl fmt::Display for BuildError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::NoRuntimeSpecified => write!(
                f,
                "Error occurred while building the pool: Timeouts require a runtime",
            ),
        }
    }
}

impl std::error::Error for BuildError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // The single variant carries no underlying cause.
        match self {
            Self::NoRuntimeSpecified => None,
        }
    }
}
/// Builder for [`Pool`]s.
///
/// Instances of this are created by calling the [`Pool::builder()`] method.
#[must_use = "builder does nothing itself, use `.build()` to build it"]
pub struct PoolBuilder<M, W = Object<M>>
where
    M: Manager,
    W: From<Object<M>>,
{
    pub(crate) manager: M,
    pub(crate) config: PoolConfig,
    pub(crate) runtime: Option<Runtime>,
    pub(crate) hooks: Hooks<M>,
    // Marker tying the builder to the wrapper type `W` without storing one;
    // `fn() -> W` avoids imposing auto-trait requirements on `W` itself.
    _wrapper: PhantomData<fn() -> W>,
}

// Implemented manually to avoid unnecessary trait bound on `W` type parameter.
impl<M, W> fmt::Debug for PoolBuilder<M, W>
where
    M: fmt::Debug + Manager,
    W: From<Object<M>>,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PoolBuilder")
            .field("manager", &self.manager)
            .field("config", &self.config)
            .field("runtime", &self.runtime)
            .field("hooks", &self.hooks)
            .field("_wrapper", &self._wrapper)
            .finish()
    }
}
impl<M, W> PoolBuilder<M, W>
where
    M: Manager,
    W: From<Object<M>>,
{
    /// Creates a builder with default config, no runtime and no hooks.
    pub(crate) fn new(manager: M) -> Self {
        Self {
            manager,
            config: PoolConfig::default(),
            runtime: None,
            hooks: Hooks::default(),
            _wrapper: PhantomData,
        }
    }
    /// Builds the [`Pool`].
    ///
    /// # Errors
    ///
    /// See [`BuildError`] for details.
    pub fn build(self) -> Result<Pool<M, W>, BuildError> {
        // Return an error if a timeout is configured without runtime.
        let t = &self.config.timeouts;
        if (t.wait.is_some() || t.create.is_some() || t.recycle.is_some()) && self.runtime.is_none()
        {
            return Err(BuildError::NoRuntimeSpecified);
        }
        Ok(Pool::from_builder(self))
    }
    /// Sets a [`PoolConfig`] to build the [`Pool`] with.
    pub fn config(mut self, value: PoolConfig) -> Self {
        self.config = value;
        self
    }
    /// Sets the [`PoolConfig::max_size`].
    pub fn max_size(mut self, value: usize) -> Self {
        self.config.max_size = value;
        self
    }
    /// Sets the [`PoolConfig::timeouts`].
    pub fn timeouts(mut self, value: Timeouts) -> Self {
        self.config.timeouts = value;
        self
    }
    /// Sets the [`Timeouts::wait`] value of the [`PoolConfig::timeouts`].
    pub fn wait_timeout(mut self, value: Option<Duration>) -> Self {
        self.config.timeouts.wait = value;
        self
    }
    /// Sets the [`Timeouts::create`] value of the [`PoolConfig::timeouts`].
    pub fn create_timeout(mut self, value: Option<Duration>) -> Self {
        self.config.timeouts.create = value;
        self
    }
    /// Sets the [`Timeouts::recycle`] value of the [`PoolConfig::timeouts`].
    pub fn recycle_timeout(mut self, value: Option<Duration>) -> Self {
        self.config.timeouts.recycle = value;
        self
    }
    /// Sets the [`PoolConfig::queue_mode`].
    pub fn queue_mode(mut self, value: QueueMode) -> Self {
        self.config.queue_mode = value;
        self
    }
    /// Attaches a `post_create` hook.
    ///
    /// The given `hook` will be called each time right after a new [`Object`]
    /// has been created.
    pub fn post_create(mut self, hook: impl Into<Hook<M>>) -> Self {
        self.hooks.post_create.push(hook.into());
        self
    }
    /// Attaches a `pre_recycle` hook.
    ///
    /// The given `hook` will be called each time right before an [`Object`] will
    /// be recycled.
    pub fn pre_recycle(mut self, hook: impl Into<Hook<M>>) -> Self {
        self.hooks.pre_recycle.push(hook.into());
        self
    }
    /// Attaches a `post_recycle` hook.
    ///
    /// The given `hook` will be called each time right after an [`Object`] has
    /// been recycled.
    pub fn post_recycle(mut self, hook: impl Into<Hook<M>>) -> Self {
        self.hooks.post_recycle.push(hook.into());
        self
    }
    /// Sets the [`Runtime`].
    ///
    /// # Important
    ///
    /// The [`Runtime`] is optional. Most [`Pool`]s don't need a
    /// [`Runtime`]. If you want to utilize timeouts, however, a [`Runtime`]
    /// must be specified as you will otherwise get a
    /// [`PoolError::NoRuntimeSpecified`] when trying to use
    /// [`Pool::timeout_get()`].
    ///
    /// [`PoolBuilder::build()`] will fail with a
    /// [`BuildError::NoRuntimeSpecified`] if you try to build a
    /// [`Pool`] with timeouts and no [`Runtime`] specified.
    ///
    /// [`PoolError::NoRuntimeSpecified`]: super::PoolError::NoRuntimeSpecified
    pub fn runtime(mut self, value: Runtime) -> Self {
        self.runtime = Some(value);
        self
    }
}

142
vendor/deadpool/src/managed/config.rs vendored Normal file
View File

@@ -0,0 +1,142 @@
use std::{fmt, time::Duration};
use super::BuildError;
/// [`Pool`] configuration.
///
/// [`Pool`]: super::Pool
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct PoolConfig {
    /// Maximum size of the [`Pool`].
    ///
    /// Default: `cpu_count * 4`
    ///
    /// [`Pool`]: super::Pool
    pub max_size: usize,
    /// Timeouts of the [`Pool`].
    ///
    /// Default: No timeouts
    ///
    /// [`Pool`]: super::Pool
    #[cfg_attr(feature = "serde", serde(default))]
    pub timeouts: Timeouts,
    /// Queue mode of the [`Pool`].
    ///
    /// Determines the order of objects being queued and dequeued.
    ///
    /// Default: `Fifo`
    ///
    /// [`Pool`]: super::Pool
    #[cfg_attr(feature = "serde", serde(default))]
    pub queue_mode: QueueMode,
}

impl PoolConfig {
    /// Creates a new [`PoolConfig`] without any timeouts and with the provided
    /// `max_size`.
    #[must_use]
    pub fn new(max_size: usize) -> Self {
        Self {
            max_size,
            timeouts: Timeouts::default(),
            queue_mode: QueueMode::default(),
        }
    }
}

impl Default for PoolConfig {
    /// Creates a [`PoolConfig`] where [`PoolConfig::max_size`] is set to
    /// `cpu_count * 4`, counting logical cores (Hyper-Threading).
    // NOTE(review): this doc previously said `cpu_core_count * 2`, which
    // contradicted the `max_size` field documentation above; the actual value
    // comes from `util::get_default_pool_max_size` — confirm against it.
    fn default() -> Self {
        Self::new(crate::util::get_default_pool_max_size())
    }
}
/// Timeouts when getting [`Object`]s from a [`Pool`].
///
/// [`Object`]: super::Object
/// [`Pool`]: super::Pool
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct Timeouts {
    /// Timeout when waiting for a slot to become available.
    pub wait: Option<Duration>,
    /// Timeout when creating a new object.
    pub create: Option<Duration>,
    /// Timeout when recycling an object.
    pub recycle: Option<Duration>,
}

impl Timeouts {
    /// Create an empty [`Timeouts`] config (no timeouts set).
    #[must_use]
    pub const fn new() -> Self {
        Self {
            create: None,
            wait: None,
            recycle: None,
        }
    }
    /// Creates a new [`Timeouts`] config with only the `wait` timeout being
    /// set.
    #[must_use]
    pub const fn wait_millis(wait: u64) -> Self {
        Self {
            create: None,
            wait: Some(Duration::from_millis(wait)),
            recycle: None,
        }
    }
}

// Implemented manually to provide a custom documentation.
impl Default for Timeouts {
    /// Creates an empty [`Timeouts`] config (no timeouts set).
    fn default() -> Self {
        Self::new()
    }
}
/// Mode for dequeuing [`Object`]s from a [`Pool`].
///
/// [`Object`]: super::Object
/// [`Pool`]: super::Pool
#[derive(Clone, Copy, Debug, Default)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub enum QueueMode {
    /// Dequeue the object that was least recently added (first in first out).
    #[default]
    Fifo,
    /// Dequeue the object that was most recently added (last in first out).
    Lifo,
}

/// This error is used when building pools via the config `create_pool`
/// methods.
#[derive(Debug)]
pub enum CreatePoolError<C> {
    /// This variant is used for configuration errors
    Config(C),
    /// This variant is used for errors while building the pool
    Build(BuildError),
}

impl<C> fmt::Display for CreatePoolError<C>
where
    C: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Config(e) => write!(f, "Config: {}", e),
            Self::Build(e) => write!(f, "Build: {}", e),
        }
    }
}

// No `source()` override: neither variant exposes an underlying cause here.
impl<C> std::error::Error for CreatePoolError<C> where C: std::error::Error {}

View File

@@ -0,0 +1,47 @@
/// Runs its wrapped closure when dropped, unless defused first.
///
/// Useful for guaranteeing cleanup on early returns and panics; call
/// [`DropGuard::disarm`] once the happy path has made the cleanup
/// unnecessary.
pub(crate) struct DropGuard<F: Fn()>(pub(crate) F);

impl<F: Fn()> DropGuard<F> {
    /// Defuses the guard so the closure never runs.
    ///
    /// Implemented by leaking `self` via `std::mem::forget`, which skips the
    /// `Drop` implementation entirely (the closure itself holds no resources
    /// that would leak).
    pub(crate) fn disarm(self) {
        std::mem::forget(self);
    }
}

impl<F: Fn()> Drop for DropGuard<F> {
    fn drop(&mut self) {
        (self.0)();
    }
}
/// The closure must run exactly once when the guard is dropped.
#[test]
fn test_dropguard_drop() {
    use std::sync::atomic::{AtomicUsize, Ordering};
    let count = AtomicUsize::new(0);
    assert_eq!(count.load(Ordering::Relaxed), 0);
    {
        let _ = count.fetch_add(1, Ordering::Relaxed);
        // The guard is bound to `_`, so it is dropped immediately and the
        // closure decrements the counter right away.
        let _ = DropGuard(|| {
            let _ = count.fetch_sub(1, Ordering::Relaxed);
        });
    }
    assert_eq!(count.load(Ordering::Relaxed), 0);
}

/// After `disarm()` the closure must never run.
#[test]
fn test_dropguard_disarm() {
    use std::sync::atomic::{AtomicUsize, Ordering};
    let count = AtomicUsize::new(0);
    assert_eq!(count.load(Ordering::Relaxed), 0);
    {
        let _ = count.fetch_add(1, Ordering::Relaxed);
        let guard = DropGuard(|| {
            let _ = count.fetch_sub(1, Ordering::Relaxed);
        });
        guard.disarm();
    }
    // The decrement never happened, so the counter keeps its value of 1.
    assert_eq!(count.load(Ordering::Relaxed), 1);
}

123
vendor/deadpool/src/managed/errors.rs vendored Normal file
View File

@@ -0,0 +1,123 @@
use std::{borrow::Cow, fmt};
use super::hooks::HookError;
/// Possible errors returned by the [`Manager::recycle()`] method.
///
/// [`Manager::recycle()`]: super::Manager::recycle
#[derive(Debug)]
pub enum RecycleError<E> {
    /// Recycling failed for some other reason.
    Message(Cow<'static, str>),
    /// Error caused by the backend.
    Backend(E),
}

impl<E> RecycleError<E> {
    /// Convenience constructor function for the `RecycleError::Message`
    /// variant.
    pub fn message(msg: impl Into<Cow<'static, str>>) -> Self {
        Self::Message(msg.into())
    }
}

// Any backend error converts into `RecycleError::Backend`, enabling `?`.
impl<E> From<E> for RecycleError<E> {
    fn from(e: E) -> Self {
        Self::Backend(e)
    }
}

impl<E: fmt::Display> fmt::Display for RecycleError<E> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Message(msg) => write!(f, "Error occurred while recycling an object: {}", msg),
            Self::Backend(e) => write!(f, "Error occurred while recycling an object: {}", e),
        }
    }
}

impl<E: std::error::Error + 'static> std::error::Error for RecycleError<E> {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            // A plain message has no underlying cause.
            Self::Message(_) => None,
            Self::Backend(e) => Some(e),
        }
    }
}
/// Possible steps causing the timeout in an error returned by [`Pool::get()`]
/// method.
///
/// [`Pool::get()`]: super::Pool::get
#[derive(Clone, Copy, Debug)]
pub enum TimeoutType {
    /// Timeout happened while waiting for a slot to become available.
    Wait,
    /// Timeout happened while creating a new object.
    Create,
    /// Timeout happened while recycling an object.
    Recycle,
}

/// Possible errors returned by [`Pool::get()`] method.
///
/// [`Pool::get()`]: super::Pool::get
#[derive(Debug)]
pub enum PoolError<E> {
    /// Timeout happened.
    Timeout(TimeoutType),
    /// Backend reported an error.
    Backend(E),
    /// [`Pool`] has been closed.
    ///
    /// [`Pool`]: super::Pool
    Closed,
    /// No [`Runtime`] was specified.
    ///
    /// [`Runtime`]: crate::Runtime
    NoRuntimeSpecified,
    /// A `post_create` hook reported an error.
    PostCreateHook(HookError<E>),
}

// Any backend error converts into `PoolError::Backend`, enabling `?`.
impl<E> From<E> for PoolError<E> {
    fn from(e: E) -> Self {
        Self::Backend(e)
    }
}
impl<E: fmt::Display> fmt::Display for PoolError<E> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Timeout(tt) => match tt {
                TimeoutType::Wait => write!(
                    f,
                    "Timeout occurred while waiting for a slot to become available"
                ),
                TimeoutType::Create => write!(f, "Timeout occurred while creating a new object"),
                TimeoutType::Recycle => write!(f, "Timeout occurred while recycling an object"),
            },
            Self::Backend(e) => write!(f, "Error occurred while creating a new object: {}", e),
            Self::Closed => write!(f, "Pool has been closed"),
            Self::NoRuntimeSpecified => write!(f, "No runtime specified"),
            // Fixed: this arm used `writeln!`, appending a trailing newline
            // unlike every other variant; `Display` output should not end
            // with '\n'.
            Self::PostCreateHook(e) => write!(f, "`post_create` hook failed: {}", e),
        }
    }
}
impl<E: std::error::Error + 'static> std::error::Error for PoolError<E> {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            // These variants carry no underlying error value.
            Self::Timeout(_) | Self::Closed | Self::NoRuntimeSpecified => None,
            Self::Backend(e) => Some(e),
            Self::PostCreateHook(e) => Some(e),
        }
    }
}

169
vendor/deadpool/src/managed/hooks.rs vendored Normal file
View File

@@ -0,0 +1,169 @@
//! Hooks allowing to run code when creating and/or recycling objects.
use std::{borrow::Cow, fmt, future::Future, pin::Pin};
use crate::managed::object::ObjectInner;
use super::{Manager, Metrics};
/// The result returned by hooks
pub type HookResult<E> = Result<(), HookError<E>>;

/// The boxed future that should be returned by async hooks
pub type HookFuture<'a, E> = Pin<Box<dyn Future<Output = HookResult<E>> + Send + 'a>>;

/// Function signature for sync callbacks
type SyncFn<M> =
    dyn Fn(&mut <M as Manager>::Type, &Metrics) -> HookResult<<M as Manager>::Error> + Sync + Send;

/// Function signature for async callbacks
type AsyncFn<M> = dyn for<'a> Fn(&'a mut <M as Manager>::Type, &'a Metrics) -> HookFuture<'a, <M as Manager>::Error>
    + Sync
    + Send;
/// Wrapper for hook functions
pub enum Hook<M: Manager> {
    /// Use a plain function (non-async) as a hook
    Fn(Box<SyncFn<M>>),
    /// Use an async function as a hook
    AsyncFn(Box<AsyncFn<M>>),
}

impl<M: Manager> Hook<M> {
    /// Create Hook from sync function
    pub fn sync_fn(
        f: impl Fn(&mut M::Type, &Metrics) -> HookResult<M::Error> + Sync + Send + 'static,
    ) -> Self {
        Self::Fn(Box::new(f))
    }
    /// Create Hook from async function
    pub fn async_fn(
        f: impl for<'a> Fn(&'a mut M::Type, &'a Metrics) -> HookFuture<'a, M::Error>
            + Sync
            + Send
            + 'static,
    ) -> Self {
        Self::AsyncFn(Box::new(f))
    }
}

// Implemented manually because the boxed callbacks are not `Debug`;
// only the variant name is printed.
impl<M: Manager> fmt::Debug for Hook<M> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Fn(_) => f.debug_tuple("Fn").finish(),
            Self::AsyncFn(_) => f.debug_tuple("AsyncFn").finish(),
        }
    }
}
/// Error which is returned by `post_create`, `pre_recycle` and
/// `post_recycle` hooks.
// NOTE(review): the doc previously said `pre_create`, but no such hook
// exists — the hook slots are `post_create`, `pre_recycle` and
// `post_recycle` (see `Hooks` below).
#[derive(Debug)]
pub enum HookError<E> {
    /// Hook failed for some other reason.
    Message(Cow<'static, str>),
    /// Error caused by the backend.
    Backend(E),
}

impl<E> HookError<E> {
    /// Convenience constructor function for the `HookError::Message`
    /// variant.
    pub fn message(msg: impl Into<Cow<'static, str>>) -> Self {
        Self::Message(msg.into())
    }
}

impl<E: fmt::Display> fmt::Display for HookError<E> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Message(msg) => write!(f, "{}", msg),
            Self::Backend(e) => write!(f, "{}", e),
        }
    }
}

impl<E: std::error::Error + 'static> std::error::Error for HookError<E> {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            // A plain message has no underlying cause.
            Self::Message(_) => None,
            Self::Backend(e) => Some(e),
        }
    }
}
/// Ordered list of hooks that are applied one after another.
pub(crate) struct HookVec<M: Manager> {
    vec: Vec<Hook<M>>,
}

// Implemented manually to avoid unnecessary trait bound on `M` type parameter.
impl<M: Manager> fmt::Debug for HookVec<M> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("HookVec").finish_non_exhaustive()
    }
}

// Implemented manually to avoid unnecessary trait bound on `M` type parameter.
impl<M: Manager> Default for HookVec<M> {
    fn default() -> Self {
        Self { vec: Vec::new() }
    }
}

impl<M: Manager> HookVec<M> {
    /// Runs every hook in insertion order against `inner`, stopping at and
    /// returning the first error.
    pub(crate) async fn apply(
        &self,
        inner: &mut ObjectInner<M>,
    ) -> Result<(), HookError<M::Error>> {
        for hook in &self.vec {
            match hook {
                Hook::Fn(f) => f(&mut inner.obj, &inner.metrics)?,
                Hook::AsyncFn(f) => f(&mut inner.obj, &inner.metrics).await?,
            };
        }
        Ok(())
    }
    /// Appends a hook to the end of the list.
    pub(crate) fn push(&mut self, hook: Hook<M>) {
        self.vec.push(hook);
    }
}
/// Collection of all the hooks that can be configured for a [`Pool`].
///
/// [`Pool`]: super::Pool
pub(crate) struct Hooks<M: Manager> {
    // Run right after a new object has been created.
    pub(crate) post_create: HookVec<M>,
    // Run right before an object is recycled.
    pub(crate) pre_recycle: HookVec<M>,
    // Run right after an object has been recycled.
    pub(crate) post_recycle: HookVec<M>,
}
// Implemented manually to avoid unnecessary trait bound on `M` type parameter.
impl<M: Manager> fmt::Debug for Hooks<M> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Hooks")
            .field("post_create", &self.post_create)
            // Fixed: this field previously printed `&self.post_recycle`
            // under the "pre_recycle" key, so the pre-recycle hooks were
            // never shown in the debug output.
            .field("pre_recycle", &self.pre_recycle)
            .field("post_recycle", &self.post_recycle)
            .finish()
    }
}
// Implemented manually to avoid unnecessary trait bound on `M` type parameter.
impl<M: Manager> Default for Hooks<M> {
    fn default() -> Self {
        // All three hook slots start out empty.
        Self {
            pre_recycle: HookVec::default(),
            post_create: HookVec::default(),
            post_recycle: HookVec::default(),
        }
    }
}

37
vendor/deadpool/src/managed/manager.rs vendored Normal file
View File

@@ -0,0 +1,37 @@
use std::future::Future;
use crate::managed::{Metrics, RecycleError};
/// Manager responsible for creating new [`super::Object`]s or recycling existing ones.
pub trait Manager: Sync + Send {
    /// Type of [`super::Object`]s that this [`Manager`] creates and recycles.
    type Type: Send;
    /// Error that this [`Manager`] can return when creating and/or recycling
    /// [`super::Object`]s.
    type Error: Send;

    /// Creates a new instance of [`Manager::Type`].
    fn create(&self) -> impl Future<Output = Result<Self::Type, Self::Error>> + Send;

    /// Tries to recycle an instance of [`Manager::Type`].
    ///
    /// # Errors
    ///
    /// Returns [`Manager::Error`] if the instance couldn't be recycled.
    fn recycle(
        &self,
        obj: &mut Self::Type,
        metrics: &Metrics,
    ) -> impl Future<Output = RecycleResult<Self::Error>> + Send;

    /// Detaches an instance of [`Manager::Type`] from this [`Manager`].
    ///
    /// This method is called when using the [`super::Object::take()`] method for
    /// removing an [`super::Object`] from a [`super::Pool`]. If the [`Manager`] doesn't hold
    /// any references to the handed out [`super::Object`]s then the default
    /// implementation can be used which does nothing.
    fn detach(&self, _obj: &mut Self::Type) {}
}

/// Result type of the [`Manager::recycle()`] method.
pub type RecycleResult<E> = Result<(), RecycleError<E>>;

41
vendor/deadpool/src/managed/metrics.rs vendored Normal file
View File

@@ -0,0 +1,41 @@
#[cfg(not(target_arch = "wasm32"))]
use std::time::{Duration, Instant};
/// Per-object statistics recorded by the pool.
///
/// Tracks when an object was created, when it was last handed back, and how
/// often it has gone through recycling.
#[derive(Clone, Copy, Debug)]
#[must_use]
pub struct Metrics {
    /// The instant when this object was created
    #[cfg(not(target_arch = "wasm32"))]
    pub created: Instant,
    /// The instant when this object was last used
    #[cfg(not(target_arch = "wasm32"))]
    pub recycled: Option<Instant>,
    /// The number of times the object was recycled
    pub recycle_count: usize,
}

impl Metrics {
    /// How long ago this object was created.
    #[cfg(not(target_arch = "wasm32"))]
    pub fn age(&self) -> Duration {
        self.created.elapsed()
    }

    /// Time elapsed since this object was last used, falling back to its
    /// creation time if it has never been recycled.
    #[cfg(not(target_arch = "wasm32"))]
    pub fn last_used(&self) -> Duration {
        match self.recycled {
            Some(instant) => instant.elapsed(),
            None => self.created.elapsed(),
        }
    }
}

impl Default for Metrics {
    fn default() -> Self {
        Self {
            recycle_count: 0,
            #[cfg(not(target_arch = "wasm32"))]
            created: Instant::now(),
            #[cfg(not(target_arch = "wasm32"))]
            recycled: None,
        }
    }
}

73
vendor/deadpool/src/managed/mod.rs vendored Normal file
View File

@@ -0,0 +1,73 @@
//! Managed version of the pool.
//!
//! "Managed" means that it requires a [`Manager`] which is responsible for
//! creating and recycling objects as they are needed.
//!
//! # Example
//!
//! ```rust
//! use deadpool::managed;
//!
//! #[derive(Debug)]
//! enum Error { Fail }
//!
//! struct Computer {}
//!
//! impl Computer {
//! async fn get_answer(&self) -> i32 {
//! 42
//! }
//! }
//!
//! struct Manager {}
//!
//! impl managed::Manager for Manager {
//! type Type = Computer;
//! type Error = Error;
//!
//! async fn create(&self) -> Result<Computer, Error> {
//! Ok(Computer {})
//! }
//! async fn recycle(&self, conn: &mut Computer, _: &managed::Metrics) -> managed::RecycleResult<Error> {
//! Ok(())
//! }
//! }
//!
//! type Pool = managed::Pool<Manager>;
//!
//! #[tokio::main]
//! async fn main() {
//! let mgr = Manager {};
//! let pool = Pool::builder(mgr).max_size(16).build().unwrap();
//! let mut conn = pool.get().await.unwrap();
//! let answer = conn.get_answer().await;
//! assert_eq!(answer, 42);
//! }
//! ```
//!
//! For a more complete example please see
//! [`deadpool-postgres`](https://crates.io/crates/deadpool-postgres) crate.
mod builder;
mod config;
mod dropguard;
mod errors;
mod hooks;
mod manager;
mod metrics;
mod object;
mod pool;
pub mod reexports;
pub use crate::Status;
pub use self::{
builder::{BuildError, PoolBuilder},
config::{CreatePoolError, PoolConfig, QueueMode, Timeouts},
errors::{PoolError, RecycleError, TimeoutType},
hooks::{Hook, HookError, HookFuture, HookResult},
manager::{Manager, RecycleResult},
metrics::Metrics,
object::{Object, ObjectId},
pool::{Pool, RetainResult, WeakPool},
};

140
vendor/deadpool/src/managed/object.rs vendored Normal file
View File

@@ -0,0 +1,140 @@
use std::{
fmt,
ops::{Deref, DerefMut},
};
use crate::managed::{Manager, Metrics, Pool, WeakPool};
/// Wrapper around the actual pooled object which implements [`Deref`],
/// [`DerefMut`] and [`Drop`] traits.
///
/// Use this object just as if it was of type `T` and upon leaving a scope the
/// [`Drop::drop()`] will take care of returning it to the pool.
#[must_use]
pub struct Object<M: Manager> {
    /// The actual object (`None` only after `Object::take()` or inside
    /// `Drop::drop()`).
    pub(crate) inner: Option<ObjectInner<M>>,
    /// Pool to return the pooled object to.
    pub(crate) pool: WeakPool<M>,
}
impl<M> fmt::Debug for Object<M>
where
    M: fmt::Debug + Manager,
    M::Type: fmt::Debug,
{
    // Only the `inner` field is printed; the weak back-reference to the
    // pool is omitted from the debug output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Object")
            .field("inner", &self.inner)
            .finish()
    }
}
/// Internal representation of a pooled object: the object itself plus the
/// bookkeeping the pool keeps for it.
#[derive(Debug)]
pub(crate) struct ObjectInner<M: Manager> {
    /// Actual pooled object.
    pub obj: M::Type,
    /// The ID of this object. IDs are assigned in increasing order and
    /// generally reflect the creation order of objects.
    ///
    /// This can be used to discard objects after a configuration change
    /// or simply identify an object for debugging purposes.
    pub id: usize,
    /// Object metrics.
    pub metrics: Metrics,
}
impl<M: Manager> Object<M> {
    /// Permanently removes this [`Object`] from its [`Pool`]. This shrinks
    /// the [`Pool`] by one slot.
    #[must_use]
    pub fn take(mut this: Self) -> M::Type {
        let mut obj = this.inner.take().unwrap().obj;
        // Tell the pool (if it still exists) that this object is gone for
        // good so it can fix up its bookkeeping.
        match Object::pool(&this) {
            Some(pool) => pool.inner.detach_object(&mut obj),
            None => {}
        }
        obj
    }
    /// Returns the unique ID of this object.
    ///
    /// IDs grow strictly monotonically: every new object receives an ID
    /// greater than that of the previously created object. IDs are not
    /// guaranteed to be consecutive; gaps may exist.
    pub fn id(this: &Self) -> ObjectId {
        let inner = this.inner.as_ref().unwrap();
        ObjectId(inner.id)
    }
    /// Provides access to this object's statistics.
    pub fn metrics(this: &Self) -> &Metrics {
        let inner = this.inner.as_ref().unwrap();
        &inner.metrics
    }
    /// Returns the [`Pool`] this [`Object`] belongs to.
    ///
    /// [`Object`]s only hold a [`std::sync::Weak`] reference to their
    /// [`Pool`], so this returns [`None`] once the pool has been dropped.
    pub fn pool(this: &Self) -> Option<Pool<M>> {
        this.pool.upgrade()
    }
}
impl<M: Manager> Drop for Object<M> {
    fn drop(&mut self) {
        // Return the object only when it is still present AND the pool is
        // still alive; otherwise it is simply dropped here.
        if let Some((inner, pool)) = self.inner.take().zip(self.pool.upgrade()) {
            pool.inner.return_object(inner);
        }
    }
}
impl<M: Manager> Deref for Object<M> {
    type Target = M::Type;
    fn deref(&self) -> &M::Type {
        // `inner` is only `None` after `Object::take()` (which consumes the
        // wrapper) or inside `drop()`, so this unwrap cannot fail for callers.
        &self.inner.as_ref().unwrap().obj
    }
}
impl<M: Manager> DerefMut for Object<M> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner.as_mut().unwrap().obj
    }
}
impl<M: Manager> AsRef<M::Type> for Object<M> {
    // Delegates to `Deref`.
    fn as_ref(&self) -> &M::Type {
        self
    }
}
impl<M: Manager> AsMut<M::Type> for Object<M> {
    // Delegates to `DerefMut`.
    fn as_mut(&mut self) -> &mut M::Type {
        self
    }
}
/// A unique identifier for an object within a pool.
///
/// `ObjectId` is an opaque wrapper around a numeric identifier. Within a
/// single pool, IDs are unique and monotonically increasing: each new
/// object receives an identifier greater than the previously created one.
/// IDs are not guaranteed to be consecutive — gaps may exist.
///
/// Treat this type as an opaque handle for identifying objects. It
/// implements [`Copy`], [`Clone`], [`Eq`], [`Ord`], and [`Hash`] so it can
/// be compared, ordered, or stored in sets and maps. It should not be used
/// for arithmetic or treated as a raw number.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct ObjectId(usize);
impl fmt::Display for ObjectId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Same output as the original `write!(f, "{}", self.0)`:
        // formatter flags are intentionally not forwarded to the inner value.
        let Self(id) = self;
        write!(f, "{id}")
    }
}

537
vendor/deadpool/src/managed/pool.rs vendored Normal file
View File

@@ -0,0 +1,537 @@
#[cfg(not(target_arch = "wasm32"))]
use std::time::Instant;
use std::{
collections::VecDeque,
fmt,
future::Future,
marker::PhantomData,
sync::{
atomic::{AtomicUsize, Ordering},
Arc, Mutex, Weak,
},
time::Duration,
};
use deadpool_runtime::Runtime;
use tokio::sync::{Semaphore, TryAcquireError};
use crate::{
managed::{
dropguard::DropGuard, hooks::Hooks, object::ObjectInner, Manager, Metrics, Object,
PoolBuilder, PoolConfig, PoolError, QueueMode, TimeoutType, Timeouts,
},
Status,
};
/// Generic object and connection pool.
///
/// This struct can be cloned and transferred across thread boundaries and uses
/// reference counting for its internal state.
pub struct Pool<M: Manager, W: From<Object<M>> = Object<M>> {
    /// Shared pool state (manager, slots, semaphore, config, hooks).
    pub(crate) inner: Arc<PoolInner<M>>,
    /// Marker tying the wrapper type `W` to this pool without storing a `W`.
    pub(crate) _wrapper: PhantomData<fn() -> W>,
}
// Implemented manually to avoid unnecessary trait bound on `W` type parameter.
impl<M, W> fmt::Debug for Pool<M, W>
where
    M: fmt::Debug + Manager,
    M::Type: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Pool")
            .field("inner", &self.inner)
            .field("wrapper", &self._wrapper)
            .finish()
    }
}
impl<M: Manager, W: From<Object<M>>> Clone for Pool<M, W> {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
_wrapper: PhantomData,
}
}
}
impl<M: Manager, W: From<Object<M>>> Pool<M, W> {
    /// Instantiates a builder for a new [`Pool`].
    ///
    /// This is the only way to create a [`Pool`] instance.
    pub fn builder(manager: M) -> PoolBuilder<M, W> {
        PoolBuilder::new(manager)
    }
    // Materializes the shared pool state from a finished `PoolBuilder`.
    pub(crate) fn from_builder(builder: PoolBuilder<M, W>) -> Self {
        Self {
            inner: Arc::new(PoolInner {
                manager: builder.manager,
                next_id: AtomicUsize::new(0),
                slots: Mutex::new(Slots {
                    vec: VecDeque::with_capacity(builder.config.max_size),
                    size: 0,
                    max_size: builder.config.max_size,
                }),
                users: AtomicUsize::new(0),
                // One permit per free slot; permits are forgotten while an
                // object is handed out and re-added when it is returned.
                semaphore: Semaphore::new(builder.config.max_size),
                config: builder.config,
                hooks: builder.hooks,
                runtime: builder.runtime,
            }),
            _wrapper: PhantomData,
        }
    }
    /// Retrieves an [`Object`] from this [`Pool`] or waits for one to
    /// become available.
    ///
    /// # Errors
    ///
    /// See [`PoolError`] for details.
    pub async fn get(&self) -> Result<W, PoolError<M::Error>> {
        self.timeout_get(&self.timeouts()).await
    }
    /// Retrieves an [`Object`] from this [`Pool`] using a different `timeout`
    /// than the configured one.
    ///
    /// # Errors
    ///
    /// See [`PoolError`] for details.
    pub async fn timeout_get(&self, timeouts: &Timeouts) -> Result<W, PoolError<M::Error>> {
        // Count this task as a pool user; the guard undoes the increment if
        // we bail out early with an error.
        let _ = self.inner.users.fetch_add(1, Ordering::Relaxed);
        let users_guard = DropGuard(|| {
            let _ = self.inner.users.fetch_sub(1, Ordering::Relaxed);
        });
        // A zero wait timeout means "fail immediately when no object is
        // available" instead of waiting for the timeout to elapse.
        let non_blocking = match timeouts.wait {
            Some(t) => t.as_nanos() == 0,
            None => false,
        };
        let permit = if non_blocking {
            self.inner.semaphore.try_acquire().map_err(|e| match e {
                TryAcquireError::Closed => PoolError::Closed,
                TryAcquireError::NoPermits => PoolError::Timeout(TimeoutType::Wait),
            })?
        } else {
            apply_timeout(
                self.inner.runtime,
                TimeoutType::Wait,
                timeouts.wait,
                async {
                    self.inner
                        .semaphore
                        .acquire()
                        .await
                        .map_err(|_| PoolError::Closed)
                },
            )
            .await?
        };
        // Prefer idle objects; a rejected/failed recycle yields `None` and
        // the loop falls back to creating a fresh object.
        let inner_obj = loop {
            let inner_obj = match self.inner.config.queue_mode {
                QueueMode::Fifo => self.inner.slots.lock().unwrap().vec.pop_front(),
                QueueMode::Lifo => self.inner.slots.lock().unwrap().vec.pop_back(),
            };
            let inner_obj = if let Some(inner_obj) = inner_obj {
                self.try_recycle(timeouts, inner_obj).await?
            } else {
                self.try_create(timeouts).await?
            };
            if let Some(inner_obj) = inner_obj {
                break inner_obj;
            }
        };
        // Success: from here on the user counter is decremented by
        // `return_object`/`detach_object`, and the permit conceptually
        // travels with the handed-out object (re-added on return).
        users_guard.disarm();
        permit.forget();
        Ok(Object {
            inner: Some(inner_obj),
            pool: self.weak(),
        }
        .into())
    }
    // Attempts to recycle an idle object. Returns `Ok(None)` when a hook or
    // the manager rejects the object; the `UnreadyObject` guard then
    // detaches it and decrements the pool size on drop.
    #[inline]
    async fn try_recycle(
        &self,
        timeouts: &Timeouts,
        inner_obj: ObjectInner<M>,
    ) -> Result<Option<ObjectInner<M>>, PoolError<M::Error>> {
        let mut unready_obj = UnreadyObject {
            inner: Some(inner_obj),
            pool: &self.inner,
        };
        let inner = unready_obj.inner();
        // Apply pre_recycle hooks
        if let Err(_e) = self.inner.hooks.pre_recycle.apply(inner).await {
            // TODO log pre_recycle error
            return Ok(None);
        }
        if apply_timeout(
            self.inner.runtime,
            TimeoutType::Recycle,
            timeouts.recycle,
            self.inner.manager.recycle(&mut inner.obj, &inner.metrics),
        )
        .await
        .is_err()
        {
            return Ok(None);
        }
        // Apply post_recycle hooks
        if let Err(_e) = self.inner.hooks.post_recycle.apply(inner).await {
            // TODO log post_recycle error
            return Ok(None);
        }
        inner.metrics.recycle_count += 1;
        #[cfg(not(target_arch = "wasm32"))]
        {
            inner.metrics.recycled = Some(Instant::now());
        }
        Ok(Some(unready_obj.ready()))
    }
    // Creates a brand new object. The pool size is incremented only after
    // `Manager::create` succeeded.
    #[inline]
    async fn try_create(
        &self,
        timeouts: &Timeouts,
    ) -> Result<Option<ObjectInner<M>>, PoolError<M::Error>> {
        let mut unready_obj = UnreadyObject {
            inner: Some(ObjectInner {
                obj: apply_timeout(
                    self.inner.runtime,
                    TimeoutType::Create,
                    timeouts.create,
                    self.inner.manager.create(),
                )
                .await?,
                id: self.inner.next_id.fetch_add(1, Ordering::Relaxed),
                metrics: Metrics::default(),
            }),
            pool: &self.inner,
        };
        self.inner.slots.lock().unwrap().size += 1;
        // Apply post_create hooks
        if let Err(e) = self
            .inner
            .hooks
            .post_create
            .apply(unready_obj.inner())
            .await
        {
            return Err(PoolError::PostCreateHook(e));
        }
        Ok(Some(unready_obj.ready()))
    }
    /**
     * Resize the pool. This changes the `max_size` of the pool, dropping
     * excess objects and/or making space for new ones.
     *
     * If the pool is closed this method does nothing. The [`Pool::status`] method
     * always reports a `max_size` of 0 for closed pools.
     */
    pub fn resize(&self, max_size: usize) {
        if self.inner.semaphore.is_closed() {
            return;
        }
        let mut slots = self.inner.slots.lock().unwrap();
        let old_max_size = slots.max_size;
        slots.max_size = max_size;
        // shrink pool
        if max_size < old_max_size {
            while slots.size > slots.max_size {
                // Consume one permit per removed idle object so the permit
                // count keeps matching the free capacity.
                if let Ok(permit) = self.inner.semaphore.try_acquire() {
                    permit.forget();
                    // NOTE(review): objects discarded here are dropped without
                    // calling `Manager::detach`, unlike in `return_object` —
                    // confirm this is intentional.
                    if slots.vec.pop_front().is_some() {
                        slots.size -= 1;
                    }
                } else {
                    break;
                }
            }
            // Create a new VecDeque with a smaller capacity
            let mut vec = VecDeque::with_capacity(max_size);
            for obj in slots.vec.drain(..) {
                vec.push_back(obj);
            }
            slots.vec = vec;
        }
        // grow pool
        if max_size > old_max_size {
            let additional = slots.max_size - old_max_size;
            slots.vec.reserve_exact(additional);
            self.inner.semaphore.add_permits(additional);
        }
    }
    /// Retains only the objects specified by the given function.
    ///
    /// This function is typically used to remove objects from
    /// the pool based on their current state or metrics.
    ///
    /// **Caution:** This function blocks the entire pool while
    /// it is running. Therefore the given function should not
    /// block.
    ///
    /// The following example starts a background task that
    /// runs every 30 seconds and removes objects from the pool
    /// that haven't been used for more than one minute.
    ///
    /// ```rust,ignore
    /// let interval = Duration::from_secs(30);
    /// let max_age = Duration::from_secs(60);
    /// tokio::spawn(async move {
    ///     loop {
    ///         tokio::time::sleep(interval).await;
    ///         pool.retain(|_, metrics| metrics.last_used() < max_age);
    ///     }
    /// });
    /// ```
    pub fn retain(
        &self,
        mut predicate: impl FnMut(&M::Type, Metrics) -> bool,
    ) -> RetainResult<M::Type> {
        let mut removed = Vec::with_capacity(self.status().size);
        let mut guard = self.inner.slots.lock().unwrap();
        let mut i = 0;
        // This code can be simplified once `Vec::extract_if` lands in stable Rust.
        // https://doc.rust-lang.org/std/vec/struct.Vec.html#method.extract_if
        while i < guard.vec.len() {
            let obj = &mut guard.vec[i];
            // `&mut obj.obj` coerces to the `&M::Type` the predicate expects.
            if predicate(&mut obj.obj, obj.metrics) {
                i += 1;
            } else {
                let mut obj = guard.vec.remove(i).unwrap();
                self.manager().detach(&mut obj.obj);
                removed.push(obj.obj);
            }
        }
        guard.size -= removed.len();
        RetainResult {
            retained: i,
            removed,
        }
    }
    /// Get current timeout configuration
    pub fn timeouts(&self) -> Timeouts {
        self.inner.config.timeouts
    }
    /// Closes this [`Pool`].
    ///
    /// All current and future tasks waiting for [`Object`]s will return
    /// [`PoolError::Closed`] immediately.
    ///
    /// This operation resizes the pool to 0.
    pub fn close(&self) {
        self.resize(0);
        self.inner.semaphore.close();
    }
    /// Indicates whether this [`Pool`] has been closed.
    pub fn is_closed(&self) -> bool {
        self.inner.semaphore.is_closed()
    }
    /// Retrieves [`Status`] of this [`Pool`].
    #[must_use]
    pub fn status(&self) -> Status {
        // `slots` and `users` are sampled separately, so the reported
        // numbers may be slightly inconsistent under concurrent use.
        let slots = self.inner.slots.lock().unwrap();
        let users = self.inner.users.load(Ordering::Relaxed);
        let (available, waiting) = if users < slots.size {
            (slots.size - users, 0)
        } else {
            (0, users - slots.size)
        };
        Status {
            max_size: slots.max_size,
            size: slots.size,
            available,
            waiting,
        }
    }
    /// Returns [`Manager`] of this [`Pool`].
    #[must_use]
    pub fn manager(&self) -> &M {
        &self.inner.manager
    }
    /// Returns a [`WeakPool<T>`] of this [`Pool`].
    pub fn weak(&self) -> WeakPool<M> {
        WeakPool {
            inner: Arc::downgrade(&self.inner),
            _wrapper: PhantomData,
        }
    }
}
/// A weak reference to a [`Pool<T>`], used to avoid keeping the pool alive.
///
/// `WeakPool<T>` is the [`Pool<T>`] analogue of [`std::sync::Weak<T>`]. It is
/// useful wherever a non-owning reference to a pool is needed — background
/// tasks, managers, or callbacks that must not extend the pool's lifetime —
/// and thereby helps avoid reference cycles.
///
/// Call [`WeakPool::upgrade`] to try to obtain a strong reference again.
#[derive(Debug)]
pub struct WeakPool<M: Manager, W: From<Object<M>> = Object<M>> {
    inner: Weak<PoolInner<M>>,
    _wrapper: PhantomData<fn() -> W>,
}
impl<M: Manager, W: From<Object<M>>> WeakPool<M, W> {
    /// Attempts to upgrade this `WeakPool` to a strong [`Pool<T>`] reference.
    ///
    /// Returns `None` when the pool has already been dropped (i.e. no strong
    /// references remain).
    pub fn upgrade(&self) -> Option<Pool<M, W>> {
        self.inner.upgrade().map(|inner| Pool {
            inner,
            _wrapper: PhantomData,
        })
    }
}
pub(crate) struct PoolInner<M: Manager> {
    manager: M,
    /// Source of monotonically increasing object IDs.
    next_id: AtomicUsize,
    /// Idle-object queue plus size accounting, guarded by a mutex.
    slots: Mutex<Slots<ObjectInner<M>>>,
    /// Number of [`Pool`] users. A user is either a future which is waiting
    /// for an [`Object`] or one holding an [`Object`] which hasn't been
    /// returned, yet.
    users: AtomicUsize,
    /// Counts free slots: acquiring a permit grants the right to take or
    /// create one object.
    semaphore: Semaphore,
    config: PoolConfig,
    runtime: Option<Runtime>,
    hooks: Hooks<M>,
}
/// Idle-object queue together with the pool's size bookkeeping.
#[derive(Debug)]
struct Slots<T> {
    /// Idle objects ready to be handed out.
    vec: VecDeque<T>,
    /// Total number of objects managed by the pool (idle + handed out).
    size: usize,
    /// Current maximum pool size (can change via `Pool::resize`).
    max_size: usize,
}
// Implemented manually to avoid unnecessary trait bound on the struct.
impl<M> fmt::Debug for PoolInner<M>
where
    M: fmt::Debug + Manager,
    M::Type: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PoolInner")
            .field("manager", &self.manager)
            .field("slots", &self.slots)
            // Label matches the field name (was previously mislabeled "used").
            .field("users", &self.users)
            .field("semaphore", &self.semaphore)
            .field("config", &self.config)
            .field("runtime", &self.runtime)
            .field("hooks", &self.hooks)
            // NOTE(review): `next_id` is intentionally not included here.
            .finish()
    }
}
impl<M: Manager> PoolInner<M> {
    /// Returns an object to the idle queue, or detaches it when the pool
    /// was shrunk below its current size in the meantime.
    pub(crate) fn return_object(&self, mut inner: ObjectInner<M>) {
        // The holder of this object no longer counts as a pool user.
        let _ = self.users.fetch_sub(1, Ordering::Relaxed);
        let mut slots = self.slots.lock().unwrap();
        if slots.size <= slots.max_size {
            slots.vec.push_back(inner);
            drop(slots);
            // Make the returned object available to waiting tasks.
            self.semaphore.add_permits(1);
        } else {
            // Pool is over capacity (after a `resize`): drop the excess
            // object instead of queueing it.
            slots.size -= 1;
            drop(slots);
            self.manager.detach(&mut inner.obj);
        }
    }
    /// Permanently removes an object from the pool (used by `Object::take`).
    pub(crate) fn detach_object(&self, obj: &mut M::Type) {
        let _ = self.users.fetch_sub(1, Ordering::Relaxed);
        let mut slots = self.slots.lock().unwrap();
        // Only hand back a permit when the pool is not over capacity
        // (it can be over capacity after a `resize` to a smaller size).
        let add_permits = slots.size <= slots.max_size;
        slots.size -= 1;
        drop(slots);
        if add_permits {
            self.semaphore.add_permits(1);
        }
        self.manager.detach(obj);
    }
}
/// Guard around an object that is being created or recycled and is not yet
/// ready to be handed out.
///
/// If dropped before `ready()` is called, the object is detached from the
/// pool and the pool size is decremented.
struct UnreadyObject<'a, M: Manager> {
    inner: Option<ObjectInner<M>>,
    pool: &'a PoolInner<M>,
}
impl<M: Manager> UnreadyObject<'_, M> {
    // Marks the object as ready and disarms the drop-time cleanup.
    fn ready(mut self) -> ObjectInner<M> {
        self.inner.take().unwrap()
    }
    // Mutable access to the wrapped object while it is being prepared.
    fn inner(&mut self) -> &mut ObjectInner<M> {
        self.inner.as_mut().unwrap()
    }
}
impl<M: Manager> Drop for UnreadyObject<'_, M> {
    fn drop(&mut self) {
        if let Some(mut inner) = self.inner.take() {
            // The object never became ready: undo the size accounting and
            // give the manager a chance to detach it.
            self.pool.slots.lock().unwrap().size -= 1;
            self.pool.manager.detach(&mut inner.obj);
        }
    }
}
/// Drives `future` to completion, optionally bounded by `duration`.
///
/// With no `duration` the future is awaited directly. With a `duration` a
/// runtime is required to enforce the timeout; if none is configured,
/// [`PoolError::NoRuntimeSpecified`] is returned.
async fn apply_timeout<O, E>(
    runtime: Option<Runtime>,
    timeout_type: TimeoutType,
    duration: Option<Duration>,
    future: impl Future<Output = Result<O, impl Into<PoolError<E>>>>,
) -> Result<O, PoolError<E>> {
    let Some(duration) = duration else {
        // No timeout configured: just await the future.
        return future.await.map_err(Into::into);
    };
    match runtime {
        Some(runtime) => runtime
            .timeout(duration, future)
            .await
            .ok_or(PoolError::Timeout(timeout_type))?
            .map_err(Into::into),
        None => Err(PoolError::NoRuntimeSpecified),
    }
}
/// This is the result returned by `Pool::retain`
#[derive(Debug)]
pub struct RetainResult<T> {
    /// Number of retained objects
    pub retained: usize,
    /// Objects that were removed from the pool
    pub removed: Vec<T>,
}
impl<T> Default for RetainResult<T> {
    /// Nothing retained, nothing removed.
    fn default() -> Self {
        Self {
            retained: 0,
            removed: Vec::new(),
        }
    }
}

View File

@@ -0,0 +1,58 @@
//! This module contains all things that should be reexported
//! by backend implementations in order to avoid direct dependencies
//! on the `deadpool` crate itself.
//!
//! Crates based on `deadpool::managed` should include this line:
//! ```rust,ignore
//! pub use deadpool::managed::reexports::*;
//! deadpool::managed_reexports!(
//! "name_of_crate",
//! Manager,
//! Object<Manager>,
//! Error,
//! ConfigError
//! );
//! ```
pub use crate::{
managed::{Metrics, ObjectId, PoolConfig, Status, TimeoutType, Timeouts},
Runtime,
};
/// This macro creates all the type aliases usually reexported by
/// deadpool-* crates. Crates that implement a deadpool manager should
/// be considered stand alone crates and users of it should not need
/// to use `deadpool` directly.
//
// NOTE(review): the generated aliases reference `deadpool::…` paths directly
// (not `$crate`), so crates expanding this macro must depend on `deadpool`
// under that exact name.
#[macro_export]
macro_rules! managed_reexports {
    ($crate_name:literal, $Manager:ty, $Wrapper:ty, $Error:ty, $ConfigError:ty) => {
        #[doc=concat!("Type alias for using [`deadpool::managed::Pool`] with [`", $crate_name, "`].")]
        pub type Pool = deadpool::managed::Pool<$Manager, $Wrapper>;
        #[doc=concat!("Type alias for using [`deadpool::managed::Pool`] with [`", $crate_name, "`].")]
        pub type WeakPool = deadpool::managed::WeakPool<$Manager, $Wrapper>;
        #[doc=concat!("Type alias for using [`deadpool::managed::PoolBuilder`] with [`", $crate_name, "`].")]
        pub type PoolBuilder = deadpool::managed::PoolBuilder<$Manager, $Wrapper>;
        #[doc=concat!("Type alias for using [`deadpool::managed::BuildError`] with [`", $crate_name, "`].")]
        pub type BuildError = deadpool::managed::BuildError;
        #[doc=concat!("Type alias for using [`deadpool::managed::CreatePoolError`] with [`", $crate_name, "`].")]
        pub type CreatePoolError = deadpool::managed::CreatePoolError<$ConfigError>;
        #[doc=concat!("Type alias for using [`deadpool::managed::PoolError`] with [`", $crate_name, "`].")]
        pub type PoolError = deadpool::managed::PoolError<$Error>;
        #[doc=concat!("Type alias for using [`deadpool::managed::Object`] with [`", $crate_name, "`].")]
        pub type Object = deadpool::managed::Object<$Manager>;
        #[doc=concat!("Type alias for using [`deadpool::managed::Hook`] with [`", $crate_name, "`].")]
        pub type Hook = deadpool::managed::Hook<$Manager>;
        #[doc=concat!("Type alias for using [`deadpool::managed::HookError`] with [`", $crate_name, "`].")]
        pub type HookError = deadpool::managed::HookError<$Error>;
    };
}

40
vendor/deadpool/src/unmanaged/config.rs vendored Normal file
View File

@@ -0,0 +1,40 @@
use std::time::Duration;
use crate::Runtime;
/// Pool configuration.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct PoolConfig {
    /// Maximum size of the pool.
    pub max_size: usize,
    /// Timeout for [`Pool::get()`] operation.
    ///
    /// [`Pool::get()`]: super::Pool::get
    pub timeout: Option<Duration>,
    /// [`Runtime`] to be used.
    ///
    /// Skipped during serde (de)serialization.
    #[cfg_attr(feature = "serde", serde(skip))]
    pub runtime: Option<Runtime>,
}
impl PoolConfig {
    /// Create a new [`PoolConfig`] without any timeouts.
    #[must_use]
    pub fn new(max_size: usize) -> Self {
        Self {
            max_size,
            timeout: None,
            runtime: None,
        }
    }
}
impl Default for PoolConfig {
    /// Create a [`PoolConfig`] where [`PoolConfig::max_size`] is set to
    /// `cpu_core_count * 2` including logical cores (Hyper-Threading).
    fn default() -> Self {
        Self::new(crate::util::get_default_pool_max_size())
    }
}

31
vendor/deadpool/src/unmanaged/errors.rs vendored Normal file
View File

@@ -0,0 +1,31 @@
use std::fmt;
/// Possible errors of [`Pool::get()`] operation.
///
/// [`Pool::get()`]: super::Pool::get
#[derive(Clone, Copy, Debug)]
pub enum PoolError {
    /// Operation timeout happened.
    Timeout,
    /// Pool has been closed.
    Closed,
    /// No runtime specified.
    NoRuntimeSpecified,
}
impl fmt::Display for PoolError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let msg = match self {
            Self::Timeout => {
                "Timeout occurred while waiting for an object to become available"
            }
            Self::Closed => "Pool has been closed",
            Self::NoRuntimeSpecified => "No runtime specified",
        };
        f.write_str(msg)
    }
}
impl std::error::Error for PoolError {}

424
vendor/deadpool/src/unmanaged/mod.rs vendored Normal file
View File

@@ -0,0 +1,424 @@
//! Unmanaged version of the pool.
//!
//! "Unmanaged" means that no manager is used to create and recycle objects.
//! Objects either need to be created upfront or by adding them using the
//! [`Pool::add()`] or [`Pool::try_add()`] methods.
//!
//! # Example
//!
//! ```rust
//! use deadpool::unmanaged::Pool;
//!
//! struct Computer {}
//!
//! impl Computer {
//! async fn get_answer(&self) -> i32 {
//! 42
//! }
//! }
//!
//! #[tokio::main]
//! async fn main() {
//! let pool = Pool::from(vec![
//! Computer {},
//! Computer {},
//! ]);
//! let s = pool.get().await.unwrap();
//! assert_eq!(s.get_answer().await, 42);
//! }
//! ```
mod config;
mod errors;
use std::{
convert::TryInto,
ops::{Deref, DerefMut},
sync::{
atomic::{AtomicIsize, AtomicUsize, Ordering},
Arc, Mutex, Weak,
},
time::Duration,
};
use tokio::sync::{Semaphore, TryAcquireError};
pub use crate::Status;
pub use self::{config::PoolConfig, errors::PoolError};
/// Wrapper around the actual pooled object which implements [`Deref`],
/// [`DerefMut`] and [`Drop`] traits.
///
/// Treat this wrapper as if it were a `T`; when it goes out of scope,
/// [`Drop::drop()`] returns the object to the pool.
#[derive(Debug)]
#[must_use]
pub struct Object<T> {
    /// Actual pooled object.
    obj: Option<T>,
    /// Pool to return the pooled object to.
    pool: Weak<PoolInner<T>>,
}
impl<T> Object<T> {
    /// Permanently removes this object from the pool, reducing the pool's
    /// size. The object can later be re-added through [`Pool::add()`] or
    /// [`Pool::try_add()`].
    #[must_use]
    pub fn take(mut this: Self) -> T {
        match this.pool.upgrade() {
            Some(pool) => {
                // The object leaves the pool: shrink the size counter and
                // free up one slot for a future `add`.
                let _ = pool.size.fetch_sub(1, Ordering::Relaxed);
                pool.size_semaphore.add_permits(1);
            }
            None => {}
        }
        this.obj.take().unwrap()
    }
}
impl<T> Drop for Object<T> {
    fn drop(&mut self) {
        if let Some(obj) = self.obj.take() {
            if let Some(pool) = self.pool.upgrade() {
                {
                    // Scope limits how long the queue lock is held.
                    let mut queue = pool.queue.lock().unwrap();
                    queue.push(obj);
                }
                // Make the object visible to waiters: bump the available
                // counter, then release one queue permit.
                let _ = pool.available.fetch_add(1, Ordering::Relaxed);
                pool.semaphore.add_permits(1);
                // Drops all queued objects if the pool was closed meanwhile.
                pool.clean_up();
            }
        }
    }
}
impl<T> Deref for Object<T> {
    type Target = T;
    fn deref(&self) -> &T {
        // `obj` is only `None` after `Object::take()` (which consumes the
        // wrapper) or inside `drop()`, so this unwrap cannot fail for callers.
        self.obj.as_ref().unwrap()
    }
}
impl<T> DerefMut for Object<T> {
    fn deref_mut(&mut self) -> &mut T {
        self.obj.as_mut().unwrap()
    }
}
impl<T> AsRef<T> for Object<T> {
    // Delegates to `Deref`.
    fn as_ref(&self) -> &T {
        self
    }
}
impl<T> AsMut<T> for Object<T> {
    // Delegates to `DerefMut`.
    fn as_mut(&mut self) -> &mut T {
        self
    }
}
/// Generic object and connection pool. This is the static version of the pool
/// which doesn't include a manager for creating and recycling objects.
///
/// This struct can be cloned and transferred across thread boundaries and uses
/// reference counting for its internal state.
///
/// A pool of existing objects can be created from an existing collection of
/// objects if it has a known exact size:
/// ```rust
/// use deadpool::unmanaged::Pool;
/// let pool = Pool::from(vec![1, 2, 3]);
/// ```
#[derive(Debug)]
pub struct Pool<T> {
    inner: Arc<PoolInner<T>>,
}
impl<T> Clone for Pool<T> {
    // Cloning is cheap: it only increments the reference count of the
    // shared pool state.
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
        }
    }
}
impl<T> Default for Pool<T> {
    // The default pool size comes from `PoolConfig::default()`
    // (`cpu_core_count * 2`).
    fn default() -> Self {
        Self::from_config(&PoolConfig::default())
    }
}
impl<T> Pool<T> {
    /// Creates a new empty [`Pool`] with the given `max_size`.
    #[must_use]
    pub fn new(max_size: usize) -> Self {
        Self::from_config(&PoolConfig::new(max_size))
    }
    /// Create a new empty [`Pool`] using the given [`PoolConfig`].
    #[must_use]
    pub fn from_config(config: &PoolConfig) -> Self {
        Self {
            inner: Arc::new(PoolInner {
                config: *config,
                queue: Mutex::new(Vec::with_capacity(config.max_size)),
                size: AtomicUsize::new(0),
                // Room for `max_size` objects to be added.
                size_semaphore: Semaphore::new(config.max_size),
                available: AtomicIsize::new(0),
                // One permit per queued object; the pool starts empty.
                semaphore: Semaphore::new(0),
            }),
        }
    }
    /// Retrieves an [`Object`] from this [`Pool`] or waits for one to
    /// become available.
    ///
    /// # Errors
    ///
    /// See [`PoolError`] for details.
    pub async fn get(&self) -> Result<Object<T>, PoolError> {
        self.timeout_get(self.inner.config.timeout).await
    }
    /// Retrieves an [`Object`] from this [`Pool`] and doesn't wait if there is
    /// currently no [`Object`] available and the maximum [`Pool`] size has
    /// been reached.
    ///
    /// # Errors
    ///
    /// See [`PoolError`] for details.
    pub fn try_get(&self) -> Result<Object<T>, PoolError> {
        let inner = self.inner.as_ref();
        let permit = inner.semaphore.try_acquire().map_err(|e| match e {
            TryAcquireError::NoPermits => PoolError::Timeout,
            TryAcquireError::Closed => PoolError::Closed,
        })?;
        let obj = {
            let mut queue = inner.queue.lock().unwrap();
            // Holding a permit guarantees the queue is non-empty.
            queue.pop().unwrap()
        };
        // The permit travels with the handed-out object and is re-added
        // when it is dropped back into the pool.
        permit.forget();
        let _ = inner.available.fetch_sub(1, Ordering::Relaxed);
        Ok(Object {
            pool: Arc::downgrade(&self.inner),
            obj: Some(obj),
        })
    }
    /// Retrieves an [`Object`] from this [`Pool`] using a different `timeout`
    /// than the configured one.
    ///
    /// # Errors
    ///
    /// See [`PoolError`] for details.
    pub async fn timeout_get(&self, timeout: Option<Duration>) -> Result<Object<T>, PoolError> {
        let inner = self.inner.as_ref();
        let permit = match (timeout, inner.config.runtime) {
            // No timeout: wait indefinitely.
            (None, _) => inner
                .semaphore
                .acquire()
                .await
                .map_err(|_| PoolError::Closed),
            // Zero timeout: fail immediately when no object is available.
            (Some(timeout), _) if timeout.as_nanos() == 0 => {
                inner.semaphore.try_acquire().map_err(|e| match e {
                    TryAcquireError::NoPermits => PoolError::Timeout,
                    TryAcquireError::Closed => PoolError::Closed,
                })
            }
            // Positive timeout: a runtime is required to enforce it.
            (Some(timeout), Some(runtime)) => runtime
                .timeout(timeout, inner.semaphore.acquire())
                .await
                .ok_or(PoolError::Timeout)?
                .map_err(|_| PoolError::Closed),
            (Some(_), None) => Err(PoolError::NoRuntimeSpecified),
        }?;
        let obj = {
            let mut queue = inner.queue.lock().unwrap();
            queue.pop().unwrap()
        };
        permit.forget();
        let _ = inner.available.fetch_sub(1, Ordering::Relaxed);
        Ok(Object {
            pool: Arc::downgrade(&self.inner),
            obj: Some(obj),
        })
    }
    /// Adds an `object` to this [`Pool`].
    ///
    /// If the [`Pool`] size has already reached its maximum, then this function
    /// blocks until the `object` can be added to the [`Pool`].
    ///
    /// # Errors
    ///
    /// If the [`Pool`] has been closed a tuple containing the `object` and
    /// the [`PoolError`] is returned instead.
    pub async fn add(&self, object: T) -> Result<(), (T, PoolError)> {
        match self.inner.size_semaphore.acquire().await {
            Ok(permit) => {
                // The consumed permit accounts for the pool growing by one.
                permit.forget();
                self._add(object);
                Ok(())
            }
            Err(_) => Err((object, PoolError::Closed)),
        }
    }
    /// Tries to add an `object` to this [`Pool`].
    ///
    /// # Errors
    ///
    /// If the [`Pool`] size has already reached its maximum, or the [`Pool`]
    /// has been closed, then a tuple containing the `object` and the
    /// [`PoolError`] is returned instead.
    pub fn try_add(&self, object: T) -> Result<(), (T, PoolError)> {
        match self.inner.size_semaphore.try_acquire() {
            Ok(permit) => {
                permit.forget();
                self._add(object);
                Ok(())
            }
            Err(e) => Err(match e {
                TryAcquireError::NoPermits => (object, PoolError::Timeout),
                TryAcquireError::Closed => (object, PoolError::Closed),
            }),
        }
    }
    /// Internal function which adds an `object` to this [`Pool`].
    ///
    /// Prior calling this it must be guaranteed that `size` doesn't exceed
    /// `max_size`. In the methods `add` and `try_add` this is ensured by using
    /// the `size_semaphore`.
    fn _add(&self, object: T) {
        let _ = self.inner.size.fetch_add(1, Ordering::Relaxed);
        {
            let mut queue = self.inner.queue.lock().unwrap();
            queue.push(object);
        }
        // Publish the object to waiters only after it is in the queue.
        let _ = self.inner.available.fetch_add(1, Ordering::Relaxed);
        self.inner.semaphore.add_permits(1);
    }
    /// Removes an [`Object`] from this [`Pool`].
    pub async fn remove(&self) -> Result<T, PoolError> {
        self.get().await.map(Object::take)
    }
    /// Tries to remove an [`Object`] from this [`Pool`].
    pub fn try_remove(&self) -> Result<T, PoolError> {
        self.try_get().map(Object::take)
    }
    /// Removes an [`Object`] from this [`Pool`] using a different `timeout`
    /// than the configured one.
    pub async fn timeout_remove(&self, timeout: Option<Duration>) -> Result<T, PoolError> {
        self.timeout_get(timeout).await.map(Object::take)
    }
    /// Closes this [`Pool`].
    ///
    /// All current and future tasks waiting for [`Object`]s will return
    /// [`PoolError::Closed`] immediately.
    pub fn close(&self) {
        // Closing both semaphores wakes up waiters (on `get`) and blocks
        // further `add` calls; `clear` then drops all queued objects.
        self.inner.semaphore.close();
        self.inner.size_semaphore.close();
        self.inner.clear();
    }
    /// Indicates whether this [`Pool`] has been closed.
    pub fn is_closed(&self) -> bool {
        self.inner.is_closed()
    }
    /// Retrieves [`Status`] of this [`Pool`].
    #[must_use]
    pub fn status(&self) -> Status {
        let max_size = self.inner.config.max_size;
        let size = self.inner.size.load(Ordering::Relaxed);
        // `available` is negative when futures are waiting for an object;
        // the negated value is then the number of waiters.
        let available = self.inner.available.load(Ordering::Relaxed);
        Status {
            max_size,
            size,
            available: if available > 0 { available as usize } else { 0 },
            waiting: if available < 0 {
                (-available) as usize
            } else {
                0
            },
        }
    }
}
/// Shared internal state of the unmanaged [`Pool`].
#[derive(Debug)]
struct PoolInner<T> {
    config: PoolConfig,
    /// Idle objects ready to be handed out.
    queue: Mutex<Vec<T>>,
    /// Current number of objects belonging to the pool (idle or handed out).
    size: AtomicUsize,
    /// This semaphore has as many permits as `max_size - size`. Every time
    /// an [`Object`] is added to the [`Pool`] a permit is removed from the
    /// semaphore and every time an [`Object`] is removed a permit is returned
    /// back.
    size_semaphore: Semaphore,
    /// Number of available [`Object`]s in the [`Pool`]. If there are no
    /// [`Object`]s in the [`Pool`] this number can become negative and store
    /// the number of [`Future`]s waiting for an [`Object`].
    ///
    /// [`Future`]: std::future::Future
    available: AtomicIsize,
    /// Has one permit per object currently sitting in `queue`.
    semaphore: Semaphore,
}
impl<T> PoolInner<T> {
    /// Cleans up internals of this [`Pool`].
    ///
    /// This method is called after closing the [`Pool`] and whenever an
    /// [`Object`] is returned to the [`Pool`] and makes sure closed [`Pool`]s
    /// don't contain any [`Object`]s.
    fn clean_up(&self) {
        if self.is_closed() {
            self.clear();
        }
    }
    /// Removes all the [`Object`]s which are currently part of this [`Pool`].
    fn clear(&self) {
        let mut queue = self.queue.lock().unwrap();
        // Adjust both counters by the number of dropped objects while the
        // queue lock is still held.
        let _ = self.size.fetch_sub(queue.len(), Ordering::Relaxed);
        let _ = self
            .available
            .fetch_sub(queue.len() as isize, Ordering::Relaxed);
        queue.clear();
    }
    /// Indicates whether this [`Pool`] has been closed.
    fn is_closed(&self) -> bool {
        // Acquiring zero permits only fails when the semaphore is closed.
        matches!(
            self.semaphore.try_acquire_many(0),
            Err(TryAcquireError::Closed)
        )
    }
}
impl<T, I> From<I> for Pool<T>
where
    I: IntoIterator<Item = T>,
    <I as IntoIterator>::IntoIter: ExactSizeIterator,
{
    /// Creates a new [`Pool`] from the given [`ExactSizeIterator`] of
    /// [`Object`]s.
    fn from(iter: I) -> Self {
        let objects: Vec<T> = iter.into_iter().collect();
        let count = objects.len();
        // The pool starts full: its size, available counter and semaphore
        // permits all equal the number of supplied objects, while the
        // size semaphore holds no spare capacity.
        let inner = PoolInner {
            config: PoolConfig::new(count),
            queue: Mutex::new(objects),
            size: AtomicUsize::new(count),
            size_semaphore: Semaphore::new(0),
            available: AtomicIsize::new(count.try_into().unwrap()),
            semaphore: Semaphore::new(count),
        };
        Self {
            inner: Arc::new(inner),
        }
    }
}

12
vendor/deadpool/src/util.rs vendored Normal file
View File

@@ -0,0 +1,12 @@
lazy_static::lazy_static! {
/// Cache the physical CPU count to avoid calling `num_cpus::get()`
/// multiple times, which is expensive when creating pools in quick
/// succession.
static ref CPU_COUNT: usize = num_cpus::get();
}
/// Get the default maximum size of a pool, which is `cpu_core_count * 2`
/// including logical cores (Hyper-Threading).
pub(crate) fn get_default_pool_max_size() -> usize {
*CPU_COUNT * 2
}

228
vendor/deadpool/tests/managed.rs vendored Normal file
View File

@@ -0,0 +1,228 @@
#![cfg(feature = "managed")]
use std::{convert::Infallible, time::Duration};
use tokio::time;
use deadpool::managed::{self, Metrics, Object, PoolError, RecycleResult, Timeouts};
type Pool = managed::Pool<Manager>;
/// Trivial manager whose pooled objects are plain `usize` counters.
struct Manager {}
impl managed::Manager for Manager {
    type Type = usize;
    type Error = Infallible;
    /// Every new object starts at `0`; creation never fails.
    async fn create(&self) -> Result<usize, Infallible> {
        Ok(0)
    }
    /// Recycling always succeeds and leaves the object untouched.
    async fn recycle(&self, _conn: &mut usize, _: &Metrics) -> RecycleResult<Infallible> {
        Ok(())
    }
}
#[tokio::test]
async fn basic() {
let mgr = Manager {};
let pool = Pool::builder(mgr).max_size(16).build().unwrap();
let status = pool.status();
assert_eq!(status.size, 0);
assert_eq!(status.available, 0);
assert_eq!(status.waiting, 0);
let obj0 = pool.get().await.unwrap();
let status = pool.status();
assert_eq!(status.size, 1);
assert_eq!(status.available, 0);
assert_eq!(status.waiting, 0);
let obj1 = pool.get().await.unwrap();
let status = pool.status();
assert_eq!(status.size, 2);
assert_eq!(status.available, 0);
assert_eq!(status.waiting, 0);
let obj2 = pool.get().await.unwrap();
let status = pool.status();
assert_eq!(status.size, 3);
assert_eq!(status.available, 0);
assert_eq!(status.waiting, 0);
drop(obj0);
let status = pool.status();
assert_eq!(status.size, 3);
assert_eq!(status.available, 1);
assert_eq!(status.waiting, 0);
drop(obj1);
let status = pool.status();
assert_eq!(status.size, 3);
assert_eq!(status.available, 2);
assert_eq!(status.waiting, 0);
drop(obj2);
let status = pool.status();
assert_eq!(status.size, 3);
assert_eq!(status.available, 3);
assert_eq!(status.waiting, 0);
}
#[tokio::test]
async fn closing() {
let mgr = Manager {};
let pool = Pool::builder(mgr).max_size(1).build().unwrap();
// fetch the only object from the pool
let obj = pool.get().await;
let join_handle = {
let pool = pool.clone();
tokio::spawn(async move { pool.get().await })
};
tokio::task::yield_now().await;
assert_eq!(pool.status().available, 0);
assert_eq!(pool.status().waiting, 1);
pool.close();
tokio::task::yield_now().await;
assert_eq!(pool.status().available, 0);
assert_eq!(pool.status().waiting, 0);
assert!(matches!(join_handle.await.unwrap(), Err(PoolError::Closed)));
assert!(matches!(pool.get().await, Err(PoolError::Closed)));
assert!(matches!(
pool.timeout_get(&Timeouts {
wait: Some(Duration::ZERO),
..pool.timeouts()
})
.await,
Err(PoolError::Closed)
));
drop(obj);
tokio::task::yield_now().await;
assert_eq!(pool.status().available, 0);
assert_eq!(pool.status().waiting, 0);
}
#[tokio::test(flavor = "multi_thread")]
async fn concurrent() {
let mgr = Manager {};
let pool = Pool::builder(mgr).max_size(3).build().unwrap();
// Spawn tasks
let futures = (0..100)
.map(|_| {
let pool = pool.clone();
tokio::spawn(async move {
let mut obj = pool.get().await.unwrap();
*obj += 1;
time::sleep(Duration::from_millis(1)).await;
})
})
.collect::<Vec<_>>();
// Await tasks to finish
for future in futures {
future.await.unwrap();
}
// Verify
let status = pool.status();
assert_eq!(status.size, 3);
assert_eq!(status.available, 3);
assert_eq!(status.waiting, 0);
let values = [
pool.get().await.unwrap(),
pool.get().await.unwrap(),
pool.get().await.unwrap(),
];
assert_eq!(values.iter().map(|obj| **obj).sum::<usize>(), 100);
}
/// `Object::take` must permanently remove an object from the pool,
/// shrinking `size` while leaving `available`/`waiting` consistent, and
/// the pool must be able to create fresh objects afterwards.
#[tokio::test(flavor = "multi_thread")]
async fn object_take() {
    let mgr = Manager {};
    let pool = Pool::builder(mgr).max_size(2).build().unwrap();
    let obj0 = pool.get().await.unwrap();
    let obj1 = pool.get().await.unwrap();
    let status = pool.status();
    assert_eq!(status.size, 2);
    assert_eq!(status.available, 0);
    assert_eq!(status.waiting, 0);
    let _ = Object::take(obj0);
    let status = pool.status();
    assert_eq!(status.size, 1);
    assert_eq!(status.available, 0);
    assert_eq!(status.waiting, 0);
    let _ = Object::take(obj1);
    let status = pool.status();
    assert_eq!(status.size, 0);
    assert_eq!(status.available, 0);
    // This check was missing the `waiting` assertion that every other
    // status check in this test performs.
    assert_eq!(status.waiting, 0);
    // The pool can create new objects after all were taken.
    let obj0 = pool.get().await.unwrap();
    let obj1 = pool.get().await.unwrap();
    let status = pool.status();
    assert_eq!(status.size, 2);
    assert_eq!(status.available, 0);
    assert_eq!(status.waiting, 0);
    drop(obj0);
    drop(obj1);
    let status = pool.status();
    assert_eq!(status.size, 2);
    assert_eq!(status.available, 2);
    assert_eq!(status.waiting, 0);
}
#[tokio::test]
async fn retain() {
let mgr = Manager {};
let pool = Pool::builder(mgr).max_size(4).build().unwrap();
{
let _a = pool.get().await;
let _b = pool.get().await;
tokio::time::sleep(Duration::from_millis(5)).await;
let _c = pool.get().await;
tokio::time::sleep(Duration::from_millis(5)).await;
}
assert_eq!(pool.status().size, 3);
let retain_result = pool.retain(|_, metrics| metrics.age() <= Duration::from_millis(10));
assert_eq!(retain_result.retained, 1);
assert_eq!(retain_result.removed.len(), 2);
assert_eq!(pool.status().size, 1);
tokio::time::sleep(Duration::from_millis(5)).await;
let retain_result = pool.retain(|_, metrics| metrics.age() <= Duration::from_millis(10));
assert_eq!(retain_result.retained, 0);
assert_eq!(retain_result.removed.len(), 1);
assert_eq!(pool.status().size, 0);
}
/// `Pool::retain` must accept an `FnMut` closure; the counter verifies
/// that the closure really mutates captured state once per object.
#[tokio::test]
async fn retain_fnmut() {
    let mgr = Manager {};
    let pool = Pool::builder(mgr).max_size(4).build().unwrap();
    {
        // Create four objects; all return to the pool on scope exit.
        // (The fourth binding used to shadow `_c`; renamed for clarity.)
        let _a = pool.get().await;
        let _b = pool.get().await;
        let _c = pool.get().await;
        let _d = pool.get().await;
    }
    let mut removed = 0;
    {
        let retain_result = pool.retain(|_, _| {
            removed += 1;
            false
        });
        assert_eq!(retain_result.retained, 0);
        assert_eq!(retain_result.removed.len(), 4);
    }
    // Previously this counter was incremented but never checked,
    // leaving the FnMut aspect of the test unverified.
    assert_eq!(removed, 4);
    assert_eq!(pool.status().size, 0);
}

View File

@@ -0,0 +1,164 @@
use std::time::Duration;
use deadpool::managed::{Hook, HookError, Manager, Metrics, Pool, RecycleResult};
use itertools::Itertools;
use tokio::time::{sleep, timeout};
/// Behavior of a single failure-injection point.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum Gate {
    Ok,
    Err,
    Slow,
    Never,
}
impl Gate {
    /// Resolves according to the gate's mode: succeed or fail immediately,
    /// succeed after a tiny delay, or stay pending effectively forever.
    async fn open(&self) -> Result<(), ()> {
        match self {
            Self::Ok => Ok(()),
            Self::Err => Err(()),
            Self::Never => {
                // Effectively never completes; callers are expected to be
                // cancelled or to time out before this sleep elapses.
                sleep(Duration::MAX).await;
                unreachable!();
            }
            Self::Slow => {
                sleep(Duration::from_nanos(2)).await;
                Ok(())
            }
        }
    }
}
/// One [`Gate`] per failure-injection point of the pool.
#[derive(Copy, Clone, Debug)]
struct Gates {
    create: Gate,
    recycle: Gate,
    post_create: Gate,
    pre_recycle: Gate,
    post_recycle: Gate,
}
/// Yields every combination of the four gate modes across the five
/// injection points (4^5 = 1024 configurations).
fn configs() -> impl Iterator<Item = Gates> {
    (0..5)
        .map(|_| &[Gate::Ok, Gate::Err, Gate::Slow, Gate::Never])
        .multi_cartesian_product()
        .map(move |gates| Gates {
            create: *gates[0],
            recycle: *gates[1],
            post_create: *gates[2],
            pre_recycle: *gates[3],
            post_recycle: *gates[4],
        })
}
fn pools(max_size: usize) -> impl Iterator<Item = Pool<GatedManager>> {
configs().map(move |gates| {
let manager = GatedManager { gates };
Pool::builder(manager)
.max_size(max_size)
.post_create(Hook::async_fn(move |_, _| {
Box::pin(async move {
gates
.post_create
.open()
.await
.map_err(|_| HookError::message("Fail"))?;
Ok(())
})
}))
.pre_recycle(Hook::async_fn(move |_, _| {
Box::pin(async move {
gates
.pre_recycle
.open()
.await
.map_err(|_| HookError::message("pre_recycle gate set to error"))?;
Ok(())
})
}))
.post_recycle(Hook::async_fn(move |_, _| {
Box::pin(async move {
gates
.post_recycle
.open()
.await
.map_err(|_| HookError::message("post_recycle gate set to error"))?;
Ok(())
})
}))
.build()
.unwrap()
})
}
/// Manager whose `create`/`recycle` behavior is driven by [`Gates`].
struct GatedManager {
    gates: Gates,
}
impl Manager for GatedManager {
    type Type = ();
    type Error = ();
    /// Succeeds, fails, stalls or hangs according to the `create` gate.
    async fn create(&self) -> Result<Self::Type, Self::Error> {
        self.gates.create.open().await?;
        Ok(())
    }
    /// Succeeds, fails, stalls or hangs according to the `recycle` gate.
    async fn recycle(&self, _conn: &mut Self::Type, _: &Metrics) -> RecycleResult<Self::Error> {
        self.gates.recycle.open().await?;
        Ok(())
    }
}
// This tests various combinations of configurations with
// succeeding, failing, slow and hanging managers and hooks.
// It currently tests 4^5 (=1024) possible combinations and
// therefore takes some time to complete. It is probably not
// neccesary to test all combinations, but doing so doesn't
// hurt either and it is a good stress test of the pool.
#[tokio::test(flavor = "multi_thread", worker_threads = 16)]
async fn test_cancellations() {
for pool in pools(2) {
let handles = (0..8)
.map(|i| {
let pool = pool.clone();
tokio::spawn(async move {
loop {
let _obj = timeout(Duration::from_nanos(i), pool.get()).await;
sleep(Duration::from_nanos(i)).await;
}
})
})
.collect::<Vec<_>>();
for _ in 0..10 {
tokio::time::sleep(Duration::from_millis(1)).await;
let status = pool.status();
assert!(
status.size <= status.max_size,
"size({}) > max_size({}), gates: {:?}",
status.size,
status.max_size,
pool.manager().gates
);
}
for handle in &handles {
handle.abort();
}
for handle in handles {
let _ = handle.await;
}
let status = pool.status();
assert!(
status.size <= status.max_size,
"size({}) > max_size({}), gates: {:?}",
status.size,
status.max_size,
pool.manager().gates
);
assert!(
status.available <= status.max_size,
"available({}) > max_size({}), gates: {:?}",
status.available,
status.max_size,
pool.manager().gates
);
}
}

64
vendor/deadpool/tests/managed_config.rs vendored Normal file
View File

@@ -0,0 +1,64 @@
#![cfg(all(feature = "managed", feature = "serde"))]
use std::{collections::HashMap, env, time::Duration};
use config::Config;
use serde::{Deserialize, Serialize};
use deadpool::managed::PoolConfig;
/// Test helper that sets environment variables and restores their
/// previous state when dropped.
struct Env {
    /// Original values of overwritten variables; `None` means the
    /// variable was previously unset.
    backup: HashMap<String, Option<String>>,
}
impl Env {
    pub fn new() -> Self {
        Self {
            backup: HashMap::new(),
        }
    }
    /// Sets `name` to `value`, remembering the variable's original value
    /// so it can be restored on drop.
    ///
    /// NOTE(review): `env::set_var` is not thread-safe; this relies on
    /// the test process not mutating the environment concurrently.
    pub fn set(&mut self, name: &str, value: &str) {
        // Only back up the *original* value: a second `set` for the same
        // name must not overwrite the backup with our own override
        // (the previous `insert`-based code did exactly that).
        self.backup
            .entry(name.to_string())
            .or_insert_with(|| env::var(name).ok());
        env::set_var(name, value);
    }
}
impl Drop for Env {
    /// Restores every overwritten variable to its pre-test state.
    fn drop(&mut self) {
        for (name, value) in self.backup.iter() {
            match value {
                Some(value) => env::set_var(name.as_str(), value),
                None => env::remove_var(name.as_str()),
            }
        }
    }
}
#[derive(Debug, Serialize, Deserialize)]
struct TestConfig {
pool: PoolConfig,
}
#[test]
fn from_env() {
let mut env = Env::new();
env.set("POOL__MAX_SIZE", "42");
env.set("POOL__TIMEOUTS__WAIT__SECS", "1");
env.set("POOL__TIMEOUTS__WAIT__NANOS", "0");
env.set("POOL__TIMEOUTS__CREATE__SECS", "2");
env.set("POOL__TIMEOUTS__CREATE__NANOS", "0");
env.set("POOL__TIMEOUTS__RECYCLE__SECS", "3");
env.set("POOL__TIMEOUTS__RECYCLE__NANOS", "0");
let cfg = Config::builder()
.add_source(config::Environment::default().separator("__"))
.build()
.unwrap()
.try_deserialize::<TestConfig>()
.unwrap();
assert_eq!(cfg.pool.max_size, 42);
assert_eq!(cfg.pool.timeouts.wait, Some(Duration::from_secs(1)));
assert_eq!(cfg.pool.timeouts.create, Some(Duration::from_secs(2)));
assert_eq!(cfg.pool.timeouts.recycle, Some(Duration::from_secs(3)));
}

View File

@@ -0,0 +1,120 @@
#![cfg(feature = "managed")]
use std::{sync::Arc, time::Duration};
use tokio::{
sync::{mpsc, Mutex},
task, time,
};
use deadpool::managed::{self, Metrics, RecycleError, RecycleResult};
type Pool = managed::Pool<Manager>;
#[derive(Clone)]
struct Manager {
create_rx: Arc<Mutex<mpsc::Receiver<Result<(), ()>>>>,
recycle_rx: Arc<Mutex<mpsc::Receiver<Result<(), ()>>>>,
remote_control: RemoteControl,
}
#[derive(Clone)]
struct RemoteControl {
create_tx: mpsc::Sender<Result<(), ()>>,
_recycle_tx: mpsc::Sender<Result<(), ()>>,
}
impl RemoteControl {
pub fn create_ok(&mut self) {
self.create_tx.try_send(Ok(())).unwrap();
}
pub fn create_err(&mut self) {
self.create_tx.try_send(Err(())).unwrap();
}
/*
pub fn recycle_ok(&mut self) {
self.recycle_tx.try_send(Ok(())).unwrap();
}
pub fn recycle_err(&mut self) {
self.recycle_tx.try_send(Err(())).unwrap();
}
*/
}
impl Manager {
pub fn new() -> Self {
let (create_tx, create_rx) = mpsc::channel(16);
let (recycle_tx, recycle_rx) = mpsc::channel(16);
Self {
create_rx: Arc::new(Mutex::new(create_rx)),
recycle_rx: Arc::new(Mutex::new(recycle_rx)),
remote_control: RemoteControl {
create_tx,
_recycle_tx: recycle_tx,
},
}
}
}
impl managed::Manager for Manager {
type Type = ();
type Error = ();
async fn create(&self) -> Result<(), ()> {
self.create_rx.lock().await.recv().await.unwrap()
}
async fn recycle(&self, _conn: &mut (), _: &Metrics) -> RecycleResult<()> {
match self.recycle_rx.lock().await.recv().await.unwrap() {
Ok(()) => Ok(()),
Err(e) => Err(RecycleError::Backend(e)),
}
}
}
// When the pool is drained, all connections fail to create.
#[tokio::test(flavor = "current_thread")]
async fn pool_drained() {
let manager = Manager::new();
let mut rc = manager.remote_control.clone();
let pool = Pool::builder(manager).max_size(1).build().unwrap();
let pool_clone = pool.clone();
// let first task grab the only connection
let get_1 = tokio::spawn(async move { pool_clone.get().await });
task::yield_now().await;
assert_eq!(pool.status().size, 0);
assert_eq!(pool.status().available, 0);
assert_eq!(pool.status().waiting, 1);
// let second task wait for the connection
let pool_clone = pool.clone();
let get_2 = tokio::spawn(async move { pool_clone.get().await });
task::yield_now().await;
assert_eq!(pool.status().size, 0);
assert_eq!(pool.status().available, 0);
assert_eq!(pool.status().waiting, 2);
// first task receives an error
rc.create_err();
assert!(get_1.await.unwrap().is_err());
assert_eq!(pool.status().size, 0);
assert_eq!(pool.status().available, 0);
assert_eq!(pool.status().waiting, 1);
// the second task should now be able to create an object
rc.create_ok();
let get_2_result = time::timeout(Duration::from_millis(10), get_2).await;
assert!(get_2_result.is_ok(), "get_2 should not time out");
assert_eq!(pool.status().size, 1);
assert_eq!(pool.status().available, 0);
assert_eq!(pool.status().waiting, 0);
assert!(
get_2_result.unwrap().unwrap().is_ok(),
"get_2 should receive an object"
);
assert_eq!(pool.status().size, 1);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().waiting, 0);
}

184
vendor/deadpool/tests/managed_hooks.rs vendored Normal file
View File

@@ -0,0 +1,184 @@
#![cfg(feature = "managed")]
use std::sync::atomic::{AtomicUsize, Ordering};
use deadpool::managed::{Hook, HookError, Manager, Metrics, Pool, RecycleResult};
/// Manager producing sequentially numbered `usize` objects.
struct Computer {
    /// Id handed out by the next `create` call.
    next_id: AtomicUsize,
}
impl Computer {
    /// Creates a manager whose first object will be `start`.
    pub fn new(start: usize) -> Self {
        Self {
            next_id: AtomicUsize::new(start),
        }
    }
}
impl Manager for Computer {
    type Type = usize;
    type Error = ();
    /// Hands out the next sequential id; never fails.
    async fn create(&self) -> Result<Self::Type, Self::Error> {
        Ok(self.next_id.fetch_add(1, Ordering::Relaxed))
    }
    /// Recycling always succeeds and leaves the object untouched.
    async fn recycle(&self, _: &mut Self::Type, _: &Metrics) -> RecycleResult<Self::Error> {
        Ok(())
    }
}
#[tokio::test]
async fn post_create_ok() {
let manager = Computer::new(42);
let pool = Pool::<Computer>::builder(manager)
.max_size(1)
.post_create(Hook::sync_fn(|obj, _| {
*obj += 1;
Ok(())
}))
.build()
.unwrap();
assert!(*pool.get().await.unwrap() == 43);
}
#[tokio::test]
async fn post_create_ok_async() {
let manager = Computer::new(42);
let pool = Pool::<Computer>::builder(manager)
.max_size(1)
.post_create(Hook::async_fn(|obj, _| {
Box::pin(async move {
*obj += 1;
Ok(())
})
}))
.build()
.unwrap();
assert!(*pool.get().await.unwrap() == 43);
}
#[tokio::test]
async fn post_create_err_abort() {
let manager = Computer::new(0);
let pool = Pool::<Computer>::builder(manager)
.max_size(3)
.post_create(Hook::sync_fn(|obj, _| {
(*obj % 2 == 0)
.then_some(())
.ok_or(HookError::message("odd creation"))
}))
.build()
.unwrap();
let obj1 = pool.get().await.unwrap();
assert_eq!(*obj1, 0);
assert!(pool.get().await.is_err());
let obj2 = pool.get().await.unwrap();
assert_eq!(*obj2, 2);
assert!(pool.get().await.is_err());
let obj2 = pool.get().await.unwrap();
assert_eq!(*obj2, 4);
}
#[tokio::test]
async fn pre_recycle_ok() {
let manager = Computer::new(42);
let pool = Pool::<Computer>::builder(manager)
.max_size(1)
.pre_recycle(Hook::sync_fn(|obj, _| {
*obj += 1;
Ok(())
}))
.build()
.unwrap();
assert!(*pool.get().await.unwrap() == 42);
assert!(*pool.get().await.unwrap() == 43);
assert!(*pool.get().await.unwrap() == 44);
assert!(*pool.get().await.unwrap() == 45);
}
#[tokio::test]
async fn pre_recycle_err_continue() {
let manager = Computer::new(0);
let pool = Pool::<Computer>::builder(manager)
.max_size(1)
.pre_recycle(Hook::sync_fn(|_, metrics| {
if metrics.recycle_count > 0 {
Err(HookError::message("Fail!"))
} else {
Ok(())
}
}))
.build()
.unwrap();
assert_eq!(*pool.get().await.unwrap(), 0);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().size, 1);
assert_eq!(*pool.get().await.unwrap(), 0);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().size, 1);
assert_eq!(*pool.get().await.unwrap(), 1);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().size, 1);
assert_eq!(*pool.get().await.unwrap(), 1);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().size, 1);
assert_eq!(*pool.get().await.unwrap(), 2);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().size, 1);
assert_eq!(*pool.get().await.unwrap(), 2);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().size, 1);
}
#[tokio::test]
async fn post_recycle_ok() {
let manager = Computer::new(42);
let pool = Pool::<Computer>::builder(manager)
.max_size(1)
.post_recycle(Hook::sync_fn(|obj, _| {
*obj += 1;
Ok(())
}))
.build()
.unwrap();
assert!(*pool.get().await.unwrap() == 42);
assert!(*pool.get().await.unwrap() == 43);
assert!(*pool.get().await.unwrap() == 44);
assert!(*pool.get().await.unwrap() == 45);
}
#[tokio::test]
async fn post_recycle_err_continue() {
let manager = Computer::new(0);
let pool = Pool::<Computer>::builder(manager)
.max_size(1)
.post_recycle(Hook::sync_fn(|_, metrics| {
if metrics.recycle_count > 0 {
Err(HookError::message("Fail!"))
} else {
Ok(())
}
}))
.build()
.unwrap();
assert_eq!(*pool.get().await.unwrap(), 0);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().size, 1);
assert_eq!(*pool.get().await.unwrap(), 0);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().size, 1);
assert_eq!(*pool.get().await.unwrap(), 1);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().size, 1);
assert_eq!(*pool.get().await.unwrap(), 1);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().size, 1);
assert_eq!(*pool.get().await.unwrap(), 2);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().size, 1);
assert_eq!(*pool.get().await.unwrap(), 2);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().size, 1);
}

148
vendor/deadpool/tests/managed_resize.rs vendored Normal file
View File

@@ -0,0 +1,148 @@
#![cfg(feature = "managed")]
use std::convert::Infallible;
use deadpool::managed::{self, Metrics, Object, RecycleResult};
type Pool = managed::Pool<Manager, Object<Manager>>;
/// Trivial manager with unit objects; creation and recycling never fail.
struct Manager {}
impl managed::Manager for Manager {
    type Type = ();
    type Error = Infallible;
    async fn create(&self) -> Result<(), Infallible> {
        Ok(())
    }
    async fn recycle(&self, _conn: &mut (), _: &Metrics) -> RecycleResult<Infallible> {
        Ok(())
    }
}
// Regression test for https://github.com/bikeshedder/deadpool/issues/380
#[tokio::test]
async fn test_grow_reuse_existing() {
// Shrink doesn't discard objects currently borrowed from the pool but
// keeps track of them so that repeatedly growing and shrinking will
// not cause excessive object creation. This logic used to contain a bug
// causing an overflow.
let mgr = Manager {};
let pool = Pool::builder(mgr).max_size(2).build().unwrap();
let obj1 = pool.get().await.unwrap();
let obj2 = pool.get().await.unwrap();
assert!(pool.status().size == 2);
assert!(pool.status().max_size == 2);
pool.resize(0);
// At this point the two objects are still tracked
assert!(pool.status().size == 2);
assert!(pool.status().max_size == 0);
pool.resize(1);
// Only one of the objects should be returned to the pool
assert!(pool.status().size == 2);
assert!(pool.status().max_size == 1);
drop(obj1);
// The first drop brings the size to 1.
assert!(pool.status().size == 1);
assert!(pool.status().max_size == 1);
drop(obj2);
assert!(pool.status().size == 1);
assert!(pool.status().max_size == 1);
}
#[tokio::test]
async fn resize_pool_shrink() {
let mgr = Manager {};
let pool = Pool::builder(mgr).max_size(2).build().unwrap();
let obj0 = pool.get().await.unwrap();
let obj1 = pool.get().await.unwrap();
pool.resize(1);
assert_eq!(pool.status().max_size, 1);
assert_eq!(pool.status().size, 2);
drop(obj1);
assert_eq!(pool.status().max_size, 1);
assert_eq!(pool.status().size, 1);
drop(obj0);
assert_eq!(pool.status().max_size, 1);
assert_eq!(pool.status().size, 1);
}
#[tokio::test]
async fn resize_pool_grow() {
let mgr = Manager {};
let pool = Pool::builder(mgr).max_size(1).build().unwrap();
let obj0 = pool.get().await.unwrap();
pool.resize(2);
assert_eq!(pool.status().max_size, 2);
assert_eq!(pool.status().size, 1);
let obj1 = pool.get().await.unwrap();
assert_eq!(pool.status().max_size, 2);
assert_eq!(pool.status().size, 2);
drop(obj1);
assert_eq!(pool.status().max_size, 2);
assert_eq!(pool.status().size, 2);
drop(obj0);
assert_eq!(pool.status().max_size, 2);
assert_eq!(pool.status().size, 2);
}
#[tokio::test]
async fn resize_pool_shrink_grow() {
let mgr = Manager {};
let pool = Pool::builder(mgr).max_size(1).build().unwrap();
let obj0 = pool.get().await.unwrap();
pool.resize(2);
pool.resize(0);
pool.resize(5);
assert_eq!(pool.status().max_size, 5);
assert_eq!(pool.status().size, 1);
drop(obj0);
assert_eq!(pool.status().max_size, 5);
assert_eq!(pool.status().size, 1);
}
#[tokio::test]
async fn resize_pool_grow_concurrent() {
let mgr = Manager {};
let pool = Pool::builder(mgr).max_size(0).build().unwrap();
let join_handle = {
let pool = pool.clone();
tokio::spawn(async move { pool.get().await })
};
tokio::task::yield_now().await;
assert_eq!(pool.status().max_size, 0);
assert_eq!(pool.status().size, 0);
assert_eq!(pool.status().available, 0);
assert_eq!(pool.status().waiting, 1);
pool.resize(1);
assert_eq!(pool.status().max_size, 1);
assert_eq!(pool.status().size, 0);
assert_eq!(pool.status().available, 0);
assert_eq!(pool.status().waiting, 1);
tokio::task::yield_now().await;
assert_eq!(pool.status().max_size, 1);
assert_eq!(pool.status().size, 1);
assert_eq!(pool.status().available, 0);
assert_eq!(pool.status().waiting, 0);
let obj0 = join_handle.await.unwrap().unwrap();
assert_eq!(pool.status().max_size, 1);
assert_eq!(pool.status().size, 1);
assert_eq!(pool.status().available, 0);
assert_eq!(pool.status().waiting, 0);
drop(obj0);
assert_eq!(pool.status().max_size, 1);
assert_eq!(pool.status().size, 1);
assert_eq!(pool.status().available, 1);
assert_eq!(pool.status().waiting, 0);
}
#[tokio::test]
async fn close_resize() {
let mgr = Manager {};
let pool = Pool::builder(mgr).max_size(1).build().unwrap();
pool.close();
pool.resize(16);
assert_eq!(pool.status().size, 0);
assert_eq!(pool.status().max_size, 0);
}

View File

@@ -0,0 +1,72 @@
#![cfg(all(
feature = "managed",
any(feature = "rt_tokio_1", feature = "rt_async-std_1")
))]
use std::{convert::Infallible, future::Future, pin::Pin, task, time::Duration};
use deadpool::{
managed::{self, Metrics, Object, PoolConfig, PoolError, RecycleResult, Timeouts},
Runtime,
};
type Pool = managed::Pool<Manager, Object<Manager>>;
/// Manager whose operations hang forever (see [`Never`]), used to
/// exercise the pool's create/recycle/wait timeouts.
struct Manager {}
/// Future that is always pending.
struct Never;
impl Future for Never {
    type Output = ();
    // NOTE(review): the waker is never registered, so this future is never
    // re-polled voluntarily — callers must rely on timeouts/cancellation.
    fn poll(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> task::Poll<Self::Output> {
        task::Poll::Pending
    }
}
impl managed::Manager for Manager {
    type Type = usize;
    type Error = Infallible;
    /// Hangs forever; only exits via the pool's `create` timeout.
    async fn create(&self) -> Result<usize, Infallible> {
        Never.await;
        unreachable!();
    }
    /// Hangs forever; only exits via the pool's `recycle` timeout.
    async fn recycle(&self, _conn: &mut usize, _: &Metrics) -> RecycleResult<Infallible> {
        Never.await;
        unreachable!();
    }
}
async fn test_managed_timeout(runtime: Runtime) {
let mgr = Manager {};
let cfg = PoolConfig {
max_size: 16,
timeouts: Timeouts {
create: Some(Duration::from_millis(0)),
wait: Some(Duration::from_millis(0)),
recycle: Some(Duration::from_millis(0)),
},
..Default::default()
};
let pool = Pool::builder(mgr)
.config(cfg)
.runtime(runtime)
.build()
.unwrap();
assert!(matches!(pool.get().await, Err(PoolError::Timeout(_))));
}
#[cfg(feature = "rt_tokio_1")]
#[tokio::test]
async fn rt_tokio_1() {
test_managed_timeout(Runtime::Tokio1).await;
}
#[cfg(feature = "rt_async-std_1")]
#[async_std::test]
async fn rt_async_std_1() {
test_managed_timeout(Runtime::AsyncStd1).await;
}

View File

@@ -0,0 +1,100 @@
#![cfg(feature = "managed")]
use std::{
sync::atomic::{AtomicUsize, Ordering},
time::Duration,
};
use tokio::time;
use deadpool::managed::{self, Metrics, RecycleError, RecycleResult};
type Pool = managed::Pool<Manager>;
/// Manager with configurable failure modes and a counter of detached
/// objects.
struct Manager {
    /// When `true`, every `create` call fails.
    create_fail: bool,
    /// When `true`, every `recycle` call fails.
    recycle_fail: bool,
    /// Number of times the pool has called `detach`.
    detached: AtomicUsize,
}
impl managed::Manager for Manager {
    type Type = ();
    type Error = ();
    async fn create(&self) -> Result<(), ()> {
        if self.create_fail {
            Err(())
        } else {
            Ok(())
        }
    }
    async fn recycle(&self, _conn: &mut (), _: &Metrics) -> RecycleResult<()> {
        if self.recycle_fail {
            Err(RecycleError::Backend(()))
        } else {
            Ok(())
        }
    }
    /// Counts detached objects so tests can assert on pool cleanup.
    fn detach(&self, _obj: &mut Self::Type) {
        self.detached.fetch_add(1, Ordering::Relaxed);
    }
}
/// A failing `create` must leave the pool empty and must not leak
/// capacity: a subsequent `get` fails promptly instead of hanging.
#[tokio::test]
async fn create() {
    let manager = Manager {
        create_fail: true,
        recycle_fail: false,
        detached: AtomicUsize::new(0),
    };
    let pool = Pool::builder(manager).max_size(16).build().unwrap();
    {
        assert!(pool.get().await.is_err());
    }
    let status = pool.status();
    assert_eq!(status.available, 0);
    assert_eq!(status.size, 0);
    {
        assert!(time::timeout(Duration::from_millis(10), pool.get())
            .await
            .unwrap()
            .is_err());
    }
    // Re-read the status: the previous binding was captured BEFORE the
    // second `get` attempt, so asserting on it again was tautological.
    let status = pool.status();
    assert_eq!(status.available, 0);
    assert_eq!(status.size, 0);
}
#[tokio::test]
async fn recycle() {
let manager = Manager {
create_fail: false,
recycle_fail: true,
detached: AtomicUsize::new(0),
};
let pool = Pool::builder(manager).max_size(16).build().unwrap();
{
let _a = pool.get().await.unwrap();
let _b = pool.get().await.unwrap();
}
let status = pool.status();
assert_eq!(status.available, 2);
assert_eq!(status.size, 2);
assert_eq!(pool.manager().detached.load(Ordering::Relaxed), 0);
{
let _a = pool.get().await.unwrap();
// All connections fail to recycle. Thus reducing the
// available counter to 0.
let status = pool.status();
assert_eq!(status.available, 0);
assert_eq!(status.size, 1);
assert_eq!(pool.manager().detached.load(Ordering::Relaxed), 2);
}
let status = pool.status();
assert_eq!(status.available, 1);
assert_eq!(status.size, 1);
}

179
vendor/deadpool/tests/unmanaged.rs vendored Normal file
View File

@@ -0,0 +1,179 @@
#![cfg(feature = "unmanaged")]
use std::time::Duration;
use tokio::{task, time};
use deadpool::unmanaged::{Pool, PoolError};
#[tokio::test]
async fn basic() {
let pool = Pool::from(vec![(), (), ()]);
let status = pool.status();
assert_eq!(status.size, 3);
assert_eq!(status.available, 3);
let _val0 = pool.get().await;
let status = pool.status();
assert_eq!(status.size, 3);
assert_eq!(status.available, 2);
let _val1 = pool.get().await;
let status = pool.status();
assert_eq!(status.size, 3);
assert_eq!(status.available, 1);
let _val2 = pool.get().await;
let status = pool.status();
assert_eq!(status.size, 3);
assert_eq!(status.available, 0);
}
#[tokio::test]
async fn closing() {
let pool = Pool::<i64>::new(1);
pool.try_add(42).unwrap();
let obj = pool.get().await.unwrap();
let join_handle = {
let pool = pool.clone();
tokio::spawn(async move { pool.get().await })
};
assert!(!pool.is_closed());
assert_eq!(pool.status().available, 0);
task::yield_now().await;
pool.close();
assert!(pool.is_closed());
task::yield_now().await;
assert_eq!(pool.status().available, 0);
assert!(matches!(join_handle.await.unwrap(), Err(PoolError::Closed)));
assert!(matches!(pool.get().await, Err(PoolError::Closed)));
assert!(matches!(pool.try_get(), Err(PoolError::Closed)));
drop(obj);
assert!(pool.is_closed());
assert!(matches!(pool.try_get(), Err(PoolError::Closed)));
assert_eq!(pool.status().available, 0);
}
#[tokio::test(flavor = "multi_thread")]
async fn concurrent() {
let pool = Pool::from(vec![0usize, 0, 0]);
// Spawn tasks
let futures = (0..100)
.map(|_| {
let pool = pool.clone();
tokio::spawn(async move {
*pool.get().await.unwrap() += 1;
})
})
.collect::<Vec<_>>();
// Await tasks to finish
for future in futures {
future.await.unwrap();
}
// Verify
let status = pool.status();
assert_eq!(status.size, 3);
assert_eq!(status.available, 3);
let values = [pool.get().await, pool.get().await, pool.get().await];
assert_eq!(
values
.iter()
.map(|obj| **obj.as_ref().unwrap())
.sum::<usize>(),
100,
);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_unmanaged_add_remove() {
let pool = Pool::new(2);
pool.add(1).await.unwrap();
assert_eq!(pool.status().size, 1);
pool.add(2).await.unwrap();
assert_eq!(pool.status().size, 2);
assert!(
time::timeout(Duration::from_millis(10), pool.add(3))
.await
.is_err(),
"adding a third item should timeout"
);
pool.remove().await.unwrap();
assert_eq!(pool.status().size, 1);
assert!(
time::timeout(Duration::from_millis(10), pool.add(3))
.await
.is_ok(),
"adding a third item should not timeout"
);
pool.remove().await.unwrap();
assert_eq!(pool.status().size, 1);
pool.remove().await.unwrap();
assert_eq!(pool.status().size, 0);
}
#[tokio::test(flavor = "multi_thread")]
async fn try_add_try_remove() {
let pool = Pool::new(2);
pool.try_add(1).unwrap();
assert_eq!(pool.status().size, 1);
pool.try_add(2).unwrap();
assert_eq!(pool.status().size, 2);
assert!(pool.try_add(3).is_err());
pool.try_remove().unwrap();
assert_eq!(pool.status().size, 1);
assert!(pool.try_add(3).is_ok());
pool.try_remove().unwrap();
assert_eq!(pool.status().size, 1);
pool.try_remove().unwrap();
assert_eq!(pool.status().size, 0);
}
#[tokio::test(flavor = "multi_thread")]
async fn add_timeout() {
let pool = Pool::from(vec![1]);
let add = {
let pool = pool.clone();
tokio::spawn(async move {
pool.add(2).await.unwrap();
})
};
let mut iv = time::interval(Duration::from_millis(10));
iv.tick().await;
iv.tick().await;
pool.try_remove().unwrap();
assert!(
time::timeout(Duration::from_millis(10), add).await.is_ok(),
"add should not timeout"
);
assert_eq!(pool.status().size, 1);
assert_eq!(pool.try_remove().unwrap(), 2);
}

View File

@@ -0,0 +1,60 @@
#![cfg(feature = "unmanaged")]
use std::time::Duration;
use deadpool::{
unmanaged::{self, PoolConfig, PoolError},
Runtime,
};
type Pool = unmanaged::Pool<()>;
#[tokio::test]
async fn no_runtime() {
let pool = Pool::default();
assert!(matches!(
pool.timeout_get(Some(Duration::from_millis(1))).await,
Err(PoolError::NoRuntimeSpecified)
));
assert!(matches!(
pool.timeout_get(Some(Duration::from_millis(0))).await,
Err(PoolError::Timeout)
));
}
async fn _test_get(runtime: Runtime) {
let cfg = PoolConfig {
max_size: 16,
timeout: None,
runtime: Some(runtime),
};
let pool = Pool::from_config(&cfg);
assert!(matches!(
pool.timeout_get(Some(Duration::from_millis(1))).await,
Err(PoolError::Timeout),
));
}
async fn _test_config(runtime: Runtime) {
let cfg = PoolConfig {
max_size: 16,
timeout: Some(Duration::from_millis(1)),
runtime: Some(runtime),
};
let pool = Pool::from_config(&cfg);
assert!(matches!(pool.get().await, Err(PoolError::Timeout)));
}
#[cfg(feature = "rt_tokio_1")]
#[tokio::test]
async fn rt_tokio_1() {
_test_get(Runtime::Tokio1).await;
_test_config(Runtime::Tokio1).await;
}
#[cfg(feature = "rt_async-std_1")]
#[async_std::test]
async fn rt_async_std_1() {
_test_get(Runtime::AsyncStd1).await;
_test_config(Runtime::AsyncStd1).await;
}