chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

1
vendor/flurry/.cargo-checksum.json vendored Normal file
View File

@@ -0,0 +1 @@
{"files":{".cargo_vcs_info.json":"6fc4c17ec3a548be2a3ea4d1b9bd3e14325692fd122c8e0cdf0bd793c9713efd",".github/DOCS.md":"6a3c2248bb234e7fde0ce6ee9d986c7c0dbd25f33807b1808a62490d47c799be",".github/codecov.yml":"f3fae76df57421bc1738129f96fe8408e94cf3a408e307d9cc4f2d331eb5c63f",".github/dependabot.yml":"0721729dc23500350d26acccee5ed86474da5b75c909533994922f6b6cd0d4cb",".github/workflows/check.yml":"f8d314b755df3004734e63eb1ddaf874c7699320f0ec162b4147ccaf42126fcf",".github/workflows/coverage.yml":"8032f490f7abe63d6a85b9b1dba6a1e37f0087ad28e05da52cb8908094e662ce",".github/workflows/safety.yml":"14e75232b2d1be716059aeddd135ebfd2c746eb66804661a7dd4d85a8929a4ac",".github/workflows/scheduled.yml":"5b8d25904e105c07bb79593abb86e6d28826275ea2f6b7fd1d8f1679444ffccb",".github/workflows/test.yml":"4afff142ae3ef6e9a5327af881b99bedc14ca73a4bd0a279ad33a8100ac4c7cd","CHANGELOG.md":"2682c3d7cb9f13b4ad2bc51a022ba84e4e72a5f696a0de3e8ec296b12fb6196e","Cargo.toml":"b1d7701ec8a5b9319c710f8bc5897adc5d7d9425ffa399b5e172c8f1bf0b188f","Cargo.toml.orig":"16e17da6732ebd02778a6c685a46c853de5a43165112338a993cf42dd384fffb","LICENSE-APACHE":"be0118a3cb526bf53f2c56124557db1e4be46e78b136c2a54769ec24040d354b","LICENSE-MIT":"718304891df596a55ef6ac88cec918aea35a69868c4ae877b484020cb2da775d","README.md":"78d6233594988f7bf3ed6dbe23b4c100ee6e9b90d94f863996d525e759bddcc8","benches/README.md":"a75194998432b2b5e79bf539105bec5397b50c3e5cf7773e2aeb69434c4eb8b6","benches/dashmap.LICENSE":"3ce3cce00b62cb4bb26ff4a3d7d4f5a73d1b928e835bf74a1844a59367327215","benches/flurry_dashmap.rs":"ad8497f79547d0742610b99de1718f89d2cc0078db34526afb361d6acb8dbe0d","benches/flurry_hashbrown.rs":"61934d68fe93ec32ebe7f32e4c5b4a024d1cb28bd3f6e3488a538bd656814932","benches/hashbrown.LICENSE":"1af5435435e07a525010f2ef8d72485783c36d2f86c299802e787c3868ba2757","codecov.yml":"b036b45d5da3d8d71a3e4adaac2e8c4b2d87ce79dd8b5274fbaefa410d11863a","deny.toml":"62c29047a8ea428bdee6fb31a045ddc872f6aab6e308920d1e0ace74d846839f","lsan-suppressions.txt":
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","rustfmt.toml":"d8e7f616455a670ba75e3e94bf6f88f4c168c481664d12501820c7dfff5c3cc2","src/iter/mod.rs":"fb2d960bb213957d6b1eac8ce1cc2faae383be86bfaa8b902ecd0067a61f6909","src/iter/traverser.rs":"39ecf048808a4f3ea03411b20103fed55e1718227f31ae7d1f8dc592dcbc11d4","src/lib.rs":"77be60e21b927d6479fdc8d9334ae9c91d1b4db0cc8d401ffc8da44519cd906f","src/map.rs":"2f9d2a9b6a04533404a1dfb560186cf8a99dea542767aa65d752af413275a147","src/map_ref.rs":"ded329899a27954eba75e61a98382ed17f304796578abfdbc33469cd26a60299","src/node.rs":"4c6f9a284f587e276c95f2ac6c40af0384c76f54a47d9f7942ae5cb9576788d6","src/raw/mod.rs":"d5231e1aa8e1560d8b73d1edad3ee0859fff15efec947411af50de0f30f219c2","src/rayon_impls.rs":"4d7614640b41d33fe1999142cc4673b02e79619913f373a67a3724f87e948220","src/reclaim.rs":"319777ede6a604d16bc49369e5d68239a6b8c914fa2eb6a1e4580d8229f4eb5e","src/serde_impls.rs":"a023e9eacdbaa4732755b62974006f1158e54b0e4be7abc68647d697968a122e","src/set.rs":"3b05358a4c07bcafb93e22f07d936b15d2b00d79848da65a1b45fa94ea5a44b0","src/set_ref.rs":"833ce3d16abd510c0d3bcd80afd62c4603cff3150d08940306deb0d924aa4fa6","tests/basic.rs":"faaeca0d3aadbcd917e509dd317042d2a2a612b146f6ed994ffbf19a1d9f564a","tests/basic_ref.rs":"173f0fa794d378354aa11c0d739496e0e2f44f62ff8c365c20945e481c26e9bd","tests/borrow.rs":"866fae971f5451e97a7778eef64671968f7a7db84142e603e56f83ccd7bc9fd5","tests/cuckoo/README.md":"1198cb2e1f3f3b4c2e1d1637b2fe261e6eb54889bba9693986b263404ad9daa0","tests/cuckoo/main.rs":"f0c1dd83b03cefbcd8ebd0e82d3d1eec4c80b52bf1883f3bd50d017760911038","tests/cuckoo/stress.rs":"3576e0c1590c6b4acae6011c95a75b2533f7eb1a70ce5c0a2c640023cbb8176b","tests/hasher.rs":"c4c59477dce647b2d59569bb8782c265dae5d83c68ab9eccb98d68dd2bd94c04","tests/jdk/README.md":"eb104559826d7e47d3ec8d69e4c3c8b0070512e51fb3cec10d545749886427f4","tests/jdk/concurrent_associate.rs":"0673c4d59e784f0dd849bf84069d72bb4cc58a5468ef926d3c5f2f97749e861f","tests/jdk/concurrent_contains.rs"
:"5406f08359be5642ba6c35bd722a760e7ff726c2733bb79f38d34e9b3bc759ed","tests/jdk/main.rs":"c416181bed514720678c4df3ce0817ed42759d4359c1e3858fe6a3afa6f14ae5","tests/jdk/map_check.rs":"72954d3f86f36bb309500dd10085dffe91525ed5789647abb4febd7b5c8f028a","tests/jsr166/README.md":"1384935cd918cfed82b76cf35edd4b9b663e13d9c0110aaede55f907eeeb6374","tests/jsr166/main.rs":"3c91385be0aadeeca18cb7cc42786a31af20dd84da5fa31741b5f896c4966f94","tests/regressions.rs":"d951bd7f804659b28ec17bdb0a7917a79637171ca11ef31d91013bcc519b4d53","tests/set.rs":"8fdff126c7c7e3039f2719b74d841ad6d67393b5155b5b34e3af511d30f3dfa6"},"package":"cf5efcf77a4da27927d3ab0509dec5b0954bb3bc59da5a1de9e52642ebd4cdf9"}

6
vendor/flurry/.cargo_vcs_info.json vendored Normal file
View File

@@ -0,0 +1,6 @@
{
"git": {
"sha1": "c0a84ac093d73b69916ed873ca8a9119b08d177b"
},
"path_in_vcs": ""
}

23
vendor/flurry/.github/DOCS.md vendored Normal file
View File

@@ -0,0 +1,23 @@
# Github config and workflows
In this folder there is configuration for code coverage, dependabot, and ci
workflows that check the library more deeply than the default configurations.
This folder can be or was merged using a --allow-unrelated-histories merge
strategy from <https://github.com/jonhoo/rust-ci-conf/> which provides a
reasonably sensible base for writing your own ci on. By using this strategy
the history of the CI repo is included in your repo, and future updates to
the CI can be merged later.
To perform this merge run:
```shell
git remote add ci https://github.com/jonhoo/rust-ci-conf.git
git fetch ci
git merge --allow-unrelated-histories ci/main
```
An overview of the files in this project is available at:
<https://www.youtube.com/watch?v=xUH-4y92jPg&t=491s>, which contains some
rationale for decisions and runs through an example of solving minimal version
and OpenSSL issues.

21
vendor/flurry/.github/codecov.yml vendored Normal file
View File

@@ -0,0 +1,21 @@
# ref: https://docs.codecov.com/docs/codecovyml-reference
coverage:
# Hold ourselves to a high bar
range: 85..100
round: down
precision: 1
status:
# ref: https://docs.codecov.com/docs/commit-status
project:
default:
# Avoid false negatives
threshold: 1%
# Test files aren't important for coverage
ignore:
- "tests"
# Make comments less noisy
comment:
layout: "files"
require_changes: true

19
vendor/flurry/.github/dependabot.yml vendored Normal file
View File

@@ -0,0 +1,19 @@
version: 2
updates:
- package-ecosystem: github-actions
directory: /
schedule:
interval: daily
- package-ecosystem: cargo
directory: /
schedule:
interval: daily
ignore:
- dependency-name: "*"
# patch and minor updates don't matter for libraries as consumers of this library build
# with their own lockfile, rather than the version specified in this library's lockfile
# remove this ignore rule if your package has binaries to ensure that the binaries are
# built with the exact set of dependencies and those are up to date.
update-types:
- "version-update:semver-patch"
- "version-update:semver-minor"

View File

@@ -0,0 +1,139 @@
# This workflow runs whenever a PR is opened or updated, or a commit is pushed to main. It runs
# several checks:
# - fmt: checks that the code is formatted according to rustfmt
# - clippy: checks that the code does not contain any clippy warnings
# - doc: checks that the code can be documented without errors
# - hack: check combinations of feature flags
# - msrv: check that the msrv specified in the crate is correct
permissions:
contents: read
# This configuration allows maintainers of this repo to create a branch and pull request based on
# the new branch. Restricting the push trigger to the main branch ensures that the PR only gets
# built once.
on:
push:
branches: [main]
pull_request:
# If new code is pushed to a PR branch, then cancel in progress workflows for that PR. Ensures that
# we don't waste CI time, and returns results quicker https://github.com/jonhoo/rust-ci-conf/pull/5
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
name: check
jobs:
fmt:
runs-on: ubuntu-latest
name: stable / fmt
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install stable
uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt
- name: cargo fmt --check
run: cargo fmt --check
clippy:
runs-on: ubuntu-latest
name: ${{ matrix.toolchain }} / clippy
permissions:
contents: read
checks: write
strategy:
fail-fast: false
matrix:
# Get early warning of new lints which are regularly introduced in beta channels.
toolchain: [stable, beta]
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install ${{ matrix.toolchain }}
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ matrix.toolchain }}
components: clippy
- name: cargo clippy
uses: giraffate/clippy-action@v1
with:
reporter: 'github-pr-check'
github_token: ${{ secrets.GITHUB_TOKEN }}
semver:
runs-on: ubuntu-latest
name: semver
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install stable
uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt
- name: cargo-semver-checks
uses: obi1kenobi/cargo-semver-checks-action@v2
doc:
# run docs generation on nightly rather than stable. This enables features like
# https://doc.rust-lang.org/beta/unstable-book/language-features/doc-cfg.html which allows an
# API be documented as only available in some specific platforms.
runs-on: ubuntu-latest
name: nightly / doc
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install nightly
uses: dtolnay/rust-toolchain@nightly
- name: Install cargo-docs-rs
uses: dtolnay/install@cargo-docs-rs
- name: cargo docs-rs
run: cargo docs-rs
deny:
runs-on: ubuntu-latest
name: cargo-deny
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install stable
uses: dtolnay/rust-toolchain@stable
- name: cargo install cargo-deny
uses: taiki-e/install-action@cargo-deny
- name: cargo deny
run: cargo deny check
hack:
# cargo-hack checks combinations of feature flags to ensure that features are all additive
# which is required for feature unification
runs-on: ubuntu-latest
name: ubuntu / stable / features
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install stable
uses: dtolnay/rust-toolchain@stable
- name: cargo install cargo-hack
uses: taiki-e/install-action@cargo-hack
# intentionally no target specifier; see https://github.com/jonhoo/rust-ci-conf/pull/4
# --feature-powerset runs for every combination of features
- name: cargo hack
run: cargo hack --feature-powerset check
msrv:
# check that we can build using the minimal rust version that is specified by this crate
runs-on: ubuntu-latest
# we use a matrix here just because env can't be used in job names
# https://docs.github.com/en/actions/learn-github-actions/contexts#context-availability
strategy:
matrix:
msrv: ["1.72.0"] # ahash 0.8
name: ubuntu / ${{ matrix.msrv }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install ${{ matrix.msrv }}
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ matrix.msrv }}
- name: cargo +${{ matrix.msrv }} check
run: cargo check

View File

@@ -0,0 +1,28 @@
permissions:
contents: read
on:
push:
branches: [main]
pull_request:
name: coverage
jobs:
coverage:
runs-on: ubuntu-latest
container:
image: xd009642/tarpaulin:develop-nightly
options: --security-opt seccomp=unconfined
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Generate code coverage
run: |
cargo tarpaulin --verbose --all-features --timeout 120 --run-types doctests --run-types lib --run-types tests --out xml
- name: Record Rust version
run: echo "RUST=$(rustc --version)" >> "$GITHUB_ENV"
- name: Upload to codecov.io
uses: codecov/codecov-action@v5
with:
fail_ci_if_error: true
token: ${{ secrets.CODECOV_TOKEN }}
env_vars: OS,RUST

View File

@@ -0,0 +1,71 @@
# This workflow runs checks for unsafe code. In crates that don't have any unsafe code, this can be
# removed. Runs:
# - miri - detects undefined behavior and memory leaks
# - address sanitizer - detects memory errors
# - leak sanitizer - detects memory leaks
# - loom - Permutation testing for concurrent code https://crates.io/crates/loom
# See check.yml for information about how the concurrency cancellation and workflow triggering works
permissions:
contents: read
on:
push:
branches: [main]
pull_request:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
name: safety
jobs:
sanitizers:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install nightly
uses: dtolnay/rust-toolchain@nightly
- run: |
# to get the symbolizer for debug symbol resolution
sudo apt install llvm
# to fix buggy leak analyzer:
# https://github.com/japaric/rust-san#unrealiable-leaksanitizer
# ensure there's a profile.dev section
if ! grep -qE '^[ \t]*[profile.dev]' Cargo.toml; then
echo >> Cargo.toml
echo '[profile.dev]' >> Cargo.toml
fi
# remove pre-existing opt-levels in profile.dev
sed -i '/^\s*\[profile.dev\]/,/^\s*\[/ {/^\s*opt-level/d}' Cargo.toml
# now set opt-level to 1
sed -i '/^\s*\[profile.dev\]/a opt-level = 1' Cargo.toml
cat Cargo.toml
name: Enable debug symbols
- name: cargo test -Zsanitizer=address
# only --lib --tests b/c of https://github.com/rust-lang/rust/issues/53945
run: cargo test --lib --tests --all-features --target x86_64-unknown-linux-gnu
env:
ASAN_OPTIONS: "detect_odr_violation=0"
RUSTFLAGS: "-Z sanitizer=address"
- name: cargo test -Zsanitizer=leak
if: always()
run: cargo test --all-features --target x86_64-unknown-linux-gnu
env:
LSAN_OPTIONS: "suppressions=lsan-suppressions.txt"
RUSTFLAGS: "-Z sanitizer=leak"
miri:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: true
- run: |
echo "NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)" >> "$GITHUB_ENV"
- name: Install ${{ env.NIGHTLY }}
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.NIGHTLY }}
components: miri
- name: cargo miri test
run: cargo miri test
env:
MIRIFLAGS: "-Zmiri-disable-isolation"

View File

@@ -0,0 +1,58 @@
# Run scheduled (rolling) jobs on a nightly basis, as your crate may break independently of any
# given PR. E.g., updates to rust nightly and updates to this crate's dependencies. See check.yml for
# information about how the concurrency cancellation and workflow triggering works
permissions:
contents: read
on:
push:
branches: [main]
pull_request:
schedule:
- cron: '7 7 * * *'
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
name: rolling
jobs:
# https://twitter.com/mycoliza/status/1571295690063753218
nightly:
runs-on: ubuntu-latest
name: ubuntu / nightly
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install nightly
uses: dtolnay/rust-toolchain@nightly
- name: cargo generate-lockfile
if: hashFiles('Cargo.lock') == ''
run: cargo generate-lockfile
- name: cargo test --locked
run: cargo test --locked --all-features --all-targets
# https://twitter.com/alcuadrado/status/1571291687837732873
update:
# This action checks that updating the dependencies of this crate to the latest available that
# satisfy the versions in Cargo.toml does not break this crate. This is important as consumers
# of this crate will generally use the latest available crates. This is subject to the standard
# Cargo semver rules (i.e., cargo does not update to a new major version unless explicitly told
# to).
runs-on: ubuntu-latest
name: ubuntu / beta / updated
# There's no point running this if no Cargo.lock was checked in in the first place, since we'd
# just redo what happened in the regular test job. Unfortunately, hashFiles only works in `if`
# conditions on steps, so we repeat it.
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install beta
if: hashFiles('Cargo.lock') != ''
uses: dtolnay/rust-toolchain@beta
- name: cargo update
if: hashFiles('Cargo.lock') != ''
run: cargo update
- name: cargo test
if: hashFiles('Cargo.lock') != ''
run: cargo test --locked --all-features --all-targets
env:
RUSTFLAGS: -D deprecated

111
vendor/flurry/.github/workflows/test.yml vendored Normal file
View File

@@ -0,0 +1,111 @@
# This is the main CI workflow that runs the test suite on all pushes to main and all pull requests.
# It runs the following jobs:
# - required: runs the test suite on ubuntu with stable and beta rust toolchains
# - minimal: runs the test suite with the minimal versions of the dependencies that satisfy the
# requirements of this crate, and its dependencies
# - os-check: runs the test suite on mac and windows
# - coverage: runs the test suite and collects coverage information
# See check.yml for information about how the concurrency cancellation and workflow triggering works
permissions:
contents: read
on:
push:
branches: [main]
pull_request:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
name: test
jobs:
required:
runs-on: ubuntu-latest
name: ubuntu / ${{ matrix.toolchain }}
strategy:
matrix:
# run on stable and beta to ensure that tests won't break on the next version of the rust
# toolchain
toolchain: [stable, beta]
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install ${{ matrix.toolchain }}
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ matrix.toolchain }}
- name: cargo generate-lockfile
# enable this ci template to run regardless of whether the lockfile is checked in or not
if: hashFiles('Cargo.lock') == ''
run: cargo generate-lockfile
# https://twitter.com/jonhoo/status/1571290371124260865
- name: cargo test --locked
run: cargo test --locked --all-features --all-targets
# https://github.com/rust-lang/cargo/issues/6669
- name: cargo test --doc
run: cargo test --locked --all-features --doc
minimal:
# This action chooses the oldest version of the dependencies permitted by Cargo.toml to ensure
# that this crate is compatible with the minimal version that this crate and its dependencies
# require. This will pick up issues where this crate relies on functionality that was introduced
# later than the actual version specified (e.g., when we choose just a major version, but a
# method was added after this version).
#
# This particular check can be difficult to get to succeed as often transitive dependencies may
# be incorrectly specified (e.g., a dependency specifies 1.0 but really requires 1.1.5). There
# is an alternative flag available -Zdirect-minimal-versions that uses the minimal versions for
# direct dependencies of this crate, while selecting the maximal versions for the transitive
# dependencies. Alternatively, you can add a line in your Cargo.toml to artificially increase
# the minimal dependency, which you do with e.g.:
# ```toml
# # for minimal-versions
# [target.'cfg(any())'.dependencies]
# openssl = { version = "0.10.55", optional = true } # needed to allow foo to build with -Zminimal-versions
# ```
# The optional = true is necessary in case that dependency isn't otherwise transitively required
# by your library, and the target bit is so that this dependency edge never actually affects
# Cargo build order. See also
# https://github.com/jonhoo/fantoccini/blob/fde336472b712bc7ebf5b4e772023a7ba71b2262/Cargo.toml#L47-L49.
# This action is run on ubuntu with the stable toolchain, as it is not expected to fail
runs-on: ubuntu-latest
name: ubuntu / stable / minimal-versions
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Install stable
uses: dtolnay/rust-toolchain@stable
- name: Install nightly for -Zminimal-versions
uses: dtolnay/rust-toolchain@nightly
- name: rustup default stable
run: rustup default stable
- name: cargo update -Zminimal-versions
run: cargo +nightly update -Zminimal-versions
- name: cargo test
run: cargo test --locked --all-features --all-targets
os-check:
# run cargo test on mac and windows
runs-on: ${{ matrix.os }}
name: ${{ matrix.os }} / stable
strategy:
fail-fast: false
matrix:
os: [macos-latest, windows-latest]
steps:
# if your project needs OpenSSL, uncomment this to fix Windows builds.
# it's commented out by default as the install command takes 5-10m.
# - run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
# if: runner.os == 'Windows'
# - run: vcpkg install openssl:x64-windows-static-md
# if: runner.os == 'Windows'
- uses: actions/checkout@v4
with:
submodules: true
- name: Install stable
uses: dtolnay/rust-toolchain@stable
- name: cargo generate-lockfile
if: hashFiles('Cargo.lock') == ''
run: cargo generate-lockfile
- name: cargo test
run: cargo test --locked --all-features --all-targets
# continue using our own coverage.yml for now to get doctest checking
# https://github.com/taiki-e/cargo-llvm-cov/issues/2

70
vendor/flurry/CHANGELOG.md vendored Normal file
View File

@@ -0,0 +1,70 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
### Changed
### Removed
## [0.5.2] - 2024-11-19
### Added
- Alternative (better) crates section ([#128])
[#128]: https://github.com/jonhoo/flurry/pull/128
## [0.5.1] - 2024-04-21
### Changed
- Updated to `seize 0.3` ([#123])
- Fixed long-standing miri warning ([#123])
## [0.5.0] - 2024-02-11
### Changed
- (BREAKING) Updated to `ahash 0.8` ([#121])
## [0.4.0] - 2022-02-26
### Changed
- Moved memory management to [`seize`] ([#102])
- Bumped `ahash` and `parking_lot` ([#105])
[`seize`]: https://docs.rs/seize/latest/seize/
[#102]: https://github.com/jonhoo/flurry/pull/102
[#105]: https://github.com/jonhoo/flurry/pull/105
## [0.3.1] - 2020-08-28
### Added
- Basic `rayon` support (#89)
- Miri leak checking
### Changed
- Fixed panic when `no_replacement` is used
## [0.3.0] - 2020-04-13
### Added
- `HashMap::try_insert` (#74)
- `HashSetRef` (#78)
- Serialization support with `serde` (#79; behind a feature flag).
### Changed
- Changelog. Which will now (in theory) be updated with every release.
- We now implement Java's "tree bin optimization" (#72).
- Many more tests have been ported over.
- Fixed several memory leaks.
## 0.1.0 - 2020-02-04
### Added
- First "real" release.
[Unreleased]: https://github.com/jonhoo/flurry/compare/v0.5.2...HEAD
[0.5.2]: https://github.com/jonhoo/flurry/compare/v0.5.1...v0.5.2
[0.5.1]: https://github.com/jonhoo/flurry/compare/v0.5.0...v0.5.1
[0.5.0]: https://github.com/jonhoo/flurry/compare/v0.4.0...v0.5.0
[0.4.1]: https://github.com/jonhoo/flurry/compare/v0.4.0...v0.4.1
[0.4.0]: https://github.com/jonhoo/flurry/compare/v0.3.1...v0.4.0
[0.3.1]: https://github.com/jonhoo/flurry/compare/v0.3.0...v0.3.1
[0.3.0]: https://github.com/jonhoo/flurry/compare/v0.2.1...v0.3.0

137
vendor/flurry/Cargo.toml vendored Normal file
View File

@@ -0,0 +1,137 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
name = "flurry"
version = "0.5.2"
authors = ["Jon Gjengset <jon@thesquareplanet.com>"]
build = false
exclude = ["/jsr166/**"]
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Rust port of Java's ConcurrentHashMap"
readme = "README.md"
keywords = [
"hashmap",
"concurrent",
"map",
]
categories = [
"concurrency",
"data-structures",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/jonhoo/flurry.git"
[lib]
name = "flurry"
path = "src/lib.rs"
[[test]]
name = "basic"
path = "tests/basic.rs"
[[test]]
name = "basic_ref"
path = "tests/basic_ref.rs"
[[test]]
name = "borrow"
path = "tests/borrow.rs"
[[test]]
name = "cuckoo"
path = "tests/cuckoo/main.rs"
[[test]]
name = "hasher"
path = "tests/hasher.rs"
[[test]]
name = "jdk"
path = "tests/jdk/main.rs"
[[test]]
name = "jsr166"
path = "tests/jsr166/main.rs"
[[test]]
name = "regressions"
path = "tests/regressions.rs"
[[test]]
name = "set"
path = "tests/set.rs"
[[bench]]
name = "flurry_dashmap"
path = "benches/flurry_dashmap.rs"
harness = false
[[bench]]
name = "flurry_hashbrown"
path = "benches/flurry_hashbrown.rs"
harness = false
[dependencies.ahash]
version = "0.8.4"
features = ["compile-time-rng"]
default-features = false
[dependencies.num_cpus]
version = "1.12.0"
[dependencies.parking_lot]
version = "0.12"
[dependencies.rayon]
version = "1.3"
optional = true
[dependencies.seize]
version = "0.3.3"
[dependencies.serde]
version = "1.0.105"
optional = true
[dev-dependencies.criterion]
version = "0.5"
[dev-dependencies.rand]
version = "0.8"
[dev-dependencies.rayon]
version = "1.3"
[dev-dependencies.serde_json]
version = "1.0.50"
[target."cfg(any())".dependencies.regex]
version = "1.6.0"
optional = true
[badges.azure-devops]
build = "15"
pipeline = "flurry"
project = "jonhoo/jonhoo"
[badges.codecov]
branch = "master"
repository = "jonhoo/flurry"
service = "github"
[badges.maintenance]
status = "experimental"

201
vendor/flurry/LICENSE-APACHE vendored Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2020 Jon Gjengset
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

21
vendor/flurry/LICENSE-MIT vendored Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020 Jon Gjengset
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

46
vendor/flurry/README.md vendored Normal file
View File

@@ -0,0 +1,46 @@
[![Crates.io](https://img.shields.io/crates/v/flurry.svg)](https://crates.io/crates/flurry)
[![Documentation](https://docs.rs/flurry/badge.svg)](https://docs.rs/flurry/)
[![Codecov](https://codecov.io/github/jonhoo/flurry/coverage.svg?branch=master)](https://codecov.io/gh/jonhoo/flurry)
[![Dependency status](https://deps.rs/repo/github/jonhoo/flurry/status.svg)](https://deps.rs/repo/github/jonhoo/flurry)
A port of Java's [`java.util.concurrent.ConcurrentHashMap`](https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html) to Rust.
The port is based on the public domain [source file from JSR166] as of
CVS revision [1.323], and is jointly licensed under MIT and Apache 2.0
to match the [Rust API guidelines]. The Java source files are included
under the `jsr166/` subdirectory for easy reference.
The port was developed as part of a series of [live coding streams]
kicked off by [this tweet].
## Better Alternatives
Flurry currently suffers performance and memory usage issues under load.
You may wish to consider [papaya] or [dashmap] as alternatives if this is
important to you.
## License
Licensed under either of
* Apache License, Version 2.0
([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license
([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
## Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.
[source file from JSR166]: http://gee.cs.oswego.edu/dl/concurrency-interest/index.html
[1.323]: http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java?revision=1.323&view=markup
[Rust API guidelines]: https://rust-lang.github.io/api-guidelines/necessities.html#crate-and-its-dependencies-have-a-permissive-license-c-permissive
[live coding streams]: https://www.youtube.com/playlist?list=PLqbS7AVVErFj824-6QgnK_Za1187rNfnl
[this tweet]: https://twitter.com/jonhoo/status/1194969578855714816
[upstream tests]: https://hg.openjdk.java.net/jdk/jdk13/file/tip/test/jdk/java/util/concurrent/ConcurrentHashMap
[papaya]: https://github.com/ibraheemdev/papaya
[dashmap]: https://github.com/xacrimon/dashmap

35
vendor/flurry/benches/README.md vendored Normal file
View File

@@ -0,0 +1,35 @@
## Benchmarks
Currently, benchmarks following those of [`dashmap`](https://github.com/xacrimon/dashmap/tree/master/benches) and [`hashbrown`](https://github.com/rust-lang/hashbrown/blob/master/benches/bench.rs) are provided.
To compare against other hashmap implementations, the benchmarks located in the respective repositories may be executed.
Note that `flurry`, like `dashmap`, uses [`criterion`](https://docs.rs/criterion/0.3.1/criterion/) (and [`rayon`](https://docs.rs/rayon/1.3.0/rayon/) for parallel testing), while `hashbrown` uses [`test::bench`](https://doc.rust-lang.org/test/bench/index.html).
To run the `flurry` benchmarks, just run
```console
$ cargo bench
```
or
```console
$ cargo bench <BENCHNAME>
```
to only run benches containing `<BENCHNAME>` in their names.
To run the original `dashmap` benchmarks:
```console
$ git clone https://github.com/xacrimon/dashmap.git
$ cd dashmap
$ cargo bench [<BENCHNAME>]
```
To run the original `hashbrown` benchmarks:
```console
$ git clone https://github.com/rust-lang/hashbrown.git
$ cd hashbrown
$ cargo bench [<BENCHNAME>]
```

21
vendor/flurry/benches/dashmap.LICENSE vendored Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019 Acrimon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

181
vendor/flurry/benches/flurry_dashmap.rs vendored Normal file
View File

@@ -0,0 +1,181 @@
/* Benchmarks from `dashmap` (https://github.com/xacrimon/dashmap),
* adapted to flurry for comparison:
*
* This benchmark suite contains benchmarks for concurrent insertion
* and retrieval (get).
* Currently, this file provides two versions of each test, one which
* follows the original implementation in using `par_iter().for_each()`,
* which necessitates creating a new guard for each operation since
* guards are not `Send + Sync`, and one version which uses threads
* spawned in scopes. The latter version is able to create only one
* guard per thread, but incurs overhead from the setup of the more
* heavyweight threading environment.
*
* For the associated license information, please refer to dashmap.LICENSE.
*/
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use flurry::HashMap;
use rayon;
use rayon::prelude::*;
use std::sync::Arc;
/* DASHMAP */
// Total number of key/value pairs processed per benchmark iteration (32 Ki).
const ITER: u64 = 32 * 1024;
/// Builds a map by inserting the keys `0..ITER` in parallel, pinning a
/// fresh epoch guard for every single insert (the per-operation-guard
/// variant of the dashmap insert benchmark).
fn task_insert_flurry_u64_u64_guard_every_it() -> HashMap<u64, u64> {
    let table = HashMap::with_capacity(ITER as usize);
    (0..ITER).into_par_iter().for_each(|key| {
        // A new guard per operation: guards are not Send + Sync, so the
        // rayon closure cannot share one across work items.
        let pin = table.guard();
        table.insert(key, key + 7, &pin);
    });
    table
}
/// Criterion driver for the per-operation-guard insert task, sweeping the
/// rayon pool size from 1 up to the number of logical CPUs.
fn insert_flurry_u64_u64_guard_every_it(c: &mut Criterion) {
    let mut group = c.benchmark_group("insert_flurry_u64_u64_guard_every_it");
    group.throughput(Throughput::Elements(ITER as u64));
    let cpu_count = num_cpus::get();
    for num_threads in 1..=cpu_count {
        group.bench_with_input(
            BenchmarkId::from_parameter(num_threads),
            &num_threads,
            |b, &num_threads| {
                // Dedicated pool so the measured parallelism is exactly
                // `num_threads`, independent of rayon's global pool.
                let pool = rayon::ThreadPoolBuilder::new()
                    .num_threads(num_threads)
                    .build()
                    .unwrap();
                pool.install(|| b.iter(task_insert_flurry_u64_u64_guard_every_it));
            },
        );
    }
    group.finish();
}
/// Inserts `ITER` keys using `threads` rayon tasks, with each task pinning
/// a single guard for its whole key range (the one-guard-per-thread variant).
///
/// Fix: the original loop ran `t` over `1..=threads` with `start = t * inc`,
/// so the inserted keys were `inc..ITER + inc` rather than `0..ITER`. Readers
/// that expect keys `0..ITER` (such as the paired `get` tasks) would then
/// silently miss the first `inc` keys. Running `t` over `0..threads` makes
/// the covered range `0..ITER` as intended.
///
/// NOTE(review): when `threads` does not divide `ITER`, `inc * threads` is
/// slightly less than `ITER` and the tail keys are not inserted — this
/// rounding behavior matches the original.
fn task_insert_flurry_u64_u64_guard_once(threads: usize) -> HashMap<u64, u64> {
    let map = Arc::new(HashMap::with_capacity(ITER as usize));
    let inc = ITER / (threads as u64);
    rayon::scope(|s| {
        for t in 0..(threads as u64) {
            let m = map.clone();
            s.spawn(move |_| {
                let start = t * inc;
                // One guard amortized over the whole per-task range.
                let guard = m.guard();
                for i in start..(start + inc) {
                    m.insert(i, i + 7, &guard);
                }
            });
        }
    });
    // All spawned tasks have joined at the end of the scope, so this is the
    // only remaining reference.
    Arc::try_unwrap(map).unwrap()
}
/// Criterion driver for the one-guard-per-thread insert task, sweeping the
/// rayon pool size from 1 up to the number of logical CPUs.
fn insert_flurry_u64_u64_guard_once(c: &mut Criterion) {
    let mut group = c.benchmark_group("insert_flurry_u64_u64_guard_once");
    group.throughput(Throughput::Elements(ITER as u64));
    let cpu_count = num_cpus::get();
    for num_threads in 1..=cpu_count {
        group.bench_with_input(
            BenchmarkId::from_parameter(num_threads),
            &num_threads,
            |b, &num_threads| {
                // Dedicated pool sized to the swept thread count.
                let pool = rayon::ThreadPoolBuilder::new()
                    .num_threads(num_threads)
                    .build()
                    .unwrap();
                pool.install(|| b.iter(|| task_insert_flurry_u64_u64_guard_once(num_threads)));
            },
        );
    }
    group.finish();
}
/// Looks up every key in `0..ITER` in parallel, pinning a fresh guard per
/// lookup, and asserts the value stored by the insert task (`key + 7`).
fn task_get_flurry_u64_u64_guard_every_it(map: &HashMap<u64, u64>) {
    (0..ITER).into_par_iter().for_each(|key| {
        let pin = map.guard();
        let value = map.get(&key, &pin).unwrap();
        assert_eq!(*value, key + 7);
    });
}
/// Criterion driver for the per-operation-guard get task; the map is
/// pre-populated outside the timed section for each thread count.
fn get_flurry_u64_u64_guard_every_it(c: &mut Criterion) {
    let mut group = c.benchmark_group("get_flurry_u64_u64_guard_every_it");
    group.throughput(Throughput::Elements(ITER as u64));
    let cpu_count = num_cpus::get();
    for num_threads in 1..=cpu_count {
        // Fill the map up front so only lookups are measured.
        let map = task_insert_flurry_u64_u64_guard_every_it();
        group.bench_with_input(
            BenchmarkId::from_parameter(num_threads),
            &num_threads,
            |b, &num_threads| {
                let pool = rayon::ThreadPoolBuilder::new()
                    .num_threads(num_threads)
                    .build()
                    .unwrap();
                pool.install(|| b.iter(|| task_get_flurry_u64_u64_guard_every_it(&map)));
            },
        );
    }
    group.finish();
}
/// Looks up `ITER` keys using `threads` rayon tasks, with one guard pinned
/// per task, asserting the stored value (`key + 7`) for every key found.
///
/// Fix: the original loop ran `t` over `1..=threads` with `start = t * inc`,
/// so it probed keys `inc..ITER + inc` while the map (filled by
/// `task_insert_flurry_u64_u64_guard_every_it`) only holds `0..ITER`; the
/// last `inc` probes therefore always missed and were silently skipped.
/// Running `t` over `0..threads` probes `0..ITER` as intended. The `if let`
/// is kept so the task still tolerates keys absent due to rounding when
/// `threads` does not divide `ITER`.
fn task_get_flurry_u64_u64_guard_once(threads: usize, map: Arc<HashMap<u64, u64>>) {
    let inc = ITER / (threads as u64);
    rayon::scope(|s| {
        for t in 0..(threads as u64) {
            let m = map.clone();
            s.spawn(move |_| {
                let start = t * inc;
                // One guard amortized over the whole per-task range.
                let guard = m.guard();
                for i in start..(start + inc) {
                    if let Some(&v) = m.get(&i, &guard) {
                        assert_eq!(v, i + 7);
                    }
                }
            });
        }
    });
}
/// Criterion driver for the one-guard-per-thread get task; the shared map
/// is pre-populated outside the timed section for each thread count.
fn get_flurry_u64_u64_guard_once(c: &mut Criterion) {
    let mut group = c.benchmark_group("get_flurry_u64_u64_guard_once");
    group.throughput(Throughput::Elements(ITER as u64));
    let cpu_count = num_cpus::get();
    for num_threads in 1..=cpu_count {
        // Fill the map up front; Arc lets the spawned tasks share it.
        let map = Arc::new(task_insert_flurry_u64_u64_guard_every_it());
        group.bench_with_input(
            BenchmarkId::from_parameter(num_threads),
            &num_threads,
            |b, &num_threads| {
                let pool = rayon::ThreadPoolBuilder::new()
                    .num_threads(num_threads)
                    .build()
                    .unwrap();
                pool.install(|| {
                    b.iter(|| task_get_flurry_u64_u64_guard_once(num_threads, map.clone()))
                });
            },
        );
    }
    group.finish();
}
// Register the four benchmark entry points and generate the `main` function.
criterion_group!(
    benches,
    insert_flurry_u64_u64_guard_every_it,
    get_flurry_u64_u64_guard_every_it,
    insert_flurry_u64_u64_guard_once,
    get_flurry_u64_u64_guard_once,
);
criterion_main!(benches);

View File

@@ -0,0 +1,201 @@
/* Benchmarks from `hashbrown` (https://github.com/rust-lang/hashbrown),
* adapted to flurry for comparison:
*
* This benchmark suite contains some benchmarks along a set of dimensions:
* Int key distribution: low bit heavy, top bit heavy, and random.
* Task: basic functionality: insert, insert_erase, lookup, lookup_fail, iter
*
* For the associated license information, please refer to hashbrown.LICENSE.
*/
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use flurry::HashMap;
// Number of key/value pairs inserted per benchmark iteration.
const SIZE: usize = 1000;
/// Deterministic, infinite stream of pseudo-random `usize` keys.
///
/// Each step increments the state and multiplies by a fixed 32-bit prime
/// (wrapping on overflow), scattering successive keys across the key space
/// while keeping the sequence reproducible across runs.
#[derive(Clone, Copy)]
struct RandomKeys {
    // Current generator state; the next key is derived from it in-place.
    state: usize,
}
impl RandomKeys {
    /// Starts the sequence from state zero.
    fn new() -> Self {
        Self { state: 0 }
    }
}
impl Iterator for RandomKeys {
    type Item = usize;

    /// Advances the state (add 1, multiply by a 32-bit prime) and yields it.
    fn next(&mut self) -> Option<usize> {
        let scrambled = self.state.wrapping_add(1).wrapping_mul(3_787_392_781);
        self.state = scrambled;
        Some(scrambled)
    }
}
// Generates a Criterion benchmark function that runs `$bench_macro` over
// three key distributions: sequential (low-bit heavy), byte-swapped
// (top-bit heavy), and pseudo-random (`RandomKeys`).
macro_rules! bench_suite {
    ($bench_macro:ident, $bench_fn_name:ident, $group_name:expr $(,)?) => {
        fn $bench_fn_name(c: &mut Criterion) {
            let mut group = c.benchmark_group($group_name);
            group.throughput(Throughput::Elements(SIZE as u64));
            $bench_macro!(group, 0.., "low_guard_once");
            $bench_macro!(group, (0..).map(usize::swap_bytes), "high_guard_once");
            $bench_macro!(group, RandomKeys::new(), "random_guard_once");
            group.finish();
        }
    };
}
// Benchmark body: clear the map, then insert SIZE keys from `$keydist`,
// reusing a single guard for the whole timed iteration.
macro_rules! bench_insert {
    ($group:ident, $keydist:expr, $bench_id: expr) => {
        $group.bench_function(BenchmarkId::from_parameter($bench_id), |b| {
            let map: HashMap<_, _> = HashMap::with_capacity(SIZE as usize);
            b.iter(|| {
                let guard = map.guard();
                map.clear(&guard);
                ($keydist).take(SIZE).for_each(|i| {
                    map.insert(i, i, &guard);
                });
                // Keep the map observable so the optimizer cannot discard
                // the inserted data.
                black_box(&map);
            });
        });
    };
}
bench_suite!(
    bench_insert,
    insert_flurry_hashbrown,
    "insert_flurry_hashbrown",
);
// Benchmark body: starting from a pre-filled base map, each timed iteration
// clones the base and performs SIZE paired insert/remove operations so the
// map size stays constant while entries churn.
macro_rules! bench_insert_erase {
    ($group:ident, $keydist:expr, $bench_id: expr) => {
        let base: HashMap<_, _> = HashMap::with_capacity(SIZE as usize);
        {
            // NOTE: in testing, I tried running this without the local scope.
            // not dropping the guard and pinning the epoch for the entire benchmark literally
            // crashed multiple programs on my PC, so I advise not to do that...
            let guard = base.guard();
            ($keydist).take(SIZE).for_each(|i| {
                base.insert(i, i, &guard);
            });
        }
        // A fresh copy of the first SIZE keys from the distribution, cloned
        // per iteration to drive the insert side of the churn.
        let skip = ($keydist).take(SIZE);
        $group.bench_function(BenchmarkId::from_parameter($bench_id), |b| {
            b.iter(|| {
                let mut map = base.clone();
                let mut add_iter = skip.clone();
                let mut remove_iter = $keydist;
                // While keeping the size constant,
                // replace the first keydist with the second.
                let guard = map.guard();
                (&mut add_iter)
                    .zip(&mut remove_iter)
                    .take(SIZE)
                    .for_each(|(add, remove)| {
                        map.insert(add, add, &guard);
                        black_box(map.remove(&remove, &guard));
                    });
                // Unpin before the map is dropped (see the NOTE above).
                drop(guard);
                black_box(&mut map);
            });
        });
    };
}
bench_suite!(
    bench_insert_erase,
    insert_erase_flurry_hashbrown,
    "insert_erase_flurry_hashbrown",
);
// Benchmark body: look up SIZE present keys per timed iteration, with a
// single guard reused for the whole batch.
macro_rules! bench_lookup {
    ($group:ident, $keydist:expr, $bench_id: expr) => {
        let map: HashMap<_, _> = HashMap::with_capacity(SIZE as usize);
        {
            // see bench_insert_erase for a comment on the local scope
            let guard = map.guard();
            ($keydist).take(SIZE).for_each(|i| {
                map.insert(i, i, &guard);
            });
        }
        $group.bench_function(BenchmarkId::from_parameter($bench_id), |b| {
            b.iter(|| {
                let guard = map.guard();
                ($keydist).take(SIZE).for_each(|i| {
                    black_box(map.get(&i, &guard));
                });
            });
        });
    };
}
bench_suite!(bench_lookup, get_flurry_hashbrown, "get_flurry_hashbrown",);
// Benchmark body: look up absent keys. `iter` persists across timed
// iterations: the first SIZE keys are consumed for the initial insert, so
// every subsequent `(&mut iter).take(SIZE)` batch advances into keys that
// were never inserted, making each lookup a miss.
macro_rules! bench_lookup_fail {
    ($group:ident, $keydist:expr, $bench_id: expr) => {
        let map: HashMap<_, _> = HashMap::with_capacity(SIZE as usize);
        let mut iter = $keydist;
        {
            // see bench_insert_erase for a comment on the local scope
            let guard = map.guard();
            (&mut iter).take(SIZE).for_each(|i| {
                map.insert(i, i, &guard);
            });
        }
        $group.bench_function(BenchmarkId::from_parameter($bench_id), |b| {
            b.iter(|| {
                let guard = map.guard();
                (&mut iter).take(SIZE).for_each(|i| {
                    black_box(map.get(&i, &guard));
                });
            });
        });
    };
}
bench_suite!(
    bench_lookup_fail,
    get_absent_flurry_hashbrown,
    "get_absent_flurry_hashbrown",
);
// Benchmark body: iterate over all entries of a pre-filled map, with a
// single guard held for the full traversal.
macro_rules! bench_iter {
    ($group:ident, $keydist:expr, $bench_id: expr) => {
        let map: HashMap<_, _> = HashMap::with_capacity(SIZE as usize);
        {
            // see bench_insert_erase for a comment on the local scope
            let guard = map.guard();
            ($keydist).take(SIZE).for_each(|i| {
                map.insert(i, i, &guard);
            });
        }
        $group.bench_function(BenchmarkId::from_parameter($bench_id), |b| {
            b.iter(|| {
                let guard = map.guard();
                for k in map.iter(&guard) {
                    black_box(k);
                }
            });
        });
    };
}
bench_suite!(bench_iter, iter_flurry_hashbrown, "iter_flurry_hashbrown",);
// Register the five benchmark entry points and generate the `main` function.
criterion_group!(
    benches,
    insert_flurry_hashbrown,
    insert_erase_flurry_hashbrown,
    get_flurry_hashbrown,
    get_absent_flurry_hashbrown,
    iter_flurry_hashbrown,
);
criterion_main!(benches);

236
vendor/flurry/benches/hashbrown.LICENSE vendored Normal file
View File

@@ -0,0 +1,236 @@
Hashbrown is licensed under either the Apache License,
Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0),
or the MIT license (http://opensource.org/licenses/MIT),
at your option.
--------------------- MIT ---------------------
Copyright (c) 2016 Amanieu d'Antras
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------- APACHE ---------------------
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

19
vendor/flurry/codecov.yml vendored Normal file
View File

@@ -0,0 +1,19 @@
# Hold ourselves to a high bar
coverage:
range: 85..100
round: down
precision: 2
status:
project:
default:
threshold: 1%
# Tests and Java files aren't important for coverage
ignore:
- "tests"
- "jsr166"
# Make less noisy comments
comment:
layout: "files"
require_changes: yes

38
vendor/flurry/deny.toml vendored Normal file
View File

@@ -0,0 +1,38 @@
[advisories]
version = 2
ignore = []
[licenses]
version = 2
allow = [
"MIT",
"BSD-2-Clause",
"CC0-1.0",
"Apache-2.0",
"Apache-2.0 WITH LLVM-exception",
]
confidence-threshold = 0.8
[bans]
multiple-versions = "warn"
highlight = "all"
allow = [
#{ name = "ansi_term", version = "=0.11.0" },
]
deny = [
# Each entry the name of a crate and a version range. If version is
# not specified, all versions will be matched.
#{ name = "ansi_term", version = "=0.11.0" },
]
skip = [
#{ name = "ansi_term", version = "=0.11.0" },
]
skip-tree = [
#{ name = "ansi_term", version = "=0.11.0", depth = 20 },
]
[sources]
unknown-registry = "warn"
unknown-git = "warn"
allow-git = []

0
vendor/flurry/lsan-suppressions.txt vendored Normal file
View File

1
vendor/flurry/rustfmt.toml vendored Normal file
View File

@@ -0,0 +1 @@
edition = "2018"

119
vendor/flurry/src/iter/mod.rs vendored Normal file
View File

@@ -0,0 +1,119 @@
mod traverser;
pub(crate) use traverser::NodeIter;
use crate::reclaim::{Guard, Shared};
use std::sync::atomic::Ordering;
/// An iterator over a map's entries.
///
/// See [`HashMap::iter`](crate::HashMap::iter) for details.
#[derive(Debug)]
pub struct Iter<'g, K, V> {
    // Underlying traversal over the map's bins; yields raw `Node`s.
    pub(crate) node_iter: NodeIter<'g, K, V>,
    // Guard under which all value loads are performed; yielded references
    // remain valid for as long as this guard is held.
    pub(crate) guard: &'g Guard<'g>,
}
impl<'g, K, V> Iter<'g, K, V> {
    /// Advances the underlying node iterator and returns the next key
    /// together with the raw `Shared` pointer to its value.
    ///
    /// Kept separate from `Iterator::next` so internal callers can work with
    /// the `Shared` pointer itself instead of a dereferenced `&V`.
    pub(crate) fn next_internal(&mut self) -> Option<(&'g K, Shared<'g, V>)> {
        let node = self.node_iter.next()?;
        let value = node.value.load(Ordering::SeqCst, self.guard);
        Some((&node.key, value))
    }
}
impl<'g, K, V> Iterator for Iter<'g, K, V> {
    type Item = (&'g K, &'g V);

    fn next(&mut self) -> Option<Self::Item> {
        let (key, value) = self.next_internal()?;
        // safety: flurry does not drop or move values read under a guard
        // until after that guard is dropped.
        let value = unsafe { &**value.deref() };
        Some((key, value))
    }
}
/// An iterator over a map's keys.
///
/// See [`HashMap::keys`](crate::HashMap::keys) for details.
#[derive(Debug)]
pub struct Keys<'g, K, V> {
    pub(crate) node_iter: NodeIter<'g, K, V>,
}

impl<'g, K, V> Iterator for Keys<'g, K, V> {
    type Item = &'g K;

    fn next(&mut self) -> Option<Self::Item> {
        // Keys are stored inline in the nodes, so unlike `Values` no
        // guard-protected load is needed here.
        self.node_iter.next().map(|node| &node.key)
    }
}
/// An iterator over a map's values.
///
/// See [`HashMap::values`](crate::HashMap::values) for details.
#[derive(Debug)]
pub struct Values<'g, K, V> {
    pub(crate) node_iter: NodeIter<'g, K, V>,
    pub(crate) guard: &'g Guard<'g>,
}

impl<'g, K, V> Iterator for Values<'g, K, V> {
    type Item = &'g V;

    fn next(&mut self) -> Option<Self::Item> {
        let node = self.node_iter.next()?;
        let shared = node.value.load(Ordering::SeqCst, self.guard);
        // safety: flurry does not drop or move values read under a guard
        // until after that guard is dropped.
        Some(unsafe { shared.deref() })
    }
}
#[cfg(test)]
mod tests {
    use crate::HashMap;
    use std::collections::HashSet;
    use std::iter::FromIterator;

    // Each test inserts two entries and checks the corresponding iterator
    // yields exactly those entries (order-insensitive via HashSet).

    #[test]
    fn iter() {
        let map = HashMap::<usize, usize>::new();
        let guard = map.guard();
        map.insert(1, 42, &guard);
        map.insert(2, 84, &guard);
        // take a fresh guard for the read
        let guard = map.guard();
        assert_eq!(
            map.iter(&guard).collect::<HashSet<(&usize, &usize)>>(),
            HashSet::from_iter(vec![(&1, &42), (&2, &84)])
        );
    }

    #[test]
    fn keys() {
        let map = HashMap::<usize, usize>::new();
        let guard = map.guard();
        map.insert(1, 42, &guard);
        map.insert(2, 84, &guard);
        let guard = map.guard();
        assert_eq!(
            map.keys(&guard).collect::<HashSet<&usize>>(),
            HashSet::from_iter(vec![&1, &2])
        );
    }

    #[test]
    fn values() {
        let map = HashMap::<usize, usize>::new();
        let mut guard = map.guard();
        map.insert(1, 42, &guard);
        map.insert(2, 84, &guard);
        // refresh the existing guard instead of taking a new one
        guard.refresh();
        assert_eq!(
            map.values(&guard).collect::<HashSet<&usize>>(),
            HashSet::from_iter(vec![&42, &84])
        );
    }
}

320
vendor/flurry/src/iter/traverser.rs vendored Normal file
View File

@@ -0,0 +1,320 @@
use crate::node::{BinEntry, Node, TreeNode};
use crate::raw::Table;
use crate::reclaim::{Guard, Linked, Shared};
use std::sync::atomic::Ordering;
/// An iterator over all nodes of a table, following forwarding nodes into
/// newer tables when a resize is encountered mid-traversal.
#[derive(Debug)]
pub(crate) struct NodeIter<'g, K, V> {
    /// Current table; update if resized
    table: Option<&'g Linked<Table<K, V>>>,
    /// Saved positions to return to after traversing a forwarded table.
    stack: Option<Box<TableStack<'g, K, V>>>,
    /// Popped stack frames kept around for reuse by `push_state`.
    spare: Option<Box<TableStack<'g, K, V>>>,
    /// The last bin entry iterated over
    prev: Option<&'g Node<K, V>>,
    /// Index of bin to use next
    index: usize,
    /// Current index of initial table
    base_index: usize,
    /// Index bound for initial table
    base_limit: usize,
    /// Initial table size
    base_size: usize,
    /// Guard under which all bin/pointer loads are performed.
    guard: &'g Guard<'g>,
}
impl<'g, K, V> NodeIter<'g, K, V> {
    /// Creates an iterator over every node reachable from `table`.
    ///
    /// A null `table` produces an empty iterator (base limit of 0).
    pub(crate) fn new(table: Shared<'g, Table<K, V>>, guard: &'g Guard<'_>) -> Self {
        let (table, len) = if table.is_null() {
            (None, 0)
        } else {
            // safety: flurry guarantees that a table read under a guard is never dropped or moved
            // until after that guard is dropped.
            let table = unsafe { table.deref() };
            (Some(table), table.len())
        };
        Self {
            table,
            stack: None,
            spare: None,
            prev: None,
            base_size: len,
            base_index: 0,
            index: 0,
            base_limit: len,
            guard,
        }
    }

    /// Saves the current position (table `t`, bin index `i`, table length `n`)
    /// before descending into a forwarded table, so traversal can later
    /// resume where it left off.
    ///
    /// Reuses a frame from `self.spare` when one is available to avoid
    /// re-allocating on every forwarding node.
    fn push_state(&mut self, t: &'g Linked<Table<K, V>>, i: usize, n: usize) {
        let mut s = self.spare.take();
        if let Some(ref mut s) = s {
            self.spare = s.next.take();
        }
        let target = TableStack {
            table: t,
            length: n,
            index: i,
            next: self.stack.take(),
        };
        self.stack = if let Some(mut s) = s {
            *s = target;
            Some(s)
        } else {
            Some(Box::new(target))
        };
    }

    /// Restores saved traversal state after finishing a forwarded table of
    /// length `n`, popping as many frames as needed and advancing to the next
    /// bin that still has to be visited.
    ///
    /// Popped frames are stashed in `self.spare` for reuse by `push_state`.
    fn recover_state(&mut self, mut n: usize) {
        while let Some(ref mut s) = self.stack {
            if self.index + s.length < n {
                // if we haven't checked the high "side" of this bucket,
                // then do _not_ pop the stack frame,
                // and instead move on to that bin.
                self.index += s.length;
                break;
            }
            // we _are_ popping the stack
            let mut s = self.stack.take().expect("while let Some");
            n = s.length;
            self.index = s.index;
            self.table = Some(s.table);
            self.stack = s.next.take();
            // save stack frame for re-use
            s.next = self.spare.take();
            self.spare = Some(s);
        }
        if self.stack.is_none() {
            // move to next "part" of the top-level bin in the largest table
            self.index += self.base_size;
            if self.index >= n {
                // we've gone past the last part of this top-level bin,
                // so move to the _next_ top-level bin.
                self.base_index += 1;
                self.index = self.base_index;
            }
        }
    }
}
impl<'g, K, V> Iterator for NodeIter<'g, K, V> {
    type Item = &'g Node<K, V>;

    /// Yields the next node: first by following the `next` chain of the
    /// previously yielded node, then by advancing across bins (and into
    /// forwarded tables when a `Moved` bin is encountered).
    fn next(&mut self) -> Option<Self::Item> {
        let mut e = None;
        // Continue along the chain of the previously yielded node, if any.
        if let Some(prev) = self.prev {
            let next = prev.next.load(Ordering::SeqCst, self.guard);
            if !next.is_null() {
                // we have to check if we are iterating over a regular bin or a
                // TreeBin. the Java code gets away without this due to
                // inheritance (everything is a node), but we have to explicitly
                // check
                // safety: flurry does not drop or move until after guard drop
                match **unsafe { next.deref() } {
                    BinEntry::Node(ref node) => {
                        e = Some(node);
                    }
                    BinEntry::TreeNode(ref tree_node) => {
                        e = Some(&tree_node.node);
                    }
                    BinEntry::Moved | BinEntry::Tree(_) => {
                        unreachable!("Nodes can only point to Nodes or TreeNodes")
                    }
                }
            }
        }
        loop {
            // If the chain produced a node, remember and yield it.
            if e.is_some() {
                self.prev = e;
                return e;
            }
            // Exhausted? (past the base limit, no table, or past this
            // table's last bin)
            // safety: flurry does not drop or move until after guard drop
            if self.base_index >= self.base_limit
                || self.table.is_none()
                || self.table.as_ref().unwrap().len() <= self.index
            {
                self.prev = None;
                return None;
            }
            let t = self.table.expect("is_none in if above");
            let i = self.index;
            let n = t.len();
            let bin = t.bin(i, self.guard);
            if !bin.is_null() {
                // safety: flurry does not drop or move until after guard drop
                let bin = unsafe { bin.deref() };
                match **bin {
                    BinEntry::Moved => {
                        // recurse down into the target table
                        // safety: same argument as for following Moved in Table::find
                        self.table = Some(unsafe { t.next_table(self.guard).deref() });
                        self.prev = None;
                        // make sure we can get back "up" to where we're at
                        self.push_state(t, i, n);
                        continue;
                    }
                    BinEntry::Node(ref node) => {
                        e = Some(node);
                    }
                    BinEntry::Tree(ref tree_bin) => {
                        // since we want to iterate over all entries, TreeBins
                        // are also traversed via the `next` pointers of their
                        // contained node
                        e = Some(
                            // safety: `bin` was read under our guard, at which
                            // point the tree was valid. Since our guard marks
                            // the current thread as active, the TreeNodes remain valid for
                            // at least as long as we hold onto the guard.
                            // Structurally, TreeNodes always point to TreeNodes, so this is sound.
                            &unsafe {
                                TreeNode::get_tree_node(
                                    tree_bin.first.load(Ordering::SeqCst, self.guard),
                                )
                            }
                            .node,
                        );
                    }
                    BinEntry::TreeNode(_) => unreachable!(
                        "The head of a bin cannot be a TreeNode directly without BinEntry::Tree"
                    ),
                }
            }
            // Advance to the next bin, restoring saved state if we are
            // currently inside a forwarded table.
            if self.stack.is_some() {
                self.recover_state(n);
            } else {
                self.index = i + self.base_size;
                if self.index >= n {
                    self.base_index += 1;
                    self.index = self.base_index;
                }
            }
        }
    }
}
/// A saved traversal position, pushed when descending into the forwarded
/// table of a `BinEntry::Moved` bin and popped when returning from it.
#[derive(Debug)]
struct TableStack<'g, K, V> {
    /// Length of `table` at the time the frame was pushed.
    length: usize,
    /// Bin index to resume at in `table`.
    index: usize,
    /// The table to return to.
    table: &'g Linked<Table<K, V>>,
    /// The next-outer saved frame (frames form a singly-linked list).
    next: Option<Box<TableStack<'g, K, V>>>,
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::raw::Table;
    use crate::reclaim::Atomic;
    use parking_lot::Mutex;

    // A null table yields no nodes.
    #[test]
    fn iter_new() {
        let guard = unsafe { seize::Guard::unprotected() };
        let iter = NodeIter::<usize, usize>::new(Shared::null(), &guard);
        assert_eq!(iter.count(), 0);
    }

    // A table with only empty bins yields no nodes.
    #[test]
    fn iter_empty() {
        let collector = seize::Collector::new();
        let table = Shared::boxed(Table::<usize, usize>::new(16, &collector), &collector);
        let guard = collector.enter();
        let iter = NodeIter::new(table, &guard);
        assert_eq!(iter.count(), 0);
        // safety: nothing holds on to references into the table any more
        let mut t = unsafe { table.into_box() };
        t.drop_bins();
    }

    // A table with one populated bin yields exactly that node.
    #[test]
    fn iter_simple() {
        let collector = seize::Collector::new();
        let mut bins = vec![Atomic::null(); 16];
        // bin 8 holds a single node with key 0
        bins[8] = Atomic::from(Shared::boxed(
            BinEntry::Node(Node {
                hash: 0,
                key: 0usize,
                value: Atomic::from(Shared::boxed(0usize, &collector)),
                next: Atomic::null(),
                lock: Mutex::new(()),
            }),
            &collector,
        ));
        let table = Shared::boxed(Table::from(bins, &collector), &collector);
        let guard = collector.enter();
        {
            let mut iter = NodeIter::new(table, &guard);
            let e = iter.next().unwrap();
            assert_eq!(e.key, 0);
            assert!(iter.next().is_none());
        }
        // safety: nothing holds on to references into the table any more
        let mut t = unsafe { table.into_box() };
        t.drop_bins();
    }

    // A table whose upper bins forward to a deeper table: the iterator must
    // follow the Moved entries and still yield the node exactly once.
    #[test]
    fn iter_fw() {
        // construct the forwarded-to table
        let collector = seize::Collector::new();
        let mut deep_bins = vec![Atomic::null(); 16];
        deep_bins[8] = Atomic::from(Shared::boxed(
            BinEntry::Node(Node {
                hash: 0,
                key: 0usize,
                value: Atomic::from(Shared::boxed(0usize, &collector)),
                next: Atomic::null(),
                lock: Mutex::new(()),
            }),
            &collector,
        ));
        let guard = collector.enter();
        let deep_table = Shared::boxed(Table::from(deep_bins, &collector), &collector);
        // construct the forwarded-from table
        let mut bins = vec![Shared::null(); 16];
        let table = Table::<usize, usize>::new(bins.len(), &collector);
        for bin in &mut bins[8..] {
            // this also sets table.next_table to deep_table
            *bin = table.get_moved(deep_table, &guard);
        }
        // this cannot use Table::from(bins), since we need the table to get
        // the Moved and set its next_table
        for i in 0..bins.len() {
            table.store_bin(i, bins[i]);
        }
        let table = Shared::boxed(table, &collector);
        {
            let mut iter = NodeIter::new(table, &guard);
            let e = iter.next().unwrap();
            assert_eq!(e.key, 0);
            assert!(iter.next().is_none());
        }
        // safety: nothing holds on to references into the table any more
        let mut t = unsafe { table.into_box() };
        t.drop_bins();
        // no one besides this test case uses deep_table
        unsafe { deep_table.into_box() }.drop_bins();
    }
}

360
vendor/flurry/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,360 @@
//! A concurrent hash table based on Java's `ConcurrentHashMap`.
//!
//! A hash table that supports full concurrency of retrievals and high expected concurrency for
//! updates. This type is functionally very similar to `std::collections::HashMap`, and for the
//! most part has a similar API. Even though all operations on the map are thread-safe and operate
//! on shared references, retrieval operations do *not* entail locking, and there is *not* any
//! support for locking the entire table in a way that prevents all access.
//!
//! # Better Alternatives
//!
//! Flurry currently suffers performance and memory usage issues under load.
//! You may wish to consider [`papaya`] or [`dashmap`] as alternatives if this is
//! important to you.
//!
//! # A note on `Guard` and memory use
//!
//! You may have noticed that many of the access methods on this map take a reference to a
//! [`Guard`]. The exact details of this are beyond the scope of this documentation (for
//! that, see the [`seize`] crate), but some of the implications bear repeating here. You obtain a
//! `Guard` using [`HashMap::guard`], and you can use references to the same guard to make multiple API
//! calls if you wish. Whenever you get a reference to something stored in the map, that reference
//! is tied to the lifetime of the `Guard` that you provided. This is because each `Guard` prevents
//! the destruction of any item associated with it. Whenever something is read under a `Guard`,
//! that something stays around for _at least_ as long as the `Guard` does. The map delays
//! deallocating values until it is safe to do so, and in order to amortize the cost of the necessary
//! bookkeeping it may delay even further until there's a _batch_ of items that need to be
//! deallocated.
//!
//! Notice that there is a trade-off here. Creating and dropping a `Guard` is not free, since it
//! also needs to interact with said bookkeeping. But if you keep one around for a long time, you
//! may accumulate much garbage which will take up valuable free memory on your system. Use your
//! best judgement in deciding whether or not to re-use a `Guard`.
//!
//! # Consistency
//!
//! Retrieval operations (including [`get`](HashMap::get)) generally do not block, so may
//! overlap with update operations (including [`insert`](HashMap::insert)). Retrievals
//! reflect the results of the most recently *completed* update operations holding upon their
//! onset. (More formally, an update operation for a given key bears a _happens-before_ relation
//! with any successful retrieval for that key reporting the updated value.)
//!
//! Operations that inspect the map as a whole, rather than a single key, operate on a snapshot of
//! the underlying table. For example, iterators return elements reflecting the state of the hash
//! table at some point at or since the creation of the iterator. Aggregate status methods like
//! [`len`](HashMap::len) are typically useful only when a map is not undergoing concurrent
//! updates in other threads. Otherwise the results of these methods reflect transient states that
//! may be adequate for monitoring or estimation purposes, but not for program control.
//! Similarly, [`Clone`](std::clone::Clone) may not produce a "perfect" clone if the underlying
//! map is being concurrently modified.
//!
//! # Resizing behavior
//!
//! The table is dynamically expanded when there are too many collisions (i.e., keys that have
//! distinct hash codes but fall into the same slot modulo the table size), with the expected
//! average effect of maintaining roughly two bins per mapping (corresponding to a 0.75 load factor
//! threshold for resizing). There may be much variance around this average as mappings are added
//! and removed, but overall, this maintains a commonly accepted time/space tradeoff for hash
//! tables. However, resizing this or any other kind of hash table may be a relatively slow
//! operation. When possible, it is a good idea to provide a size estimate by using the
//! [`with_capacity`](HashMap::with_capacity) constructor. Note that using many keys with
//! exactly the same [`Hash`](std::hash::Hash) value is a sure way to slow down performance of any
//! hash table. To ameliorate impact, keys are required to be [`Ord`](std::cmp::Ord). This is used
//! by the map to more efficiently store bins that contain a large number of elements with
//! colliding hashes using the comparison order on their keys.
//!
/*
//! TODO: dynamic load factor
//! */
//! # Hash Sets
//!
//! Flurry also supports concurrent hash sets, which may be created through [`HashSet`]. Hash sets
//! offer the same instantiation options as [`HashMap`], such as [`new`](HashSet::new) and
//! [`with_capacity`](HashSet::with_capacity).
//!
/*
//! TODO: frequency map through computeIfAbsent
//!
//! TODO: bulk operations like forEach, search, and reduce
//! */
//! # Implementation notes
//!
//! This data-structure is a pretty direct port of Java's `java.util.concurrent.ConcurrentHashMap`
//! [from Doug Lea and the rest of the JSR166
//! team](http://gee.cs.oswego.edu/dl/concurrency-interest/). Huge thanks to them for releasing the
//! code into the public domain! Much of the documentation is also lifted from there. What follows
//! is a slightly modified version of their implementation notes from within the [source
//! file](http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/jsr166/src/main/java/util/concurrent/ConcurrentHashMap.java?revision=1.323&view=markup).
//!
//! The primary design goal of this hash table is to maintain concurrent readability (typically
//! method [`get`](HashMap::get), but also iterators and related methods) while minimizing update contention.
//! Secondary goals are to keep space consumption about the same or better than java.util.HashMap,
//! and to support high initial insertion rates on an empty table by many threads.
//!
//! This map usually acts as a binned (bucketed) hash table. Each key-value mapping is held in a
//! `BinEntry`. Most nodes are of type `BinEntry::Node` with hash, key, value, and a `next` field.
//! However, some other types of nodes exist: `BinEntry::TreeNode`s are arranged in balanced trees
//! instead of linear lists. Bins of type `BinEntry::Tree` hold the roots of sets of `BinEntry::TreeNode`s.
//! Some nodes are of type `BinEntry::Moved`; these "forwarding nodes" are placed at the
//! heads of bins during resizing. The Java version also has other special node types, but these
//! have not yet been implemented in this port. These special nodes are all either uncommon or
//! transient.
//!
/*
//! TODO: TreeNodes, ReservationNodes
*/
//! The table is lazily initialized to a power-of-two size upon the first insertion. Each bin in
//! the table normally contains a list of nodes (most often, the list has only zero or one
//! `BinEntry`). Table accesses require atomic reads, writes, and CASes.
//!
//! Insertion (via `put`) of the first node in an empty bin is performed by just CASing it to the
//! bin. This is by far the most common case for put operations under most key/hash distributions.
//! Other update operations (insert, delete, and replace) require locks. We do not want to waste
//! the space required to associate a distinct lock object with each bin, so we instead embed a
//! lock inside each node, and use the lock in the first node of a bin list as the lock for the
//! bin.
//!
//! Using the first node of a list as a lock does not by itself suffice though: When a node is
//! locked, any update must first validate that it is still the first node after locking it, and
//! retry if not. Because new nodes are always appended to lists, once a node is first in a bin, it
//! remains first until deleted or the bin becomes invalidated (upon resizing).
//!
//! The main disadvantage of per-bin locks is that other update operations on other nodes in a bin
//! list protected by the same lock can stall, for example when user `Eq` implementations or
//! mapping functions take a long time. However, statistically, under random hash codes, this is
//! not a common problem. Ideally, the frequency of nodes in bins follows a Poisson distribution
//! (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of about 0.5 on average,
//! given the resizing threshold of 0.75, although with a large variance because of resizing
//! granularity. Ignoring variance, the expected occurrences of list size `k` are `exp(-0.5) *
//! pow(0.5, k) / factorial(k)`. The first values are:
//!
//! ```text
//! 0: 0.60653066
//! 1: 0.30326533
//! 2: 0.07581633
//! 3: 0.01263606
//! 4: 0.00157952
//! 5: 0.00015795
//! 6: 0.00001316
//! 7: 0.00000094
//! 8: 0.00000006
//! more: less than 1 in ten million
//! ```
//!
//! Lock contention probability for two threads accessing distinct elements is roughly `1 / (8 *
//! #elements)` under random hashes.
//!
//! Actual hash code distributions encountered in practice sometimes deviate significantly from
//! uniform randomness. This includes the case when `N > (1<<30)`, so some keys MUST collide.
//! Similarly for dumb or hostile usages in which multiple keys are designed to have identical hash
//! codes or ones that differ only in masked-out high bits. So we use a secondary strategy that
//! applies when the number of nodes in a bin exceeds a threshold. These `BinEntry::Tree` bins use
//! a balanced tree to hold nodes (a specialized form of red-black trees), bounding search time to
//! `O(log N)`. Each search step in such a bin is at least twice as slow as in a regular list, but
//! given that N cannot exceed `(1<<64)` (before running out of addresses) this bounds search steps,
//! lock hold times, etc, to reasonable constants (roughly 100 nodes inspected per operation worst
//! case). `BinEntry::Tree` nodes (`BinEntry::TreeNode`s) also maintain the same `next` traversal
//! pointers as regular nodes, so can be traversed in iterators in a similar way.
//!
//! The table is resized when occupancy exceeds a percentage threshold (nominally, 0.75, but see
//! below). Any thread noticing an overfull bin may assist in resizing after the initiating thread
//! allocates and sets up the replacement array. However, rather than stalling, these other threads
//! may proceed with insertions etc. The use of `BinEntry::Tree` bins shields us from the worst case
//! effects of overfilling while resizes are in progress. Resizing proceeds by transferring bins,
//! one by one, from the table to the next table. However, threads claim small blocks of indices to
//! transfer (via the field `transfer_index`) before doing so, reducing contention. A generation
//! stamp in the field `size_ctl` ensures that resizings do not overlap. Because we are using
//! power-of-two expansion, the elements from each bin must either stay at same index, or move with
//! a power of two offset. We eliminate unnecessary node creation by catching cases where old nodes
//! can be reused because their next fields won't change. On average, only about one-sixth of them
//! need cloning when a table doubles. The nodes they replace will be garbage collectible as soon
//! as they are no longer referenced by any reader thread that may be in the midst of concurrently
//! traversing table. Upon transfer, the old table bin contains only a special forwarding node
//! (`BinEntry::Moved`) that contains the next table as its key. On encountering a forwarding node,
//! access and update operations restart, using the new table.
//!
//! Each bin transfer requires its bin lock, which can stall waiting for locks while resizing.
//! However, because other threads can join in and help resize rather than contend for locks,
//! average aggregate waits become shorter as resizing progresses. The transfer operation must
//! also ensure that all accessible bins in both the old and new table are usable by any traversal.
//! This is arranged in part by proceeding from the last bin `table.length - 1` up towards the
//! first. Upon seeing a forwarding node, traversals (see `iter::traverser::Traverser`) arrange to
//! move to the new table without revisiting nodes. To ensure that no intervening nodes are
//! skipped even when moved out of order, a stack (see class `iter::traverser::TableStack`) is
//! created on first encounter of a forwarding node during a traversal, to maintain its place if
//! later processing the current table. The need for these save/restore mechanics is relatively
//! rare, but when one forwarding node is encountered, typically many more will be. So `Traversers`
//! use a simple caching scheme to avoid creating so many new `TableStack` nodes. (Thanks to Peter
//! Levart for suggesting use of a stack here.)
//!
/* TODO:
//!
//! Lazy table initialization minimizes footprint until first use, and also avoids resizings when
//! the first operation is from a `from_iter`, `From::from`, or deserialization. These cases
//! attempt to override the initial capacity settings, but harmlessly fail to take effect in cases
//! of races.
*/
/*
//! TODO:
//!
//! The element count is maintained using a specialization of LongAdder. We need to incorporate a
//! specialization rather than just use a LongAdder in order to access implicit contention-sensing
//! that leads to creation of multiple CounterCells. The counter mechanics avoid contention on
//! updates but can encounter cache thrashing if read too frequently during concurrent access. To
//! avoid reading so often, resizing under contention is attempted only upon adding to a bin
//! already holding two or more nodes. Under uniform hash distributions, the probability of this
//! occurring at threshold is around 13%, meaning that only about 1 in 8 puts check threshold (and
//! after resizing, many fewer do so).
//! */
//!
/* NOTE that we don't actually use most of the Java Code's complicated comparisons and tiebreakers
* since we require total ordering among the keys via `Ord` as opposed to a runtime check against
* Java's `Comparable` interface. */
//! `BinEntry::Tree` bins use a special form of comparison for search and related operations (which
//! is the main reason we cannot use existing collections such as tree maps). The contained tree
//! is primarily ordered by hash value, then by [`cmp`](std::cmp::Ord::cmp) order on keys. The
//! red-black balancing code is updated from pre-jdk collections (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
//! based in turn on Cormen, Leiserson, and Rivest "Introduction to Algorithms" (CLR).
//!
//! `BinEntry::Tree` bins also require an additional locking mechanism. While list traversal is
//! always possible by readers even during updates, tree traversal is not, mainly because of
//! tree-rotations that may change the root node and/or its linkages. Tree bins include a simple
//! read-write lock mechanism parasitic on the main bin-synchronization strategy: Structural
//! adjustments associated with an insertion or removal are already bin-locked (and so cannot
//! conflict with other writers) but must wait for ongoing readers to finish. Since there can be
//! only one such waiter, we use a simple scheme using a single `waiter` field to block writers.
//! However, readers need never block. If the root lock is held, they proceed along the slow
//! traversal path (via next-pointers) until the lock becomes available or the list is exhausted,
//! whichever comes first. These cases are not fast, but maximize aggregate expected throughput.
//!
//! ## Garbage collection
//!
//! The Java implementation can rely on Java's runtime garbage collection to safely deallocate
//! deleted or removed nodes, keys, and values. Since Rust does not have such a runtime, we must
//! ensure through some other mechanism that we do not drop values before all references to them
//! have gone away. We do this using [`seize`], which provides a garbage collection scheme based
//! on batch reference-counting. This forces us to make certain API changes such as requiring
//! `Guard` arguments to many methods or wrapping the return values, but provides much more efficient
//! operation than if every individual value had to be atomically reference-counted.
//!
//! [`seize`]: https://docs.rs/seize
//! [`papaya`]: https://docs.rs/papaya
//! [`dashmap`]: https://docs.rs/dashmap
#![deny(
missing_docs,
missing_debug_implementations,
unreachable_pub,
rustdoc::broken_intra_doc_links
)]
#![warn(rust_2018_idioms)]
#![allow(clippy::cognitive_complexity)]
mod map;
mod map_ref;
mod node;
mod raw;
mod reclaim;
mod set;
mod set_ref;
#[cfg(feature = "rayon")]
mod rayon_impls;
#[cfg(feature = "serde")]
mod serde_impls;
/// Iterator types.
pub mod iter;
pub use map::{HashMap, TryInsertError};
pub use map_ref::HashMapRef;
pub use set::HashSet;
pub use set_ref::HashSetRef;
pub use seize::Guard;
/// Default hash builder for [`HashMap`].
// NOTE: This and the below exists solely to avoid ahash being part of the public flurry API,
// so that we can bump the ahash major version without bumping flurry's major version.
#[derive(Debug, Clone, Default)]
// repr(transparent): layout-identical to the wrapped ahash::RandomState.
#[repr(transparent)]
pub struct DefaultHashBuilder(ahash::RandomState);

/// Default hasher for [`HashMap`].
#[derive(Debug, Clone)]
// repr(transparent): layout-identical to the wrapped ahash::AHasher.
#[repr(transparent)]
pub struct DefaultHasher(ahash::AHasher);
impl std::hash::BuildHasher for DefaultHashBuilder {
type Hasher = DefaultHasher;
fn build_hasher(&self) -> Self::Hasher {
DefaultHasher(self.0.build_hasher())
}
// NOTE: also implement hash_one so we can forward to ahash::RandomState's optimized impl.
fn hash_one<T: std::hash::Hash>(&self, x: T) -> u64
where
Self: Sized,
{
self.0.hash_one(x)
}
}
/// Expands to `Hasher::write_*` methods that forward directly to the wrapped
/// ahash hasher, preserving its per-integer-width entry points.
macro_rules! forward_writes {
    ($($method:ident: $int:ty),* $(,)?) => {
        $(
            fn $method(&mut self, i: $int) {
                self.0.$method(i)
            }
        )*
    };
}

impl std::hash::Hasher for DefaultHasher {
    fn finish(&self) -> u64 {
        self.0.finish()
    }

    fn write(&mut self, bytes: &[u8]) {
        self.0.write(bytes)
    }

    forward_writes! {
        write_u8: u8,
        write_u16: u16,
        write_u32: u32,
        write_u64: u64,
        write_u128: u128,
        write_usize: usize,
        write_i8: i8,
        write_i16: i16,
        write_i32: i32,
        write_i64: i64,
        write_i128: i128,
        write_isize: isize,
    }
}

3552
vendor/flurry/src/map.rs vendored Normal file

File diff suppressed because it is too large Load Diff

306
vendor/flurry/src/map_ref.rs vendored Normal file
View File

@@ -0,0 +1,306 @@
use crate::iter::*;
use crate::reclaim::{Guard, GuardRef};
use crate::{HashMap, TryInsertError};
use std::borrow::Borrow;
use std::fmt::{self, Debug, Formatter};
use std::hash::{BuildHasher, Hash};
use std::ops::Index;
/// A reference to a [`HashMap`], constructed with [`HashMap::pin`] or [`HashMap::with_guard`].
///
/// The current thread will be pinned for the duration of this reference.
/// Keep in mind that this prevents the collection of garbage generated by the map.
pub struct HashMapRef<'map, K, V, S = crate::DefaultHashBuilder> {
    // The underlying map; every operation on the ref delegates to it.
    pub(crate) map: &'map HashMap<K, V, S>,
    // Guard passed to every delegated call; owned when built via `pin`,
    // borrowed when built via `with_guard`.
    guard: GuardRef<'map>,
}
impl<K, V, S> HashMap<K, V, S> {
    /// Get a reference to this map with the current thread pinned.
    ///
    /// Keep in mind that for as long as you hold onto this, you are preventing the collection of
    /// garbage generated by the map.
    pub fn pin(&self) -> HashMapRef<'_, K, V, S> {
        HashMapRef {
            map: self,
            guard: GuardRef::Owned(self.guard()),
        }
    }

    /// Get a reference to this map with the given guard.
    pub fn with_guard<'g>(&'g self, guard: &'g Guard<'_>) -> HashMapRef<'g, K, V, S> {
        HashMapRef {
            map: self,
            guard: GuardRef::Ref(guard),
        }
    }
}
impl<K, V, S> HashMapRef<'_, K, V, S> {
/// Returns the number of entries in the map.
///
/// See also [`HashMap::len`].
pub fn len(&self) -> usize {
self.map.len()
}
/// Returns `true` if the map is empty. Otherwise returns `false`.
///
/// See also [`HashMap::is_empty`].
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
/// An iterator visiting all key-value pairs in arbitrary order.
///
/// The iterator element type is `(&'g K, &'g V)`.
///
/// See also [`HashMap::iter`].
pub fn iter(&self) -> Iter<'_, K, V> {
self.map.iter(&self.guard)
}
/// An iterator visiting all keys in arbitrary order.
///
/// The iterator element type is `&'g K`.
///
/// See also [`HashMap::keys`].
pub fn keys(&self) -> Keys<'_, K, V> {
self.map.keys(&self.guard)
}
/// An iterator visiting all values in arbitrary order.
///
/// The iterator element type is `&'g V`.
///
/// See also [`HashMap::values`].
pub fn values(&self) -> Values<'_, K, V> {
self.map.values(&self.guard)
}
}
impl<K, V, S> HashMapRef<'_, K, V, S>
where
    K: Clone + Ord,
{
    /// Tries to reserve capacity for at least `additional` more elements to be inserted in the
    /// `HashMap`.
    ///
    /// The collection may reserve more space to avoid frequent reallocations.
    ///
    /// See also [`HashMap::reserve`].
    pub fn reserve(&self, additional: usize) {
        let guard = &self.guard;
        self.map.reserve(additional, guard);
    }
}
impl<K, V, S> HashMapRef<'_, K, V, S>
where
    K: Hash + Ord,
    S: BuildHasher,
{
    /// Returns `true` if the map contains a value for the specified key.
    ///
    /// See also [`HashMap::contains_key`].
    pub fn contains_key<Q>(&self, key: &Q) -> bool
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Ord,
    {
        let guard = &self.guard;
        self.map.contains_key(key, guard)
    }

    /// Returns a reference to the value corresponding to the key.
    ///
    /// See also [`HashMap::get`].
    #[inline]
    pub fn get<'g, Q>(&'g self, key: &Q) -> Option<&'g V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Ord,
    {
        let guard = &self.guard;
        self.map.get(key, guard)
    }

    /// Returns the key-value pair corresponding to `key`.
    ///
    /// See also [`HashMap::get_key_value`].
    #[inline]
    pub fn get_key_value<'g, Q>(&'g self, key: &Q) -> Option<(&'g K, &'g V)>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Ord,
    {
        let guard = &self.guard;
        self.map.get_key_value(key, guard)
    }
}
impl<K, V, S> HashMapRef<'_, K, V, S>
where
K: Clone + Ord,
{
/// Clears the map, removing all key-value pairs.
///
/// See also [`HashMap::clear`].
pub fn clear(&self) {
self.map.clear(&self.guard);
}
}
impl<K, V, S> HashMapRef<'_, K, V, S>
where
K: Sync + Send + Clone + Hash + Ord,
V: Sync + Send,
S: BuildHasher,
{
/// Inserts a key-value pair into the map.
///
/// See also [`HashMap::insert`].
pub fn insert(&self, key: K, value: V) -> Option<&'_ V> {
self.map.insert(key, value, &self.guard)
}
/// Inserts a key-value pair into the map unless the key already exists.
///
/// See also [`HashMap::try_insert`].
#[inline]
pub fn try_insert(&self, key: K, value: V) -> Result<&'_ V, TryInsertError<'_, V>> {
self.map.try_insert(key, value, &self.guard)
}
/// If the value for the specified `key` is present, attempts to
/// compute a new mapping given the key and its current mapped value.
///
/// See also [`HashMap::compute_if_present`].
pub fn compute_if_present<'g, Q, F>(&'g self, key: &Q, remapping_function: F) -> Option<&'g V>
where
K: Borrow<Q>,
Q: ?Sized + Hash + Ord,
F: FnOnce(&K, &V) -> Option<V>,
{
self.map
.compute_if_present(key, remapping_function, &self.guard)
}
/// Removes a key-value pair from the map, and returns the removed value (if any).
///
/// See also [`HashMap::remove`].
pub fn remove<'g, Q>(&'g self, key: &Q) -> Option<&'g V>
where
K: Borrow<Q>,
Q: ?Sized + Hash + Ord,
{
self.map.remove(key, &self.guard)
}
/// Removes a key from the map, returning the stored key and value if the
/// key was previously in the map.
///
/// See also [`HashMap::remove_entry`].
pub fn remove_entry<'g, Q>(&'g self, key: &Q) -> Option<(&'g K, &'g V)>
where
K: Borrow<Q>,
Q: ?Sized + Hash + Ord,
{
self.map.remove_entry(key, &self.guard)
}
/// Retains only the elements specified by the predicate.
///
/// See also [`HashMap::retain`].
pub fn retain<F>(&self, f: F)
where
F: FnMut(&K, &V) -> bool,
{
self.map.retain(f, &self.guard);
}
/// Retains only the elements specified by the predicate.
///
/// See also [`HashMap::retain_force`].
pub fn retain_force<F>(&self, f: F)
where
F: FnMut(&K, &V) -> bool,
{
self.map.retain_force(f, &self.guard);
}
}
impl<'g, K, V, S> IntoIterator for &'g HashMapRef<'_, K, V, S> {
type IntoIter = Iter<'g, K, V>;
type Item = (&'g K, &'g V);
fn into_iter(self) -> Self::IntoIter {
self.map.iter(&self.guard)
}
}
impl<K, V, S> Debug for HashMapRef<'_, K, V, S>
where
    K: Debug,
    V: Debug,
{
    /// Formats the map as `{key: value, ...}` using each entry's `Debug` impl.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_map();
        builder.entries(self.iter());
        builder.finish()
    }
}
impl<K, V, S> Clone for HashMapRef<'_, K, V, S> {
    fn clone(&self) -> Self {
        // NOTE: the clone re-pins the map, so it always carries its own owned
        // guard — even if `self` was created via `with_guard` and merely
        // borrows one.
        self.map.pin()
    }
}
impl<K, V, S> PartialEq for HashMapRef<'_, K, V, S>
where
K: Hash + Ord,
V: PartialEq,
S: BuildHasher,
{
fn eq(&self, other: &Self) -> bool {
self.map.guarded_eq(other.map, &self.guard, &other.guard)
}
}
impl<K, V, S> PartialEq<HashMap<K, V, S>> for HashMapRef<'_, K, V, S>
where
    K: Hash + Ord,
    V: PartialEq,
    S: BuildHasher,
{
    /// Compares this reference's map against a bare `HashMap`, pinning the
    /// latter for the duration of the comparison.
    fn eq(&self, other: &HashMap<K, V, S>) -> bool {
        let other_guard = other.guard();
        self.map.guarded_eq(other, &self.guard, &other_guard)
    }
}
impl<K, V, S> PartialEq<HashMapRef<'_, K, V, S>> for HashMap<K, V, S>
where
    K: Hash + Ord,
    V: PartialEq,
    S: BuildHasher,
{
    /// Mirror of the `HashMapRef == HashMap` comparison.
    fn eq(&self, other: &HashMapRef<'_, K, V, S>) -> bool {
        let own_guard = self.guard();
        self.guarded_eq(other.map, &own_guard, &other.guard)
    }
}
// Equality on `HashMapRef` is a total equivalence relation whenever the
// value type's is; see the `PartialEq` impl above for the actual comparison.
impl<K, V, S> Eq for HashMapRef<'_, K, V, S>
where
    K: Hash + Ord,
    V: Eq,
    S: BuildHasher,
{
}
impl<K, Q, V, S> Index<&'_ Q> for HashMapRef<'_, K, V, S>
where
    K: Hash + Ord + Borrow<Q>,
    Q: ?Sized + Hash + Ord,
    S: BuildHasher,
{
    type Output = V;

    /// Looks up `key`, panicking if it is absent (like `std` map indexing).
    fn index(&self, key: &Q) -> &V {
        match self.get(key) {
            Some(value) => value,
            None => panic!("no entry found for key"),
        }
    }
}

1628
vendor/flurry/src/node.rs vendored Normal file

File diff suppressed because it is too large Load Diff

325
vendor/flurry/src/raw/mod.rs vendored Normal file
View File

@@ -0,0 +1,325 @@
use seize::Linked;
use crate::node::*;
use crate::reclaim::{self, Atomic, Collector, Guard, Shared};
use std::borrow::Borrow;
use std::fmt::Debug;
use std::sync::atomic::Ordering;
// A single bin array of the map, plus the forwarding state used while it is
// being resized into a larger table.
#[derive(Debug)]
pub(crate) struct Table<K, V> {
    // the array of bin heads; each non-null bin points at a linked list,
    // tree, or the shared `moved` forwarding entry below
    bins: Box<[Atomic<BinEntry<K, V>>]>,
    // since a Moved does not contain associated information,
    // one instance is sufficient and shared across all bins in this table
    moved: Atomic<BinEntry<K, V>>,
    // since the information content of moved nodes is the same across
    // the table, we share this information
    //
    // safety: next_table is a valid pointer if it was read as consequence of loading _this_
    // table as `map::HashMap.table` and reading a BinEntry::Moved while still holding the
    // guard used for this load:
    //
    // When loading the current table of the HashMap with a guard g, the current thread will be
    // marked as active by g. This happens _before_ the resize which put the Moved entry into
    // this table finishes, as otherwise a different table would have been loaded (see
    // `map::HashMap::transfer`).
    //
    // Hence:
    //
    // - When trying to access next_table during the current resize, it points to
    //   map::HashMap.next_table and is thus valid.
    //
    // - After the current resize and before another resize, `next_table == map::HashMap.table`
    //   as the "next table" it pointed to during the resize has become the current table. Thus,
    //   next_table is still valid.
    //
    // - The above is true until a subsequent resize ends, at which point `map::HashMap.table` is
    //   set to another new table != next_table and next_table is `Guard::retire_shared`ed
    //   (again, see `map::HashMap::transfer`). At this point, next_table is not referenced by the
    //   map anymore, however `Guard::retire_shared` guarantees that next_table remains valid for at least the
    //   lifetime of g and, in particular, cannot be dropped before _this_ table.
    //
    // - After releasing g, either the current resize is finished and operations on the map
    //   cannot access next_table anymore (as a more recent table will be loaded as the current
    //   table; see once again `map::HashMap::transfer`), or the argument is as above.
    //
    // Since finishing a resize is the only time a table is `defer_destroy`ed, the above covers
    // all cases.
    next_table: Atomic<Table<K, V>>,
}
impl<K, V> Table<K, V> {
/// Creates a table that uses `bins` directly as its bin array.
pub(crate) fn from(bins: Vec<Atomic<BinEntry<K, V>>>, collector: &Collector) -> Self {
    Self {
        bins: bins.into_boxed_slice(),
        // allocate the single forwarding entry shared by all bins
        moved: Atomic::from(Shared::boxed(BinEntry::Moved, collector)),
        next_table: Atomic::null(),
    }
}

/// Creates a table with `bins` empty bins.
pub(crate) fn new(bins: usize, collector: &Collector) -> Self {
    Self::from(vec![Atomic::null(); bins], collector)
}

/// Returns `true` if this table has no bins at all (not "no entries").
pub(crate) fn is_empty(&self) -> bool {
    self.bins.is_empty()
}

/// Returns the number of bins (not entries) in this table.
pub(crate) fn len(&self) -> usize {
    self.bins.len()
}

/// Returns the shared `BinEntry::Moved` for this table, first making sure
/// that `self.next_table` points at `for_table` (installing it if needed).
pub(crate) fn get_moved<'g>(
    &'g self,
    for_table: Shared<'g, Table<K, V>>,
    guard: &'g Guard<'_>,
) -> Shared<'g, BinEntry<K, V>> {
    match self.next_table(guard) {
        t if t.is_null() => {
            // if no next table is yet associated with this table,
            // create one and store it in `self.next_table`
            match self.next_table.compare_exchange(
                Shared::null(),
                for_table,
                Ordering::SeqCst,
                Ordering::Relaxed,
                guard,
            ) {
                Ok(_) => {}
                Err(changed) => {
                    // another thread raced us; it must have installed the
                    // very same next table we were handed
                    assert!(!changed.current.is_null());
                    assert_eq!(changed.current, for_table);
                }
            }
        }
        t => {
            // already set — it must agree with the caller's table
            assert_eq!(t, for_table);
        }
    }
    // return a shared pointer to BinEntry::Moved
    self.moved.load(Ordering::SeqCst, guard)
}
/// Walks the bin that starts at `bin` and returns a pointer to the entry
/// whose hash and key match, or null if there is no such entry.
///
/// If the bin has been `Moved` by a resize, the search follows
/// `next_table` pointers until it reaches a table that actually holds the
/// bin's entries.
pub(crate) fn find<'g, Q>(
    &'g self,
    bin: &Linked<BinEntry<K, V>>,
    hash: u64,
    key: &Q,
    guard: &'g Guard<'_>,
) -> Shared<'g, BinEntry<K, V>>
where
    K: Borrow<Q>,
    Q: ?Sized + Ord,
{
    match **bin {
        BinEntry::Node(_) => {
            // linear scan of the linked list headed by `bin`
            let mut node = bin;
            loop {
                let BinEntry::Node(ref n) = **node else {
                    unreachable!("BinEntry::Node only points to BinEntry::Node");
                };
                if n.hash == hash && n.key.borrow() == key {
                    // safety: this cast is fine because find
                    // is only used to return shared references
                    return Shared::from(node as *const _ as *mut _);
                }
                let next = n.next.load(Ordering::SeqCst, guard);
                if next.is_null() {
                    return Shared::null();
                }
                // safety: next will only be dropped, if bin are dropped. bin was read under
                // a guard, and so cannot be dropped until we drop the guard at the earliest.
                node = unsafe { next.deref() };
            }
        }
        BinEntry::Moved => {
            // safety: `self` is a reference to the old table. We got that under the given Guard.
            // Since we have not yet dropped that guard, _this_ table has not been garbage collected,
            // and so the _later_ table in `next_table`, _definitely_ hasn't.
            let mut table = unsafe { self.next_table(guard).deref() };
            loop {
                if table.is_empty() {
                    return Shared::null();
                }
                let bini = table.bini(hash);
                let bin = table.bin(bini, guard);
                if bin.is_null() {
                    return Shared::null();
                }
                // safety: the table is protected by the guard, and so is the bin.
                let bin = unsafe { bin.deref() };
                match **bin {
                    BinEntry::Node(_) | BinEntry::Tree(_) => {
                        break table.find(bin, hash, key, guard)
                    }
                    BinEntry::Moved => {
                        // keep chasing forwarding pointers
                        // safety: same as above.
                        table = unsafe { table.next_table(guard).deref() };
                        continue;
                    }
                    BinEntry::TreeNode(_) => unreachable!("`find` was called on a Moved entry pointing to a TreeNode, which cannot be the first entry in a bin"),
                }
            }
        }
        BinEntry::TreeNode(_) => {
            unreachable!(
                "`find` was called on a TreeNode, which cannot be the first entry in a bin"
            );
        }
        BinEntry::Tree(_) => {
            // safety: this cast is fine because TreeBin::find
            // only needs a shared reference to the bin
            TreeBin::find(Shared::from(bin as *const _ as *mut _), hash, key, guard)
        }
    }
}
/// Frees every node (and its value) still stored in this table's bins,
/// leaving the bin array empty. The shared `Moved` forwarding entry is
/// deliberately *not* freed here — that is `Drop`'s job.
pub(crate) fn drop_bins(&mut self) {
    // safety: we have &mut self _and_ all references we have returned are bound to the
    // lifetime of their borrow of self, so there cannot be any outstanding references to
    // anything in the map.
    let guard = unsafe { Guard::unprotected() };
    for bin in Vec::from(std::mem::replace(&mut self.bins, vec![].into_boxed_slice())) {
        if bin.load(Ordering::SeqCst, &guard).is_null() {
            // bin was never used
            continue;
        }
        // use deref first so we don't turn shared BinEntry::Moved pointers to owned
        // note that dropping the shared Moved, if it exists, is the responsibility
        // of `drop`
        // safety: same as above
        let bin_entry = unsafe { bin.load(Ordering::SeqCst, &guard).deref() };
        match **bin_entry {
            BinEntry::Moved => {}
            BinEntry::Node(_) => {
                // safety: same as above + we own the bin - Nodes are not shared across the table
                let mut p = unsafe { bin.into_box() };
                loop {
                    // safety below:
                    // we're dropping the entire map, so no-one else is accessing it.
                    // we replaced the bin with a NULL, so there's no future way to access it
                    // either; we own all the nodes in the list.
                    let BinEntry::Node(node) = p.value else {
                        unreachable!();
                    };
                    // first, drop the value in this node
                    let _ = unsafe { node.value.into_box() };
                    // then we move to the next node
                    if node.next.load(Ordering::SeqCst, &guard).is_null() {
                        break;
                    }
                    p = unsafe { node.next.into_box() };
                }
            }
            BinEntry::Tree(_) => {
                // safety: same as for BinEntry::Node
                let p = unsafe { bin.into_box() };
                let BinEntry::Tree(bin) = p.value else {
                    unreachable!()
                };
                // TreeBin::drop will take care of freeing the contained TreeNodes and their values
                drop(bin);
            }
            BinEntry::TreeNode(_) => unreachable!(
                "The head of a bin cannot be a TreeNode directly without BinEntry::Tree"
            ),
        }
    }
}
}
// Dropping a `Table` frees only the shared forwarding node; any remaining
// entries must have been freed earlier via `drop_bins` or moved during a
// resize (checked below in debug builds).
impl<K, V> Drop for Table<K, V> {
    fn drop(&mut self) {
        // safety: we have &mut self _and_ all references we have returned are bound to the
        // lifetime of their borrow of self, so there cannot be any outstanding references to
        // anything in the map.
        let guard = unsafe { Guard::unprotected() };
        // since BinEntry::Nodes are either dropped by drop_bins or transferred to a new table,
        // all bins are empty or contain a Shared pointing to shared the BinEntry::Moved (if
        // self.bins was not replaced by drop_bins anyway)
        let bins = Vec::from(std::mem::replace(&mut self.bins, vec![].into_boxed_slice()));
        // when testing, we check the above invariant. in production, we assume it to be true
        if cfg!(debug_assertions) {
            for bin in bins.iter() {
                let bin = bin.load(Ordering::SeqCst, &guard);
                if bin.is_null() {
                    continue;
                } else {
                    // safety: we have mut access to self, so no-one else will drop this value under us.
                    let bin = unsafe { bin.deref() };
                    if let BinEntry::Moved = **bin {
                    } else {
                        unreachable!("dropped table with non-empty bin");
                    }
                }
            }
        }
        // as outlined above, at this point `bins` may still contain pointers to the shared
        // forwarding node. dropping `bins` here makes sure there is no way to accidentally access
        // the shared Moved after it gets dropped below.
        drop(bins);
        // we need to drop the shared forwarding node (since it is heap allocated).
        // Note that this needs to happen _independently_ of whether or not there was
        // a previous call to drop_bins.
        let moved = self.moved.swap(Shared::null(), Ordering::SeqCst, &guard);
        assert!(
            !moved.is_null(),
            "self.moved is initialized together with the table"
        );
        // safety: we have mut access to self, so no-one else will drop this value under us.
        let moved = unsafe { moved.into_box() };
        drop(moved);
        // NOTE that the current table _is not_ responsible for `defer_destroy`ing the _next_ table
    }
}
impl<K, V> Table<K, V> {
    /// Returns the index of the bin for a key with the given `hash`.
    #[inline]
    pub(crate) fn bini(&self, hash: u64) -> usize {
        // NOTE: relies on the bin count being a power of two, so that the
        // mask is equivalent to `hash % self.bins.len()`
        let mask = self.bins.len() as u64 - 1;
        (hash & mask) as usize
    }

    /// Loads the head entry of bin `i`.
    #[inline]
    pub(crate) fn bin<'g>(&'g self, i: usize, guard: &'g Guard<'_>) -> Shared<'g, BinEntry<K, V>> {
        self.bins[i].load(Ordering::Acquire, guard)
    }

    /// Compare-and-swaps the head of bin `i` from `current` to `new`,
    /// returning the observed head on failure.
    #[inline]
    #[allow(clippy::type_complexity)]
    pub(crate) fn cas_bin<'g>(
        &'g self,
        i: usize,
        current: Shared<'_, BinEntry<K, V>>,
        new: Shared<'g, BinEntry<K, V>>,
        guard: &'g Guard<'_>,
    ) -> Result<Shared<'g, BinEntry<K, V>>, reclaim::CompareExchangeError<'g, BinEntry<K, V>>> {
        self.bins[i].compare_exchange(current, new, Ordering::AcqRel, Ordering::Acquire, guard)
    }

    /// Unconditionally stores `new` as the head of bin `i`.
    #[inline]
    pub(crate) fn store_bin(&self, i: usize, new: Shared<'_, BinEntry<K, V>>) {
        self.bins[i].store(new, Ordering::Release)
    }

    /// Loads the pointer to the next (resize-target) table; null if no
    /// resize has associated one with this table yet.
    #[inline]
    pub(crate) fn next_table<'g>(&'g self, guard: &'g Guard<'_>) -> Shared<'g, Table<K, V>> {
        self.next_table.load(Ordering::SeqCst, guard)
    }
}

340
vendor/flurry/src/rayon_impls.rs vendored Normal file
View File

@@ -0,0 +1,340 @@
use crate::{HashMap, HashMapRef, HashSet, HashSetRef};
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
use std::hash::{BuildHasher, Hash};
impl<K, V, S> FromParallelIterator<(K, V)> for HashMap<K, V, S>
where
    K: Clone + Hash + Ord + Send + Sync,
    V: Send + Sync,
    S: BuildHasher + Default + Sync,
{
    /// Collects the pairs of a parallel iterator into a fresh map.
    fn from_par_iter<I>(par_iter: I) -> Self
    where
        I: IntoParallelIterator<Item = (K, V)>,
    {
        let mut map = Self::with_hasher(S::default());
        map.par_extend(par_iter);
        map
    }
}
impl<K, V, S> ParallelExtend<(K, V)> for HashMap<K, V, S>
where
    K: Clone + Hash + Ord + Send + Sync,
    V: Send + Sync,
    S: BuildHasher + Sync,
{
    /// Extends the map in parallel; delegates to the `&HashMap` impl since
    /// a shared reference is all concurrent insertion needs.
    fn par_extend<I>(&mut self, par_iter: I)
    where
        I: IntoParallelIterator<Item = (K, V)>,
    {
        let mut shared: &HashMap<K, V, S> = self;
        shared.par_extend(par_iter);
    }
}
impl<K, V, S> ParallelExtend<(K, V)> for &HashMap<K, V, S>
where
    K: Clone + Hash + Ord + Send + Sync,
    V: Send + Sync,
    S: BuildHasher + Sync,
{
    /// Extends the map in parallel. Each rayon worker pins the map once
    /// and reuses that guard for all the insertions it performs.
    fn par_extend<I>(&mut self, par_iter: I)
    where
        I: IntoParallelIterator<Item = (K, V)>,
    {
        par_iter.into_par_iter().for_each_init(
            || self.guard(),
            |guard, (key, value)| {
                self.insert(key, value, guard);
            },
        );
    }
}
impl<'map, K, V, S> ParallelExtend<(K, V)> for HashMapRef<'map, K, V, S>
where
    K: Clone + Hash + Ord + Send + Sync,
    V: Send + Sync,
    S: BuildHasher + Sync,
{
    /// Extends the referenced map in parallel via the `&HashMap` impl.
    fn par_extend<I>(&mut self, par_iter: I)
    where
        I: IntoParallelIterator<Item = (K, V)>,
    {
        let mut inner = self.map;
        inner.par_extend(par_iter);
    }
}
impl<K, S> FromParallelIterator<K> for HashSet<K, S>
where
    K: Clone + Hash + Ord + Send + Sync,
    S: BuildHasher + Default + Sync,
{
    /// Collects the elements of a parallel iterator into a fresh set.
    fn from_par_iter<I>(par_iter: I) -> Self
    where
        I: IntoParallelIterator<Item = K>,
    {
        let mut set = Self::with_hasher(S::default());
        set.par_extend(par_iter);
        set
    }
}
impl<K, S> ParallelExtend<K> for HashSet<K, S>
where
    K: Clone + Hash + Ord + Send + Sync,
    S: BuildHasher + Sync,
{
    /// Extends the set in parallel; a shared reference is sufficient.
    fn par_extend<I>(&mut self, par_iter: I)
    where
        I: IntoParallelIterator<Item = K>,
    {
        let mut shared: &HashSet<K, S> = self;
        shared.par_extend(par_iter);
    }
}
impl<K, S> ParallelExtend<K> for &HashSet<K, S>
where
    K: Clone + Hash + Ord + Send + Sync,
    S: BuildHasher + Sync,
{
    /// Extends the set in parallel. A set is a map to `()`, so the items
    /// are adapted to pairs and pushed into the inner map.
    fn par_extend<I>(&mut self, par_iter: I)
    where
        I: IntoParallelIterator<Item = K>,
    {
        let pairs = par_iter.into_par_iter().map(|key| (key, ()));
        (&self.map).par_extend(pairs);
    }
}
impl<'set, K, S> ParallelExtend<K> for HashSetRef<'set, K, S>
where
    K: Clone + Hash + Ord + Send + Sync,
    S: BuildHasher + Sync,
{
    /// Extends the referenced set in parallel via the `&HashSet` impl.
    fn par_extend<I>(&mut self, par_iter: I)
    where
        I: IntoParallelIterator<Item = K>,
    {
        let mut inner = self.set;
        inner.par_extend(par_iter);
    }
}
// Smoke tests for the rayon integration: building maps/sets from parallel
// iterators and extending existing ones (owned and pinned variants).
#[cfg(test)]
mod test {
    use crate::{HashMap, HashSet};
    use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend};

    #[test]
    fn hm_from_empty_parallel_iter() {
        let to_create_from: Vec<(i32, i32)> = Vec::new();
        let created_map: HashMap<i32, i32> = HashMap::from_par_iter(to_create_from.into_par_iter());
        assert_eq!(created_map.len(), 0);
    }

    #[test]
    fn hm_from_large_parallel_iter() {
        let mut to_create_from: Vec<(i32, i32)> = Vec::new();
        for i in 0..100 {
            to_create_from.push((i + 100, i * 10));
        }
        let created_map: HashMap<i32, i32> = HashMap::from_par_iter(to_create_from.into_par_iter());
        assert_eq!(created_map.len(), 100);
        let guard = created_map.guard();
        assert_eq!(created_map.get(&100, &guard), Some(&0));
        assert_eq!(created_map.get(&199, &guard), Some(&990));
    }

    #[test]
    fn hs_from_empty_parallel_iter() {
        let to_create_from: Vec<i32> = Vec::new();
        let created_set: HashSet<i32> = HashSet::from_par_iter(to_create_from.into_par_iter());
        assert_eq!(created_set.len(), 0);
    }

    #[test]
    fn hs_from_large_parallel_iter() {
        let mut to_create_from: Vec<(i32, i32)> = Vec::new();
        for i in 0..100 {
            to_create_from.push((i + 100, i * 10));
        }
        let created_map: HashSet<(i32, i32)> =
            HashSet::from_par_iter(to_create_from.into_par_iter());
        assert_eq!(created_map.len(), 100);
        let guard = created_map.guard();
        assert!(created_map.contains(&(100, 0), &guard));
        assert!(!created_map.contains(&(100, 10000), &guard));
    }

    #[test]
    fn hm_parallel_extend_by_nothing() {
        let to_extend_with = Vec::new();
        let mut map = HashMap::new();
        {
            let guard = map.guard();
            map.insert(1, 2, &guard);
            map.insert(3, 4, &guard);
        }
        map.par_extend(to_extend_with.into_par_iter());
        // existing entries must survive an empty extend
        assert_eq!(map.len(), 2);
        let guard = map.guard();
        assert_eq!(map.get(&1, &guard), Some(&2));
        assert_eq!(map.get(&3, &guard), Some(&4));
    }

    #[test]
    fn hm_parallel_extend_by_a_bunch() {
        let mut to_extend_with = Vec::new();
        for i in 0..100 {
            to_extend_with.push((i + 100, i * 10));
        }
        let mut map = HashMap::new();
        {
            let guard = map.guard();
            map.insert(1, 2, &guard);
            map.insert(3, 4, &guard);
        }
        map.par_extend(to_extend_with.into_par_iter());
        // both the pre-existing and the newly added entries are visible
        assert_eq!(map.len(), 102);
        let guard = map.guard();
        assert_eq!(map.get(&1, &guard), Some(&2));
        assert_eq!(map.get(&3, &guard), Some(&4));
        assert_eq!(map.get(&100, &guard), Some(&0));
        assert_eq!(map.get(&199, &guard), Some(&990));
    }

    #[test]
    fn hm_ref_parallel_extend_by_nothing() {
        let to_extend_with = Vec::new();
        let map = HashMap::new();
        let guard = map.guard();
        map.insert(1, 2, &guard);
        map.insert(3, 4, &guard);
        map.pin().par_extend(to_extend_with.into_par_iter());
        assert_eq!(map.len(), 2);
        assert_eq!(map.get(&1, &guard), Some(&2));
        assert_eq!(map.get(&3, &guard), Some(&4));
    }

    #[test]
    fn hm_ref_parallel_extend_by_a_bunch() {
        let mut to_extend_with = Vec::new();
        for i in 0..100 {
            to_extend_with.push((i + 100, i * 10));
        }
        let map = HashMap::new();
        let guard = map.guard();
        map.insert(1, 2, &guard);
        map.insert(3, 4, &guard);
        map.pin().par_extend(to_extend_with.into_par_iter());
        assert_eq!(map.len(), 102);
        assert_eq!(map.get(&1, &guard), Some(&2));
        assert_eq!(map.get(&3, &guard), Some(&4));
        assert_eq!(map.get(&100, &guard), Some(&0));
        assert_eq!(map.get(&199, &guard), Some(&990));
    }

    #[test]
    fn hs_parallel_extend_by_nothing() {
        let to_extend_with = Vec::new();
        let mut set = HashSet::new();
        {
            let guard = set.guard();
            set.insert(1, &guard);
            set.insert(3, &guard);
        }
        set.par_extend(to_extend_with.into_par_iter());
        assert_eq!(set.len(), 2);
        let guard = set.guard();
        assert!(set.contains(&1, &guard));
        assert!(!set.contains(&17, &guard));
    }

    #[test]
    fn hs_parallel_extend_by_a_bunch() {
        let mut to_extend_with = Vec::new();
        for i in 0..100 {
            to_extend_with.push((i + 100, i * 10));
        }
        let mut set = HashSet::new();
        {
            let guard = set.guard();
            set.insert((1, 2), &guard);
            set.insert((3, 4), &guard);
        }
        set.par_extend(to_extend_with.into_par_iter());
        assert_eq!(set.len(), 102);
        let guard = set.guard();
        assert!(set.contains(&(1, 2), &guard));
        assert!(set.contains(&(199, 990), &guard));
        assert!(!set.contains(&(199, 167), &guard));
    }

    #[test]
    fn hs_ref_parallel_extend_by_nothing() {
        let to_extend_with = Vec::new();
        let mut set = HashSet::new();
        {
            let guard = set.guard();
            set.insert((1, 2), &guard);
            set.insert((3, 4), &guard);
        }
        set.par_extend(to_extend_with.into_par_iter());
        assert_eq!(set.len(), 2);
        let guard = set.guard();
        assert!(set.contains(&(1, 2), &guard));
        assert!(!set.contains(&(199, 990), &guard));
        assert!(!set.contains(&(199, 167), &guard));
    }

    #[test]
    fn hs_ref_parallel_extend_by_a_bunch() {
        let mut to_extend_with = Vec::new();
        for i in 0..100 {
            to_extend_with.push((i + 100, i * 10));
        }
        let set = HashSet::new();
        let mut set_ref = set.pin();
        set_ref.insert((1, 2));
        set_ref.insert((3, 4));
        set_ref.par_extend(to_extend_with.into_par_iter());
        assert_eq!(set.len(), 102);
        assert!(set_ref.contains(&(1, 2)));
        assert!(set_ref.contains(&(199, 990)));
        assert!(!set_ref.contains(&(199, 167)));
    }
}

170
vendor/flurry/src/reclaim.rs vendored Normal file
View File

@@ -0,0 +1,170 @@
pub(crate) use seize::{Collector, Guard, Linked};
use std::marker::PhantomData;
use std::ops::Deref;
use std::sync::atomic::{AtomicPtr, Ordering};
use std::{fmt, ptr};
pub(crate) struct Atomic<T>(AtomicPtr<Linked<T>>);
impl<T> Atomic<T> {
    /// Creates a cell holding the null pointer.
    pub(crate) fn null() -> Self {
        Self(AtomicPtr::default())
    }

    /// Loads the current pointer, protecting it through `guard` so the
    /// returned `Shared` stays valid for `'g`.
    pub(crate) fn load<'g>(&self, ordering: Ordering, guard: &'g Guard<'_>) -> Shared<'g, T> {
        guard.protect(&self.0, ordering).into()
    }

    /// Stores `new` into the cell.
    pub(crate) fn store(&self, new: Shared<'_, T>, ordering: Ordering) {
        self.0.store(new.ptr, ordering);
    }

    /// Takes ownership of the pointed-to allocation as a `Box`.
    ///
    /// # Safety
    ///
    /// The pointer must be non-null, and the caller must have exclusive
    /// ownership of the allocation (no other thread may reach or have
    /// retired it).
    pub(crate) unsafe fn into_box(self) -> Box<Linked<T>> {
        Box::from_raw(self.0.into_inner())
    }

    /// Atomically swaps in `new` and returns the previous pointer.
    pub(crate) fn swap<'g>(
        &self,
        new: Shared<'_, T>,
        ord: Ordering,
        _: &'g Guard<'_>,
    ) -> Shared<'g, T> {
        self.0.swap(new.ptr, ord).into()
    }

    /// Compare-and-exchange. On failure, returns the pointer that was
    /// actually observed together with the rejected `new` value.
    pub(crate) fn compare_exchange<'g>(
        &self,
        current: Shared<'_, T>,
        new: Shared<'g, T>,
        success: Ordering,
        failure: Ordering,
        _: &'g Guard<'_>,
    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T>> {
        match self
            .0
            .compare_exchange(current.ptr, new.ptr, success, failure)
        {
            Ok(ptr) => Ok(ptr.into()),
            Err(current) => Err(CompareExchangeError {
                current: current.into(),
                new,
            }),
        }
    }
}
impl<T> From<Shared<'_, T>> for Atomic<T> {
fn from(shared: Shared<'_, T>) -> Self {
Atomic(shared.ptr.into())
}
}
impl<T> Clone for Atomic<T> {
fn clone(&self) -> Self {
Atomic(self.0.load(Ordering::Relaxed).into())
}
}
impl<T> fmt::Debug for Shared<'_, T> {
    /// Formats the raw address of the pointer (`{:p}` semantics).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&self.ptr, f)
    }
}
impl<T> fmt::Debug for Atomic<T> {
    /// Formats the currently stored raw address.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let ptr = self.0.load(Ordering::SeqCst);
        write!(f, "{:p}", ptr)
    }
}
/// The failure payload of [`Atomic::compare_exchange`]: the pointer that was
/// actually observed, plus the rejected replacement.
pub(crate) struct CompareExchangeError<'g, T> {
    pub(crate) current: Shared<'g, T>,
    pub(crate) new: Shared<'g, T>,
}

/// A possibly-null raw pointer whose validity is tied to a guard lifetime
/// `'g` (it is only safe to dereference while that guard is held).
pub(crate) struct Shared<'g, T> {
    ptr: *mut Linked<T>,
    _g: PhantomData<&'g ()>,
}
impl<'g, T> Shared<'g, T> {
    /// The null pointer.
    pub(crate) fn null() -> Self {
        Shared::from(ptr::null_mut())
    }

    /// Heap-allocates `value` linked to `collector` and returns a pointer
    /// to it.
    pub(crate) fn boxed(value: T, collector: &Collector) -> Self {
        Shared::from(collector.link_boxed(value))
    }

    /// Reclaims the allocation as a `Box`.
    ///
    /// # Safety
    ///
    /// The pointer must be non-null and the caller must have exclusive
    /// ownership of the allocation.
    pub(crate) unsafe fn into_box(self) -> Box<Linked<T>> {
        Box::from_raw(self.ptr)
    }

    /// Returns the underlying raw pointer.
    pub(crate) unsafe fn as_ptr(&self) -> *mut Linked<T> {
        self.ptr
    }

    /// Converts to a reference, or `None` if null.
    ///
    /// # Safety
    ///
    /// If non-null, the pointee must be valid for `'g`.
    pub(crate) unsafe fn as_ref(&self) -> Option<&'g Linked<T>> {
        self.ptr.as_ref()
    }

    /// Dereferences without a null check.
    ///
    /// # Safety
    ///
    /// The pointer must be non-null and the pointee valid for `'g`.
    pub(crate) unsafe fn deref(&self) -> &'g Linked<T> {
        &*self.ptr
    }

    /// Returns `true` if this is the null pointer.
    pub(crate) fn is_null(&self) -> bool {
        self.ptr.is_null()
    }
}
impl<'g, T> PartialEq<Shared<'g, T>> for Shared<'g, T> {
    // equality is pointer identity, not `T: PartialEq`
    fn eq(&self, other: &Self) -> bool {
        self.ptr == other.ptr
    }
}

impl<T> Eq for Shared<'_, T> {}

impl<T> Clone for Shared<'_, T> {
    // `Shared` is just a tagged raw pointer, so cloning is a bit-copy
    fn clone(&self) -> Self {
        *self
    }
}

impl<T> Copy for Shared<'_, T> {}
impl<T> From<*mut Linked<T>> for Shared<'_, T> {
fn from(ptr: *mut Linked<T>) -> Self {
Shared {
ptr,
_g: PhantomData,
}
}
}
/// Extension trait for retiring a [`Shared`] pointer through a guard.
pub(crate) trait RetireShared {
    /// # Safety
    ///
    /// `shared` must point to a live allocation that is no longer reachable
    /// by threads that start after this call, and must not be retired twice.
    unsafe fn retire_shared<T>(&self, shared: Shared<'_, T>);
}

impl RetireShared for Guard<'_> {
    unsafe fn retire_shared<T>(&self, shared: Shared<'_, T>) {
        // reclaim with `Box`'s destructor once no guard can still observe it
        self.defer_retire(shared.ptr, seize::reclaim::boxed::<Linked<T>>);
    }
}
/// Either an owned guard or a borrowed one, so that `HashMapRef` can be
/// built both from `pin()` (owned) and `with_guard()` (borrowed).
pub(crate) enum GuardRef<'g> {
    Owned(Guard<'g>),
    Ref(&'g Guard<'g>),
}
impl<'g> Deref for GuardRef<'g> {
    type Target = Guard<'g>;

    /// Borrows the guard, whichever variant holds it.
    #[inline]
    fn deref(&self) -> &Guard<'g> {
        match self {
            GuardRef::Owned(guard) => guard,
            GuardRef::Ref(guard) => guard,
        }
    }
}

241
vendor/flurry/src/serde_impls.rs vendored Normal file
View File

@@ -0,0 +1,241 @@
use crate::{HashMap, HashMapRef, HashSet, HashSetRef};
use serde::{
de::{MapAccess, SeqAccess, Visitor},
Deserialize, Deserializer, Serialize, Serializer,
};
use std::fmt::{self, Formatter};
use std::hash::{BuildHasher, Hash};
use std::marker::PhantomData;
/// Serde visitor that builds a [`HashMap`]; the markers only pin the
/// key/value/hasher type parameters.
struct HashMapVisitor<K, V, S> {
    key_marker: PhantomData<K>,
    value_marker: PhantomData<V>,
    hash_builder_marker: PhantomData<S>,
}
impl<K, V, S> Serialize for HashMapRef<'_, K, V, S>
where
    K: Serialize,
    V: Serialize,
{
    /// Serializes the map as a sequence of key-value entries.
    fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
    where
        Ser: Serializer,
    {
        serializer.collect_map(self.iter())
    }
}
impl<K, V, S> Serialize for HashMap<K, V, S>
where
    K: Serialize,
    V: Serialize,
{
    /// Serializes the map by pinning it once and delegating to the
    /// `HashMapRef` impl.
    fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
    where
        Ser: Serializer,
    {
        let pinned = self.pin();
        pinned.serialize(serializer)
    }
}
impl<'de, K, V, S> Deserialize<'de> for HashMap<K, V, S>
where
    K: Deserialize<'de> + Send + Sync + Hash + Clone + Ord,
    V: Deserialize<'de> + Send + Sync,
    S: Default + BuildHasher,
{
    /// Deserializes a map via [`HashMapVisitor`].
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let visitor = HashMapVisitor::new();
        deserializer.deserialize_map(visitor)
    }
}
impl<K, V, S> HashMapVisitor<K, V, S> {
    /// Creates a visitor; all fields are zero-sized type markers.
    pub(crate) fn new() -> Self {
        Self {
            key_marker: PhantomData,
            value_marker: PhantomData,
            hash_builder_marker: PhantomData,
        }
    }
}
impl<'de, K, V, S> Visitor<'de> for HashMapVisitor<K, V, S>
where
    K: Deserialize<'de> + Send + Sync + Hash + Clone + Ord,
    V: Deserialize<'de> + Send + Sync,
    S: Default + BuildHasher,
{
    type Value = HashMap<K, V, S>;

    fn expecting(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "a map")
    }

    /// Builds a [`HashMap`] from the entries produced by `access`.
    ///
    /// If the input contains the same key more than once, the last value
    /// wins — matching `std::collections::HashMap`'s `Deserialize` impl.
    /// (Previously this panicked via `unreachable!`, which turned
    /// deserializing untrusted input with duplicate keys into a
    /// denial-of-service vector: duplicate keys are invalid input, not an
    /// internal invariant.)
    fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
    where
        M: MapAccess<'de>,
    {
        // Pre-size the map when the format reports how many entries follow.
        let map = match access.size_hint() {
            Some(n) => HashMap::with_capacity_and_hasher(n, S::default()),
            None => HashMap::with_hasher(S::default()),
        };
        {
            // Reuse one guard for all insertions.
            let guard = map.guard();
            while let Some((key, value)) = access.next_entry()? {
                // Last value wins on duplicate keys; never panic on input.
                let _ = map.insert(key, value, &guard);
            }
        }
        Ok(map)
    }
}
impl<T, S> Serialize for HashSetRef<'_, T, S>
where
    T: Serialize,
{
    /// Serializes the set as a sequence of its elements.
    fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
    where
        Ser: Serializer,
    {
        serializer.collect_seq(self.iter())
    }
}
impl<T, S> Serialize for HashSet<T, S>
where
    T: Serialize,
{
    /// Serializes the set by pinning it once and delegating to the
    /// `HashSetRef` impl.
    fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
    where
        Ser: Serializer,
    {
        let pinned = self.pin();
        pinned.serialize(serializer)
    }
}
impl<'de, T, S> Deserialize<'de> for HashSet<T, S>
where
    T: Deserialize<'de> + Send + Sync + Hash + Clone + Ord,
    S: Default + BuildHasher,
{
    /// Deserializes a set via [`HashSetVisitor`].
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let visitor = HashSetVisitor::new();
        deserializer.deserialize_seq(visitor)
    }
}
/// Serde visitor that builds a [`HashSet`]; the markers only pin the
/// element/hasher type parameters.
struct HashSetVisitor<T, S> {
    type_marker: PhantomData<T>,
    hash_builder_marker: PhantomData<S>,
}

impl<T, S> HashSetVisitor<T, S> {
    /// Creates a visitor; all fields are zero-sized type markers.
    pub(crate) fn new() -> Self {
        Self {
            type_marker: PhantomData,
            hash_builder_marker: PhantomData,
        }
    }
}
impl<'de, T, S> Visitor<'de> for HashSetVisitor<T, S>
where
    T: Deserialize<'de> + Send + Sync + Hash + Clone + Ord,
    S: Default + BuildHasher,
{
    type Value = HashSet<T, S>;

    fn expecting(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "a set")
    }

    /// Builds a [`HashSet`] from the elements produced by `access`.
    ///
    /// Pre-sizes the set from the format's `size_hint` when available,
    /// mirroring the map visitor above (the previous implementation always
    /// started from capacity 0 and grew incrementally).
    fn visit_seq<A>(self, mut access: A) -> Result<Self::Value, A::Error>
    where
        A: SeqAccess<'de>,
    {
        let set = match access.size_hint() {
            Some(n) => HashSet::with_capacity_and_hasher(n, S::default()),
            None => HashSet::with_hasher(S::default()),
        };
        {
            // Reuse one guard for all insertions.
            let guard = set.guard();
            while let Some(value) = access.next_element()? {
                let _ = set.insert(value, &guard);
            }
        }
        Ok(set)
    }
}
// Round-trip tests: serialize with serde_json and deserialize back, for
// maps, sets, and a value type (`f64`) that is not `Ord`/`Eq`.
#[cfg(test)]
mod test {
    use crate::{HashMap, HashSet};

    #[test]
    fn test_map() {
        let map: HashMap<u8, u8> = HashMap::with_capacity(5);
        let guard = map.guard();
        let _ = map.insert(0, 4, &guard);
        let _ = map.insert(1, 3, &guard);
        let _ = map.insert(2, 2, &guard);
        let _ = map.insert(3, 1, &guard);
        let _ = map.insert(4, 0, &guard);
        let serialized = serde_json::to_string(&map).expect("Couldn't serialize map");
        let deserialized: HashMap<u8, u8> =
            serde_json::from_str(&serialized).expect("Couldn't deserialize map");
        assert_eq!(map, deserialized);
    }

    #[test]
    fn test_set() {
        let set: HashSet<u8> = HashSet::with_capacity(5);
        let guard = set.guard();
        let _ = set.insert(0, &guard);
        let _ = set.insert(1, &guard);
        let _ = set.insert(2, &guard);
        let _ = set.insert(3, &guard);
        let _ = set.insert(4, &guard);
        let serialized = serde_json::to_string(&set).expect("Couldn't serialize map");
        let deserialized: HashSet<u8> =
            serde_json::from_str(&serialized).expect("Couldn't deserialize map");
        assert_eq!(set, deserialized);
    }

    #[test]
    fn test_map_no_ord() {
        let map: HashMap<u8, f64> = HashMap::with_capacity(5);
        let guard = map.guard();
        let _ = map.insert(0, 4.0, &guard);
        let _ = map.insert(1, 3.0, &guard);
        let _ = map.insert(2, 2.0, &guard);
        let _ = map.insert(3, 1.0, &guard);
        let _ = map.insert(4, 0.0, &guard);
        let serialized = serde_json::to_string(&map).expect("Couldn't serialize map");
        let deserialized: HashMap<u8, f64> =
            serde_json::from_str(&serialized).expect("Couldn't deserialize map");
        assert_eq!(map, deserialized);
    }
}

610
vendor/flurry/src/set.rs vendored Normal file
View File

@@ -0,0 +1,610 @@
//! A concurrent hash set.
//!
//! See `HashSet` for details.
use crate::iter::Keys;
use crate::reclaim::Guard;
use crate::HashMap;
use std::borrow::Borrow;
use std::fmt::{self, Debug, Formatter};
use std::hash::{BuildHasher, Hash};
/// A concurrent hash set implemented as a `HashMap` where the value is `()`.
///
/// # Examples
///
/// ```
/// use flurry::HashSet;
///
/// // Initialize a new hash set.
/// let books = HashSet::new();
/// let guard = books.guard();
///
/// // Add some books
/// books.insert("Fight Club", &guard);
/// books.insert("Three Men In A Raft", &guard);
/// books.insert("The Book of Dust", &guard);
/// books.insert("The Dry", &guard);
///
/// // Check for a specific one.
/// if !books.contains(&"The Drunken Botanist", &guard) {
///     println!("We don't have The Drunken Botanist.");
/// }
///
/// // Remove a book.
/// books.remove(&"Three Men In A Raft", &guard);
///
/// // Iterate over everything.
/// for book in books.iter(&guard) {
///     println!("{}", book);
/// }
/// ```
pub struct HashSet<T, S = crate::DefaultHashBuilder> {
    // the backing map: membership is key presence; the value is always `()`
    pub(crate) map: HashMap<T, (), S>,
}
impl<T> HashSet<T, crate::DefaultHashBuilder> {
    /// Creates an empty `HashSet`.
    ///
    /// The hash set is initially created with a capacity of 0, so it will not allocate until it
    /// is first inserted into.
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::HashSet;
    /// let set: HashSet<i32> = HashSet::new();
    /// ```
    pub fn new() -> Self {
        Self::default()
    }

    /// Creates an empty `HashSet` with the specified capacity.
    ///
    /// The hash map will be able to hold at least `capacity` elements without
    /// reallocating. If `capacity` is 0, the hash map will not allocate.
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::HashSet;
    /// let map: HashSet<&str, _> = HashSet::with_capacity(10);
    /// ```
    ///
    /// # Notes
    ///
    /// There is no guarantee that the HashSet will not resize if `capacity`
    /// elements are inserted. The set will resize based on key collision, so
    /// bad key distribution may cause a resize before `capacity` is reached.
    /// For more information see the [`resizing behavior`] of HashMap.
    ///
    /// [`resizing behavior`]: index.html#resizing-behavior
    pub fn with_capacity(capacity: usize) -> Self {
        // delegate to the general constructor with the default hasher
        Self::with_capacity_and_hasher(capacity, crate::DefaultHashBuilder::default())
    }
}
impl<T, S> Default for HashSet<T, S>
where
    S: Default,
{
    /// Creates an empty set backed by the hasher type's default value.
    fn default() -> Self {
        let hasher = S::default();
        Self::with_hasher(hasher)
    }
}
impl<T, S> HashSet<T, S> {
    /// Creates an empty set that hashes values with `hash_builder`.
    ///
    /// The created set has the default initial capacity.
    ///
    /// Warning: hash builders are normally randomly seeded to keep the set
    /// resistant to collision-based DoS attacks causing very poor
    /// performance. Supplying one manually with this function can expose
    /// such an attack vector.
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::{HashSet, DefaultHashBuilder};
    ///
    /// let set = HashSet::with_hasher(DefaultHashBuilder::default());
    /// let guard = set.guard();
    /// set.insert(1, &guard);
    /// ```
    pub fn with_hasher(hash_builder: S) -> Self {
        let map = HashMap::with_hasher(hash_builder);
        Self { map }
    }

    /// Creates an empty set with room for at least `capacity` elements
    /// (assuming uniformly distributed hashes), using `hash_builder` to hash
    /// the values.
    ///
    /// A `capacity` of 0 performs no allocation and is equivalent to
    /// [`HashSet::new`].
    ///
    /// Warning: hash builders are normally randomly seeded to keep the set
    /// resistant to collision-based DoS attacks causing very poor
    /// performance. Supplying one manually with this function can expose
    /// such an attack vector.
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::HashSet;
    /// use std::collections::hash_map::RandomState;
    ///
    /// let s = RandomState::new();
    /// let set = HashSet::with_capacity_and_hasher(10, s);
    /// let guard = set.guard();
    /// set.insert(1, &guard);
    /// ```
    pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self {
        let map = HashMap::with_capacity_and_hasher(capacity, hash_builder);
        Self { map }
    }

    /// Pin a `Guard` for use with this set.
    ///
    /// As long as the returned `Guard` is held, garbage generated by the set
    /// cannot be collected.
    pub fn guard(&self) -> Guard<'_> {
        self.map.guard()
    }

    /// Returns the number of elements in the set.
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::HashSet;
    ///
    /// let set = HashSet::new();
    ///
    /// let guard = set.guard();
    /// set.insert(1, &guard);
    /// set.insert(2, &guard);
    /// assert_eq!(set.len(), 2);
    /// ```
    pub fn len(&self) -> usize {
        self.map.len()
    }

    /// Returns `true` if the set contains no elements, `false` otherwise.
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::HashSet;
    ///
    /// let set = HashSet::new();
    /// assert!(set.is_empty());
    /// set.insert("a", &set.guard());
    /// assert!(!set.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        self.map.len() == 0
    }

    /// An iterator over the set's elements, in arbitrary order, yielding
    /// `&'g T`.
    ///
    /// See [`HashMap::keys`] for details.
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::HashSet;
    ///
    /// let set = HashSet::new();
    /// let guard = set.guard();
    /// set.insert(1, &guard);
    /// set.insert(2, &guard);
    ///
    /// for x in set.iter(&guard) {
    ///     println!("{}", x);
    /// }
    /// ```
    pub fn iter<'g>(&'g self, guard: &'g Guard<'_>) -> Keys<'g, T, ()> {
        self.map.keys(guard)
    }
}
impl<T, S> HashSet<T, S>
where
    T: Hash + Ord,
    S: BuildHasher,
{
    /// Returns `true` if the given value is an element of this set.
    ///
    /// `value` may be any borrowed form of the element type, as long as
    /// [`Hash`] and [`Ord`] on the borrowed form match those of the element
    /// type.
    ///
    /// [`Ord`]: std::cmp::Ord
    /// [`Hash`]: std::hash::Hash
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::HashSet;
    ///
    /// let set = HashSet::new();
    /// let guard = set.guard();
    /// set.insert(2, &guard);
    ///
    /// assert!(set.contains(&2, &guard));
    /// assert!(!set.contains(&1, &guard));
    /// ```
    #[inline]
    pub fn contains<Q>(&self, value: &Q, guard: &Guard<'_>) -> bool
    where
        T: Borrow<Q>,
        Q: ?Sized + Hash + Ord,
    {
        self.map.contains_key(value, guard)
    }

    /// Returns a reference to the element in the set, if any, that is equal
    /// to the given value.
    ///
    /// `value` may be any borrowed form of the element type, as long as
    /// [`Hash`] and [`Ord`] on the borrowed form match those of the element
    /// type.
    ///
    /// [`Ord`]: std::cmp::Ord
    /// [`Hash`]: std::hash::Hash
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::HashSet;
    ///
    /// let set: HashSet<_> = [1, 2, 3].iter().cloned().collect();
    /// let guard = set.guard();
    /// assert_eq!(set.get(&2, &guard), Some(&2));
    /// assert_eq!(set.get(&4, &guard), None);
    /// ```
    pub fn get<'g, Q>(&'g self, value: &Q, guard: &'g Guard<'_>) -> Option<&'g T>
    where
        T: Borrow<Q>,
        Q: ?Sized + Hash + Ord,
    {
        // The element is stored as the map key; discard the unit value.
        match self.map.get_key_value(value, guard) {
            Some((element, _)) => Some(element),
            None => None,
        }
    }

    /// Returns `true` if `self` has no elements in common with `other`,
    /// i.e. their intersection is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::iter::FromIterator;
    /// use flurry::HashSet;
    ///
    /// let a = HashSet::from_iter(&[1, 2, 3]);
    /// let b = HashSet::new();
    ///
    /// assert!(a.pin().is_disjoint(&b.pin()));
    /// b.pin().insert(4);
    /// assert!(a.pin().is_disjoint(&b.pin()));
    /// b.pin().insert(1);
    /// assert!(!a.pin().is_disjoint(&b.pin()));
    ///
    /// ```
    pub fn is_disjoint(
        &self,
        other: &HashSet<T, S>,
        our_guard: &Guard<'_>,
        their_guard: &Guard<'_>,
    ) -> bool {
        // Disjoint iff no element of `self` also appears in `other`.
        !self
            .iter(our_guard)
            .any(|element| other.contains(element, their_guard))
    }

    /// Returns `true` if the set is a subset of another, i.e., `other`
    /// contains at least all the values in `self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::iter::FromIterator;
    /// use flurry::HashSet;
    ///
    /// let sup = HashSet::from_iter(&[1, 2, 3]);
    /// let set = HashSet::new();
    ///
    /// assert!(set.pin().is_subset(&sup.pin()));
    /// set.pin().insert(2);
    /// assert!(set.pin().is_subset(&sup.pin()));
    /// set.pin().insert(4);
    /// assert!(!set.pin().is_subset(&sup.pin()));
    /// ```
    pub fn is_subset(
        &self,
        other: &HashSet<T, S>,
        our_guard: &Guard<'_>,
        their_guard: &Guard<'_>,
    ) -> bool {
        self.iter(our_guard)
            .all(|element| other.contains(element, their_guard))
    }

    /// Returns `true` if the set is a superset of another, i.e., `self`
    /// contains at least all the values in `other`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::iter::FromIterator;
    /// use flurry::HashSet;
    ///
    /// let sub = HashSet::from_iter(&[1, 2]);
    /// let set = HashSet::new();
    ///
    /// assert!(!set.pin().is_superset(&sub.pin()));
    ///
    /// set.pin().insert(0);
    /// set.pin().insert(1);
    /// assert!(!set.pin().is_superset(&sub.pin()));
    ///
    /// set.pin().insert(2);
    /// assert!(set.pin().is_superset(&sub.pin()));
    /// ```
    pub fn is_superset(
        &self,
        other: &HashSet<T, S>,
        our_guard: &Guard<'_>,
        their_guard: &Guard<'_>,
    ) -> bool {
        // A superset relation is the subset relation with the roles swapped.
        other.is_subset(self, their_guard, our_guard)
    }

    // Equality check between two sets, each accessed under its own guard.
    pub(crate) fn guarded_eq(
        &self,
        other: &Self,
        our_guard: &Guard<'_>,
        their_guard: &Guard<'_>,
    ) -> bool {
        self.map.guarded_eq(&other.map, our_guard, their_guard)
    }
}
impl<T, S> HashSet<T, S>
where
    T: Sync + Send + Clone + Hash + Ord,
    S: BuildHasher,
{
    /// Adds a value to the set.
    ///
    /// Returns `true` if the value was newly inserted, or `false` if an
    /// equal value was already present.
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::HashSet;
    ///
    /// let set = HashSet::new();
    /// let guard = set.guard();
    ///
    /// assert_eq!(set.insert(2, &guard), true);
    /// assert_eq!(set.insert(2, &guard), false);
    /// assert!(set.contains(&2, &guard));
    /// ```
    pub fn insert(&self, value: T, guard: &Guard<'_>) -> bool {
        // No previous mapping means the value was not yet in the set.
        self.map.insert(value, (), guard).is_none()
    }

    /// Removes a value from the set.
    ///
    /// Returns `true` if the value was present, or `false` otherwise.
    ///
    /// `value` may be any borrowed form of the element type, as long as
    /// [`Hash`] and [`Ord`] on the borrowed form match those of the element
    /// type.
    ///
    /// [`Ord`]: std::cmp::Ord
    /// [`Hash`]: std::hash::Hash
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::HashSet;
    ///
    /// let set = HashSet::new();
    /// let guard = set.guard();
    /// set.insert(2, &guard);
    ///
    /// assert_eq!(set.remove(&2, &guard), true);
    /// assert!(!set.contains(&2, &guard));
    /// assert_eq!(set.remove(&2, &guard), false);
    /// ```
    pub fn remove<Q>(&self, value: &Q, guard: &Guard<'_>) -> bool
    where
        T: Borrow<Q>,
        Q: ?Sized + Hash + Ord,
    {
        self.map.remove(value, guard).is_some()
    }

    /// Removes and returns the element in the set, if any, that is equal to
    /// the given one.
    ///
    /// `value` may be any borrowed form of the element type, as long as
    /// [`Hash`] and [`Ord`] on the borrowed form match those of the element
    /// type.
    ///
    /// [`Ord`]: std::cmp::Ord
    /// [`Hash`]: std::hash::Hash
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::HashSet;
    ///
    /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect();
    /// let guard = set.guard();
    /// assert_eq!(set.take(&2, &guard), Some(&2));
    /// assert_eq!(set.take(&2, &guard), None);
    /// ```
    pub fn take<'g, Q>(&'g self, value: &Q, guard: &'g Guard<'_>) -> Option<&'g T>
    where
        T: Borrow<Q>,
        Q: ?Sized + Hash + Ord,
    {
        // The element is the map key; the unit value is discarded.
        let entry = self.map.remove_entry(value, guard);
        entry.map(|(element, _)| element)
    }

    /// Retains only the elements specified by the predicate.
    ///
    /// In other words, removes every element `e` for which `f(&e)` returns
    /// `false`.
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::HashSet;
    ///
    /// let set = HashSet::new();
    ///
    /// for i in 0..8 {
    ///     set.pin().insert(i);
    /// }
    /// set.pin().retain(|&e| e % 2 == 0);
    /// assert_eq!(set.pin().len(), 4);
    /// ```
    pub fn retain<F>(&self, mut f: F, guard: &Guard<'_>)
    where
        F: FnMut(&T) -> bool,
    {
        // All map values are `()`, so the predicate only sees the key.
        self.map.retain(|element, _| f(element), guard)
    }
}
impl<T, S> HashSet<T, S>
where
    T: Clone + Ord,
{
    /// Removes all elements from the set.
    ///
    /// # Examples
    ///
    /// ```
    /// use flurry::HashSet;
    ///
    /// let set = HashSet::new();
    ///
    /// set.pin().insert("a");
    /// set.pin().clear();
    /// assert!(set.pin().is_empty());
    /// ```
    pub fn clear(&self, guard: &Guard<'_>) {
        self.map.clear(guard)
    }

    /// Tries to reserve capacity for at least `additional` more elements to
    /// be inserted in the `HashSet`.
    ///
    /// More space than requested may be reserved to avoid frequent
    /// reallocations.
    pub fn reserve(&self, additional: usize, guard: &Guard<'_>) {
        self.map.reserve(additional, guard)
    }
}
impl<T, S> PartialEq for HashSet<T, S>
where
    T: Ord + Hash,
    S: BuildHasher,
{
    /// Two sets are equal exactly when their backing maps are equal.
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(&self.map, &other.map)
    }
}
impl<T, S> Eq for HashSet<T, S>
where
    T: Ord + Hash,
    S: BuildHasher,
{
    // Marker impl: delegated `PartialEq` is reflexive since `T: Ord` implies `T: Eq`.
}
impl<T, S> fmt::Debug for HashSet<T, S>
where
    T: Debug,
{
    /// Formats the set as `{elem, elem, ...}` using the standard set builder.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Pin a guard for the duration of the iteration.
        let guard = self.guard();
        let mut builder = f.debug_set();
        builder.entries(self.iter(&guard));
        builder.finish()
    }
}
impl<T, S> Extend<T> for &HashSet<T, S>
where
    T: Sync + Send + Clone + Hash + Ord,
    S: BuildHasher,
{
    /// Inserts every value yielded by `iter` into the set.
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        // Each set element becomes a `(value, ())` entry in the backing map.
        let entries = iter.into_iter().map(|value| (value, ()));
        Extend::extend(&mut &self.map, entries)
    }
}
impl<'a, T, S> Extend<&'a T> for &HashSet<T, S>
where
    T: Sync + Send + Copy + Hash + Ord,
    S: BuildHasher,
{
    /// Inserts a copy of every value yielded by `iter` into the set.
    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
        let entries = iter.into_iter().copied().map(|value| (value, ()));
        Extend::extend(&mut &self.map, entries)
    }
}
impl<T, S> FromIterator<T> for HashSet<T, S>
where
    T: Sync + Send + Clone + Hash + Ord,
    S: BuildHasher + Default,
{
    /// Builds a set containing every value yielded by the iterator.
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        // Collect `(value, ())` pairs directly into the backing map.
        let map = iter.into_iter().map(|value| (value, ())).collect();
        Self { map }
    }
}
impl<'a, T, S> FromIterator<&'a T> for HashSet<T, S>
where
    T: Sync + Send + Copy + Hash + Ord,
    S: BuildHasher + Default,
{
    /// Builds a set by copying every value yielded by the iterator.
    fn from_iter<I: IntoIterator<Item = &'a T>>(iter: I) -> Self {
        // Copy each reference out, then reuse the owned-value impl.
        iter.into_iter().copied().collect()
    }
}
impl<T, S> Clone for HashSet<T, S>
where
T: Sync + Send + Clone + Hash + Ord,
S: BuildHasher + Clone,
{
fn clone(&self) -> HashSet<T, S> {
Self {
map: self.map.clone(),
}
}
}

237
vendor/flurry/src/set_ref.rs vendored Normal file
View File

@@ -0,0 +1,237 @@
use crate::iter::*;
use crate::reclaim::{Guard, GuardRef};
use crate::HashSet;
use std::borrow::Borrow;
use std::fmt::{self, Debug, Formatter};
use std::hash::{BuildHasher, Hash};
/// A reference to a [`HashSet`], constructed with [`HashSet::pin`] or [`HashSet::with_guard`].
///
/// The current thread will be pinned for the duration of this reference.
/// Keep in mind that this prevents the collection of garbage generated by the set.
pub struct HashSetRef<'set, T, S = crate::DefaultHashBuilder> {
    // The set this reference operates on.
    pub(crate) set: &'set HashSet<T, S>,
    // Guard (owned or borrowed) used for every operation through this reference.
    guard: GuardRef<'set>,
}
impl<T, S> HashSet<T, S> {
    /// Get a reference to this set with the current thread pinned.
    ///
    /// As long as this reference is held, garbage generated by the set
    /// cannot be collected.
    pub fn pin(&self) -> HashSetRef<'_, T, S> {
        let guard = GuardRef::Owned(self.guard());
        HashSetRef { set: self, guard }
    }

    /// Get a reference to this set that performs its operations under the
    /// given guard.
    pub fn with_guard<'g>(&'g self, guard: &'g Guard<'_>) -> HashSetRef<'g, T, S> {
        HashSetRef {
            set: self,
            guard: GuardRef::Ref(guard),
        }
    }
}
impl<T, S> HashSetRef<'_, T, S> {
    /// Returns the number of elements in the set.
    ///
    /// See also [`HashSet::len`].
    pub fn len(&self) -> usize {
        self.set.len()
    }

    /// Returns `true` if the set contains no elements, `false` otherwise.
    ///
    /// See also [`HashSet::is_empty`].
    pub fn is_empty(&self) -> bool {
        self.set.is_empty()
    }

    /// An iterator over the set's elements, in arbitrary order, yielding
    /// `&'g T`.
    ///
    /// See also [`HashSet::iter`].
    pub fn iter(&self) -> Keys<'_, T, ()> {
        self.set.iter(&self.guard)
    }
}
impl<T, S> HashSetRef<'_, T, S>
where
    T: Hash + Ord,
    S: BuildHasher,
{
    /// Returns `true` if the given value is an element of this set.
    ///
    /// See also [`HashSet::contains`].
    #[inline]
    pub fn contains<Q>(&self, value: &Q) -> bool
    where
        T: Borrow<Q>,
        Q: ?Sized + Hash + Ord,
    {
        self.set.contains(value, &self.guard)
    }

    /// Returns a reference to the element in the set, if any, that is equal
    /// to the given value.
    ///
    /// See also [`HashSet::get`].
    pub fn get<'g, Q>(&'g self, value: &Q) -> Option<&'g T>
    where
        T: Borrow<Q>,
        Q: ?Sized + Hash + Ord,
    {
        self.set.get(value, &self.guard)
    }

    /// Returns `true` if `self` has no elements in common with `other`.
    ///
    /// See also [`HashSet::is_disjoint`].
    pub fn is_disjoint(&self, other: &HashSetRef<'_, T, S>) -> bool {
        self.set.is_disjoint(other.set, &self.guard, &other.guard)
    }

    /// Returns `true` if `other` contains at least all the values in `self`.
    ///
    /// See also [`HashSet::is_subset`].
    pub fn is_subset(&self, other: &HashSetRef<'_, T, S>) -> bool {
        self.set.is_subset(other.set, &self.guard, &other.guard)
    }

    /// Returns `true` if `self` contains at least all the values in `other`.
    ///
    /// See also [`HashSet::is_superset`].
    pub fn is_superset(&self, other: &HashSetRef<'_, T, S>) -> bool {
        self.set.is_superset(other.set, &self.guard, &other.guard)
    }
}
impl<T, S> HashSetRef<'_, T, S>
where
    T: Sync + Send + Clone + Hash + Ord,
    S: BuildHasher,
{
    /// Adds a value to the set; returns `true` if it was newly inserted.
    ///
    /// See also [`HashSet::insert`].
    pub fn insert(&self, value: T) -> bool {
        self.set.insert(value, &self.guard)
    }

    /// Removes a value from the set; returns `true` if it was present.
    ///
    /// See also [`HashSet::remove`].
    pub fn remove<Q>(&self, value: &Q) -> bool
    where
        T: Borrow<Q>,
        Q: ?Sized + Hash + Ord,
    {
        self.set.remove(value, &self.guard)
    }

    /// Removes and returns the element in the set, if any, that is equal to
    /// the given one.
    ///
    /// See also [`HashSet::take`].
    pub fn take<'g, Q>(&'g self, value: &Q) -> Option<&'g T>
    where
        T: Borrow<Q>,
        Q: ?Sized + Hash + Ord,
    {
        self.set.take(value, &self.guard)
    }

    /// Retains only the elements for which the predicate returns `true`.
    ///
    /// See also [`HashSet::retain`].
    pub fn retain<F>(&self, f: F)
    where
        F: FnMut(&T) -> bool,
    {
        self.set.retain(f, &self.guard);
    }
}
impl<T, S> HashSetRef<'_, T, S>
where
    T: Clone + Ord,
{
    /// Removes all elements from the set.
    ///
    /// See also [`HashSet::clear`].
    pub fn clear(&self) {
        self.set.clear(&self.guard);
    }

    /// Tries to reserve capacity for at least `additional` more elements in
    /// the underlying `HashSet`.
    ///
    /// See also [`HashSet::reserve`].
    pub fn reserve(&self, additional: usize) {
        self.set.reserve(additional, &self.guard)
    }
}
impl<'g, T, S> IntoIterator for &'g HashSetRef<'_, T, S> {
    type Item = &'g T;
    type IntoIter = Keys<'g, T, ()>;

    /// Iterates over the referenced set under this reference's guard.
    fn into_iter(self) -> Self::IntoIter {
        self.set.iter(&self.guard)
    }
}
impl<T, S> Debug for HashSetRef<'_, T, S>
where
    T: Debug,
{
    /// Formats the referenced set as `{elem, elem, ...}`.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_set().entries(self.iter()).finish()
    }
}
impl<T, S> Clone for HashSetRef<'_, T, S> {
fn clone(&self) -> Self {
self.set.pin()
}
}
impl<T, S> PartialEq for HashSetRef<'_, T, S>
where
    T: Hash + Ord,
    S: BuildHasher,
{
    /// Two references are equal when the sets they point at are equal.
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(self.set, other.set)
    }
}
impl<T, S> PartialEq<HashSet<T, S>> for HashSetRef<'_, T, S>
where
    T: Hash + Ord,
    S: BuildHasher,
{
    /// Compares the referenced set against `other`, pinning a fresh guard
    /// for `other`.
    fn eq(&self, other: &HashSet<T, S>) -> bool {
        let their_guard = other.guard();
        self.set.guarded_eq(other, &self.guard, &their_guard)
    }
}
impl<T, S> PartialEq<HashSetRef<'_, T, S>> for HashSet<T, S>
where
    T: Hash + Ord,
    S: BuildHasher,
{
    /// Compares this set against the set behind `other`, pinning a fresh
    /// guard for `self`.
    fn eq(&self, other: &HashSetRef<'_, T, S>) -> bool {
        let our_guard = self.guard();
        self.guarded_eq(other.set, &our_guard, &other.guard)
    }
}
impl<T, S> Eq for HashSetRef<'_, T, S>
where
    T: Hash + Ord,
    S: BuildHasher,
{
    // Marker impl: equality delegates to the underlying sets, which are `Eq`.
}

Binary file not shown.

View File

Binary file not shown.

View File

@@ -0,0 +1 @@
{"name":"flurry","vers":"0.5.2","deps":[{"name":"ahash","req":"^0.8.4","features":["compile-time-rng"],"optional":false,"default_features":false,"target":null,"kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"num_cpus","req":"^1.12.0","features":[],"optional":false,"default_features":true,"target":null,"kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"parking_lot","req":"^0.12","features":[],"optional":false,"default_features":true,"target":null,"kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"rayon","req":"^1.3","features":[],"optional":true,"default_features":true,"target":null,"kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"seize","req":"^0.3.3","features":[],"optional":false,"default_features":true,"target":null,"kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"serde","req":"^1.0.105","features":[],"optional":true,"default_features":true,"target":null,"kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"criterion","req":"^0.5","features":[],"optional":false,"default_features":true,"target":null,"kind":"dev","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"rand","req":"^0.8","features":[],"optional":false,"default_features":true,"target":null,"kind":"dev","registry":"https://github.com/rust-lang/crates.io-ind
ex","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"rayon","req":"^1.3","features":[],"optional":false,"default_features":true,"target":null,"kind":"dev","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"serde_json","req":"^1.0.50","features":[],"optional":false,"default_features":true,"target":null,"kind":"dev","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"regex","req":"^1.6.0","features":[],"optional":true,"default_features":true,"target":"cfg(any())","kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false}],"features":{},"features2":null,"cksum":"767bd628f73f4dc1b7925b208671b4e04ea644ac51ac081ca7b66b2df3ff16b7","yanked":null,"links":null,"rust_version":null,"v":2}

621
vendor/flurry/tests/basic.rs vendored Normal file
View File

@@ -0,0 +1,621 @@
use flurry::*;
use std::sync::Arc;
#[test]
fn new() {
let _map = HashMap::<usize, usize>::new();
}
#[test]
fn clear() {
let map = HashMap::<usize, usize>::new();
let guard = map.guard();
{
map.insert(0, 1, &guard);
map.insert(1, 1, &guard);
map.insert(2, 1, &guard);
map.insert(3, 1, &guard);
map.insert(4, 1, &guard);
}
map.clear(&guard);
assert!(map.is_empty());
}
#[test]
fn insert() {
let map = HashMap::<usize, usize>::new();
let guard = map.guard();
let old = map.insert(42, 0, &guard);
assert!(old.is_none());
}
#[test]
fn get_empty() {
let map = HashMap::<usize, usize>::new();
{
let guard = map.guard();
let e = map.get(&42, &guard);
assert!(e.is_none());
}
}
#[test]
fn get_key_value_empty() {
let map = HashMap::<usize, usize>::new();
{
let guard = map.guard();
let e = map.get_key_value(&42, &guard);
assert!(e.is_none());
}
}
#[test]
fn remove_empty() {
let map = HashMap::<usize, usize>::new();
{
let guard = map.guard();
let old = map.remove(&42, &guard);
assert!(old.is_none());
}
}
#[test]
fn insert_and_remove() {
let map = HashMap::<usize, usize>::new();
{
let guard = map.guard();
map.insert(42, 0, &guard);
let old = map.remove(&42, &guard).unwrap();
assert_eq!(old, &0);
assert!(map.get(&42, &guard).is_none());
}
}
#[test]
fn insert_and_get() {
let map = HashMap::<usize, usize>::new();
map.insert(42, 0, &map.guard());
{
let guard = map.guard();
let e = map.get(&42, &guard).unwrap();
assert_eq!(e, &0);
}
}
#[test]
fn insert_and_get_key_value() {
let map = HashMap::<usize, usize>::new();
map.insert(42, 0, &map.guard());
{
let guard = map.guard();
let e = map.get_key_value(&42, &guard).unwrap();
assert_eq!(e, (&42, &0));
}
}
mod hasher;
use hasher::ZeroHashBuilder;
#[test]
fn one_bucket() {
let map = HashMap::<&'static str, usize, _>::with_hasher(ZeroHashBuilder);
let guard = map.guard();
// we want to check that all operations work regardless on whether
// we are operating on the head of a bucket, the tail of the bucket,
// or somewhere in the middle.
let v = map.insert("head", 0, &guard);
assert_eq!(v, None);
let v = map.insert("middle", 10, &guard);
assert_eq!(v, None);
let v = map.insert("tail", 100, &guard);
assert_eq!(v, None);
let e = map.get("head", &guard).unwrap();
assert_eq!(e, &0);
let e = map.get("middle", &guard).unwrap();
assert_eq!(e, &10);
let e = map.get("tail", &guard).unwrap();
assert_eq!(e, &100);
// check that replacing the keys returns the correct old value
let v = map.insert("head", 1, &guard);
assert_eq!(v, Some(&0));
let v = map.insert("middle", 11, &guard);
assert_eq!(v, Some(&10));
let v = map.insert("tail", 101, &guard);
assert_eq!(v, Some(&100));
// and updated the right value
let e = map.get("head", &guard).unwrap();
assert_eq!(e, &1);
let e = map.get("middle", &guard).unwrap();
assert_eq!(e, &11);
let e = map.get("tail", &guard).unwrap();
assert_eq!(e, &101);
// and that remove produces the right value
// note that we must remove them in a particular order
// so that we test all three node positions
let v = map.remove("middle", &guard);
assert_eq!(v, Some(&11));
let v = map.remove("tail", &guard);
assert_eq!(v, Some(&101));
let v = map.remove("head", &guard);
assert_eq!(v, Some(&1));
}
#[test]
fn update() {
let map = HashMap::<usize, usize>::new();
let guard = map.guard();
map.insert(42, 0, &guard);
let old = map.insert(42, 1, &guard);
assert_eq!(old, Some(&0));
{
let guard = map.guard();
let e = map.get(&42, &guard).unwrap();
assert_eq!(e, &1);
}
}
#[test]
fn compute_if_present() {
let map = HashMap::<usize, usize>::new();
let guard = map.guard();
map.insert(42, 0, &guard);
let new = map.compute_if_present(&42, |_, v| Some(v + 1), &guard);
assert_eq!(new, Some(&1));
{
let guard = map.guard();
let e = map.get(&42, &guard).unwrap();
assert_eq!(e, &1);
}
}
#[test]
fn compute_if_present_empty() {
let map = HashMap::<usize, usize>::new();
let guard = map.guard();
let new = map.compute_if_present(&42, |_, v| Some(v + 1), &guard);
assert!(new.is_none());
{
let guard = map.guard();
assert!(map.get(&42, &guard).is_none());
}
}
#[test]
fn compute_if_present_remove() {
let map = HashMap::<usize, usize>::new();
let guard = map.guard();
map.insert(42, 0, &guard);
let new = map.compute_if_present(&42, |_, _| None, &guard);
assert!(new.is_none());
{
let guard = map.guard();
assert!(map.get(&42, &guard).is_none());
}
}
#[test]
#[cfg_attr(miri, ignore)]
fn concurrent_insert() {
let map = Arc::new(HashMap::<usize, usize>::new());
let map1 = map.clone();
let t1 = std::thread::spawn(move || {
for i in 0..64 {
map1.insert(i, 0, &map1.guard());
}
});
let map2 = map.clone();
let t2 = std::thread::spawn(move || {
for i in 0..64 {
map2.insert(i, 1, &map2.guard());
}
});
t1.join().unwrap();
t2.join().unwrap();
let guard = map.guard();
for i in 0..64 {
let v = map.get(&i, &guard).unwrap();
assert!(v == &0 || v == &1);
let kv = map.get_key_value(&i, &guard).unwrap();
assert!(kv == (&i, &0) || kv == (&i, &1));
}
}
#[test]
#[cfg_attr(miri, ignore)]
fn concurrent_remove() {
let map = Arc::new(HashMap::<usize, usize>::new());
{
let guard = map.guard();
for i in 0..64 {
map.insert(i, i, &guard);
}
}
let map1 = map.clone();
let t1 = std::thread::spawn(move || {
let guard = map1.guard();
for i in 0..64 {
if let Some(v) = map1.remove(&i, &guard) {
assert_eq!(v, &i);
}
}
});
let map2 = map.clone();
let t2 = std::thread::spawn(move || {
let guard = map2.guard();
for i in 0..64 {
if let Some(v) = map2.remove(&i, &guard) {
assert_eq!(v, &i);
}
}
});
t1.join().unwrap();
t2.join().unwrap();
// after joining the threads, the map should be empty
let guard = map.guard();
for i in 0..64 {
assert!(map.get(&i, &guard).is_none());
}
}
#[test]
#[cfg_attr(miri, ignore)]
fn concurrent_compute_if_present() {
let map = Arc::new(HashMap::<usize, usize>::new());
{
let guard = map.guard();
for i in 0..64 {
map.insert(i, i, &guard);
}
}
let map1 = map.clone();
let t1 = std::thread::spawn(move || {
let guard = map1.guard();
for i in 0..64 {
let new = map1.compute_if_present(&i, |_, _| None, &guard);
assert!(new.is_none());
}
});
let map2 = map.clone();
let t2 = std::thread::spawn(move || {
let guard = map2.guard();
for i in 0..64 {
let new = map2.compute_if_present(&i, |_, _| None, &guard);
assert!(new.is_none());
}
});
t1.join().unwrap();
t2.join().unwrap();
// after joining the threads, the map should be empty
let guard = map.guard();
for i in 0..64 {
assert!(map.get(&i, &guard).is_none());
}
}
#[test]
#[cfg_attr(miri, ignore)]
fn concurrent_resize_and_get() {
let map = Arc::new(HashMap::<usize, usize>::new());
{
let guard = map.guard();
for i in 0..1024 {
map.insert(i, i, &guard);
}
}
let map1 = map.clone();
// t1 is using reserve to trigger a bunch of resizes
let t1 = std::thread::spawn(move || {
let guard = map1.guard();
// there should be 2 ** 10 capacity already, so trigger additional resizes
for power in 11..16 {
map1.reserve(1 << power, &guard);
}
});
let map2 = map.clone();
// t2 is retrieving existing keys a lot, attempting to encounter a BinEntry::Moved
let t2 = std::thread::spawn(move || {
let guard = map2.guard();
for _ in 0..32 {
for i in 0..1024 {
let v = map2.get(&i, &guard).unwrap();
assert_eq!(v, &i);
}
}
});
t1.join().unwrap();
t2.join().unwrap();
// make sure all the entries still exist after all the resizes
{
let guard = map.guard();
for i in 0..1024 {
let v = map.get(&i, &guard).unwrap();
assert_eq!(v, &i);
}
}
}
#[test]
fn current_kv_dropped() {
let dropped1 = Arc::new(0);
let dropped2 = Arc::new(0);
let map = HashMap::<Arc<usize>, Arc<usize>>::new();
map.insert(dropped1.clone(), dropped2.clone(), &map.guard());
assert_eq!(Arc::strong_count(&dropped1), 2);
assert_eq!(Arc::strong_count(&dropped2), 2);
drop(map);
// dropping the map should immediately drop (not deferred) all keys and values
assert_eq!(Arc::strong_count(&dropped1), 1);
assert_eq!(Arc::strong_count(&dropped2), 1);
}
#[test]
fn empty_maps_equal() {
let map1 = HashMap::<usize, usize>::new();
let map2 = HashMap::<usize, usize>::new();
assert_eq!(map1, map2);
assert_eq!(map2, map1);
}
#[test]
fn different_size_maps_not_equal() {
let map1 = HashMap::<usize, usize>::new();
let map2 = HashMap::<usize, usize>::new();
{
let guard1 = map1.guard();
let guard2 = map2.guard();
map1.insert(1, 0, &guard1);
map1.insert(2, 0, &guard1);
map1.insert(3, 0, &guard1);
map2.insert(1, 0, &guard2);
map2.insert(2, 0, &guard2);
}
assert_ne!(map1, map2);
assert_ne!(map2, map1);
}
#[test]
fn same_values_equal() {
let map1 = HashMap::<usize, usize>::new();
let map2 = HashMap::<usize, usize>::new();
{
map1.pin().insert(1, 0);
map2.pin().insert(1, 0);
}
assert_eq!(map1, map2);
assert_eq!(map2, map1);
}
#[test]
fn different_values_not_equal() {
let map1 = HashMap::<usize, usize>::new();
let map2 = HashMap::<usize, usize>::new();
{
map1.pin().insert(1, 0);
map2.pin().insert(1, 1);
}
assert_ne!(map1, map2);
assert_ne!(map2, map1);
}
#[test]
#[ignore]
// ignored because we cannot control when destructors run
fn drop_value() {
    let dropped1 = Arc::new(0);
    let dropped2 = Arc::new(1);
    let map = HashMap::<usize, Arc<usize>>::new();
    map.insert(42, dropped1.clone(), &map.guard());
    assert_eq!(Arc::strong_count(&dropped1), 2);
    assert_eq!(Arc::strong_count(&dropped2), 1);
    map.insert(42, dropped2.clone(), &map.guard());
    assert_eq!(Arc::strong_count(&dropped2), 2);
    drop(map);
    // The first value's Arc was dropped when the second insert replaced it
    assert_eq!(Arc::strong_count(&dropped1), 1);
    // The second value's Arc was dropped when the map itself was dropped
    assert_eq!(Arc::strong_count(&dropped2), 1);
}
#[test]
fn clone_map_empty() {
let map = HashMap::<&'static str, u32>::new();
let cloned_map = map.clone();
assert_eq!(map.len(), cloned_map.len());
assert_eq!(&map, &cloned_map);
assert_eq!(cloned_map.len(), 0);
}
#[test]
// Test that same values exists in both maps (original and cloned)
fn clone_map_filled() {
let map = HashMap::<&'static str, u32>::new();
map.insert("FooKey", 0, &map.guard());
map.insert("BarKey", 10, &map.guard());
let cloned_map = map.clone();
assert_eq!(map.len(), cloned_map.len());
assert_eq!(&map, &cloned_map);
// test that we are not mapping the same tables
map.insert("NewItem", 100, &map.guard());
assert_ne!(&map, &cloned_map);
}
#[test]
fn default() {
let map: HashMap<usize, usize> = Default::default();
let guard = map.guard();
map.insert(42, 0, &guard);
assert_eq!(map.get(&42, &guard), Some(&0));
}
#[test]
fn debug() {
let map: HashMap<usize, usize> = HashMap::new();
let guard = map.guard();
map.insert(42, 0, &guard);
map.insert(16, 8, &guard);
let formatted = format!("{:?}", map);
assert!(formatted == "{42: 0, 16: 8}" || formatted == "{16: 8, 42: 0}");
}
#[test]
// Extending the map through a shared reference must insert every pair.
fn extend() {
    let map: HashMap<usize, usize> = HashMap::new();
    let guard = map.guard();
    let mut expected: Vec<(usize, usize)> = vec![(42, 0), (16, 6), (38, 42)];
    expected.sort_unstable();
    (&map).extend(expected.iter().copied());
    let mut actual: Vec<(usize, usize)> =
        map.iter(&guard).map(|(k, v)| (*k, *v)).collect();
    actual.sort_unstable();
    assert_eq!(expected, actual);
}
#[test]
fn extend_ref() {
let map: HashMap<usize, usize> = HashMap::new();
let mut entries: Vec<(&usize, &usize)> = vec![(&42, &0), (&16, &6), (&38, &42)];
entries.sort();
(&map).extend(entries.clone().into_iter());
let guard = map.guard();
let mut collected: Vec<(&usize, &usize)> = map.iter(&guard).collect();
collected.sort();
assert_eq!(entries, collected);
}
#[test]
// Building a map from an iterator of borrowed pairs must yield exactly the
// source entries.
fn from_iter_ref() {
    use std::iter::FromIterator;
    let mut entries: Vec<(&usize, &usize)> = vec![(&42, &0), (&16, &6), (&38, &42)];
    entries.sort();
    let map: HashMap<usize, usize> = HashMap::from_iter(entries.clone().into_iter());
    let guard = map.guard();
    let mut collected: Vec<(&usize, &usize)> = map.iter(&guard).collect();
    collected.sort();
    // Bug fix: this previously asserted `entries == entries`, which is
    // vacuously true and never checked the map's actual contents.
    assert_eq!(entries, collected)
}
#[test]
fn from_iter_empty() {
use std::iter::FromIterator;
let entries: Vec<(usize, usize)> = Vec::new();
let map: HashMap<usize, usize> = HashMap::from_iter(entries.into_iter());
assert_eq!(map.len(), 0)
}
#[test]
fn retain_empty() {
let map = HashMap::<&'static str, u32>::new();
let guard = map.guard();
map.retain(|_, _| false, &guard);
assert_eq!(map.len(), 0);
}
#[test]
fn retain_all_false() {
let map: HashMap<u32, u32> = (0..10_u32).map(|x| (x, x)).collect();
let guard = map.guard();
map.retain(|_, _| false, &guard);
assert_eq!(map.len(), 0);
}
#[test]
fn retain_all_true() {
let size = 10usize;
let map: HashMap<usize, usize> = (0..size).map(|x| (x, x)).collect();
let guard = map.guard();
map.retain(|_, _| true, &guard);
assert_eq!(map.len(), size);
}
#[test]
fn retain_some() {
let map: HashMap<u32, u32> = (0..10).map(|x| (x, x)).collect();
let guard = map.guard();
let expected_map: HashMap<u32, u32> = (5..10).map(|x| (x, x)).collect();
map.retain(|_, v| *v >= 5, &guard);
assert_eq!(map.len(), 5);
assert_eq!(map, expected_map);
}
#[test]
fn retain_force_empty() {
let map = HashMap::<&'static str, u32>::new();
let guard = map.guard();
map.retain_force(|_, _| false, &guard);
assert_eq!(map.len(), 0);
}
#[test]
fn retain_force_some() {
let map: HashMap<u32, u32> = (0..10).map(|x| (x, x)).collect();
let guard = map.guard();
let expected_map: HashMap<u32, u32> = (5..10).map(|x| (x, x)).collect();
map.retain_force(|_, v| *v >= 5, &guard);
assert_eq!(map.len(), 5);
assert_eq!(map, expected_map);
}

525
vendor/flurry/tests/basic_ref.rs vendored Normal file
View File

@@ -0,0 +1,525 @@
use flurry::*;
use std::sync::Arc;
#[test]
fn pin() {
let map = HashMap::<usize, usize>::new();
let _pinned = map.pin();
}
#[test]
fn with_guard() {
let map = HashMap::<usize, usize>::new();
let guard = map.guard();
let _pinned = map.with_guard(&guard);
}
#[test]
fn clear() {
let map = HashMap::<usize, usize>::new();
let map = map.pin();
{
map.insert(0, 1);
map.insert(1, 1);
map.insert(2, 1);
map.insert(3, 1);
map.insert(4, 1);
}
map.clear();
assert!(map.is_empty());
}
#[test]
fn insert() {
let map = HashMap::<usize, usize>::new();
let map = map.pin();
let old = map.insert(42, 0);
assert!(old.is_none());
}
#[test]
fn get_empty() {
let map = HashMap::<usize, usize>::new();
let map = map.pin();
let e = map.get(&42);
assert!(e.is_none());
}
#[test]
fn get_key_value_empty() {
let map = HashMap::<usize, usize>::new();
let map = map.pin();
let e = map.get_key_value(&42);
assert!(e.is_none());
}
#[test]
fn remove_empty() {
let map = HashMap::<usize, usize>::new();
let map = map.pin();
let old = map.remove(&42);
assert!(old.is_none());
}
#[test]
fn insert_and_remove() {
let map = HashMap::<usize, usize>::new();
let map = map.pin();
map.insert(42, 0);
let old = map.remove(&42).unwrap();
assert_eq!(old, &0);
assert!(map.get(&42).is_none());
}
#[test]
fn insert_and_get() {
let map = HashMap::<usize, usize>::new();
map.pin().insert(42, 0);
{
let map = map.pin();
let e = map.get(&42).unwrap();
assert_eq!(e, &0);
}
}
#[test]
fn insert_and_get_key_value() {
let map = HashMap::<usize, usize>::new();
map.pin().insert(42, 0);
{
let map = map.pin();
let e = map.get_key_value(&42).unwrap();
assert_eq!(e, (&42, &0));
}
}
mod hasher;
use hasher::ZeroHashBuilder;
#[test]
fn one_bucket() {
let map = HashMap::<&'static str, usize, _>::with_hasher(ZeroHashBuilder);
let map = map.pin();
// we want to check that all operations work regardless on whether
// we are operating on the head of a bucket, the tail of the bucket,
// or somewhere in the middle.
let v = map.insert("head", 0);
assert_eq!(v, None);
let v = map.insert("middle", 10);
assert_eq!(v, None);
let v = map.insert("tail", 100);
assert_eq!(v, None);
let e = map.get("head").unwrap();
assert_eq!(e, &0);
let e = map.get("middle").unwrap();
assert_eq!(e, &10);
let e = map.get("tail").unwrap();
assert_eq!(e, &100);
// check that replacing the keys returns the correct old value
let v = map.insert("head", 1);
assert_eq!(v, Some(&0));
let v = map.insert("middle", 11);
assert_eq!(v, Some(&10));
let v = map.insert("tail", 101);
assert_eq!(v, Some(&100));
// and updated the right value
let e = map.get("head").unwrap();
assert_eq!(e, &1);
let e = map.get("middle").unwrap();
assert_eq!(e, &11);
let e = map.get("tail").unwrap();
assert_eq!(e, &101);
// and that remove produces the right value
// note that we must remove them in a particular order
// so that we test all three node positions
let v = map.remove("middle");
assert_eq!(v, Some(&11));
let v = map.remove("tail");
assert_eq!(v, Some(&101));
let v = map.remove("head");
assert_eq!(v, Some(&1));
}
#[test]
fn update() {
let map = HashMap::<usize, usize>::new();
let map1 = map.pin();
map1.insert(42, 0);
let old = map1.insert(42, 1);
assert_eq!(old, Some(&0));
{
let map2 = map.pin();
let e = map2.get(&42).unwrap();
assert_eq!(e, &1);
}
}
#[test]
fn compute_if_present() {
let map = HashMap::<usize, usize>::new();
let map1 = map.pin();
map1.insert(42, 0);
let new = map1.compute_if_present(&42, |_, v| Some(v + 1));
assert_eq!(new, Some(&1));
{
let map2 = map.pin();
let e = map2.get(&42).unwrap();
assert_eq!(e, &1);
}
}
#[test]
fn compute_if_present_empty() {
let map = HashMap::<usize, usize>::new();
let map1 = map.pin();
let new = map1.compute_if_present(&42, |_, v| Some(v + 1));
assert!(new.is_none());
{
let map2 = map.pin();
assert!(map2.get(&42).is_none());
}
}
#[test]
fn compute_if_present_remove() {
let map = HashMap::<usize, usize>::new();
let map1 = map.pin();
map1.insert(42, 0);
let new = map1.compute_if_present(&42, |_, _| None);
assert!(new.is_none());
{
let map2 = map.pin();
assert!(map2.get(&42).is_none());
}
}
#[test]
fn empty_maps_equal() {
let map1 = HashMap::<usize, usize>::new();
let map2 = HashMap::<usize, usize>::new();
assert_eq!(map1, map2.pin());
assert_eq!(map1.pin(), map2);
assert_eq!(map1.pin(), map2.pin());
assert_eq!(map2.pin(), map1.pin());
}
#[test]
fn different_size_maps_not_equal() {
let map1 = HashMap::<usize, usize>::new();
let map2 = HashMap::<usize, usize>::new();
{
let map1 = map1.pin();
let map2 = map2.pin();
map1.insert(1, 0);
map1.insert(2, 0);
map2.insert(1, 0);
}
assert_ne!(map1, map2.pin());
assert_ne!(map1.pin(), map2);
assert_ne!(map1.pin(), map2.pin());
assert_ne!(map2.pin(), map1.pin());
}
#[test]
fn same_values_equal() {
let map1 = HashMap::<usize, usize>::new();
let map2 = HashMap::<usize, usize>::new();
{
let map1 = map1.pin();
let map2 = map2.pin();
map1.insert(1, 0);
map2.insert(1, 0);
}
assert_eq!(map1, map2.pin());
assert_eq!(map1.pin(), map2);
assert_eq!(map1.pin(), map2.pin());
assert_eq!(map2.pin(), map1.pin());
}
#[test]
fn different_values_not_equal() {
let map1 = HashMap::<usize, usize>::new();
let map2 = HashMap::<usize, usize>::new();
{
let map1 = map1.pin();
let map2 = map2.pin();
map1.insert(1, 0);
map2.insert(1, 1);
}
assert_ne!(map1, map2.pin());
assert_ne!(map1.pin(), map2);
assert_ne!(map1.pin(), map2.pin());
assert_ne!(map2.pin(), map1.pin());
}
#[test]
#[ignore]
// ignored because we cannot control when destructors run
fn drop_value() {
let dropped1 = Arc::new(0);
let dropped2 = Arc::new(1);
let map = HashMap::<usize, Arc<usize>>::new();
let map = map.pin();
map.insert(42, dropped1.clone());
assert_eq!(Arc::strong_count(&dropped1), 2);
assert_eq!(Arc::strong_count(&dropped2), 1);
map.insert(42, dropped2.clone());
assert_eq!(Arc::strong_count(&dropped2), 2);
drop(map);
// First NotifyOnDrop was dropped when it was replaced by the second
assert_eq!(Arc::strong_count(&dropped1), 1);
// Second NotifyOnDrop was dropped when the map was dropped
assert_eq!(Arc::strong_count(&dropped2), 1);
}
#[test]
fn clone_map_empty() {
let map = HashMap::<&'static str, u32>::new();
let map = map.pin();
let cloned_map = map.clone();
assert_eq!(map.len(), cloned_map.len());
assert_eq!(&map, &cloned_map);
assert_eq!(cloned_map.len(), 0);
}
#[test]
// Test that same values exists in both maps (original and cloned)
fn clone_map_filled() {
let map_ref = HashMap::<&'static str, u32>::new();
let map_ref = map_ref.pin();
map_ref.insert("FooKey", 0);
map_ref.insert("BarKey", 10);
let cloned_map_ref = map_ref.clone();
assert_eq!(map_ref.len(), cloned_map_ref.len());
assert_eq!(&map_ref, &cloned_map_ref);
// test that both maps are equal,
// because the ref and the cloned ref, point to the same map
map_ref.insert("NewItem", 100);
assert_eq!(&map_ref, &cloned_map_ref);
}
#[test]
fn default() {
let map: HashMap<usize, usize> = Default::default();
let map = map.pin();
map.insert(42, 0);
assert_eq!(map.get(&42), Some(&0));
}
#[test]
fn debug() {
let map: HashMap<usize, usize> = HashMap::new();
let map = map.pin();
map.insert(42, 0);
map.insert(16, 8);
let formatted = format!("{:?}", map);
assert!(formatted == "{42: 0, 16: 8}" || formatted == "{16: 8, 42: 0}");
}
#[test]
// Same as basic.rs `from_iter_ref`, but iterating through a pinned reference.
fn from_iter_ref() {
    use std::iter::FromIterator;
    let mut entries: Vec<(&usize, &usize)> = vec![(&42, &0), (&16, &6), (&38, &42)];
    entries.sort();
    let map: HashMap<usize, usize> = HashMap::from_iter(entries.clone().into_iter());
    let map = map.pin();
    let mut collected: Vec<(&usize, &usize)> = map.iter().collect();
    collected.sort();
    // Bug fix: this previously asserted `entries == entries`, which is
    // vacuously true and never checked the map's actual contents.
    assert_eq!(entries, collected)
}
#[test]
fn from_iter_empty() {
use std::iter::FromIterator;
let entries: Vec<(usize, usize)> = Vec::new();
let map: HashMap<usize, usize> = HashMap::from_iter(entries.into_iter());
let map = map.pin();
assert_eq!(map.len(), 0)
}
#[test]
fn retain_empty() {
let map = HashMap::<&'static str, u32>::new();
let map = map.pin();
map.retain(|_, _| false);
assert_eq!(map.len(), 0);
}
#[test]
// Retaining with an always-false predicate must empty the map.
fn retain_all_false() {
    // Use the `0..10_u32` literal suffix form: the previous `0..10 as u32`
    // parsed as `0..(10 as u32)` — same behavior, but easy to misread, and
    // inconsistent with the basic.rs counterpart of this test.
    let map: HashMap<u32, u32> = (0..10_u32).map(|x| (x, x)).collect();
    let map = map.pin();
    map.retain(|_, _| false);
    assert_eq!(map.len(), 0);
}
#[test]
fn retain_all_true() {
let size = 10usize;
let map: HashMap<usize, usize> = (0..size).map(|x| (x, x)).collect();
let map = map.pin();
map.retain(|_, _| true);
assert_eq!(map.len(), size);
}
#[test]
fn retain_some() {
let map: HashMap<u32, u32> = (0..10).map(|x| (x, x)).collect();
let map = map.pin();
let expected_map: HashMap<u32, u32> = (5..10).map(|x| (x, x)).collect();
let expected_map = expected_map.pin();
map.retain(|_, v| *v >= 5);
assert_eq!(map.len(), 5);
assert_eq!(map, expected_map);
}
#[test]
fn retain_force_empty() {
let map = HashMap::<&'static str, u32>::new();
let map = map.pin();
map.retain_force(|_, _| false);
assert_eq!(map.len(), 0);
}
#[test]
fn retain_force_some() {
let map: HashMap<u32, u32> = (0..10).map(|x| (x, x)).collect();
let map = map.pin();
let expected_map: HashMap<u32, u32> = (5..10).map(|x| (x, x)).collect();
map.retain_force(|_, v| *v >= 5);
assert_eq!(map.len(), 5);
assert_eq!(map, expected_map);
}
#[test]
#[cfg_attr(miri, ignore)]
fn concurrent_insert() {
let map = Arc::new(HashMap::<usize, usize>::new());
let map1 = map.clone();
let t1 = std::thread::spawn(move || {
for i in 0..64 {
map1.pin().insert(i, 0);
}
});
let map2 = map.clone();
let t2 = std::thread::spawn(move || {
for i in 0..64 {
map2.pin().insert(i, 0);
}
});
t1.join().unwrap();
t2.join().unwrap();
let map = map.pin();
for i in 0..64 {
let v = map.get(&i).unwrap();
assert!(v == &0 || v == &1);
let kv = map.get_key_value(&i).unwrap();
assert!(kv == (&i, &0) || kv == (&i, &1));
}
}
#[test]
#[cfg_attr(miri, ignore)]
fn concurrent_remove() {
let map = Arc::new(HashMap::<usize, usize>::new());
{
let map = map.pin();
for i in 0..64 {
map.insert(i, i);
}
}
let map1 = map.clone();
let t1 = std::thread::spawn(move || {
let map1 = map1.pin();
for i in 0..64 {
if let Some(v) = map1.remove(&i) {
assert_eq!(v, &i);
}
}
});
let map2 = map.clone();
let t2 = std::thread::spawn(move || {
let map2 = map2.pin();
for i in 0..64 {
if let Some(v) = map2.remove(&i) {
assert_eq!(v, &i);
}
}
});
t1.join().unwrap();
t2.join().unwrap();
// after joining the threads, the map should be empty
let map = map.pin();
for i in 0..64 {
assert!(map.get(&i).is_none());
}
}
#[test]
#[cfg_attr(miri, ignore)]
fn concurrent_compute_if_present() {
let map = Arc::new(HashMap::<usize, usize>::new());
{
let map = map.pin();
for i in 0..64 {
map.insert(i, i);
}
}
let map1 = map.clone();
let t1 = std::thread::spawn(move || {
let map1 = map1.pin();
for i in 0..64 {
let new = map1.compute_if_present(&i, |_, _| None);
assert!(new.is_none());
}
});
let map2 = map.clone();
let t2 = std::thread::spawn(move || {
let map2 = map2.pin();
for i in 0..64 {
let new = map2.compute_if_present(&i, |_, _| None);
assert!(new.is_none());
}
});
t1.join().unwrap();
t2.join().unwrap();
// after joining the threads, the map should be empty
let map = map.pin();
for i in 0..64 {
assert!(map.get(&i).is_none());
}
}

141
vendor/flurry/tests/borrow.rs vendored Normal file
View File

@@ -0,0 +1,141 @@
use flurry::*;
use std::sync::Arc;
// These tests all use `K = String` and `Q = str` for `Borrow`-based lookups
#[test]
fn get_empty() {
let map = HashMap::<String, usize>::new();
{
let guard = map.guard();
let e = map.get("foo", &guard);
assert!(e.is_none());
}
}
#[test]
fn remove_empty() {
let map = HashMap::<String, usize>::new();
{
let guard = map.guard();
let old = map.remove("foo", &guard);
assert!(old.is_none());
}
}
#[test]
fn insert_and_remove() {
let map = HashMap::<String, usize>::new();
{
let guard = map.guard();
map.insert("foo".to_string(), 0, &guard);
let old = map.remove("foo", &guard).unwrap();
assert_eq!(old, &0);
assert!(map.get("foo", &guard).is_none());
}
}
#[test]
fn insert_and_get() {
let map = HashMap::<String, usize>::new();
map.insert("foo".to_string(), 0, &map.guard());
{
let guard = map.guard();
let e = map.get("foo", &guard).unwrap();
assert_eq!(e, &0);
}
}
#[test]
fn update() {
let map = HashMap::<String, usize>::new();
let guard = map.guard();
map.insert("foo".to_string(), 0, &guard);
let old = map.insert("foo".to_string(), 1, &guard);
assert_eq!(old, Some(&0));
{
let guard = map.guard();
let e = map.get("foo", &guard).unwrap();
assert_eq!(e, &1);
}
}
#[test]
#[cfg_attr(miri, ignore)]
fn concurrent_insert() {
let map = Arc::new(HashMap::<String, usize>::new());
let keys = Arc::new((0..64).map(|i| i.to_string()).collect::<Vec<_>>());
let map1 = map.clone();
let keys1 = keys.clone();
let t1 = std::thread::spawn(move || {
for key in keys1.iter() {
map1.insert(key.clone(), 0, &map1.guard());
}
});
let map2 = map.clone();
let keys2 = keys.clone();
let t2 = std::thread::spawn(move || {
for key in keys2.iter() {
map2.insert(key.clone(), 1, &map2.guard());
}
});
t1.join().unwrap();
t2.join().unwrap();
let guard = map.guard();
for key in keys.iter() {
let v = map.get(key.as_str(), &guard).unwrap();
assert!(v == &0 || v == &1);
}
}
#[test]
#[cfg_attr(miri, ignore)]
fn concurrent_remove() {
let map = Arc::new(HashMap::<String, usize>::new());
let keys = Arc::new((0..64).map(|i| i.to_string()).collect::<Vec<_>>());
{
let guard = map.guard();
for (i, key) in keys.iter().enumerate() {
map.insert(key.clone(), i, &guard);
}
}
let map1 = map.clone();
let keys1 = keys.clone();
let t1 = std::thread::spawn(move || {
let guard = map1.guard();
for (i, key) in keys1.iter().enumerate() {
if let Some(v) = map1.remove(key.as_str(), &guard) {
assert_eq!(v, &i);
}
}
});
let map2 = map.clone();
let keys2 = keys.clone();
let t2 = std::thread::spawn(move || {
let guard = map2.guard();
for (i, key) in keys2.iter().enumerate() {
if let Some(v) = map2.remove(key.as_str(), &guard) {
assert_eq!(v, &i);
}
}
});
t1.join().unwrap();
t2.join().unwrap();
// after joining the threads, the map should be empty
let guard = map.guard();
for key in keys.iter() {
assert!(map.get(key.as_str(), &guard).is_none());
}
}

1
vendor/flurry/tests/cuckoo/README.md vendored Normal file
View File

@@ -0,0 +1 @@
These tests are ported from https://github.com/efficient/libcuckoo/tree/master/tests

1
vendor/flurry/tests/cuckoo/main.rs vendored Normal file
View File

@@ -0,0 +1 @@
mod stress;

190
vendor/flurry/tests/cuckoo/stress.rs vendored Normal file
View File

@@ -0,0 +1,190 @@
// https://github.com/efficient/libcuckoo/tree/master/tests/stress-tests
use flurry::*;
use parking_lot::Mutex;
use rand::distributions::{Distribution, Uniform};
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
use std::thread;
/// Number of keys and values to work with.
const NUM_KEYS: usize = 1 << 12;
/// Number of threads that should be started.
const NUM_THREADS: usize = 4;
/// How long the stress test will run (in milliseconds).
const TEST_LEN: u64 = 5000;
type Key = usize;
type Value = usize;
// Shared state for the stress test: two maps that are mutated identically,
// plus shadow bookkeeping used to validate each operation's observed outcome.
struct Environment {
    // The two tables under test; every operation is applied to both.
    table1: HashMap<Key, Value>,
    table2: HashMap<Key, Value>,
    // All candidate keys (index i holds key i).
    keys: Vec<Key>,
    // Last value written for each key index, one vector per table.
    vals1: Mutex<Vec<Value>>,
    vals2: Mutex<Vec<Value>>,
    // Distributions for picking a random key index and random values.
    ind_dist: Uniform<usize>,
    val_dist1: Uniform<Value>,
    val_dist2: Uniform<Value>,
    // Whether the key at each index is currently expected to be present.
    in_table: Mutex<Vec<bool>>,
    // Per-index claim flags: a worker CAS-es an index to true before
    // touching its key, so at most one worker mutates a key at a time.
    in_use: Mutex<Vec<AtomicBool>>,
    // Set once the test duration elapses; workers then exit their loops.
    finished: AtomicBool,
}
impl Environment {
pub fn new() -> Self {
let mut keys = Vec::with_capacity(NUM_KEYS);
let mut in_use = Vec::with_capacity(NUM_KEYS);
for i in 0..NUM_KEYS {
keys.push(i);
in_use.push(AtomicBool::new(false));
}
Self {
table1: HashMap::new(),
table2: HashMap::new(),
keys,
vals1: Mutex::new(vec![0usize; NUM_KEYS]),
vals2: Mutex::new(vec![0usize; NUM_KEYS]),
ind_dist: Uniform::from(0..NUM_KEYS - 1),
val_dist1: Uniform::from(Value::min_value()..Value::max_value()),
val_dist2: Uniform::from(Value::min_value()..Value::max_value()),
in_table: Mutex::new(vec![false; NUM_KEYS]),
in_use: Mutex::new(in_use),
finished: AtomicBool::new(false),
}
}
}
fn stress_insert_thread(env: Arc<Environment>) {
let mut rng = rand::thread_rng();
let guard1 = env.table1.guard();
let guard2 = env.table2.guard();
while !env.finished.load(Ordering::SeqCst) {
let idx = env.ind_dist.sample(&mut rng);
let in_use = env.in_use.lock();
if (*in_use)[idx]
.compare_exchange(false, true, Ordering::SeqCst, Ordering::Relaxed)
.is_ok()
{
let key = env.keys[idx];
let val1 = env.val_dist1.sample(&mut rng);
let val2 = env.val_dist2.sample(&mut rng);
let res1 = if !env.table1.contains_key(&key, &guard1) {
env.table1
.insert(key, val1, &guard1)
.map_or(true, |_| false)
} else {
false
};
let res2 = if !env.table2.contains_key(&key, &guard2) {
env.table2
.insert(key, val2, &guard2)
.map_or(true, |_| false)
} else {
false
};
let mut in_table = env.in_table.lock();
assert_ne!(res1, (*in_table)[idx]);
assert_ne!(res2, (*in_table)[idx]);
if res1 {
assert_eq!(Some(&val1), env.table1.get(&key, &guard1));
assert_eq!(Some(&val2), env.table2.get(&key, &guard2));
let mut vals1 = env.vals1.lock();
let mut vals2 = env.vals2.lock();
(*vals1)[idx] = val1;
(*vals2)[idx] = val2;
(*in_table)[idx] = true;
}
(*in_use)[idx].swap(false, Ordering::SeqCst);
}
}
}
fn stress_delete_thread(env: Arc<Environment>) {
let mut rng = rand::thread_rng();
let guard1 = env.table1.guard();
let guard2 = env.table2.guard();
while !env.finished.load(Ordering::SeqCst) {
let idx = env.ind_dist.sample(&mut rng);
let in_use = env.in_use.lock();
if (*in_use)[idx]
.compare_exchange(false, true, Ordering::SeqCst, Ordering::Relaxed)
.is_ok()
{
let key = env.keys[idx];
let res1 = env.table1.remove(&key, &guard1).map_or(false, |_| true);
let res2 = env.table2.remove(&key, &guard2).map_or(false, |_| true);
let mut in_table = env.in_table.lock();
assert_eq!(res1, (*in_table)[idx]);
assert_eq!(res2, (*in_table)[idx]);
if res1 {
assert!(env.table1.get(&key, &guard1).is_none());
assert!(env.table2.get(&key, &guard2).is_none());
(*in_table)[idx] = false;
}
(*in_use)[idx].swap(false, Ordering::SeqCst);
}
}
}
fn stress_find_thread(env: Arc<Environment>) {
let mut rng = rand::thread_rng();
let guard1 = env.table1.guard();
let guard2 = env.table2.guard();
while !env.finished.load(Ordering::SeqCst) {
let idx = env.ind_dist.sample(&mut rng);
let in_use = env.in_use.lock();
if (*in_use)[idx]
.compare_exchange(false, true, Ordering::SeqCst, Ordering::Relaxed)
.is_ok()
{
let key = env.keys[idx];
let in_table = env.in_table.lock();
let val1 = (*env.vals1.lock())[idx];
let val2 = (*env.vals2.lock())[idx];
let value = env.table1.get(&key, &guard1);
if value.is_some() {
assert_eq!(&val1, value.unwrap());
assert!((*in_table)[idx]);
}
let value = env.table2.get(&key, &guard2);
if value.is_some() {
assert_eq!(&val2, value.unwrap());
assert!((*in_table)[idx]);
}
(*in_use)[idx].swap(false, Ordering::SeqCst);
}
}
}
#[test]
#[cfg_attr(miri, ignore)]
fn stress_test() {
    // Spawn NUM_THREADS triples of insert/delete/find workers that hammer
    // both tables concurrently for TEST_LEN milliseconds, then verify the
    // shadow bookkeeping agrees with the tables' final contents.
    let root = Arc::new(Environment::new());
    let mut threads = Vec::new();
    for _ in 0..NUM_THREADS {
        let env = Arc::clone(&root);
        threads.push(thread::spawn(move || stress_insert_thread(env)));
        let env = Arc::clone(&root);
        threads.push(thread::spawn(move || stress_delete_thread(env)));
        let env = Arc::clone(&root);
        threads.push(thread::spawn(move || stress_find_thread(env)));
    }
    thread::sleep(std::time::Duration::from_millis(TEST_LEN));
    root.finished.swap(true, Ordering::SeqCst);
    for t in threads {
        t.join().expect("failed to join thread");
    }
    // Every key marked present in the shadow state must account for exactly
    // one entry in each table.
    let in_table = &*root.in_table.lock();
    let num_filled = in_table.iter().filter(|b| **b).count();
    assert_eq!(num_filled, root.table1.len());
    assert_eq!(num_filled, root.table2.len());
}

64
vendor/flurry/tests/hasher.rs vendored Normal file
View File

@@ -0,0 +1,64 @@
use flurry::{DefaultHashBuilder, HashMap};
use std::hash::{BuildHasher, BuildHasherDefault, Hasher};
// A hasher that maps every input to hash 0, forcing all keys to collide
// into a single bucket so that bucket-chain handling gets exercised.
#[derive(Default)]
pub struct ZeroHasher;
pub struct ZeroHashBuilder;
impl Hasher for ZeroHasher {
    fn finish(&self) -> u64 {
        0
    }
    // Input bytes are ignored; the hash is unconditionally zero.
    fn write(&mut self, _: &[u8]) {}
}
impl BuildHasher for ZeroHashBuilder {
    type Hasher = ZeroHasher;
    fn build_hasher(&self) -> ZeroHasher {
        ZeroHasher
    }
}
// Exercises insert/contains_key under an arbitrary hasher `S`: every key in
// the inserted range must be found, and keys just outside it must not be.
fn check<S: BuildHasher + Default>() {
    // Miri interprets code slowly, so keep the range tiny there.
    let range = if cfg!(miri) { 0..16 } else { 0..1000 };
    let map = HashMap::<i32, i32, S>::default();
    let guard = map.guard();
    for key in range.clone() {
        map.insert(key, key, &guard);
    }
    // Probes below the range must miss.
    assert!(!map.contains_key(&i32::min_value(), &guard));
    assert!(!map.contains_key(&(range.start - 1), &guard));
    // Every inserted key must hit.
    assert!(range.clone().all(|key| map.contains_key(&key, &guard)));
    // Probes above the range must miss.
    assert!(!map.contains_key(&range.end, &guard));
    assert!(!map.contains_key(&i32::max_value(), &guard));
}
#[test]
fn test_default_hasher() {
check::<DefaultHashBuilder>();
}
#[test]
fn test_zero_hasher() {
check::<BuildHasherDefault<ZeroHasher>>();
}
#[test]
fn test_max_hasher() {
#[derive(Default)]
struct MaxHasher;
impl Hasher for MaxHasher {
fn finish(&self) -> u64 {
u64::max_value()
}
fn write(&mut self, _: &[u8]) {}
}
check::<BuildHasherDefault<MaxHasher>>();
}

1
vendor/flurry/tests/jdk/README.md vendored Normal file
View File

@@ -0,0 +1 @@
These tests are ported from https://hg.openjdk.java.net/jdk/jdk13/file/tip/test/jdk/java/util/concurrent/ConcurrentHashMap

View File

@@ -0,0 +1,62 @@
use flurry::HashMap;
use rand::Rng;
use std::sync::Arc;
/// Number of entries for each thread to place in the map.
const NUM_ENTRIES: usize = 128;
/// Number of iterations for each test
const ITERATIONS: usize = 64;
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
struct KeyVal {
_data: usize,
}
impl KeyVal {
pub fn new() -> Self {
let mut rng = rand::thread_rng();
Self { _data: rng.gen() }
}
}
fn insert(map: Arc<HashMap<KeyVal, KeyVal>>, k: KeyVal) {
map.insert(k, k, &map.guard());
}
#[test]
#[cfg_attr(miri, ignore)]
// Runs the shared concurrency harness with plain `insert` as the associator.
// Note: a spurious, unused `<'g>` lifetime parameter was removed from the
// signature; test functions take no arguments, so it served no purpose.
fn test_concurrent_insert() {
    test(insert);
}
fn test<F>(associator: F)
where
F: Fn(Arc<HashMap<KeyVal, KeyVal>>, KeyVal) + Send + Copy + 'static,
{
for _ in 0..ITERATIONS {
test_once(associator);
}
}
fn test_once<F>(associator: F)
where
F: Fn(Arc<HashMap<KeyVal, KeyVal>>, KeyVal) + Send + Copy + 'static,
{
let map = Arc::new(HashMap::new());
let mut threads = Vec::new();
for _ in 0..num_cpus::get().min(8) {
let map = map.clone();
let handle = std::thread::spawn(move || {
for _ in 0..NUM_ENTRIES {
let key = KeyVal::new();
associator(map.clone(), key);
assert!(map.contains_key(&key, &map.guard()));
}
});
threads.push(handle);
}
for t in threads {
t.join().expect("failed to join thread");
}
}

View File

@@ -0,0 +1,52 @@
use flurry::HashMap;
use std::{sync::Arc, thread};
/// Number of entries for each thread to place in the map.
const NUM_ENTRIES: usize = 16;
/// Number of iterations for each test
const ITERATIONS: usize = 256;
/// Number of rounds every thread perfoms per entry.
const ROUNDS: usize = 32;
#[test]
#[cfg_attr(miri, ignore)]
fn test_concurrent_contains_key() {
let map = HashMap::new();
let mut content = [0; NUM_ENTRIES];
{
let guard = map.guard();
for k in 0..NUM_ENTRIES {
map.insert(k, k, &guard);
content[k] = k;
}
}
test(content, Arc::new(map));
}
fn test(content: [usize; NUM_ENTRIES], map: Arc<HashMap<usize, usize>>) {
for _ in 0..ITERATIONS {
test_once(content, map.clone());
}
}
fn test_once(content: [usize; NUM_ENTRIES], map: Arc<HashMap<usize, usize>>) {
let mut threads = Vec::new();
for _ in 0..num_cpus::get().min(8) {
let map = map.clone();
let content = content;
let handle = thread::spawn(move || {
let guard = map.guard();
let map = map.clone();
for i in 0..NUM_ENTRIES * ROUNDS {
let key = content[i % content.len()];
assert!(map.contains_key(&key, &guard));
}
});
threads.push(handle);
}
for t in threads {
t.join().expect("failed to join thread");
}
}

3
vendor/flurry/tests/jdk/main.rs vendored Normal file
View File

@@ -0,0 +1,3 @@
mod concurrent_associate;
mod concurrent_contains;
mod map_check;

201
vendor/flurry/tests/jdk/map_check.rs vendored Normal file
View File

@@ -0,0 +1,201 @@
use flurry::*;
use rand::prelude::*;
use std::hash::Hash;
#[cfg(not(miri))]
const SIZE: usize = 50_000;
#[cfg(miri)]
const SIZE: usize = 12;
// there must be more things absent than present!
#[cfg(not(miri))]
const ABSENT_SIZE: usize = 1 << 17;
#[cfg(miri)]
const ABSENT_SIZE: usize = 1 << 5;
const ABSENT_MASK: usize = ABSENT_SIZE - 1;
fn t1<K, V>(map: &HashMap<K, V>, keys: &[K], expect: usize)
where
K: Sync + Send + Clone + Hash + Ord,
V: Sync + Send,
{
let mut sum = 0;
let iters = 4;
let guard = map.guard();
for _ in 0..iters {
for key in keys {
if map.get(key, &guard).is_some() {
sum += 1;
}
}
}
assert_eq!(sum, expect * iters);
}
fn t2<K>(map: &HashMap<K, usize>, keys: &[K], expect: usize)
where
K: Sync + Send + Copy + Hash + Ord + std::fmt::Display,
{
let mut sum = 0;
let guard = map.guard();
for key in keys {
if map.remove(key, &guard).is_some() {
sum += 1;
}
}
assert_eq!(sum, expect);
}
// Inserts every key with value 0 and checks that exactly `expect` of the
// insertions were for keys not already present.
fn t3<K>(map: &HashMap<K, usize>, keys: &[K], expect: usize)
where
    K: Sync + Send + Copy + Hash + Ord,
{
    let guard = map.guard();
    let newly_added = keys
        .iter()
        .filter(|&&key| map.insert(key, 0, &guard).is_none())
        .count();
    assert_eq!(newly_added, expect);
}
// Checks that exactly `expect` of the given keys are present in the map.
fn t4<K>(map: &HashMap<K, usize>, keys: &[K], expect: usize)
where
    K: Sync + Send + Copy + Hash + Ord,
{
    let guard = map.guard();
    let present = keys
        .iter()
        .filter(|&key| map.contains_key(key, &guard))
        .count();
    assert_eq!(present, expect);
}
fn t5<K>(map: &HashMap<K, usize>, keys: &[K], expect: usize)
where
K: Sync + Send + Copy + Hash + Ord,
{
let mut sum = 0;
let guard = map.guard();
let mut i = keys.len() as isize - 2;
while i >= 0 {
if map.remove(&keys[i as usize], &guard).is_some() {
sum += 1;
}
i -= 2;
}
assert_eq!(sum, expect);
}
fn t6<K, V>(map: &HashMap<K, V>, keys1: &[K], keys2: &[K], expect: usize)
where
K: Sync + Send + Clone + Hash + Ord,
V: Sync + Send,
{
let mut sum = 0;
let guard = map.guard();
for i in 0..expect {
if map.get(&keys1[i], &guard).is_some() {
sum += 1;
}
if map.get(&keys2[i & ABSENT_MASK], &guard).is_some() {
sum += 1;
}
}
assert_eq!(sum, expect);
}
fn t7<K>(map: &HashMap<K, usize>, k1: &[K], k2: &[K])
where
K: Sync + Send + Copy + Hash + Ord,
{
let mut sum = 0;
let guard = map.guard();
for i in 0..k1.len() {
if map.contains_key(&k1[i], &guard) {
sum += 1;
}
if map.contains_key(&k2[i], &guard) {
sum += 1;
}
}
assert_eq!(sum, k1.len());
}
fn ittest1<K>(map: &HashMap<K, usize>, expect: usize)
where
K: Sync + Send + Copy + Hash + Eq,
{
let mut sum = 0;
let guard = map.guard();
for _ in map.keys(&guard) {
sum += 1;
}
assert_eq!(sum, expect);
}
fn ittest2<K>(map: &HashMap<K, usize>, expect: usize)
where
K: Sync + Send + Copy + Hash + Eq,
{
let mut sum = 0;
let guard = map.guard();
for _ in map.values(&guard) {
sum += 1;
}
assert_eq!(sum, expect);
}
fn ittest3<K>(map: &HashMap<K, usize>, expect: usize)
where
K: Sync + Send + Copy + Hash + Eq,
{
let mut sum = 0;
let guard = map.guard();
for _ in map.iter(&guard) {
sum += 1;
}
assert_eq!(sum, expect);
}
#[test]
fn everything() {
let mut rng = rand::thread_rng();
let map = HashMap::new();
let mut keys: Vec<_> = (0..ABSENT_SIZE + SIZE).collect();
keys.shuffle(&mut rng);
let absent_keys = &keys[0..ABSENT_SIZE];
let keys = &keys[ABSENT_SIZE..];
// put (absent)
t3(&map, keys, SIZE);
// put (present)
t3(&map, keys, 0);
// contains_key (present & absent)
t7(&map, keys, absent_keys);
// contains_key (present)
t4(&map, keys, SIZE);
// contains_key (absent)
t4(&map, absent_keys, 0);
// get
t6(&map, keys, absent_keys, SIZE);
// get (present)
t1(&map, keys, SIZE);
// get (absent)
t1(&map, absent_keys, 0);
// remove (absent)
t2(&map, absent_keys, 0);
// remove (present)
t5(&map, keys, SIZE / 2);
// put (half present)
t3(&map, keys, SIZE / 2);
// iter, keys, values (present)
ittest1(&map, SIZE);
ittest2(&map, SIZE);
ittest3(&map, SIZE);
}

1
vendor/flurry/tests/jsr166/README.md vendored Normal file
View File

@@ -0,0 +1 @@
These tests are ported from http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/test/tck/

61
vendor/flurry/tests/jsr166/main.rs vendored Normal file
View File

@@ -0,0 +1,61 @@
use flurry::*;
use std::iter::FromIterator;
const ITER: [(usize, &'static str); 5] = [(1, "A"), (2, "B"), (3, "C"), (4, "D"), (5, "E")];
#[test]
fn test_from_iter() {
let guard = unsafe { seize::Guard::unprotected() };
let map1 = from_iter_contron();
let map2: HashMap<_, _> = HashMap::from_iter(ITER.iter());
// TODO: improve when `Map: Eq`
let mut fst: Vec<_> = map1.iter(&guard).collect();
let mut snd: Vec<_> = map2.iter(&guard).collect();
fst.sort();
snd.sort();
assert_eq!(fst, snd);
}
fn from_iter_contron() -> HashMap<usize, &'static str> {
let guard = unsafe { seize::Guard::unprotected() };
let map = HashMap::with_capacity(5);
assert!(map.is_empty());
for (key, value) in &ITER {
map.insert(*key, *value, &guard);
}
assert!(!map.is_empty());
assert_eq!(ITER.len(), map.len());
map
}
fn map5() -> HashMap<isize, String> {
let map = HashMap::new();
// TODO: add is_empty check once method exists
// assert!(map.is_empty());
let guard = map.guard();
map.insert(1, "A".to_owned(), &guard);
map.insert(2, "B".to_owned(), &guard);
map.insert(3, "C".to_owned(), &guard);
map.insert(4, "D".to_owned(), &guard);
map.insert(5, "E".to_owned(), &guard);
// TODO: add is_empty and len check once methods exist
// assert!(!map.is_empty());
// assert_eq!(map.len(), 5);
drop(guard);
map
}
// remove removes the correct key-value pair from the map
#[test]
fn test_remove() {
let map = map5();
let guard = map.guard();
map.remove(&5, &guard);
// TODO: add len check once method exists
// assert_eq!(map.len(), 4);
assert!(!map.contains_key(&5, &guard));
}

18
vendor/flurry/tests/regressions.rs vendored Normal file
View File

@@ -0,0 +1,18 @@
use flurry::*;
use rand::{thread_rng, Rng};
#[test]
// Regression test for issue 90: many repeated `try_insert` calls over a
// small key space must complete without panicking.
fn issue90() {
    #[cfg(not(miri))]
    const ITERATIONS: usize = 100_000;
    #[cfg(miri)]
    const ITERATIONS: usize = 100;
    let mut rng = thread_rng();
    let map = HashMap::new();
    let guard = map.guard();
    for _ in 0..ITERATIONS {
        let key = rng.gen_range(0..1000);
        // Collisions are expected and fine; only panics would be a failure.
        let _ = map.try_insert(key, key, &guard);
    }
}

319
vendor/flurry/tests/set.rs vendored Normal file
View File

@@ -0,0 +1,319 @@
use flurry::HashSet;
use std::sync::Arc;
// Constructing an empty set must not panic.
#[test]
fn new() {
    let _set: HashSet<usize> = HashSet::new();
}
// Inserting a fresh value reports `true`; re-inserting it reports `false`.
#[test]
fn insert() {
    let set = HashSet::new();
    let guard = set.guard();
    assert!(set.insert(42, &guard));
    assert!(!set.insert(42, &guard));
}
// An empty set contains nothing.
#[test]
fn no_contains() {
    let set = HashSet::<usize>::new();
    let guard = set.guard();
    assert!(!set.contains(&42, &guard));
}
// `get` on a value that was never inserted yields `None`.
#[test]
fn get_no_contains() {
    let set = HashSet::<usize>::new();
    let guard = set.guard();
    assert!(set.get(&42, &guard).is_none());
}
// Removing from an empty set reports that nothing was removed.
#[test]
fn remove_empty() {
    let set = HashSet::<usize>::new();
    let guard = set.guard();
    assert!(!set.remove(&42, &guard));
}
// A value that was inserted can be removed, after which it is gone.
#[test]
fn insert_and_remove() {
    let set = HashSet::new();
    let guard = set.guard();
    set.insert(42, &guard);
    assert!(set.remove(&42, &guard));
    assert!(!set.contains(&42, &guard));
}
// A value inserted under one guard is visible under a later guard.
#[test]
fn insert_and_contains() {
    let set = HashSet::new();
    set.insert(42, &set.guard());
    let guard = set.guard();
    assert!(set.contains(&42, &guard));
}
// `get` returns a reference to the stored value after insertion.
#[test]
fn insert_and_get() {
    let set = HashSet::new();
    set.insert(42, &set.guard());
    let guard = set.guard();
    assert_eq!(set.get(&42, &guard), Some(&42));
}
// Re-inserting an existing value is not "new", and the value stays present.
#[test]
fn update() {
    let set = HashSet::new();
    let guard = set.guard();
    set.insert(42, &guard);
    assert!(!set.insert(42, &guard));
    assert!(set.contains(&42, &guard));
}
// Two threads racing to insert the same range must leave every value
// present and retrievable.
#[test]
#[cfg_attr(miri, ignore)]
fn concurrent_insert() {
    let set = Arc::new(HashSet::<usize>::new());

    let writers: Vec<_> = (0..2)
        .map(|_| {
            let set = Arc::clone(&set);
            std::thread::spawn(move || {
                for i in 0..64 {
                    set.insert(i, &set.guard());
                }
            })
        })
        .collect();
    for handle in writers {
        handle.join().unwrap();
    }

    let guard = set.guard();
    for i in 0..64 {
        assert!(set.contains(&i, &guard));
        assert_eq!(set.get(&i, &guard), Some(&i));
    }
}
// Two threads racing to remove the same pre-populated range must leave
// the set empty; double-removal of a value is harmless.
#[test]
#[cfg_attr(miri, ignore)]
fn concurrent_remove() {
    let set = Arc::new(HashSet::<usize>::new());
    {
        let guard = set.guard();
        for i in 0..64 {
            set.insert(i, &guard);
        }
    }

    let removers: Vec<_> = (0..2)
        .map(|_| {
            let set = Arc::clone(&set);
            std::thread::spawn(move || {
                let guard = set.guard();
                for i in 0..64 {
                    set.remove(&i, &guard);
                }
            })
        })
        .collect();
    for handle in removers {
        handle.join().unwrap();
    }

    // after joining the threads, the set should be empty
    let guard = set.guard();
    for i in 0..64 {
        assert!(!set.contains(&i, &guard));
    }
}
// Two freshly created sets compare equal, in both directions.
#[test]
fn empty_sets_equal() {
    let a = HashSet::<usize>::new();
    let b = HashSet::<usize>::new();
    assert_eq!(a, b);
    assert_eq!(b, a);
}
// Sets holding different numbers of elements are unequal.
#[test]
fn different_size_maps_not_equal() {
    let a = HashSet::<usize>::new();
    let b = HashSet::<usize>::new();
    a.pin().insert(1);
    a.pin().insert(2);
    b.pin().insert(1);
    assert_ne!(a, b);
    assert_ne!(b, a);
}
// Sets holding the same single value compare equal, in both directions.
#[test]
fn same_values_equal() {
    let a = HashSet::<usize>::new();
    let b = HashSet::<usize>::new();
    a.pin().insert(1);
    b.pin().insert(1);
    assert_eq!(a, b);
    assert_eq!(b, a);
}
// Equal-sized sets holding different values are unequal.
#[test]
fn different_values_not_equal() {
    let a = HashSet::<usize>::new();
    let b = HashSet::<usize>::new();
    a.pin().insert(1);
    b.pin().insert(2);
    assert_ne!(a, b);
    assert_ne!(b, a);
}
// Cloning an empty set yields an equal, empty set.
#[test]
fn clone_set_empty() {
    let set = HashSet::<&'static str>::new();
    let clone = set.clone();
    assert_eq!(clone.len(), 0);
    assert_eq!(set.len(), clone.len());
    assert_eq!(&set, &clone);
}
#[test]
// Cloning copies the contents, and mutating the original afterwards does
// not affect the clone (they must not share the same table).
fn clone_set_filled() {
    let set = HashSet::<&'static str>::new();
    set.insert("FooKey", &set.guard());
    set.insert("BarKey", &set.guard());
    let clone = set.clone();
    assert_eq!(set.len(), clone.len());
    assert_eq!(&set, &clone);
    // test that we are not mapping the same tables
    set.insert("NewItem", &set.guard());
    assert_ne!(&set, &clone);
}
// `Default` yields a usable empty set.
#[test]
fn default() {
    let set = HashSet::<usize>::default();
    let guard = set.guard();
    set.insert(42, &guard);
    assert!(set.contains(&42, &guard));
}
// Debug output uses set notation; element order is unspecified, so both
// orderings are accepted.
#[test]
fn debug() {
    let set: HashSet<usize> = HashSet::new();
    let guard = set.guard();
    set.insert(42, &guard);
    set.insert(16, &guard);
    let printed = format!("{:?}", set);
    assert!(matches!(printed.as_str(), "{42, 16}" | "{16, 42}"));
}
// `Extend<usize>` on `&HashSet` inserts every yielded value.
#[test]
fn extend() {
    let set: HashSet<usize> = HashSet::new();
    let guard = set.guard();
    let mut expected = vec![42, 16, 38];
    expected.sort_unstable();
    (&set).extend(expected.iter().copied());
    let mut actual: Vec<_> = set.iter(&guard).copied().collect();
    actual.sort_unstable();
    assert_eq!(expected, actual);
}
// `Extend<&usize>` on `&HashSet` inserts every referenced value.
#[test]
fn extend_ref() {
    let set: HashSet<usize> = HashSet::new();
    let mut expected = vec![&42, &16, &38];
    expected.sort();
    (&set).extend(expected.clone().into_iter());
    let guard = set.guard();
    let mut actual: Vec<_> = set.iter(&guard).collect();
    actual.sort();
    assert_eq!(expected, actual);
}
// `FromIterator<&usize>` collects the referenced values into the set.
#[test]
fn from_iter_ref() {
    use std::iter::FromIterator;
    let mut entries: Vec<_> = vec![&42, &16, &38];
    entries.sort();
    let set: HashSet<usize> = HashSet::from_iter(entries.clone().into_iter());
    let guard = set.guard();
    let mut collected: Vec<_> = set.iter(&guard).collect();
    collected.sort();
    // Was `assert_eq!(entries, entries)` — a tautology that verified nothing.
    // Compare against what was actually collected from the set.
    assert_eq!(entries, collected)
}
// Collecting an empty iterator yields an empty set.
#[test]
fn from_iter_empty() {
    use std::iter::FromIterator;
    let set: HashSet<usize> = HashSet::from_iter(std::iter::empty());
    assert_eq!(set.len(), 0)
}