chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

243
vendor/rand/src/distr/bernoulli.rs vendored Normal file
View File

@@ -0,0 +1,243 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Bernoulli distribution `Bernoulli(p)`.
use crate::distr::Distribution;
use crate::Rng;
use core::fmt;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// The [Bernoulli distribution](https://en.wikipedia.org/wiki/Bernoulli_distribution) `Bernoulli(p)`.
///
/// This distribution describes a single boolean random variable, which is true
/// with probability `p` and false with probability `1 - p`.
/// It is a special case of the Binomial distribution with `n = 1`.
///
/// # Plot
///
/// The following plot shows the Bernoulli distribution with `p = 0.1`,
/// `p = 0.5`, and `p = 0.9`.
///
/// ![Bernoulli distribution](https://raw.githubusercontent.com/rust-random/charts/main/charts/bernoulli.svg)
///
/// # Example
///
/// ```rust
/// use rand::distr::{Bernoulli, Distribution};
///
/// let d = Bernoulli::new(0.3).unwrap();
/// let v = d.sample(&mut rand::rng());
/// println!("{} is from a Bernoulli distribution", v);
/// ```
///
/// # Precision
///
/// This `Bernoulli` distribution uses 64 bits from the RNG (a `u64`),
/// so only probabilities that are multiples of 2<sup>-64</sup> can be
/// represented.
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Bernoulli {
    /// Probability of success, relative to the maximal integer.
    ///
    /// Sampling draws a `u64` and compares it against this value. The
    /// sentinel `u64::MAX` (`ALWAYS_TRUE`) marks `p = 1.0` exactly, so that
    /// case never samples `false`.
    p_int: u64,
}
// To sample from the Bernoulli distribution we use a method that compares a
// random `u64` value `v < (p * 2^64)`.
//
// If `p == 1.0`, the integer `v` to compare against can not represented as a
// `u64`. We manually set it to `u64::MAX` instead (2^64 - 1 instead of 2^64).
// Note that value of `p < 1.0` can never result in `u64::MAX`, because an
// `f64` only has 53 bits of precision, and the next largest value of `p` will
// result in `2^64 - 2048`.
//
// Also there is a 100% theoretical concern: if someone consistently wants to
// generate `true` using the Bernoulli distribution (i.e. by using a probability
// of `1.0`), just using `u64::MAX` is not enough. On average it would return
// false once every 2^64 iterations. Some people apparently care about this
// case.
//
// That is why we special-case `u64::MAX` to always return `true`, without using
// the RNG, and pay the performance price for all uses that *are* reasonable.
// Luckily, if `new()` and `sample` are close, the compiler can optimize out the
// extra check.
// Sentinel for `p = 1.0`: `sample` returns `true` without consuming the RNG.
const ALWAYS_TRUE: u64 = u64::MAX;
// This is just `2.0.powi(64)`, but written this way because it is not available
// in `no_std` mode.
const SCALE: f64 = 2.0 * (1u64 << 63) as f64;
/// Error type returned from [`Bernoulli::new`] and [`Bernoulli::from_ratio`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BernoulliError {
    /// `p < 0` or `p > 1` (this also covers NaN inputs to `new`).
    InvalidProbability,
}
impl fmt::Display for BernoulliError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Select the message first; the enum has a single variant, so the
        // exhaustive match will flag any future additions at compile time.
        let msg = match self {
            BernoulliError::InvalidProbability => "p is outside [0, 1] in Bernoulli distribution",
        };
        f.write_str(msg)
    }
}

#[cfg(feature = "std")]
impl std::error::Error for BernoulliError {}
impl Bernoulli {
    /// Construct a new `Bernoulli` with the given probability of success `p`.
    ///
    /// # Precision
    ///
    /// For `p = 1.0`, the resulting distribution will always generate true.
    /// For `p = 0.0`, the resulting distribution will always generate false.
    ///
    /// This method is accurate for any input `p` in the range `[0, 1]` which is
    /// a multiple of 2<sup>-64</sup>. (Note that not all multiples of
    /// 2<sup>-64</sup> in `[0, 1]` can be represented as a `f64`.)
    #[inline]
    pub fn new(p: f64) -> Result<Bernoulli, BernoulliError> {
        // `0.0 <= p < 1.0` scales directly into the integer representation;
        // `p == 1.0` needs the sentinel (see `ALWAYS_TRUE`); everything else,
        // including NaN, is rejected.
        if (0.0..1.0).contains(&p) {
            Ok(Bernoulli {
                p_int: (p * SCALE) as u64,
            })
        } else if p == 1.0 {
            Ok(Bernoulli { p_int: ALWAYS_TRUE })
        } else {
            Err(BernoulliError::InvalidProbability)
        }
    }

    /// Construct a new `Bernoulli` with the probability of success of
    /// `numerator`-in-`denominator`. I.e. `from_ratio(2, 3)` will return
    /// a `Bernoulli` with a 2-in-3 chance, or about 67%, of returning `true`.
    ///
    /// For `numerator > denominator` or `denominator == 0`, this returns an
    /// error. Otherwise, for `numerator == denominator`, samples are always
    /// true; for `numerator == 0` samples are always false.
    #[inline]
    pub fn from_ratio(numerator: u32, denominator: u32) -> Result<Bernoulli, BernoulliError> {
        if denominator == 0 || numerator > denominator {
            return Err(BernoulliError::InvalidProbability);
        }
        if numerator == denominator {
            // Special-cased so that p = 1 can never sample `false`.
            return Ok(Bernoulli { p_int: ALWAYS_TRUE });
        }
        let p = f64::from(numerator) / f64::from(denominator);
        Ok(Bernoulli {
            p_int: (p * SCALE) as u64,
        })
    }

    #[inline]
    /// Returns the probability (`p`) of the distribution.
    ///
    /// This value may differ slightly from the input due to loss of precision.
    pub fn p(&self) -> f64 {
        match self.p_int {
            ALWAYS_TRUE => 1.0,
            p_int => (p_int as f64) / SCALE,
        }
    }
}
impl Distribution<bool> for Bernoulli {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> bool {
        // `p = 1.0` is special-cased so it can never sample `false`; note the
        // RNG is not advanced in that branch.
        match self.p_int {
            ALWAYS_TRUE => true,
            p_int => rng.random::<u64>() < p_int,
        }
    }
}
#[cfg(test)]
mod test {
    use super::Bernoulli;
    use crate::distr::Distribution;
    use crate::Rng;

    // A serde round-trip must preserve the integer representation exactly.
    #[test]
    #[cfg(feature = "serde")]
    fn test_serializing_deserializing_bernoulli() {
        let coin_flip = Bernoulli::new(0.5).unwrap();
        let de_coin_flip: Bernoulli =
            bincode::deserialize(&bincode::serialize(&coin_flip).unwrap()).unwrap();
        assert_eq!(coin_flip.p_int, de_coin_flip.p_int);
    }

    // p = 0.0 and p = 1.0 must be exact, via both sampling entry points.
    #[test]
    fn test_trivial() {
        // We prefer to be explicit here.
        #![allow(clippy::bool_assert_comparison)]
        let mut r = crate::test::rng(1);
        let always_false = Bernoulli::new(0.0).unwrap();
        let always_true = Bernoulli::new(1.0).unwrap();
        for _ in 0..5 {
            assert_eq!(r.sample::<bool, _>(&always_false), false);
            assert_eq!(r.sample::<bool, _>(&always_true), true);
            assert_eq!(Distribution::<bool>::sample(&always_false, &mut r), false);
            assert_eq!(Distribution::<bool>::sample(&always_true, &mut r), true);
        }
    }

    // Empirical mean over 100_000 draws must lie within 5e-3 of p, for both
    // the float and the ratio constructors.
    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    fn test_average() {
        const P: f64 = 0.3;
        const NUM: u32 = 3;
        const DENOM: u32 = 10;
        let d1 = Bernoulli::new(P).unwrap();
        let d2 = Bernoulli::from_ratio(NUM, DENOM).unwrap();
        const N: u32 = 100_000;
        let mut sum1: u32 = 0;
        let mut sum2: u32 = 0;
        let mut rng = crate::test::rng(2);
        for _ in 0..N {
            if d1.sample(&mut rng) {
                sum1 += 1;
            }
            if d2.sample(&mut rng) {
                sum2 += 1;
            }
        }
        let avg1 = (sum1 as f64) / (N as f64);
        assert!((avg1 - P).abs() < 5e-3);
        let avg2 = (sum2 as f64) / (N as f64);
        assert!((avg2 - (NUM as f64) / (DENOM as f64)).abs() < 5e-3);
    }

    // Golden outputs pin the exact sample stream for a fixed seed
    // (portability/reproducibility guarantee).
    #[test]
    fn value_stability() {
        let mut rng = crate::test::rng(3);
        let distr = Bernoulli::new(0.4532).unwrap();
        let mut buf = [false; 10];
        for x in &mut buf {
            *x = rng.sample(distr);
        }
        assert_eq!(
            buf,
            [true, false, false, true, false, false, true, true, true, true]
        );
    }

    #[test]
    fn bernoulli_distributions_can_be_compared() {
        assert_eq!(Bernoulli::new(1.0), Bernoulli::new(1.0));
    }
}

269
vendor/rand/src/distr/distribution.rs vendored Normal file
View File

@@ -0,0 +1,269 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Distribution trait and associates
use crate::Rng;
#[cfg(feature = "alloc")]
use alloc::string::String;
use core::iter;
/// Types (distributions) that can be used to create a random instance of `T`.
///
/// It is possible to sample from a distribution through both the
/// `Distribution` and [`Rng`] traits, via `distr.sample(&mut rng)` and
/// `rng.sample(distr)`. They also both offer the [`sample_iter`] method, which
/// produces an iterator that samples from the distribution.
///
/// All implementations are expected to be immutable; this has the significant
/// advantage of not needing to consider thread safety, and for most
/// distributions efficient state-less sampling algorithms are available.
///
/// Implementations are typically expected to be portable with reproducible
/// results when used with a PRNG with fixed seed; see the
/// [portability chapter](https://rust-random.github.io/book/portability.html)
/// of The Rust Rand Book. In some cases this does not apply, e.g. the `usize`
/// type requires different sampling on 32-bit and 64-bit machines.
///
/// [`sample_iter`]: Distribution::sample_iter
pub trait Distribution<T> {
    /// Generate a random value of `T`, using `rng` as the source of randomness.
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T;

    /// Create an iterator that generates random values of `T`, using `rng` as
    /// the source of randomness.
    ///
    /// Note that this function takes `self` by value. This works since
    /// `Distribution<T>` is impl'd for `&D` where `D: Distribution<T>`,
    /// however borrowing is not automatic hence `distr.sample_iter(...)` may
    /// need to be replaced with `(&distr).sample_iter(...)` to borrow or
    /// `(&*distr).sample_iter(...)` to reborrow an existing reference.
    ///
    /// # Example
    ///
    /// ```
    /// use rand::distr::{Distribution, Alphanumeric, Uniform, StandardUniform};
    ///
    /// let mut rng = rand::rng();
    ///
    /// // Vec of 16 x f32:
    /// let v: Vec<f32> = StandardUniform.sample_iter(&mut rng).take(16).collect();
    ///
    /// // String:
    /// let s: String = Alphanumeric
    ///     .sample_iter(&mut rng)
    ///     .take(7)
    ///     .map(char::from)
    ///     .collect();
    ///
    /// // Dice-rolling:
    /// let die_range = Uniform::new_inclusive(1, 6).unwrap();
    /// let mut roll_die = die_range.sample_iter(&mut rng);
    /// while roll_die.next().unwrap() != 6 {
    ///     println!("Not a 6; rolling again!");
    /// }
    /// ```
    fn sample_iter<R>(self, rng: R) -> Iter<Self, R, T>
    where
        R: Rng,
        Self: Sized,
    {
        // Bundle distribution and RNG by value; `phantom` only records `T`.
        Iter {
            distr: self,
            rng,
            phantom: core::marker::PhantomData,
        }
    }

    /// Map sampled values to type `S`
    ///
    /// # Example
    ///
    /// ```
    /// use rand::distr::{Distribution, Uniform};
    ///
    /// let die = Uniform::new_inclusive(1, 6).unwrap();
    /// let even_number = die.map(|num| num % 2 == 0);
    /// while !even_number.sample(&mut rand::rng()) {
    ///     println!("Still odd; rolling again!");
    /// }
    /// ```
    fn map<F, S>(self, func: F) -> Map<Self, F, T, S>
    where
        F: Fn(T) -> S,
        Self: Sized,
    {
        // `Map` applies `func` to each value sampled from `self`.
        Map {
            distr: self,
            func,
            phantom: core::marker::PhantomData,
        }
    }
}
// Blanket impl: a shared reference to a distribution is itself a
// distribution, delegating to the referenced value.
impl<T, D: Distribution<T> + ?Sized> Distribution<T> for &D {
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T {
        // `self` is `&&D` here; deref once and delegate.
        (*self).sample(rng)
    }
}
/// An iterator over a [`Distribution`]
///
/// This iterator yields random values of type `T` with distribution `D`
/// from a random generator of type `R`.
///
/// Construct this `struct` using [`Distribution::sample_iter`] or
/// [`Rng::sample_iter`]. It is also used by [`Rng::random_iter`] and
/// [`crate::random_iter`].
#[derive(Debug)]
pub struct Iter<D, R, T> {
    // Distribution sampled on each `next` call.
    distr: D,
    // Source of randomness; may itself be a `&mut` reference to an RNG.
    rng: R,
    // Records the item type `T` without storing a value of it.
    phantom: core::marker::PhantomData<T>,
}
impl<D, R, T> Iterator for Iter<D, R, T>
where
    D: Distribution<T>,
    R: Rng,
{
    type Item = T;

    #[inline(always)]
    fn next(&mut self) -> Option<T> {
        // `self.rng` may itself be a reference, but we must take `&mut`
        // anyway: even if `sample` could take an `R: Rng` by value we would
        // need to, since `Rng` is not copyable and we cannot enforce that it
        // is "reborrowable".
        let value = self.distr.sample(&mut self.rng);
        Some(value)
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // The iterator is endless: `next` never returns `None`.
        (usize::MAX, None)
    }
}

// Since `next` always returns `Some`, the iterator is trivially fused.
impl<D, R, T> iter::FusedIterator for Iter<D, R, T>
where
    D: Distribution<T>,
    R: Rng,
{
}
/// A [`Distribution`] which maps sampled values to type `S`
///
/// This `struct` is created by the [`Distribution::map`] method.
/// See its documentation for more.
#[derive(Debug)]
pub struct Map<D, F, T, S> {
    // Underlying distribution producing `T` values.
    distr: D,
    // Mapping applied to each sampled value.
    func: F,
    // Marks the `T -> S` relationship without storing values of either type.
    phantom: core::marker::PhantomData<fn(T) -> S>,
}
impl<D, F, T, S> Distribution<S> for Map<D, F, T, S>
where
    D: Distribution<T>,
    F: Fn(T) -> S,
{
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> S {
        // Draw from the inner distribution, then apply the mapping function.
        let value = self.distr.sample(rng);
        (self.func)(value)
    }
}
/// Sample or extend a [`String`]
///
/// Helper methods to extend a [`String`] or sample a new [`String`].
#[cfg(feature = "alloc")]
pub trait SampleString {
    /// Append `len` random chars to `string`
    ///
    /// Note: implementations may leave `string` with excess capacity. If this
    /// is undesirable, consider calling [`String::shrink_to_fit`] after this
    /// method.
    fn append_string<R: Rng + ?Sized>(&self, rng: &mut R, string: &mut String, len: usize);

    /// Generate a [`String`] of `len` random chars
    ///
    /// Note: implementations may leave the string with excess capacity. If this
    /// is undesirable, consider calling [`String::shrink_to_fit`] after this
    /// method.
    #[inline]
    fn sample_string<R: Rng + ?Sized>(&self, rng: &mut R, len: usize) -> String {
        // Every char occupies at least one byte, so `len` is a sound lower
        // bound for the byte allocation. Preallocating avoids repeated
        // reallocation for ASCII samplers and is a no-op for implementations
        // of `append_string` that reserve capacity themselves.
        let mut s = String::with_capacity(len);
        self.append_string(rng, &mut s, len);
        s
    }
}
#[cfg(test)]
mod tests {
    use crate::distr::{Distribution, Uniform};
    use crate::Rng;

    // `sample_iter` yields values from the wrapped distribution; 100 draws
    // from Open01 must sum to a value strictly inside (0, 100).
    #[test]
    fn test_distributions_iter() {
        use crate::distr::Open01;
        let mut rng = crate::test::rng(210);
        let distr = Open01;
        let mut iter = Distribution::<f32>::sample_iter(distr, &mut rng);
        let mut sum: f32 = 0.;
        for _ in 0..100 {
            sum += iter.next().unwrap();
        }
        assert!(0. < sum && sum < 100.);
    }

    // `map` applies the closure to each sampled value.
    #[test]
    fn test_distributions_map() {
        let dist = Uniform::new_inclusive(0, 5).unwrap().map(|val| val + 15);
        let mut rng = crate::test::rng(212);
        let val = dist.sample(&mut rng);
        assert!((15..=20).contains(&val));
    }

    // `sample_iter` composes with standard iterator adaptors and borrows.
    #[test]
    fn test_make_an_iter() {
        fn ten_dice_rolls_other_than_five<R: Rng>(rng: &mut R) -> impl Iterator<Item = i32> + '_ {
            Uniform::new_inclusive(1, 6)
                .unwrap()
                .sample_iter(rng)
                .filter(|x| *x != 5)
                .take(10)
        }
        let mut rng = crate::test::rng(211);
        let mut count = 0;
        for val in ten_dice_rolls_other_than_five(&mut rng) {
            assert!((1..=6).contains(&val) && val != 5);
            count += 1;
        }
        assert_eq!(count, 10);
    }

    // `sample_string` must produce valid UTF-8 of the requested length
    // (bytes for ASCII-only samplers, chars for StandardUniform).
    #[test]
    #[cfg(feature = "alloc")]
    fn test_dist_string() {
        use crate::distr::{Alphabetic, Alphanumeric, SampleString, StandardUniform};
        use core::str;
        let mut rng = crate::test::rng(213);
        let s1 = Alphanumeric.sample_string(&mut rng, 20);
        assert_eq!(s1.len(), 20);
        assert_eq!(str::from_utf8(s1.as_bytes()), Ok(s1.as_str()));
        let s2 = StandardUniform.sample_string(&mut rng, 20);
        assert_eq!(s2.chars().count(), 20);
        assert_eq!(str::from_utf8(s2.as_bytes()), Ok(s2.as_str()));
        let s3 = Alphabetic.sample_string(&mut rng, 20);
        assert_eq!(s3.len(), 20);
        assert_eq!(str::from_utf8(s3.as_bytes()), Ok(s3.as_str()));
    }
}

344
vendor/rand/src/distr/float.rs vendored Normal file
View File

@@ -0,0 +1,344 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Basic floating-point number distributions
use crate::distr::utils::{FloatAsSIMD, FloatSIMDUtils, IntAsSIMD};
use crate::distr::{Distribution, StandardUniform};
use crate::Rng;
use core::mem;
#[cfg(feature = "simd_support")]
use core::simd::prelude::*;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// A distribution to sample floating point numbers uniformly in the half-open
/// interval `(0, 1]`, i.e. including 1 but not 0.
///
/// All values that can be generated are of the form `n * ε/2`. For `f32`
/// the 24 most significant random bits of a `u32` are used and for `f64` the
/// 53 most significant bits of a `u64` are used. The conversion uses the
/// multiplicative method.
///
/// See also: [`StandardUniform`] which samples from `[0, 1)`, [`Open01`]
/// which samples from `(0, 1)` and [`Uniform`] which samples from arbitrary
/// ranges.
///
/// # Example
/// ```
/// use rand::Rng;
/// use rand::distr::OpenClosed01;
///
/// let val: f32 = rand::rng().sample(OpenClosed01);
/// println!("f32 from (0, 1): {}", val);
/// ```
///
/// [`StandardUniform`]: crate::distr::StandardUniform
/// [`Open01`]: crate::distr::Open01
/// [`Uniform`]: crate::distr::uniform::Uniform
// Stateless marker type: the sampling logic lives in the `Distribution`
// impls generated by `float_impls!` below.
#[derive(Clone, Copy, Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct OpenClosed01;
/// A distribution to sample floating point numbers uniformly in the open
/// interval `(0, 1)`, i.e. not including either endpoint.
///
/// All values that can be generated are of the form `n * ε + ε/2`. For `f32`
/// the 23 most significant random bits of an `u32` are used, for `f64` 52 from
/// an `u64`. The conversion uses a transmute-based method.
///
/// See also: [`StandardUniform`] which samples from `[0, 1)`, [`OpenClosed01`]
/// which samples from `(0, 1]` and [`Uniform`] which samples from arbitrary
/// ranges.
///
/// # Example
/// ```
/// use rand::Rng;
/// use rand::distr::Open01;
///
/// let val: f32 = rand::rng().sample(Open01);
/// println!("f32 from (0, 1): {}", val);
/// ```
///
/// [`StandardUniform`]: crate::distr::StandardUniform
/// [`OpenClosed01`]: crate::distr::OpenClosed01
/// [`Uniform`]: crate::distr::uniform::Uniform
// Stateless marker type: the sampling logic lives in the `Distribution`
// impls generated by `float_impls!` below.
#[derive(Clone, Copy, Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Open01;
// This trait is needed by both this lib and rand_distr hence is a hidden export
#[doc(hidden)]
pub trait IntoFloat {
    /// The float type constructed from `Self` (e.g. `f32` from `u32`,
    /// per the `float_impls!` invocations below).
    type F;
    /// Helper method to combine the fraction and a constant exponent into a
    /// float.
    ///
    /// Only the least significant bits of `self` may be set, 23 for `f32` and
    /// 52 for `f64`.
    /// The resulting value will fall in a range that depends on the exponent.
    /// As an example the range with exponent 0 will be
    /// [2<sup>0</sup>..2<sup>1</sup>), which is [1..2).
    fn into_float_with_exponent(self, exponent: i32) -> Self::F;
}
// Generates `IntoFloat` plus the three unit-interval distributions for one
// float type (scalar or SIMD vector):
//   $meta           - optional cfg gate (e.g. `feature = "simd_support"`)
//   $ty / $uty      - float type / matching unsigned type of the same width
//   $f_scalar / $u_scalar - the scalar lane types
//   $fraction_bits  - mantissa bits: 23 for f32, 52 for f64 (see invocations)
//   $exponent_bias  - IEEE 754 exponent bias: 127 for f32, 1023 for f64
macro_rules! float_impls {
    ($($meta:meta)?, $ty:ident, $uty:ident, $f_scalar:ident, $u_scalar:ty,
     $fraction_bits:expr, $exponent_bias:expr) => {
        $(#[cfg($meta)])?
        impl IntoFloat for $uty {
            type F = $ty;
            #[inline(always)]
            fn into_float_with_exponent(self, exponent: i32) -> $ty {
                // The exponent is encoded using an offset-binary representation
                let exponent_bits: $u_scalar =
                    (($exponent_bias + exponent) as $u_scalar) << $fraction_bits;
                $ty::from_bits(self | $uty::splat(exponent_bits))
            }
        }

        $(#[cfg($meta)])?
        impl Distribution<$ty> for StandardUniform {
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
                // Multiply-based method; 24/53 random bits; [0, 1) interval.
                // We use the most significant bits because for simple RNGs
                // those are usually more random.
                let float_size = mem::size_of::<$f_scalar>() as $u_scalar * 8;
                let precision = $fraction_bits + 1;
                let scale = 1.0 / ((1 as $u_scalar << precision) as $f_scalar);
                let value: $uty = rng.random();
                let value = value >> $uty::splat(float_size - precision);
                $ty::splat(scale) * $ty::cast_from_int(value)
            }
        }

        $(#[cfg($meta)])?
        impl Distribution<$ty> for OpenClosed01 {
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
                // Multiply-based method; 24/53 random bits; (0, 1] interval.
                // We use the most significant bits because for simple RNGs
                // those are usually more random.
                let float_size = mem::size_of::<$f_scalar>() as $u_scalar * 8;
                let precision = $fraction_bits + 1;
                let scale = 1.0 / ((1 as $u_scalar << precision) as $f_scalar);
                let value: $uty = rng.random();
                let value = value >> $uty::splat(float_size - precision);
                // Add 1 to shift up; will not overflow because of right-shift:
                $ty::splat(scale) * $ty::cast_from_int(value + $uty::splat(1))
            }
        }

        $(#[cfg($meta)])?
        impl Distribution<$ty> for Open01 {
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
                // Transmute-based method; 23/52 random bits; (0, 1) interval.
                // We use the most significant bits because for simple RNGs
                // those are usually more random.
                let float_size = mem::size_of::<$f_scalar>() as $u_scalar * 8;
                let value: $uty = rng.random();
                let fraction = value >> $uty::splat(float_size - $fraction_bits);
                fraction.into_float_with_exponent(0) - $ty::splat(1.0 - $f_scalar::EPSILON / 2.0)
            }
        }
    }
}
// Scalar impls are always built; SIMD vector variants are gated on the
// `simd_support` feature.
float_impls! { , f32, u32, f32, u32, 23, 127 }
float_impls! { , f64, u64, f64, u64, 52, 1023 }
#[cfg(feature = "simd_support")]
float_impls! { feature = "simd_support", f32x2, u32x2, f32, u32, 23, 127 }
#[cfg(feature = "simd_support")]
float_impls! { feature = "simd_support", f32x4, u32x4, f32, u32, 23, 127 }
#[cfg(feature = "simd_support")]
float_impls! { feature = "simd_support", f32x8, u32x8, f32, u32, 23, 127 }
#[cfg(feature = "simd_support")]
float_impls! { feature = "simd_support", f32x16, u32x16, f32, u32, 23, 127 }
#[cfg(feature = "simd_support")]
float_impls! { feature = "simd_support", f64x2, u64x2, f64, u64, 52, 1023 }
#[cfg(feature = "simd_support")]
float_impls! { feature = "simd_support", f64x4, u64x4, f64, u64, 52, 1023 }
#[cfg(feature = "simd_support")]
float_impls! { feature = "simd_support", f64x8, u64x8, f64, u64, 52, 1023 }
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test::const_rng;

    const EPSILON32: f32 = f32::EPSILON;
    const EPSILON64: f64 = f64::EPSILON;

    // Edge-case checks for f32-based types: an all-zero draw, a minimal
    // non-zero bit pattern, and an all-ones draw must map exactly onto the
    // documented interval endpoints of each distribution.
    macro_rules! test_f32 {
        ($fnn:ident, $ty:ident, $ZERO:expr, $EPSILON:expr) => {
            #[test]
            fn $fnn() {
                let two = $ty::splat(2.0);
                // StandardUniform
                let mut zeros = const_rng(0);
                assert_eq!(zeros.random::<$ty>(), $ZERO);
                let mut one = const_rng(1 << 8 | 1 << (8 + 32));
                assert_eq!(one.random::<$ty>(), $EPSILON / two);
                let mut max = const_rng(!0);
                assert_eq!(max.random::<$ty>(), $ty::splat(1.0) - $EPSILON / two);
                // OpenClosed01
                let mut zeros = const_rng(0);
                assert_eq!(zeros.sample::<$ty, _>(OpenClosed01), $ZERO + $EPSILON / two);
                let mut one = const_rng(1 << 8 | 1 << (8 + 32));
                assert_eq!(one.sample::<$ty, _>(OpenClosed01), $EPSILON);
                let mut max = const_rng(!0);
                assert_eq!(max.sample::<$ty, _>(OpenClosed01), $ZERO + $ty::splat(1.0));
                // Open01
                let mut zeros = const_rng(0);
                assert_eq!(zeros.sample::<$ty, _>(Open01), $ZERO + $EPSILON / two);
                let mut one = const_rng(1 << 9 | 1 << (9 + 32));
                assert_eq!(
                    one.sample::<$ty, _>(Open01),
                    $EPSILON / two * $ty::splat(3.0)
                );
                let mut max = const_rng(!0);
                assert_eq!(
                    max.sample::<$ty, _>(Open01),
                    $ty::splat(1.0) - $EPSILON / two
                );
            }
        };
    }
    test_f32! { f32_edge_cases, f32, 0.0, EPSILON32 }
    #[cfg(feature = "simd_support")]
    test_f32! { f32x2_edge_cases, f32x2, f32x2::splat(0.0), f32x2::splat(EPSILON32) }
    #[cfg(feature = "simd_support")]
    test_f32! { f32x4_edge_cases, f32x4, f32x4::splat(0.0), f32x4::splat(EPSILON32) }
    #[cfg(feature = "simd_support")]
    test_f32! { f32x8_edge_cases, f32x8, f32x8::splat(0.0), f32x8::splat(EPSILON32) }
    #[cfg(feature = "simd_support")]
    test_f32! { f32x16_edge_cases, f32x16, f32x16::splat(0.0), f32x16::splat(EPSILON32) }

    // Same edge-case checks for f64-based types (53-bit precision, so the
    // minimal non-zero draw patterns differ from the f32 macro above).
    macro_rules! test_f64 {
        ($fnn:ident, $ty:ident, $ZERO:expr, $EPSILON:expr) => {
            #[test]
            fn $fnn() {
                let two = $ty::splat(2.0);
                // StandardUniform
                let mut zeros = const_rng(0);
                assert_eq!(zeros.random::<$ty>(), $ZERO);
                let mut one = const_rng(1 << 11);
                assert_eq!(one.random::<$ty>(), $EPSILON / two);
                let mut max = const_rng(!0);
                assert_eq!(max.random::<$ty>(), $ty::splat(1.0) - $EPSILON / two);
                // OpenClosed01
                let mut zeros = const_rng(0);
                assert_eq!(zeros.sample::<$ty, _>(OpenClosed01), $ZERO + $EPSILON / two);
                let mut one = const_rng(1 << 11);
                assert_eq!(one.sample::<$ty, _>(OpenClosed01), $EPSILON);
                let mut max = const_rng(!0);
                assert_eq!(max.sample::<$ty, _>(OpenClosed01), $ZERO + $ty::splat(1.0));
                // Open01
                let mut zeros = const_rng(0);
                assert_eq!(zeros.sample::<$ty, _>(Open01), $ZERO + $EPSILON / two);
                let mut one = const_rng(1 << 12);
                assert_eq!(
                    one.sample::<$ty, _>(Open01),
                    $EPSILON / two * $ty::splat(3.0)
                );
                let mut max = const_rng(!0);
                assert_eq!(
                    max.sample::<$ty, _>(Open01),
                    $ty::splat(1.0) - $EPSILON / two
                );
            }
        };
    }
    test_f64! { f64_edge_cases, f64, 0.0, EPSILON64 }
    #[cfg(feature = "simd_support")]
    test_f64! { f64x2_edge_cases, f64x2, f64x2::splat(0.0), f64x2::splat(EPSILON64) }
    #[cfg(feature = "simd_support")]
    test_f64! { f64x4_edge_cases, f64x4, f64x4::splat(0.0), f64x4::splat(EPSILON64) }
    #[cfg(feature = "simd_support")]
    test_f64! { f64x8_edge_cases, f64x8, f64x8::splat(0.0), f64x8::splat(EPSILON64) }

    // Golden samples pin the exact output stream for a fixed seed
    // (portability/reproducibility guarantee). Do not update without a
    // deliberate value-breaking release.
    #[test]
    fn value_stability() {
        fn test_samples<T: Copy + core::fmt::Debug + PartialEq, D: Distribution<T>>(
            distr: &D,
            zero: T,
            expected: &[T],
        ) {
            let mut rng = crate::test::rng(0x6f44f5646c2a7334);
            let mut buf = [zero; 3];
            for x in &mut buf {
                *x = rng.sample(distr);
            }
            assert_eq!(&buf, expected);
        }

        test_samples(
            &StandardUniform,
            0f32,
            &[0.0035963655, 0.7346052, 0.09778172],
        );
        test_samples(
            &StandardUniform,
            0f64,
            &[0.7346051961657583, 0.20298547462974248, 0.8166436635290655],
        );
        test_samples(&OpenClosed01, 0f32, &[0.003596425, 0.73460525, 0.09778178]);
        test_samples(
            &OpenClosed01,
            0f64,
            &[0.7346051961657584, 0.2029854746297426, 0.8166436635290656],
        );
        test_samples(&Open01, 0f32, &[0.0035963655, 0.73460525, 0.09778172]);
        test_samples(
            &Open01,
            0f64,
            &[0.7346051961657584, 0.20298547462974248, 0.8166436635290656],
        );
        #[cfg(feature = "simd_support")]
        {
            // We only test a sub-set of types here. Values are identical to
            // non-SIMD types; we assume this pattern continues across all
            // SIMD types.
            test_samples(
                &StandardUniform,
                f32x2::from([0.0, 0.0]),
                &[
                    f32x2::from([0.0035963655, 0.7346052]),
                    f32x2::from([0.09778172, 0.20298547]),
                    f32x2::from([0.34296435, 0.81664366]),
                ],
            );
            test_samples(
                &StandardUniform,
                f64x2::from([0.0, 0.0]),
                &[
                    f64x2::from([0.7346051961657583, 0.20298547462974248]),
                    f64x2::from([0.8166436635290655, 0.7423708925400552]),
                    f64x2::from([0.16387782224016323, 0.9087068770169618]),
                ],
            );
        }
    }
}

307
vendor/rand/src/distr/integer.rs vendored Normal file
View File

@@ -0,0 +1,307 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The implementations of the `StandardUniform` distribution for integer types.
use crate::distr::{Distribution, StandardUniform};
use crate::Rng;
#[cfg(all(target_arch = "x86", feature = "simd_support"))]
use core::arch::x86::__m512i;
#[cfg(target_arch = "x86")]
use core::arch::x86::{__m128i, __m256i};
#[cfg(all(target_arch = "x86_64", feature = "simd_support"))]
use core::arch::x86_64::__m512i;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::{__m128i, __m256i};
use core::num::{
NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroU128, NonZeroU16,
NonZeroU32, NonZeroU64, NonZeroU8,
};
#[cfg(feature = "simd_support")]
use core::simd::*;
impl Distribution<u8> for StandardUniform {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u8 {
        // Truncate a 32-bit draw, keeping the low 8 bits.
        rng.next_u32() as u8
    }
}

impl Distribution<u16> for StandardUniform {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u16 {
        // Truncate a 32-bit draw, keeping the low 16 bits.
        rng.next_u32() as u16
    }
}

impl Distribution<u32> for StandardUniform {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u32 {
        rng.next_u32()
    }
}

impl Distribution<u64> for StandardUniform {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
        rng.next_u64()
    }
}
impl Distribution<u128> for StandardUniform {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u128 {
        // Use LE composition: the first draw fills the low 64 bits, the
        // second the high 64 bits; one value is explicitly generated before
        // the next.
        let lo = u128::from(rng.next_u64());
        let hi = u128::from(rng.next_u64());
        lo | (hi << 64)
    }
}
// Signed integers reuse the unsigned sample of identical width: the `as`
// cast between same-size integer types reinterprets the bits.
macro_rules! impl_int_from_uint {
    ($ty:ty, $uty:ty) => {
        impl Distribution<$ty> for StandardUniform {
            #[inline]
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
                rng.random::<$uty>() as $ty
            }
        }
    };
}

impl_int_from_uint! { i8, u8 }
impl_int_from_uint! { i16, u16 }
impl_int_from_uint! { i32, u32 }
impl_int_from_uint! { i64, u64 }
impl_int_from_uint! { i128, u128 }
// NonZero* types are sampled by rejection: redraw until the checked
// constructor ($new, e.g. `NonZeroU8::new`) accepts the value.
macro_rules! impl_nzint {
    ($ty:ty, $new:path) => {
        impl Distribution<$ty> for StandardUniform {
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
                loop {
                    if let Some(nz) = $new(rng.random()) {
                        break nz;
                    }
                }
            }
        }
    };
}

impl_nzint!(NonZeroU8, NonZeroU8::new);
impl_nzint!(NonZeroU16, NonZeroU16::new);
impl_nzint!(NonZeroU32, NonZeroU32::new);
impl_nzint!(NonZeroU64, NonZeroU64::new);
impl_nzint!(NonZeroU128, NonZeroU128::new);
impl_nzint!(NonZeroI8, NonZeroI8::new);
impl_nzint!(NonZeroI16, NonZeroI16::new);
impl_nzint!(NonZeroI32, NonZeroI32::new);
impl_nzint!(NonZeroI64, NonZeroI64::new);
impl_nzint!(NonZeroI128, NonZeroI128::new);
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Distribution<__m128i> for StandardUniform {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> __m128i {
        // NOTE: It's tempting to use the u128 impl here, but confusingly this
        // results in different code (return via rdx, r10 instead of rax, rdx
        // with u128 impl) and is much slower (+130 time). This version calls
        // impls::fill_bytes_via_next but performs well.
        let mut buf = [0_u8; core::mem::size_of::<__m128i>()];
        rng.fill_bytes(&mut buf);
        // x86 is little endian so no need for conversion
        // SAFETY: All byte sequences of `buf` represent values of the output type.
        unsafe { core::mem::transmute(buf) }
    }
}

#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Distribution<__m256i> for StandardUniform {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> __m256i {
        // Same byte-filling approach as the __m128i impl above.
        let mut buf = [0_u8; core::mem::size_of::<__m256i>()];
        rng.fill_bytes(&mut buf);
        // x86 is little endian so no need for conversion
        // SAFETY: All byte sequences of `buf` represent values of the output type.
        unsafe { core::mem::transmute(buf) }
    }
}

// __m512i requires AVX-512 intrinsics, only exported under `simd_support`.
#[cfg(all(
    any(target_arch = "x86", target_arch = "x86_64"),
    feature = "simd_support"
))]
impl Distribution<__m512i> for StandardUniform {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> __m512i {
        // Same byte-filling approach as the __m128i impl above.
        let mut buf = [0_u8; core::mem::size_of::<__m512i>()];
        rng.fill_bytes(&mut buf);
        // x86 is little endian so no need for conversion
        // SAFETY: All byte sequences of `buf` represent values of the output type.
        unsafe { core::mem::transmute(buf) }
    }
}
// Generates a lane-count-generic impl for `core::simd` integer vectors.
#[cfg(feature = "simd_support")]
macro_rules! simd_impl {
    ($($ty:ty),+) => {$(
        /// Requires nightly Rust and the [`simd_support`] feature
        ///
        /// [`simd_support`]: https://github.com/rust-random/rand#crate-features
        #[cfg(feature = "simd_support")]
        impl<const LANES: usize> Distribution<Simd<$ty, LANES>> for StandardUniform
        where
            LaneCount<LANES>: SupportedLaneCount,
        {
            #[inline]
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Simd<$ty, LANES> {
                // Fill the vector's backing array of lane values from the RNG.
                let mut vec = Simd::default();
                rng.fill(vec.as_mut_array().as_mut_slice());
                vec
            }
        }
    )+};
}

#[cfg(feature = "simd_support")]
simd_impl!(u8, i8, u16, i16, u32, i32, u64, i64);
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: every integer type can be sampled via StandardUniform.
    #[test]
    fn test_integers() {
        let mut rng = crate::test::rng(806);
        rng.sample::<i8, _>(StandardUniform);
        rng.sample::<i16, _>(StandardUniform);
        rng.sample::<i32, _>(StandardUniform);
        rng.sample::<i64, _>(StandardUniform);
        rng.sample::<i128, _>(StandardUniform);
        rng.sample::<u8, _>(StandardUniform);
        rng.sample::<u16, _>(StandardUniform);
        rng.sample::<u32, _>(StandardUniform);
        rng.sample::<u64, _>(StandardUniform);
        rng.sample::<u128, _>(StandardUniform);
    }

    // Smoke test for the x86 vector impls.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    #[test]
    fn x86_integers() {
        let mut rng = crate::test::rng(807);
        rng.sample::<__m128i, _>(StandardUniform);
        rng.sample::<__m256i, _>(StandardUniform);
        #[cfg(feature = "simd_support")]
        rng.sample::<__m512i, _>(StandardUniform);
    }

    // Golden samples pin the exact output stream for a fixed seed
    // (portability/reproducibility guarantee).
    #[test]
    fn value_stability() {
        fn test_samples<T: Copy + core::fmt::Debug + PartialEq>(zero: T, expected: &[T])
        where
            StandardUniform: Distribution<T>,
        {
            let mut rng = crate::test::rng(807);
            let mut buf = [zero; 3];
            for x in &mut buf {
                *x = rng.sample(StandardUniform);
            }
            assert_eq!(&buf, expected);
        }

        test_samples(0u8, &[9, 247, 111]);
        test_samples(0u16, &[32265, 42999, 38255]);
        test_samples(0u32, &[2220326409, 2575017975, 2018088303]);
        test_samples(
            0u64,
            &[
                11059617991457472009,
                16096616328739788143,
                1487364411147516184,
            ],
        );
        test_samples(
            0u128,
            &[
                296930161868957086625409848350820761097,
                145644820879247630242265036535529306392,
                111087889832015897993126088499035356354,
            ],
        );
        test_samples(0i8, &[9, -9, 111]);
        // Skip further i* types: they are simple reinterpretation of u* samples
        #[cfg(feature = "simd_support")]
        {
            // We only test a sub-set of types here and make assumptions about the rest.
            test_samples(
                u8x4::default(),
                &[
                    u8x4::from([9, 126, 87, 132]),
                    u8x4::from([247, 167, 123, 153]),
                    u8x4::from([111, 149, 73, 120]),
                ],
            );
            test_samples(
                u8x8::default(),
                &[
                    u8x8::from([9, 126, 87, 132, 247, 167, 123, 153]),
                    u8x8::from([111, 149, 73, 120, 68, 171, 98, 223]),
                    u8x8::from([24, 121, 1, 50, 13, 46, 164, 20]),
                ],
            );
            test_samples(
                i64x8::default(),
                &[
                    i64x8::from([
                        -7387126082252079607,
                        -2350127744969763473,
                        1487364411147516184,
                        7895421560427121838,
                        602190064936008898,
                        6022086574635100741,
                        -5080089175222015595,
                        -4066367846667249123,
                    ]),
                    i64x8::from([
                        9180885022207963908,
                        3095981199532211089,
                        6586075293021332726,
                        419343203796414657,
                        3186951873057035255,
                        5287129228749947252,
                        444726432079249540,
                        -1587028029513790706,
                    ]),
                    i64x8::from([
                        6075236523189346388,
                        1351763722368165432,
                        -6192309979959753740,
                        -7697775502176768592,
                        -4482022114172078123,
                        7522501477800909500,
                        -1837258847956201231,
                        -586926753024886735,
                    ]),
                ],
            );
        }
    }
}

214
vendor/rand/src/distr/mod.rs vendored Normal file
View File

@@ -0,0 +1,214 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Generating random samples from probability distributions
//!
//! This module is the home of the [`Distribution`] trait and several of its
//! implementations. It is the workhorse behind some of the convenient
//! functionality of the [`Rng`] trait, e.g. [`Rng::random`] and of course
//! [`Rng::sample`].
//!
//! Abstractly, a [probability distribution] describes the probability of
//! occurrence of each value in its sample space.
//!
//! More concretely, an implementation of `Distribution<T>` for type `X` is an
//! algorithm for choosing values from the sample space (a subset of `T`)
//! according to the distribution `X` represents, using an external source of
//! randomness (an RNG supplied to the `sample` function).
//!
//! A type `X` may implement `Distribution<T>` for multiple types `T`.
//! Any type implementing [`Distribution`] is stateless (i.e. immutable),
//! but it may have internal parameters set at construction time (for example,
//! [`Uniform`] allows specification of its sample space as a range within `T`).
//!
//!
//! # The Standard Uniform distribution
//!
//! The [`StandardUniform`] distribution is important to mention. This is the
//! distribution used by [`Rng::random`] and represents the "default" way to
//! produce a random value for many different types, including most primitive
//! types, tuples, arrays, and a few derived types. See the documentation of
//! [`StandardUniform`] for more details.
//!
//! Implementing [`Distribution<T>`] for [`StandardUniform`] for user types `T` makes it
//! possible to generate type `T` with [`Rng::random`], and by extension also
//! with the [`random`] function.
//!
//! ## Other standard uniform distributions
//!
//! [`Alphanumeric`] is a simple distribution to sample random letters and
//! numbers of the `char` type; in contrast [`StandardUniform`] may sample any valid
//! `char`.
//!
//! There's also an [`Alphabetic`] distribution which acts similarly to [`Alphanumeric`] but
//! doesn't include digits.
//!
//! For floats (`f32`, `f64`), [`StandardUniform`] samples from `[0, 1)`. Also
//! provided are [`Open01`] (samples from `(0, 1)`) and [`OpenClosed01`]
//! (samples from `(0, 1]`). No option is provided to sample from `[0, 1]`; it
//! is suggested to use one of the above half-open ranges since the failure to
//! sample a value which would have a low chance of being sampled anyway is
//! rarely an issue in practice.
//!
//! # Parameterized Uniform distributions
//!
//! The [`Uniform`] distribution provides uniform sampling over a specified
//! range on a subset of the types supported by the above distributions.
//!
//! Implementations support single-value-sampling via
//! [`Rng::random_range(Range)`](Rng::random_range).
//! Where a fixed (non-`const`) range will be sampled many times, it is likely
//! faster to pre-construct a [`Distribution`] object using
//! [`Uniform::new`], [`Uniform::new_inclusive`] or `From<Range>`.
//!
//! # Non-uniform sampling
//!
//! Sampling a simple true/false outcome with a given probability has a name:
//! the [`Bernoulli`] distribution (this is used by [`Rng::random_bool`]).
//!
//! For weighted sampling of discrete values see the [`weighted`] module.
//!
//! This crate no longer includes other non-uniform distributions; instead
//! it is recommended that you use either [`rand_distr`] or [`statrs`].
//!
//!
//! [probability distribution]: https://en.wikipedia.org/wiki/Probability_distribution
//! [`rand_distr`]: https://crates.io/crates/rand_distr
//! [`statrs`]: https://crates.io/crates/statrs
//! [`random`]: crate::random
mod bernoulli;
mod distribution;
mod float;
mod integer;
mod other;
mod utils;
#[doc(hidden)]
pub mod hidden_export {
pub use super::float::IntoFloat; // used by rand_distr
}
pub mod slice;
pub mod uniform;
#[cfg(feature = "alloc")]
pub mod weighted;
pub use self::bernoulli::{Bernoulli, BernoulliError};
#[cfg(feature = "alloc")]
pub use self::distribution::SampleString;
pub use self::distribution::{Distribution, Iter, Map};
pub use self::float::{Open01, OpenClosed01};
pub use self::other::{Alphabetic, Alphanumeric};
#[doc(inline)]
pub use self::uniform::Uniform;
#[allow(unused)]
use crate::Rng;
/// The Standard Uniform distribution
///
/// This [`Distribution`] is the *standard* parameterization of [`Uniform`]. Bounds
/// are selected according to the output type.
///
/// Assuming the provided `Rng` is well-behaved, these implementations
/// generate values with the following ranges and distributions:
///
/// * Integers (`i8`, `i32`, `u64`, etc.) are uniformly distributed
/// over the whole range of the type (thus each possible value may be sampled
/// with equal probability).
/// * `char` is uniformly distributed over all Unicode scalar values, i.e. all
/// code points in the range `0...0x10_FFFF`, except for the range
/// `0xD800...0xDFFF` (the surrogate code points). This includes
/// unassigned/reserved code points.
/// For some uses, the [`Alphanumeric`] or [`Alphabetic`] distribution will be more
/// appropriate.
/// * `bool` samples `false` or `true`, each with probability 0.5.
/// * Floating point types (`f32` and `f64`) are uniformly distributed in the
/// half-open range `[0, 1)`. See also the [notes below](#floating-point-implementation).
/// * Wrapping integers ([`Wrapping<T>`]), besides the type identical to their
/// normal integer variants.
/// * Non-zero integers ([`NonZeroU8`]), which are like their normal integer
/// variants but cannot sample zero.
///
/// The `StandardUniform` distribution also supports generation of the following
/// compound types where all component types are supported:
///
/// * Tuples (up to 12 elements): each element is sampled sequentially and
/// independently (thus, assuming a well-behaved RNG, there is no correlation
/// between elements).
/// * Arrays `[T; n]` where `T` is supported. Each element is sampled
/// sequentially and independently. Note that for small `T` this usually
/// results in the RNG discarding random bits; see also [`Rng::fill`] which
/// offers a more efficient approach to filling an array of integer types
/// with random data.
/// * SIMD types (requires [`simd_support`] feature) like x86's [`__m128i`]
/// and `std::simd`'s [`u32x4`], [`f32x4`] and [`mask32x4`] types are
/// effectively arrays of integer or floating-point types. Each lane is
/// sampled independently, potentially with more efficient random-bit-usage
/// (and a different resulting value) than would be achieved with sequential
/// sampling (as with the array types above).
///
/// ## Custom implementations
///
/// The [`StandardUniform`] distribution may be implemented for user types as follows:
///
/// ```
/// # #![allow(dead_code)]
/// use rand::Rng;
/// use rand::distr::{Distribution, StandardUniform};
///
/// struct MyF32 {
/// x: f32,
/// }
///
/// impl Distribution<MyF32> for StandardUniform {
/// fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> MyF32 {
/// MyF32 { x: rng.random() }
/// }
/// }
/// ```
///
/// ## Example usage
/// ```
/// use rand::prelude::*;
/// use rand::distr::StandardUniform;
///
/// let val: f32 = rand::rng().sample(StandardUniform);
/// println!("f32 from [0, 1): {}", val);
/// ```
///
/// # Floating point implementation
/// The floating point implementations for `StandardUniform` generate a random value in
/// the half-open interval `[0, 1)`, i.e. including 0 but not 1.
///
/// All values that can be generated are of the form `n * ε/2`. For `f32`
/// the 24 most significant random bits of a `u32` are used and for `f64` the
/// 53 most significant bits of a `u64` are used. The conversion uses the
/// multiplicative method: `(rng.random::<$uty>() >> N) as $ty * (ε/2)`.
///
/// See also: [`Open01`] which samples from `(0, 1)`, [`OpenClosed01`] which
/// samples from `(0, 1]` and `Rng::random_range(0..1)` which also samples from
/// `[0, 1)`. Note that `Open01` uses transmute-based methods which yield 1 bit
/// less precision but may perform faster on some architectures (on modern Intel
/// CPUs all methods have approximately equal performance).
///
/// [`Uniform`]: uniform::Uniform
/// [`Wrapping<T>`]: std::num::Wrapping
/// [`NonZeroU8`]: std::num::NonZeroU8
/// [`__m128i`]: https://doc.rust-lang.org/core/arch/x86/struct.__m128i.html
/// [`u32x4`]: std::simd::u32x4
/// [`f32x4`]: std::simd::f32x4
/// [`mask32x4`]: std::simd::mask32x4
/// [`simd_support`]: https://github.com/rust-random/rand#crate-features
#[derive(Clone, Copy, Debug, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
// Unit struct: the distribution carries no state; behaviour is selected
// entirely by the sampled output type via its `Distribution<T>` impls.
pub struct StandardUniform;

444
vendor/rand/src/distr/other.rs vendored Normal file
View File

@@ -0,0 +1,444 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The implementations of the `StandardUniform` distribution for other built-in types.
#[cfg(feature = "alloc")]
use alloc::string::String;
use core::array;
use core::char;
use core::num::Wrapping;
#[cfg(feature = "alloc")]
use crate::distr::SampleString;
use crate::distr::{Distribution, StandardUniform, Uniform};
use crate::Rng;
#[cfg(feature = "simd_support")]
use core::simd::prelude::*;
#[cfg(feature = "simd_support")]
use core::simd::{LaneCount, MaskElement, SupportedLaneCount};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
// ----- Sampling distributions -----
/// Sample a `u8`, uniformly distributed over ASCII letters and numbers:
/// a-z, A-Z and 0-9.
///
/// # Example
///
/// ```
/// use rand::Rng;
/// use rand::distr::Alphanumeric;
///
/// let mut rng = rand::rng();
/// let chars: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect();
/// println!("Random chars: {}", chars);
/// ```
///
/// The [`SampleString`] trait provides an easier method of generating
/// a random [`String`], and offers more efficient allocation:
/// ```
/// use rand::distr::{Alphanumeric, SampleString};
/// let string = Alphanumeric.sample_string(&mut rand::rng(), 16);
/// println!("Random string: {}", string);
/// ```
///
/// # Passwords
///
/// Users sometimes ask whether it is safe to use a string of random characters
/// as a password. In principle, all RNGs in Rand implementing `CryptoRng` are
/// suitable as a source of randomness for generating passwords (if they are
/// properly seeded), but it is more conservative to only use randomness
/// directly from the operating system via the `getrandom` crate, or the
/// corresponding bindings of a crypto library.
///
/// When generating passwords or keys, it is important to consider the threat
/// model and in some cases the memorability of the password. This is out of
/// scope of the Rand project, and therefore we defer to the following
/// references:
///
/// - [Wikipedia article on Password Strength](https://en.wikipedia.org/wiki/Password_strength)
/// - [Diceware for generating memorable passwords](https://en.wikipedia.org/wiki/Diceware)
#[derive(Debug, Clone, Copy, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
// Unit struct: stateless marker; sampling logic lives in
// `impl Distribution<u8> for Alphanumeric` below.
pub struct Alphanumeric;
/// Sample a [`u8`], uniformly distributed over letters:
/// a-z and A-Z.
///
/// # Example
///
/// You're able to generate random Alphabetic characters via mapping or via the
/// [`SampleString::sample_string`] method like so:
///
/// ```
/// use rand::Rng;
/// use rand::distr::{Alphabetic, SampleString};
///
/// // Manual mapping
/// let mut rng = rand::rng();
/// let chars: String = (0..7).map(|_| rng.sample(Alphabetic) as char).collect();
/// println!("Random chars: {}", chars);
///
/// // Using [`SampleString::sample_string`]
/// let string = Alphabetic.sample_string(&mut rand::rng(), 16);
/// println!("Random string: {}", string);
/// ```
///
/// # Passwords
///
/// Refer to [`Alphanumeric#Passwords`].
#[derive(Debug, Clone, Copy, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
// Unit struct: stateless marker; sampling logic lives in
// `impl Distribution<u8> for Alphabetic` below.
pub struct Alphabetic;
// ----- Implementations of distributions -----
impl Distribution<char> for StandardUniform {
    /// Sample a uniformly random Unicode scalar value.
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> char {
        // Valid `char`s occupy `[0, 0xD800)` and `(0xDFFF, 0x11_0000)`; the
        // hole between them is the surrogate range, whose width is:
        const GAP_SIZE: u32 = 0xDFFF - 0xD800 + 1;
        // Sample from `[GAP_SIZE, 0x11_0000)` and shift the low part of that
        // interval down by `GAP_SIZE`; this bijectively skips the surrogate
        // gap. (Sampling `[0, 0x11_0000 - GAP_SIZE)` and shifting upward
        // benchmarked slower.)
        let dist = Uniform::new(GAP_SIZE, 0x11_0000).unwrap();
        let sampled = dist.sample(rng);
        let code_point = if sampled <= 0xDFFF {
            sampled - GAP_SIZE
        } else {
            sampled
        };
        // SAFETY: the mapping above never produces a surrogate and stays
        // below 0x11_0000, so `code_point` is always a valid `char`.
        unsafe { char::from_u32_unchecked(code_point) }
    }
}
#[cfg(feature = "alloc")]
impl SampleString for StandardUniform {
    fn append_string<R: Rng + ?Sized>(&self, rng: &mut R, s: &mut String, len: usize) {
        // A char is encoded with at most four bytes, thus this reservation is
        // guaranteed to be sufficient. We do not shrink_to_fit afterwards so
        // that repeated usage on the same `String` buffer does not reallocate.
        s.reserve(4 * len);
        // Appends `len` random chars (not bytes), sampled via the
        // `Distribution<char>` impl above.
        s.extend(Distribution::<char>::sample_iter(self, rng).take(len));
    }
}
impl Distribution<u8> for Alphanumeric {
    /// Sample a uniformly random ASCII letter or digit (as a byte).
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u8 {
        const RANGE: u32 = 26 + 26 + 10;
        const GEN_ASCII_STR_CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
                abcdefghijklmnopqrstuvwxyz\
                0123456789";
        // There are 62 alternatives — just shy of 64 = 2^6 — so rejection
        // sampling on a 6-bit value beats `Uniform` here. We keep the *top*
        // six bits of the `u32` (a shift, not a mask) because weak RNGs tend
        // to have higher-quality most-significant bits.
        let mut candidate = rng.next_u32() >> (32 - 6);
        while candidate >= RANGE {
            candidate = rng.next_u32() >> (32 - 6);
        }
        GEN_ASCII_STR_CHARSET[candidate as usize]
    }
}
impl Distribution<u8> for Alphabetic {
    /// Sample a uniformly random ASCII letter (as a byte).
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u8 {
        // 26 upper-case plus 26 lower-case letters.
        const RANGE: u8 = 26 + 26;
        // Draw an index in `[0, 52)` and map it onto ASCII: the first 26
        // values become 'A'..='Z', the remainder 'a'..='z'.
        let index = rng.random_range(0..RANGE);
        if index < 26 {
            b'A' + index
        } else {
            b'a' + (index - 26)
        }
    }
}
#[cfg(feature = "alloc")]
impl SampleString for Alphanumeric {
    fn append_string<R: Rng + ?Sized>(&self, rng: &mut R, string: &mut String, len: usize) {
        // Bytes are pushed straight into the `String`'s backing vector,
        // skipping per-char UTF-8 encoding.
        // SAFETY: `self` only samples alphanumeric characters, which are valid UTF-8.
        unsafe {
            let v = string.as_mut_vec();
            v.extend(
                self.sample_iter(rng)
                    .take(len)
                    // Debug builds double-check the UTF-8 safety argument.
                    .inspect(|b| debug_assert!(b.is_ascii_alphanumeric())),
            );
        }
    }
}
#[cfg(feature = "alloc")]
impl SampleString for Alphabetic {
    fn append_string<R: Rng + ?Sized>(&self, rng: &mut R, string: &mut String, len: usize) {
        // Each sampled byte is a single ASCII char, so exactly `len` bytes
        // are reserved and appended straight into the backing vector.
        // SAFETY: With this distribution we guarantee that we're working with valid ASCII
        // characters.
        // See [#1590](https://github.com/rust-random/rand/issues/1590).
        unsafe {
            let v = string.as_mut_vec();
            v.reserve_exact(len);
            v.extend(self.sample_iter(rng).take(len));
        }
    }
}
impl Distribution<bool> for StandardUniform {
    /// Sample `true` or `false`, each with probability 0.5.
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> bool {
        // Decide by a single bit of a `u32`. The most significant bit is
        // chosen because the low bits of lower-quality RNGs can exhibit
        // simple patterns.
        rng.next_u32() & (1 << 31) != 0
    }
}
/// Note that on some hardware like x86/64 mask operations like [`_mm_blendv_epi8`]
/// only care about a single bit. This means that you could use uniform random bits
/// directly:
///
/// ```ignore
/// // this may be faster...
/// let x = unsafe { _mm_blendv_epi8(a.into(), b.into(), rng.random::<__m128i>()) };
///
/// // ...than this
/// let x = rng.random::<mask8x16>().select(b, a);
/// ```
///
/// Since most bits are unused you could also generate only as many bits as you need, i.e.:
/// ```
/// #![feature(portable_simd)]
/// use std::simd::prelude::*;
/// use rand::prelude::*;
/// let mut rng = rand::rng();
///
/// let x = u16x8::splat(rng.random::<u8>() as u16);
/// let mask = u16x8::splat(1) << u16x8::from([0, 1, 2, 3, 4, 5, 6, 7]);
/// let rand_mask = (x & mask).simd_eq(mask);
/// ```
///
/// [`_mm_blendv_epi8`]: https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_epi8&ig_expand=514/
/// [`simd_support`]: https://github.com/rust-random/rand#crate-features
#[cfg(feature = "simd_support")]
impl<T, const LANES: usize> Distribution<Mask<T, LANES>> for StandardUniform
where
    T: MaskElement + Default,
    LaneCount<LANES>: SupportedLaneCount,
    StandardUniform: Distribution<Simd<T, LANES>>,
    Simd<T, LANES>: SimdPartialOrd<Mask = Mask<T, LANES>>,
{
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Mask<T, LANES> {
        // `MaskElement` must be a signed integer, so this is equivalent
        // to the scalar `i32 < 0` method
        // (each lane's sign bit decides the corresponding mask lane).
        let var = rng.random::<Simd<T, LANES>>();
        var.simd_lt(Simd::default())
    }
}
/// Implement `Distribution<(A, B, C, ...)> for StandardUniform`, using the list of
/// identifiers
// Elements are sampled sequentially, left to right, each requiring its own
// `StandardUniform: Distribution<_>` bound.
macro_rules! tuple_impl {
    ($($tyvar:ident)*) => {
        impl< $($tyvar,)* > Distribution<($($tyvar,)*)> for StandardUniform
        where $(
            StandardUniform: Distribution< $tyvar >,
        )*
        {
            #[inline]
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> ( $($tyvar,)* ) {
                let out = ($(
                    // use the $tyvar's to get the appropriate number of
                    // repeats (they're not actually needed)
                    rng.random::<$tyvar>()
                ,)*);
                // Suppress the unused variable warning for empty tuple
                let _rng = rng;
                out
            }
        }
    }
}
/// Looping wrapper for `tuple_impl`. Given (A, B, C), it also generates
/// implementations for (A, B) and (A,)
macro_rules! tuple_impls {
    ($($tyvar:ident)*) => {tuple_impls!{[] $($tyvar)*}};
    // Recursive case: emit an impl for the accumulated prefix, then grow it
    // by one identifier.
    ([$($prefix:ident)*] $head:ident $($tail:ident)*) => {
        tuple_impl!{$($prefix)*}
        tuple_impls!{[$($prefix)* $head] $($tail)*}
    };
    // Base case: emit the impl for the full identifier list.
    ([$($prefix:ident)*]) => {
        tuple_impl!{$($prefix)*}
    };
}
// Generates impls for the unit type and tuples of arity 1..=12.
tuple_impls! {A B C D E F G H I J K L}
impl<T, const N: usize> Distribution<[T; N]> for StandardUniform
where
    StandardUniform: Distribution<T>,
{
    /// Fill an array by sampling each element independently, in index order.
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> [T; N] {
        array::from_fn(|_| <StandardUniform as Distribution<T>>::sample(self, rng))
    }
}
impl<T> Distribution<Wrapping<T>> for StandardUniform
where
    StandardUniform: Distribution<T>,
{
    /// Sample the inner value, then wrap it; the distribution over `T` is
    /// unchanged by the `Wrapping` newtype.
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Wrapping<T> {
        let inner: T = rng.random();
        Wrapping(inner)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::RngCore;
    // Smoke test: `char` and `bool` sampling works through a `dyn RngCore`.
    #[test]
    fn test_misc() {
        let rng: &mut dyn RngCore = &mut crate::test::rng(820);
        rng.sample::<char, _>(StandardUniform);
        rng.sample::<bool, _>(StandardUniform);
    }
    #[cfg(feature = "alloc")]
    #[test]
    fn test_chars() {
        use core::iter;
        let mut rng = crate::test::rng(805);
        // Test by generating a relatively large number of chars, so we also
        // take the rejection sampling path.
        let word: String = iter::repeat(())
            .map(|()| rng.random::<char>())
            .take(1000)
            .collect();
        assert!(!word.is_empty());
    }
    #[test]
    fn test_alphanumeric() {
        let mut rng = crate::test::rng(806);
        // Test by generating a relatively large number of chars, so we also
        // take the rejection sampling path.
        let mut incorrect = false;
        for _ in 0..100 {
            let c: char = rng.sample(Alphanumeric).into();
            incorrect |= !c.is_ascii_alphanumeric();
        }
        assert!(!incorrect);
    }
    #[test]
    fn test_alphabetic() {
        let mut rng = crate::test::rng(806);
        // Test by generating a relatively large number of chars, so we also
        // take the rejection sampling path.
        let mut incorrect = false;
        for _ in 0..100 {
            let c: char = rng.sample(Alphabetic).into();
            incorrect |= !c.is_ascii_alphabetic();
        }
        assert!(!incorrect);
    }
    // Regression test: pins exact outputs for a fixed seed so behaviour
    // cannot change silently.
    #[test]
    fn value_stability() {
        // Draw five samples of `T` from `distr` with a freshly seeded RNG
        // and compare against the recorded expected values.
        fn test_samples<T: Copy + core::fmt::Debug + PartialEq, D: Distribution<T>>(
            distr: &D,
            zero: T,
            expected: &[T],
        ) {
            let mut rng = crate::test::rng(807);
            let mut buf = [zero; 5];
            for x in &mut buf {
                *x = rng.sample(distr);
            }
            assert_eq!(&buf, expected);
        }
        test_samples(
            &StandardUniform,
            'a',
            &[
                '\u{8cdac}',
                '\u{a346a}',
                '\u{80120}',
                '\u{ed692}',
                '\u{35888}',
            ],
        );
        test_samples(&Alphanumeric, 0, &[104, 109, 101, 51, 77]);
        test_samples(&Alphabetic, 0, &[97, 102, 89, 116, 75]);
        test_samples(&StandardUniform, false, &[true, true, false, true, false]);
        test_samples(
            &StandardUniform,
            Wrapping(0i32),
            &[
                Wrapping(-2074640887),
                Wrapping(-1719949321),
                Wrapping(2018088303),
                Wrapping(-547181756),
                Wrapping(838957336),
            ],
        );
        // We test only sub-sets of tuple and array impls
        test_samples(&StandardUniform, (), &[(), (), (), (), ()]);
        test_samples(
            &StandardUniform,
            (false,),
            &[(true,), (true,), (false,), (true,), (false,)],
        );
        test_samples(
            &StandardUniform,
            (false, false),
            &[
                (true, true),
                (false, true),
                (false, false),
                (true, false),
                (false, false),
            ],
        );
        test_samples(&StandardUniform, [0u8; 0], &[[], [], [], [], []]);
        test_samples(
            &StandardUniform,
            [0u8; 3],
            &[
                [9, 247, 111],
                [68, 24, 13],
                [174, 19, 194],
                [172, 69, 213],
                [149, 207, 29],
            ],
        );
    }
}

167
vendor/rand/src/distr/slice.rs vendored Normal file
View File

@@ -0,0 +1,167 @@
// Copyright 2021 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Distributions over slices
use core::num::NonZeroUsize;
use crate::distr::uniform::{UniformSampler, UniformUsize};
use crate::distr::Distribution;
#[cfg(feature = "alloc")]
use alloc::string::String;
/// A distribution to uniformly sample elements of a slice
///
/// Like [`IndexedRandom::choose`], this uniformly samples elements of a slice
/// without modification of the slice (so called "sampling with replacement").
/// This distribution object may be a little faster for repeated sampling (but
/// slower for small numbers of samples).
///
/// ## Examples
///
/// Since this is a distribution, [`Rng::sample_iter`] and
/// [`Distribution::sample_iter`] may be used, for example:
/// ```
/// use rand::distr::{Distribution, slice::Choose};
///
/// let vowels = ['a', 'e', 'i', 'o', 'u'];
/// let vowels_dist = Choose::new(&vowels).unwrap();
///
/// // build a string of 10 vowels
/// let vowel_string: String = vowels_dist
/// .sample_iter(&mut rand::rng())
/// .take(10)
/// .collect();
///
/// println!("{}", vowel_string);
/// assert_eq!(vowel_string.len(), 10);
/// assert!(vowel_string.chars().all(|c| vowels.contains(&c)));
/// ```
///
/// For a single sample, [`IndexedRandom::choose`] may be preferred:
/// ```
/// use rand::seq::IndexedRandom;
///
/// let vowels = ['a', 'e', 'i', 'o', 'u'];
/// let mut rng = rand::rng();
///
/// println!("{}", vowels.choose(&mut rng).unwrap());
/// ```
///
/// [`IndexedRandom::choose`]: crate::seq::IndexedRandom::choose
/// [`Rng::sample_iter`]: crate::Rng::sample_iter
#[derive(Debug, Clone, Copy)]
pub struct Choose<'a, T> {
    // Borrowed source of elements; guaranteed non-empty by `new`.
    slice: &'a [T],
    // Pre-built sampler over `0..slice.len()`.
    range: UniformUsize,
    // Cached element count (non-zero by construction).
    num_choices: NonZeroUsize,
}
impl<'a, T> Choose<'a, T> {
    /// Create a new `Choose` instance which samples uniformly from the slice.
    ///
    /// Returns error [`Empty`] if the slice is empty.
    pub fn new(slice: &'a [T]) -> Result<Self, Empty> {
        match NonZeroUsize::new(slice.len()) {
            None => Err(Empty),
            Some(num_choices) => Ok(Self {
                slice,
                // Cannot fail: `0 < num_choices.get()` always holds.
                range: UniformUsize::new(0, num_choices.get()).unwrap(),
                num_choices,
            }),
        }
    }

    /// Returns the count of choices in this distribution
    pub fn num_choices(&self) -> NonZeroUsize {
        self.num_choices
    }
}
impl<'a, T> Distribution<&'a T> for Choose<'a, T> {
    fn sample<R: crate::Rng + ?Sized>(&self, rng: &mut R) -> &'a T {
        // Draw an index in `0..slice.len()` from the pre-built sampler.
        let idx = self.range.sample(rng);
        debug_assert!(
            idx < self.slice.len(),
            "Uniform::new(0, {}) somehow returned {}",
            self.slice.len(),
            idx
        );
        // Safety: at construction time, it was ensured that the slice was
        // non-empty, and that the `Uniform` range produces values in range
        // for the slice
        unsafe { self.slice.get_unchecked(idx) }
    }
}
/// Error: empty slice
///
/// This error is returned when [`Choose::new`] is given an empty slice.
// Zero-sized marker type; it carries no further information.
#[derive(Debug, Clone, Copy)]
pub struct Empty;
impl core::fmt::Display for Empty {
    /// Write the fixed error message for an empty-slice construction failure.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.write_str("Tried to create a `rand::distr::slice::Choose` with an empty slice")
    }
}
// `Empty` has no underlying cause, so the default trait methods suffice.
#[cfg(feature = "std")]
impl std::error::Error for Empty {}
#[cfg(feature = "alloc")]
impl super::SampleString for Choose<'_, char> {
    fn append_string<R: crate::Rng + ?Sized>(&self, rng: &mut R, string: &mut String, len: usize) {
        // Get the max char length to minimize extra space.
        // Limit this check to avoid searching for long slice.
        let max_char_len = if self.slice.len() < 200 {
            self.slice
                .iter()
                .try_fold(1, |max_len, char| {
                    // When the current max_len is 4, the result max_char_len will be 4.
                    Some(max_len.max(char.len_utf8())).filter(|len| *len < 4)
                })
                .unwrap_or(4)
        } else {
            4
        };
        // Split the extension of string to reuse the unused capacities.
        // Skip the split for small length or only ascii slice.
        let mut extend_len = if max_char_len == 1 || len < 100 {
            len
        } else {
            len / 4
        };
        let mut remain_len = len;
        while extend_len > 0 {
            // Reserve conservatively for this batch, append the sampled
            // chars, then cap the next batch at however many chars remain.
            string.reserve(max_char_len * extend_len);
            string.extend(self.sample_iter(&mut *rng).take(extend_len));
            remain_len -= extend_len;
            extend_len = extend_len.min(remain_len);
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use core::iter;
    // Pins sampler output for a fixed seed (value-stability guarantee).
    #[test]
    fn value_stability() {
        let rng = crate::test::rng(651);
        let slice = Choose::new(b"escaped emus explore extensively").unwrap();
        let expected = b"eaxee";
        assert!(iter::zip(slice.sample_iter(rng), expected).all(|(a, b)| a == b));
    }
}

622
vendor/rand/src/distr/uniform.rs vendored Normal file
View File

@@ -0,0 +1,622 @@
// Copyright 2018-2020 Developers of the Rand project.
// Copyright 2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A distribution uniformly sampling numbers within a given range.
//!
//! [`Uniform`] is the standard distribution to sample uniformly from a range;
//! e.g. `Uniform::new_inclusive(1, 6).unwrap()` can sample integers from 1 to 6, like a
//! standard die. [`Rng::random_range`] is implemented over [`Uniform`].
//!
//! # Example usage
//!
//! ```
//! use rand::Rng;
//! use rand::distr::Uniform;
//!
//! let mut rng = rand::rng();
//! let side = Uniform::new(-10.0, 10.0).unwrap();
//!
//! // sample between 1 and 10 points
//! for _ in 0..rng.random_range(1..=10) {
//! // sample a point from the square with sides -10 - 10 in two dimensions
//! let (x, y) = (rng.sample(side), rng.sample(side));
//! println!("Point: {}, {}", x, y);
//! }
//! ```
//!
//! # Extending `Uniform` to support a custom type
//!
//! To extend [`Uniform`] to support your own types, write a back-end which
//! implements the [`UniformSampler`] trait, then implement the [`SampleUniform`]
//! helper trait to "register" your back-end. See the `MyF32` example below.
//!
//! At a minimum, the back-end needs to store any parameters needed for sampling
//! (e.g. the target range) and implement `new`, `new_inclusive` and `sample`.
//! Those methods should include an assertion to check the range is valid (i.e.
//! `low < high`). The example below merely wraps another back-end.
//!
//! The `new`, `new_inclusive`, `sample_single` and `sample_single_inclusive`
//! functions use arguments of
//! type `SampleBorrow<X>` to support passing in values by reference or
//! by value. In the implementation of these functions, you can choose to
//! simply use the reference returned by [`SampleBorrow::borrow`], or you can choose
//! to copy or clone the value, whatever is appropriate for your type.
//!
//! ```
//! use rand::prelude::*;
//! use rand::distr::uniform::{Uniform, SampleUniform,
//! UniformSampler, UniformFloat, SampleBorrow, Error};
//!
//! struct MyF32(f32);
//!
//! #[derive(Clone, Copy, Debug)]
//! struct UniformMyF32(UniformFloat<f32>);
//!
//! impl UniformSampler for UniformMyF32 {
//! type X = MyF32;
//!
//! fn new<B1, B2>(low: B1, high: B2) -> Result<Self, Error>
//! where B1: SampleBorrow<Self::X> + Sized,
//! B2: SampleBorrow<Self::X> + Sized
//! {
//! UniformFloat::<f32>::new(low.borrow().0, high.borrow().0).map(UniformMyF32)
//! }
//! fn new_inclusive<B1, B2>(low: B1, high: B2) -> Result<Self, Error>
//! where B1: SampleBorrow<Self::X> + Sized,
//! B2: SampleBorrow<Self::X> + Sized
//! {
//! UniformFloat::<f32>::new_inclusive(low.borrow().0, high.borrow().0).map(UniformMyF32)
//! }
//! fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
//! MyF32(self.0.sample(rng))
//! }
//! }
//!
//! impl SampleUniform for MyF32 {
//! type Sampler = UniformMyF32;
//! }
//!
//! let (low, high) = (MyF32(17.0f32), MyF32(22.0f32));
//! let uniform = Uniform::new(low, high).unwrap();
//! let x = uniform.sample(&mut rand::rng());
//! ```
//!
//! [`SampleUniform`]: crate::distr::uniform::SampleUniform
//! [`UniformSampler`]: crate::distr::uniform::UniformSampler
//! [`UniformInt`]: crate::distr::uniform::UniformInt
//! [`UniformFloat`]: crate::distr::uniform::UniformFloat
//! [`UniformDuration`]: crate::distr::uniform::UniformDuration
//! [`SampleBorrow::borrow`]: crate::distr::uniform::SampleBorrow::borrow
#[path = "uniform_float.rs"]
mod float;
#[doc(inline)]
pub use float::UniformFloat;
#[path = "uniform_int.rs"]
mod int;
#[doc(inline)]
pub use int::{UniformInt, UniformUsize};
#[path = "uniform_other.rs"]
mod other;
#[doc(inline)]
pub use other::{UniformChar, UniformDuration};
use core::fmt;
use core::ops::{Range, RangeInclusive, RangeTo, RangeToInclusive};
use crate::distr::Distribution;
use crate::{Rng, RngCore};
/// Error type returned from [`Uniform::new`] and `new_inclusive`.
///
/// Also returned by the `sample_single` / `sample_single_inclusive` methods
/// of [`UniformSampler`] and [`SampleRange`] when given an invalid range.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
    /// `low > high`, or equal in case of exclusive range.
    EmptyRange,
    /// Input or range `high - low` is non-finite. Not relevant to integer types.
    NonFinite,
}
impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Map each variant to its static description, then emit it.
        let msg = match self {
            Error::EmptyRange => "low > high (or equal if exclusive) in uniform distribution",
            Error::NonFinite => "Non-finite range in uniform distribution",
        };
        f.write_str(msg)
    }
}
// With the `std` feature, `Error` also implements the standard error trait;
// the message comes from the `Display` impl, no `source` is provided.
#[cfg(feature = "std")]
impl std::error::Error for Error {}
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// Sample values uniformly between two bounds.
///
/// # Construction
///
/// [`Uniform::new`] and [`Uniform::new_inclusive`] construct a uniform
/// distribution sampling from the given `low` and `high` limits. `Uniform` may
/// also be constructed via [`TryFrom`] as in `Uniform::try_from(1..=6).unwrap()`.
///
/// Constructors may do extra work up front to allow faster sampling of multiple
/// values. Where only a single sample is required it is suggested to use
/// [`Rng::random_range`] or one of the `sample_single` methods instead.
///
/// When sampling from a constant range, many calculations can happen at
/// compile-time and all methods should be fast; for floating-point ranges and
/// the full range of integer types, this should have comparable performance to
/// the [`StandardUniform`](super::StandardUniform) distribution.
///
/// # Provided implementations
///
/// - `char` ([`UniformChar`]): samples a range over the implementation for `u32`
/// - `f32`, `f64` ([`UniformFloat`]): samples approximately uniformly within a
/// range; bias may be present in the least-significant bit of the significand
/// and the limits of the input range may be sampled even when an open
/// (exclusive) range is used
/// - Integer types ([`UniformInt`]) may show a small bias relative to the
/// expected uniform distribution of output. In the worst case, bias affects
/// 1 in `2^n` samples where n is 56 (`i8` and `u8`), 48 (`i16` and `u16`), 96
/// (`i32` and `u32`), 64 (`i64` and `u64`), 128 (`i128` and `u128`).
/// The `unbiased` feature flag fixes this bias.
/// - `usize` ([`UniformUsize`]) is handled specially, using the `u32`
/// implementation where possible to enable portable results across 32-bit and
/// 64-bit CPU architectures.
/// - `Duration` ([`UniformDuration`]): samples a range over the implementation
/// for `u32` or `u64`
/// - SIMD types (requires [`simd_support`] feature) like x86's [`__m128i`]
/// and `std::simd`'s [`u32x4`], [`f32x4`] and [`mask32x4`] types are
/// effectively arrays of integer or floating-point types. Each lane is
/// sampled independently from its own range, potentially with more efficient
/// random-bit-usage than would be achieved with sequential sampling.
///
/// # Example
///
/// ```
/// use rand::distr::{Distribution, Uniform};
///
/// let between = Uniform::try_from(10..10000).unwrap();
/// let mut rng = rand::rng();
/// let mut sum = 0;
/// for _ in 0..1000 {
/// sum += between.sample(&mut rng);
/// }
/// println!("{}", sum);
/// ```
///
/// For a single sample, [`Rng::random_range`] may be preferred:
///
/// ```
/// use rand::Rng;
///
/// let mut rng = rand::rng();
/// println!("{}", rng.random_range(0..10));
/// ```
///
/// [`new`]: Uniform::new
/// [`new_inclusive`]: Uniform::new_inclusive
/// [`Rng::random_range`]: Rng::random_range
/// [`__m128i`]: https://doc.rust-lang.org/core/arch/x86/struct.__m128i.html
/// [`u32x4`]: std::simd::u32x4
/// [`f32x4`]: std::simd::f32x4
/// [`mask32x4`]: std::simd::mask32x4
/// [`simd_support`]: https://github.com/rust-random/rand#crate-features
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(bound(serialize = "X::Sampler: Serialize")))]
#[cfg_attr(
    feature = "serde",
    serde(bound(deserialize = "X::Sampler: Deserialize<'de>"))
)]
// Newtype over the type-specific back-end sampler; construction and sampling
// are delegated to `X::Sampler`.
pub struct Uniform<X: SampleUniform>(X::Sampler);
impl<X: SampleUniform> Uniform<X> {
    /// Create a new `Uniform` instance, which samples uniformly from the half
    /// open range `[low, high)` (excluding `high`).
    ///
    /// For discrete types (e.g. integers), samples will always be strictly less
    /// than `high`. For (approximations of) continuous types (e.g. `f32`, `f64`),
    /// samples may equal `high` due to loss of precision but may not be
    /// greater than `high`.
    ///
    /// Fails if `low >= high`, or if `low`, `high` or the range `high - low` is
    /// non-finite. In release mode, only the range is checked.
    pub fn new<B1, B2>(low: B1, high: B2) -> Result<Uniform<X>, Error>
    where
        B1: SampleBorrow<X> + Sized,
        B2: SampleBorrow<X> + Sized,
    {
        // Validation and any precomputation happen in the back-end sampler.
        let sampler = X::Sampler::new(low, high)?;
        Ok(Self(sampler))
    }

    /// Create a new `Uniform` instance, which samples uniformly from the closed
    /// range `[low, high]` (inclusive).
    ///
    /// Fails if `low > high`, or if `low`, `high` or the range `high - low` is
    /// non-finite. In release mode, only the range is checked.
    pub fn new_inclusive<B1, B2>(low: B1, high: B2) -> Result<Uniform<X>, Error>
    where
        B1: SampleBorrow<X> + Sized,
        B2: SampleBorrow<X> + Sized,
    {
        let sampler = X::Sampler::new_inclusive(low, high)?;
        Ok(Self(sampler))
    }
}
impl<X: SampleUniform> Distribution<X> for Uniform<X> {
    // Pure delegation: the wrapped back-end sampler does the actual work.
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> X {
        self.0.sample(rng)
    }
}
/// Helper trait for creating objects using the correct implementation of
/// [`UniformSampler`] for the sampling type.
///
/// Any type implementing this trait may be sampled via [`Uniform`] or used as
/// a range bound with [`SampleRange`]-based methods.
///
/// See the [module documentation] on how to implement [`Uniform`] range
/// sampling for a custom type.
///
/// [module documentation]: crate::distr::uniform
pub trait SampleUniform: Sized {
    /// The `UniformSampler` implementation supporting type `X`.
    type Sampler: UniformSampler<X = Self>;
}
/// Helper trait handling actual uniform sampling.
///
/// See the [module documentation] on how to implement [`Uniform`] range
/// sampling for a custom type.
///
/// Implementation of [`sample_single`] is optional, and is only useful when
/// the implementation can be faster than `Self::new(low, high).sample(rng)`.
///
/// [module documentation]: crate::distr::uniform
/// [`sample_single`]: UniformSampler::sample_single
pub trait UniformSampler: Sized {
    /// The type sampled by this implementation.
    type X;

    /// Construct self, with inclusive lower bound and exclusive upper bound `[low, high)`.
    ///
    /// For discrete types (e.g. integers), samples will always be strictly less
    /// than `high`. For (approximations of) continuous types (e.g. `f32`, `f64`),
    /// samples may equal `high` due to loss of precision but may not be
    /// greater than `high`.
    ///
    /// Usually users should not call this directly but prefer to use
    /// [`Uniform::new`].
    fn new<B1, B2>(low: B1, high: B2) -> Result<Self, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized;

    /// Construct self, with inclusive bounds `[low, high]`.
    ///
    /// Usually users should not call this directly but prefer to use
    /// [`Uniform::new_inclusive`].
    fn new_inclusive<B1, B2>(low: B1, high: B2) -> Result<Self, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized;

    /// Sample a value.
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X;

    /// Sample a single value uniformly from a range with inclusive lower bound
    /// and exclusive upper bound `[low, high)`.
    ///
    /// For discrete types (e.g. integers), samples will always be strictly less
    /// than `high`. For (approximations of) continuous types (e.g. `f32`, `f64`),
    /// samples may equal `high` due to loss of precision but may not be
    /// greater than `high`.
    ///
    /// By default this is implemented using
    /// `UniformSampler::new(low, high).sample(rng)`. However, for some types
    /// more optimal implementations for single usage may be provided via this
    /// method (which is the case for integers and floats).
    /// Results may not be identical.
    ///
    /// Note that to use this method in a generic context, the type needs to be
    /// retrieved via `SampleUniform::Sampler` as follows:
    /// ```
    /// use rand::distr::uniform::{SampleUniform, UniformSampler};
    /// # #[allow(unused)]
    /// fn sample_from_range<T: SampleUniform>(lb: T, ub: T) -> T {
    ///     let mut rng = rand::rng();
    ///     <T as SampleUniform>::Sampler::sample_single(lb, ub, &mut rng).unwrap()
    /// }
    /// ```
    fn sample_single<R: Rng + ?Sized, B1, B2>(
        low: B1,
        high: B2,
        rng: &mut R,
    ) -> Result<Self::X, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        // Default: build a throwaway sampler and draw a single value from it.
        Ok(Self::new(low, high)?.sample(rng))
    }

    /// Sample a single value uniformly from a range with inclusive lower bound
    /// and inclusive upper bound `[low, high]`.
    ///
    /// By default this is implemented using
    /// `UniformSampler::new_inclusive(low, high).sample(rng)`. However, for
    /// some types more optimal implementations for single usage may be provided
    /// via this method.
    /// Results may not be identical.
    fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(
        low: B1,
        high: B2,
        rng: &mut R,
    ) -> Result<Self::X, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        // Default: same strategy as `sample_single`, with inclusive bounds.
        Ok(Self::new_inclusive(low, high)?.sample(rng))
    }
}
impl<X: SampleUniform> TryFrom<Range<X>> for Uniform<X> {
    type Error = Error;

    /// Convert an exclusive `start..end` range into a uniform sampler.
    fn try_from(r: Range<X>) -> Result<Uniform<X>, Error> {
        let Range { start, end } = r;
        Uniform::new(start, end)
    }
}
impl<X: SampleUniform> TryFrom<RangeInclusive<X>> for Uniform<X> {
type Error = Error;
fn try_from(r: ::core::ops::RangeInclusive<X>) -> Result<Uniform<X>, Error> {
Uniform::new_inclusive(r.start(), r.end())
}
}
/// Helper trait similar to [`Borrow`] but implemented
/// only for [`SampleUniform`] and references to [`SampleUniform`]
/// in order to resolve ambiguity issues.
///
/// This lets range constructors such as [`Uniform::new`] accept bounds either
/// by value or by reference.
///
/// [`Borrow`]: std::borrow::Borrow
pub trait SampleBorrow<Borrowed> {
    /// Immutably borrows from an owned value. See [`Borrow::borrow`]
    ///
    /// [`Borrow::borrow`]: std::borrow::Borrow::borrow
    fn borrow(&self) -> &Borrowed;
}
// By-value bounds: borrowing an owned `Borrowed` is just taking a reference.
impl<Borrowed> SampleBorrow<Borrowed> for Borrowed
where
    Borrowed: SampleUniform,
{
    #[inline(always)]
    fn borrow(&self) -> &Borrowed {
        self
    }
}
// By-reference bounds: `&&Borrowed` auto-derefs to the inner reference.
impl<Borrowed> SampleBorrow<Borrowed> for &Borrowed
where
    Borrowed: SampleUniform,
{
    #[inline(always)]
    fn borrow(&self) -> &Borrowed {
        self
    }
}
/// Range that supports generating a single sample efficiently.
///
/// Any type implementing this trait can be used to specify the sampled range
/// for `Rng::random_range`.
pub trait SampleRange<T> {
    /// Generate a sample from the given range.
    ///
    /// Consumes the range; returns an [`Error`] for an invalid range.
    fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> Result<T, Error>;

    /// Check whether the range is empty.
    fn is_empty(&self) -> bool;
}
impl<T: SampleUniform + PartialOrd> SampleRange<T> for Range<T> {
    #[inline]
    fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> Result<T, Error> {
        // `start..end` samples the half-open range [start, end).
        T::Sampler::sample_single(self.start, self.end, rng)
    }

    #[inline]
    fn is_empty(&self) -> bool {
        // Deliberately `!(a < b)` rather than `a >= b`: `T` is only
        // `PartialOrd`, so incomparable bounds (e.g. a float NaN) make both
        // comparisons false and such ranges must count as empty.
        !(self.start < self.end)
    }
}
impl<T: SampleUniform + PartialOrd> SampleRange<T> for RangeInclusive<T> {
    #[inline]
    fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> Result<T, Error> {
        // `start..=end` samples the closed range [start, end].
        T::Sampler::sample_single_inclusive(self.start(), self.end(), rng)
    }

    #[inline]
    fn is_empty(&self) -> bool {
        // `!(a <= b)` instead of `a > b`: incomparable (NaN) bounds must be
        // treated as an empty range under a partial order.
        !(self.start() <= self.end())
    }
}
// Implement `SampleRange<$t>` for the prefix ranges `..end` and `..=end` of
// an unsigned integer type `$t`, using 0 as the implicit lower bound.
macro_rules! impl_sample_range_u {
    ($t:ty) => {
        // `..end`: sample uniformly from [0, end).
        impl SampleRange<$t> for RangeTo<$t> {
            #[inline]
            fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> Result<$t, Error> {
                <$t as SampleUniform>::Sampler::sample_single(0, self.end, rng)
            }

            #[inline]
            fn is_empty(&self) -> bool {
                // `..0` excludes its only candidate, 0.
                0 == self.end
            }
        }

        // `..=end`: sample uniformly from [0, end].
        impl SampleRange<$t> for RangeToInclusive<$t> {
            #[inline]
            fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> Result<$t, Error> {
                <$t as SampleUniform>::Sampler::sample_single_inclusive(0, self.end, rng)
            }

            #[inline]
            fn is_empty(&self) -> bool {
                // `..=end` always contains at least 0 for unsigned `$t`.
                false
            }
        }
    };
}

impl_sample_range_u!(u8);
impl_sample_range_u!(u16);
impl_sample_range_u!(u32);
impl_sample_range_u!(u64);
impl_sample_range_u!(u128);
impl_sample_range_u!(usize);
#[cfg(test)]
mod tests {
    use super::*;
    use core::time::Duration;

    // Round-trip a sampler through serde (bincode) and check the back-end
    // state survives unchanged.
    #[test]
    #[cfg(feature = "serde")]
    fn test_uniform_serialization() {
        let unit_box: Uniform<i32> = Uniform::new(-1, 1).unwrap();
        let de_unit_box: Uniform<i32> =
            bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap();
        assert_eq!(unit_box.0, de_unit_box.0);

        let unit_box: Uniform<f32> = Uniform::new(-1., 1.).unwrap();
        let de_unit_box: Uniform<f32> =
            bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap();
        assert_eq!(unit_box.0, de_unit_box.0);
    }

    // Exercise the custom-type recipe from the module docs: a newtype float
    // with its own `UniformSampler` delegating to `UniformFloat<f32>`.
    #[test]
    fn test_custom_uniform() {
        use crate::distr::uniform::{SampleBorrow, SampleUniform, UniformFloat, UniformSampler};
        #[derive(Clone, Copy, PartialEq, PartialOrd)]
        struct MyF32 {
            x: f32,
        }
        #[derive(Clone, Copy, Debug)]
        struct UniformMyF32(UniformFloat<f32>);
        impl UniformSampler for UniformMyF32 {
            type X = MyF32;

            fn new<B1, B2>(low: B1, high: B2) -> Result<Self, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                UniformFloat::<f32>::new(low.borrow().x, high.borrow().x).map(UniformMyF32)
            }

            fn new_inclusive<B1, B2>(low: B1, high: B2) -> Result<Self, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                // Test shortcut: inclusive construction reuses the exclusive
                // constructor (adequate for this test's assertions).
                UniformSampler::new(low, high)
            }

            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                MyF32 {
                    x: self.0.sample(rng),
                }
            }
        }
        impl SampleUniform for MyF32 {
            type Sampler = UniformMyF32;
        }

        let (low, high) = (MyF32 { x: 17.0f32 }, MyF32 { x: 22.0f32 });
        let uniform = Uniform::new(low, high).unwrap();
        let mut rng = crate::test::rng(804);
        for _ in 0..100 {
            let x: MyF32 = rng.sample(uniform);
            assert!(low <= x && x < high);
        }
    }

    // Pin exact outputs for fixed RNG seeds so cross-version/-platform
    // reproducibility regressions are caught. Do not change the constants.
    #[test]
    fn value_stability() {
        fn test_samples<T: SampleUniform + Copy + fmt::Debug + PartialEq>(
            lb: T,
            ub: T,
            expected_single: &[T],
            expected_multiple: &[T],
        ) where
            Uniform<T>: Distribution<T>,
        {
            let mut rng = crate::test::rng(897);
            let mut buf = [lb; 3];

            // `sample_single` and the prepared sampler take different code
            // paths; both are pinned separately.
            for x in &mut buf {
                *x = T::Sampler::sample_single(lb, ub, &mut rng).unwrap();
            }
            assert_eq!(&buf, expected_single);

            let distr = Uniform::new(lb, ub).unwrap();
            for x in &mut buf {
                *x = rng.sample(&distr);
            }
            assert_eq!(&buf, expected_multiple);
        }

        test_samples(
            0f32,
            1e-2f32,
            &[0.0003070104, 0.0026630748, 0.00979833],
            &[0.008194133, 0.00398172, 0.007428536],
        );
        test_samples(
            -1e10f64,
            1e10f64,
            &[-4673848682.871551, 6388267422.932352, 4857075081.198343],
            &[1173375212.1808167, 1917642852.109581, 2365076174.3153973],
        );
        test_samples(
            Duration::new(2, 0),
            Duration::new(4, 0),
            &[
                Duration::new(2, 532615131),
                Duration::new(3, 638826742),
                Duration::new(3, 485707508),
            ],
            &[
                Duration::new(3, 117337521),
                Duration::new(3, 191764285),
                Duration::new(3, 236507617),
            ],
        );
    }

    // `Uniform` derives `PartialEq`; verify equal ranges compare equal for
    // both the float and integer back-ends.
    #[test]
    fn uniform_distributions_can_be_compared() {
        assert_eq!(
            Uniform::new(1.0, 2.0).unwrap(),
            Uniform::new(1.0, 2.0).unwrap()
        );

        // To cover UniformInt
        assert_eq!(
            Uniform::new(1_u32, 2_u32).unwrap(),
            Uniform::new(1_u32, 2_u32).unwrap()
        );
    }
}

454
vendor/rand/src/distr/uniform_float.rs vendored Normal file
View File

@@ -0,0 +1,454 @@
// Copyright 2018-2020 Developers of the Rand project.
// Copyright 2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! `UniformFloat` implementation
use super::{Error, SampleBorrow, SampleUniform, UniformSampler};
use crate::distr::float::IntoFloat;
use crate::distr::utils::{BoolAsSIMD, FloatAsSIMD, FloatSIMDUtils, IntAsSIMD};
use crate::Rng;
#[cfg(feature = "simd_support")]
use core::simd::prelude::*;
// #[cfg(feature = "simd_support")]
// use core::simd::{LaneCount, SupportedLaneCount};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// The back-end implementing [`UniformSampler`] for floating-point types.
///
/// Unless you are implementing [`UniformSampler`] for your own type, this type
/// should not be used directly, use [`Uniform`] instead.
///
/// # Implementation notes
///
/// `UniformFloat` implementations convert RNG output to a float in the range
/// `[1, 2)` via transmutation, map this to `[0, 1)`, then scale and translate
/// to the desired range. Values produced this way have what equals 23 bits of
/// random digits for an `f32` and 52 for an `f64`.
///
/// # Bias and range errors
///
/// Bias may be expected within the least-significant bit of the significand.
/// It is not guaranteed that exclusive limits of a range are respected; i.e.
/// when sampling the range `[a, b)` it is not guaranteed that `b` is never
/// sampled.
///
/// [`new`]: UniformSampler::new
/// [`new_inclusive`]: UniformSampler::new_inclusive
/// [`StandardUniform`]: crate::distr::StandardUniform
/// [`Uniform`]: super::Uniform
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct UniformFloat<X> {
    /// Lower bound of the range; added after scaling the raw sample.
    low: X,
    /// Multiplier applied to the raw `[0, 1)` sample; pre-reduced by
    /// `new_bounded` so rounding cannot push samples above `high`.
    scale: X,
}
// Generate the `UniformFloat` back-end for one float type (scalar or SIMD).
// `$meta` optionally gates the impls behind a cfg (used for SIMD types);
// `$uty` is the matching unsigned integer type used as the raw bit source and
// `$bits_to_discard` is the number of random bits beyond the significand width.
macro_rules! uniform_float_impl {
    ($($meta:meta)?, $ty:ty, $uty:ident, $f_scalar:ident, $u_scalar:ident, $bits_to_discard:expr) => {
        $(#[cfg($meta)])?
        impl UniformFloat<$ty> {
            /// Construct, reducing `scale` as required to ensure that rounding
            /// can never yield values greater than `high`.
            ///
            /// Note: though it may be tempting to use a variant of this method
            /// to ensure that samples from `[low, high)` are always strictly
            /// less than `high`, this approach may be very slow where
            /// `scale.abs()` is much smaller than `high.abs()`
            /// (example: `low=0.99999999997819644, high=1.`).
            fn new_bounded(low: $ty, high: $ty, mut scale: $ty) -> Self {
                // Largest raw sample in [0, 1) is `1 - EPSILON`; shrink
                // `scale` (lane-wise for SIMD) until even that cannot
                // round above `high`.
                let max_rand = <$ty>::splat(1.0 as $f_scalar - $f_scalar::EPSILON);
                loop {
                    let mask = (scale * max_rand + low).gt_mask(high);
                    if !mask.any() {
                        break;
                    }
                    scale = scale.decrease_masked(mask);
                }
                debug_assert!(<$ty>::splat(0.0).all_le(scale));
                UniformFloat { low, scale }
            }
        }

        $(#[cfg($meta)])?
        impl SampleUniform for $ty {
            type Sampler = UniformFloat<$ty>;
        }

        $(#[cfg($meta)])?
        impl UniformSampler for UniformFloat<$ty> {
            type X = $ty;

            fn new<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                // Finiteness of the individual bounds is only checked in
                // debug builds; release builds check only the range below.
                #[cfg(debug_assertions)]
                if !(low.all_finite()) || !(high.all_finite()) {
                    return Err(Error::NonFinite);
                }
                if !(low.all_lt(high)) {
                    return Err(Error::EmptyRange);
                }

                let scale = high - low;
                if !(scale.all_finite()) {
                    return Err(Error::NonFinite);
                }

                Ok(Self::new_bounded(low, high, scale))
            }

            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                #[cfg(debug_assertions)]
                if !(low.all_finite()) || !(high.all_finite()) {
                    return Err(Error::NonFinite);
                }
                if !low.all_le(high) {
                    return Err(Error::EmptyRange);
                }

                // Divide by the maximum raw sample so that the top sample
                // maps (approximately) onto `high` itself.
                let max_rand = <$ty>::splat(1.0 as $f_scalar - $f_scalar::EPSILON);
                let scale = (high - low) / max_rand;
                if !scale.all_finite() {
                    return Err(Error::NonFinite);
                }

                Ok(Self::new_bounded(low, high, scale))
            }

            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                // Generate a value in the range [1, 2)
                let value1_2 = (rng.random::<$uty>() >> $uty::splat($bits_to_discard)).into_float_with_exponent(0);

                // Get a value in the range [0, 1) to avoid overflow when multiplying by scale
                let value0_1 = value1_2 - <$ty>::splat(1.0);

                // We don't use `f64::mul_add`, because it is not available with
                // `no_std`. Furthermore, it is slower for some targets (but
                // faster for others). However, the order of multiplication and
                // addition is important, because on some platforms (e.g. ARM)
                // it will be optimized to a single (non-FMA) instruction.
                value0_1 * self.scale + self.low
            }

            #[inline]
            fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Result<Self::X, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                // Single-use sampling does not distinguish exclusive from
                // inclusive bounds (see the struct-level notes on range
                // limits); delegate to the inclusive path.
                Self::sample_single_inclusive(low_b, high_b, rng)
            }

            #[inline]
            fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Result<Self::X, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                #[cfg(debug_assertions)]
                if !low.all_finite() || !high.all_finite() {
                    return Err(Error::NonFinite);
                }
                if !low.all_le(high) {
                    return Err(Error::EmptyRange);
                }
                let scale = high - low;
                if !scale.all_finite() {
                    return Err(Error::NonFinite);
                }

                // Generate a value in the range [1, 2)
                let value1_2 =
                    (rng.random::<$uty>() >> $uty::splat($bits_to_discard)).into_float_with_exponent(0);

                // Get a value in the range [0, 1) to avoid overflow when multiplying by scale
                let value0_1 = value1_2 - <$ty>::splat(1.0);

                // Doing multiply before addition allows some architectures
                // to use a single instruction.
                Ok(value0_1 * scale + low)
            }
        }
    };
}
// Scalar implementations. The final argument is the number of random bits to
// discard: total bits minus significand width (32 - 23 for f32, 64 - 52 for f64).
uniform_float_impl! { , f32, u32, f32, u32, 32 - 23 }
uniform_float_impl! { , f64, u64, f64, u64, 64 - 52 }

// SIMD lane-wise implementations, gated on the `simd_support` feature.
#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f32x2, u32x2, f32, u32, 32 - 23 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f32x4, u32x4, f32, u32, 32 - 23 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f32x8, u32x8, f32, u32, 32 - 23 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f32x16, u32x16, f32, u32, 32 - 23 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f64x2, u64x2, f64, u64, 64 - 52 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f64x4, u64x4, f64, u64, 64 - 52 }
#[cfg(feature = "simd_support")]
uniform_float_impl! { feature = "simd_support", f64x8, u64x8, f64, u64, 64 - 52 }
#[cfg(test)]
mod tests {
    use super::*;
    use crate::distr::{utils::FloatSIMDScalarUtils, Uniform};
    use crate::test::{const_rng, step_rng};

    // Sweep a set of tricky ranges (subnormals, signed zeros, huge spans)
    // across every lane of every float type, checking samples stay in bounds
    // and that all-zero / all-ones RNG output maps to the range limits.
    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    fn test_floats() {
        let mut rng = crate::test::rng(252);
        let mut zero_rng = const_rng(0);
        let mut max_rng = const_rng(0xffff_ffff_ffff_ffff);
        macro_rules! t {
            ($ty:ty, $f_scalar:ident, $bits_shifted:expr) => {{
                let v: &[($f_scalar, $f_scalar)] = &[
                    (0.0, 100.0),
                    (-1e35, -1e25),
                    (1e-35, 1e-25),
                    (-1e35, 1e35),
                    (<$f_scalar>::from_bits(0), <$f_scalar>::from_bits(3)),
                    (-<$f_scalar>::from_bits(10), -<$f_scalar>::from_bits(1)),
                    (-<$f_scalar>::from_bits(5), 0.0),
                    (-<$f_scalar>::from_bits(7), -0.0),
                    (0.1 * $f_scalar::MAX, $f_scalar::MAX),
                    (-$f_scalar::MAX * 0.2, $f_scalar::MAX * 0.7),
                ];
                for &(low_scalar, high_scalar) in v.iter() {
                    for lane in 0..<$ty>::LEN {
                        // Put the interesting bounds in one lane; keep the
                        // others at the trivial [0, 1) range.
                        let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
                        let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
                        let my_uniform = Uniform::new(low, high).unwrap();
                        let my_incl_uniform = Uniform::new_inclusive(low, high).unwrap();
                        for _ in 0..100 {
                            let v = rng.sample(my_uniform).extract_lane(lane);
                            assert!(low_scalar <= v && v <= high_scalar);
                            let v = rng.sample(my_incl_uniform).extract_lane(lane);
                            assert!(low_scalar <= v && v <= high_scalar);
                            let v =
                                <$ty as SampleUniform>::Sampler::sample_single(low, high, &mut rng)
                                    .unwrap()
                                    .extract_lane(lane);
                            assert!(low_scalar <= v && v <= high_scalar);
                            let v = <$ty as SampleUniform>::Sampler::sample_single_inclusive(
                                low, high, &mut rng,
                            )
                            .unwrap()
                            .extract_lane(lane);
                            assert!(low_scalar <= v && v <= high_scalar);
                        }

                        // A degenerate inclusive range must yield its bound.
                        assert_eq!(
                            rng.sample(Uniform::new_inclusive(low, low).unwrap())
                                .extract_lane(lane),
                            low_scalar
                        );

                        // All-zero RNG output must map exactly onto `low`.
                        assert_eq!(zero_rng.sample(my_uniform).extract_lane(lane), low_scalar);
                        assert_eq!(
                            zero_rng.sample(my_incl_uniform).extract_lane(lane),
                            low_scalar
                        );
                        assert_eq!(
                            <$ty as SampleUniform>::Sampler::sample_single(
                                low,
                                high,
                                &mut zero_rng
                            )
                            .unwrap()
                            .extract_lane(lane),
                            low_scalar
                        );
                        assert_eq!(
                            <$ty as SampleUniform>::Sampler::sample_single_inclusive(
                                low,
                                high,
                                &mut zero_rng
                            )
                            .unwrap()
                            .extract_lane(lane),
                            low_scalar
                        );

                        // All-ones RNG output must not exceed `high`.
                        assert!(max_rng.sample(my_uniform).extract_lane(lane) <= high_scalar);
                        assert!(max_rng.sample(my_incl_uniform).extract_lane(lane) <= high_scalar);
                        // sample_single cannot cope with max_rng:
                        // assert!(<$ty as SampleUniform>::Sampler
                        //     ::sample_single(low, high, &mut max_rng).unwrap()
                        //     .extract(lane) <= high_scalar);
                        assert!(
                            <$ty as SampleUniform>::Sampler::sample_single_inclusive(
                                low,
                                high,
                                &mut max_rng
                            )
                            .unwrap()
                            .extract_lane(lane)
                                <= high_scalar
                        );

                        // Don't run this test for really tiny differences between high and low
                        // since for those rounding might result in selecting high for a very
                        // long time.
                        if (high_scalar - low_scalar) > 0.0001 {
                            let mut lowering_max_rng =
                                step_rng(0xffff_ffff_ffff_ffff, (-1i64 << $bits_shifted) as u64);
                            assert!(
                                <$ty as SampleUniform>::Sampler::sample_single(
                                    low,
                                    high,
                                    &mut lowering_max_rng
                                )
                                .unwrap()
                                .extract_lane(lane)
                                    <= high_scalar
                            );
                        }
                    }
                }

                // Degenerate ranges at the extreme representable values.
                assert_eq!(
                    rng.sample(Uniform::new_inclusive($f_scalar::MAX, $f_scalar::MAX).unwrap()),
                    $f_scalar::MAX
                );
                assert_eq!(
                    rng.sample(Uniform::new_inclusive(-$f_scalar::MAX, -$f_scalar::MAX).unwrap()),
                    -$f_scalar::MAX
                );
            }};
        }

        t!(f32, f32, 32 - 23);
        t!(f64, f64, 64 - 52);
        #[cfg(feature = "simd_support")]
        {
            t!(f32x2, f32, 32 - 23);
            t!(f32x4, f32, 32 - 23);
            t!(f32x8, f32, 32 - 23);
            t!(f32x16, f32, 32 - 23);
            t!(f64x2, f64, 64 - 52);
            t!(f64x4, f64, 64 - 52);
            t!(f64x8, f64, 64 - 52);
        }
    }

    // A full-width float range has a non-finite span and must be rejected.
    #[test]
    fn test_float_overflow() {
        assert_eq!(Uniform::try_from(f64::MIN..f64::MAX), Err(Error::NonFinite));
    }

    #[test]
    #[should_panic]
    fn test_float_overflow_single() {
        let mut rng = crate::test::rng(252);
        rng.random_range(f64::MIN..f64::MAX);
    }

    // NaN / infinite / reversed bounds must error from every constructor.
    #[test]
    #[cfg(all(feature = "std", panic = "unwind"))]
    fn test_float_assertions() {
        use super::SampleUniform;
        fn range<T: SampleUniform>(low: T, high: T) -> Result<T, Error> {
            let mut rng = crate::test::rng(253);
            T::Sampler::sample_single(low, high, &mut rng)
        }

        macro_rules! t {
            ($ty:ident, $f_scalar:ident) => {{
                let v: &[($f_scalar, $f_scalar)] = &[
                    ($f_scalar::NAN, 0.0),
                    (1.0, $f_scalar::NAN),
                    ($f_scalar::NAN, $f_scalar::NAN),
                    (1.0, 0.5),
                    ($f_scalar::MAX, -$f_scalar::MAX),
                    ($f_scalar::INFINITY, $f_scalar::INFINITY),
                    ($f_scalar::NEG_INFINITY, $f_scalar::NEG_INFINITY),
                    ($f_scalar::NEG_INFINITY, 5.0),
                    (5.0, $f_scalar::INFINITY),
                    ($f_scalar::NAN, $f_scalar::INFINITY),
                    ($f_scalar::NEG_INFINITY, $f_scalar::NAN),
                    ($f_scalar::NEG_INFINITY, $f_scalar::INFINITY),
                ];
                for &(low_scalar, high_scalar) in v.iter() {
                    for lane in 0..<$ty>::LEN {
                        let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
                        let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
                        assert!(range(low, high).is_err());
                        assert!(Uniform::new(low, high).is_err());
                        assert!(Uniform::new_inclusive(low, high).is_err());
                        assert!(Uniform::new(low, low).is_err());
                    }
                }
            }};
        }

        t!(f32, f32);
        t!(f64, f64);
        #[cfg(feature = "simd_support")]
        {
            t!(f32x2, f32);
            t!(f32x4, f32);
            t!(f32x8, f32);
            t!(f32x16, f32);
            t!(f64x2, f64);
            t!(f64x4, f64);
            t!(f64x8, f64);
        }
    }

    #[test]
    fn test_uniform_from_std_range() {
        let r = Uniform::try_from(2.0f64..7.0).unwrap();
        assert_eq!(r.0.low, 2.0);
        assert_eq!(r.0.scale, 5.0);
    }

    #[test]
    fn test_uniform_from_std_range_bad_limits() {
        #![allow(clippy::reversed_empty_ranges)]
        assert!(Uniform::try_from(100.0..10.0).is_err());
        assert!(Uniform::try_from(100.0..100.0).is_err());
    }

    // Inclusive construction divides by (1 - EPSILON), so `scale` ends up
    // marginally above the plain range width.
    #[test]
    fn test_uniform_from_std_range_inclusive() {
        let r = Uniform::try_from(2.0f64..=7.0).unwrap();
        assert_eq!(r.0.low, 2.0);
        assert!(r.0.scale > 5.0);
        assert!(r.0.scale < 5.0 + 1e-14);
    }

    #[test]
    fn test_uniform_from_std_range_inclusive_bad_limits() {
        #![allow(clippy::reversed_empty_ranges)]
        assert!(Uniform::try_from(100.0..=10.0).is_err());
        assert!(Uniform::try_from(100.0..=99.0).is_err());
    }
}

903
vendor/rand/src/distr/uniform_int.rs vendored Normal file
View File

@@ -0,0 +1,903 @@
// Copyright 2018-2020 Developers of the Rand project.
// Copyright 2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! `UniformInt` implementation
use super::{Error, SampleBorrow, SampleUniform, UniformSampler};
use crate::distr::utils::WideningMultiply;
#[cfg(feature = "simd_support")]
use crate::distr::{Distribution, StandardUniform};
use crate::Rng;
#[cfg(feature = "simd_support")]
use core::simd::prelude::*;
#[cfg(feature = "simd_support")]
use core::simd::{LaneCount, SupportedLaneCount};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// The back-end implementing [`UniformSampler`] for integer types.
///
/// Unless you are implementing [`UniformSampler`] for your own type, this type
/// should not be used directly, use [`Uniform`] instead.
///
/// # Implementation notes
///
/// For simplicity, we use the same generic struct `UniformInt<X>` for all
/// integer types `X`. This gives us only one field type, `X`; to store unsigned
/// values of this size, we take use the fact that these conversions are no-ops.
///
/// For a closed range, the number of possible numbers we should generate is
/// `range = (high - low + 1)`. To avoid bias, we must ensure that the size of
/// our sample space, `zone`, is a multiple of `range`; other values must be
/// rejected (by replacing with a new random sample).
///
/// As a special case, we use `range = 0` to represent the full range of the
/// result type (i.e. for `new_inclusive($ty::MIN, $ty::MAX)`).
///
/// The optimum `zone` is the largest product of `range` which fits in our
/// (unsigned) target type. We calculate this by calculating how many numbers we
/// must reject: `reject = (MAX + 1) % range = (MAX - range + 1) % range`. Any (large)
/// product of `range` will suffice, thus in `sample_single` we multiply by a
/// power of 2 via bit-shifting (faster but may cause more rejections).
///
/// The smallest integer PRNGs generate is `u32`. For 8- and 16-bit outputs we
/// use `u32` for our `zone` and samples (because it's not slower and because
/// it reduces the chance of having to reject a sample). In this case we cannot
/// store `zone` in the target type since it is too large, however we know
/// `ints_to_reject < range <= $uty::MAX`.
///
/// An alternative to using a modulus is widening multiply: After a widening
/// multiply by `range`, the result is in the high word. Then comparing the low
/// word against `zone` makes sure our distribution is uniform.
///
/// # Bias
///
/// Unless the `unbiased` feature flag is used, outputs may have a small bias.
/// In the worst case, bias affects 1 in `2^n` samples where n is
/// 56 (`i8` and `u8`), 48 (`i16` and `u16`), 96 (`i32` and `u32`), 64 (`i64`
/// and `u64`), 128 (`i128` and `u128`).
///
/// [`Uniform`]: super::Uniform
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct UniformInt<X> {
    /// Inclusive lower bound of the range.
    pub(super) low: X,
    /// Number of values in the range (`high - low + 1`), stored as the
    /// bit-equal unsigned value; 0 encodes the full range of the type.
    pub(super) range: X,
    thresh: X, // effectively 2.pow(max(64, uty_bits)) % range
}
/// Implements `SampleUniform` and `UniformSampler` for a primitive integer
/// type `$ty`, using the bit-equal unsigned type `$uty` for range arithmetic
/// and `$sample_ty` (at least 32 bits) as the internal sampling width.
macro_rules! uniform_int_impl {
    ($ty:ty, $uty:ty, $sample_ty:ident) => {
        impl SampleUniform for $ty {
            type Sampler = UniformInt<$ty>;
        }

        impl UniformSampler for UniformInt<$ty> {
            // We play free and fast with unsigned vs signed here
            // (when $ty is signed), but that's fine, since the
            // contract of this macro is for $ty and $uty to be
            // "bit-equal", so casting between them is a no-op.

            type X = $ty;

            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                if !(low < high) {
                    return Err(Error::EmptyRange);
                }
                // Sampling `[low, high)` is the same as `[low, high - 1]`.
                UniformSampler::new_inclusive(low, high - 1)
            }

            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                if !(low <= high) {
                    return Err(Error::EmptyRange);
                }

                // Number of values in the range; wraps to 0 when the full
                // type range was requested (special-cased in `sample`).
                let range = high.wrapping_sub(low).wrapping_add(1) as $uty;
                let thresh = if range > 0 {
                    let range = $sample_ty::from(range);
                    // Rejection threshold: 2^bits % range, computed as
                    // (-range) % range in wrapping arithmetic.
                    (range.wrapping_neg() % range)
                } else {
                    0
                };

                Ok(UniformInt {
                    low,
                    range: range as $ty,           // type: $uty
                    thresh: thresh as $uty as $ty, // type: $sample_ty
                })
            }

            /// Sample from distribution, Lemire's method, unbiased
            #[inline]
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                let range = self.range as $uty as $sample_ty;
                if range == 0 {
                    // Full type range requested: every output is valid.
                    return rng.random();
                }

                let thresh = self.thresh as $uty as $sample_ty;
                // Widening multiply maps the random word into the range; the
                // low word is rejected while below `thresh` for uniformity.
                let hi = loop {
                    let (hi, lo) = rng.random::<$sample_ty>().wmul(range);
                    if lo >= thresh {
                        break hi;
                    }
                };
                self.low.wrapping_add(hi as $ty)
            }

            #[inline]
            fn sample_single<R: Rng + ?Sized, B1, B2>(
                low_b: B1,
                high_b: B2,
                rng: &mut R,
            ) -> Result<Self::X, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                if !(low < high) {
                    return Err(Error::EmptyRange);
                }
                Self::sample_single_inclusive(low, high - 1, rng)
            }

            /// Sample single value, Canon's method, biased
            ///
            /// In the worst case, bias affects 1 in `2^n` samples where n is
            /// 56 (`i8`), 48 (`i16`), 96 (`i32`), 64 (`i64`), 128 (`i128`).
            #[cfg(not(feature = "unbiased"))]
            #[inline]
            fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(
                low_b: B1,
                high_b: B2,
                rng: &mut R,
            ) -> Result<Self::X, Error>
            where
                B1: SampleBorrow<Self::X> + Sized,
                B2: SampleBorrow<Self::X> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                if !(low <= high) {
                    return Err(Error::EmptyRange);
                }
                let range = high.wrapping_sub(low).wrapping_add(1) as $uty as $sample_ty;
                if range == 0 {
                    // Range is MAX+1 (unrepresentable), so we need a special case
                    return Ok(rng.random());
                }

                // generate a sample using a sensible integer type
                let (mut result, lo_order) = rng.random::<$sample_ty>().wmul(range);

                // if the sample is biased...
                if lo_order > range.wrapping_neg() {
                    // ...generate a new sample to reduce bias...
                    let (new_hi_order, _) = (rng.random::<$sample_ty>()).wmul(range as $sample_ty);
                    // ... incrementing result on overflow
                    let is_overflow = lo_order.checked_add(new_hi_order as $sample_ty).is_none();
                    result += is_overflow as $sample_ty;
                }

                Ok(low.wrapping_add(result as $ty))
            }

            /// Sample single value, Canon's method, unbiased
            #[cfg(feature = "unbiased")]
            #[inline]
            fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(
                low_b: B1,
                high_b: B2,
                rng: &mut R,
            ) -> Result<Self::X, Error>
            where
                B1: SampleBorrow<$ty> + Sized,
                B2: SampleBorrow<$ty> + Sized,
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                if !(low <= high) {
                    return Err(Error::EmptyRange);
                }
                let range = high.wrapping_sub(low).wrapping_add(1) as $uty as $sample_ty;
                if range == 0 {
                    // Range is MAX+1 (unrepresentable), so we need a special case
                    return Ok(rng.random());
                }

                let (mut result, mut lo) = rng.random::<$sample_ty>().wmul(range);
                // In contrast to the biased sampler, we use a loop:
                while lo > range.wrapping_neg() {
                    let (new_hi, new_lo) = (rng.random::<$sample_ty>()).wmul(range);
                    match lo.checked_add(new_hi) {
                        Some(x) if x < $sample_ty::MAX => {
                            // Anything less than MAX: last term is 0
                            break;
                        }
                        None => {
                            // Overflow: last term is 1
                            result += 1;
                            break;
                        }
                        _ => {
                            // Unlikely case: must check next sample
                            lo = new_lo;
                            continue;
                        }
                    }
                }

                Ok(low.wrapping_add(result as $ty))
            }
        }
    };
}
// Instantiate the sampler for every primitive integer type. The third
// argument is the internal sampling width (at least 32 bits; see the
// macro definition above for why small types sample via `u32`).
uniform_int_impl! { i8, u8, u32 }
uniform_int_impl! { i16, u16, u32 }
uniform_int_impl! { i32, u32, u32 }
uniform_int_impl! { i64, u64, u64 }
uniform_int_impl! { i128, u128, u128 }
uniform_int_impl! { u8, u8, u32 }
uniform_int_impl! { u16, u16, u32 }
uniform_int_impl! { u32, u32, u32 }
uniform_int_impl! { u64, u64, u64 }
uniform_int_impl! { u128, u128, u128 }
/// Implements `SampleUniform`/`UniformSampler` for SIMD integer vectors,
/// sampling each lane independently with widening-multiply rejection.
#[cfg(feature = "simd_support")]
macro_rules! uniform_simd_int_impl {
    ($ty:ident, $unsigned:ident) => {
        // The "pick the largest zone that can fit in an `u32`" optimization
        // is less useful here. Multiple lanes complicate things, we don't
        // know the PRNG's minimal output size, and casting to a larger vector
        // is generally a bad idea for SIMD performance. The user can still
        // implement it manually.

        #[cfg(feature = "simd_support")]
        impl<const LANES: usize> SampleUniform for Simd<$ty, LANES>
        where
            LaneCount<LANES>: SupportedLaneCount,
            Simd<$unsigned, LANES>:
                WideningMultiply<Output = (Simd<$unsigned, LANES>, Simd<$unsigned, LANES>)>,
            StandardUniform: Distribution<Simd<$unsigned, LANES>>,
        {
            type Sampler = UniformInt<Simd<$ty, LANES>>;
        }

        #[cfg(feature = "simd_support")]
        impl<const LANES: usize> UniformSampler for UniformInt<Simd<$ty, LANES>>
        where
            LaneCount<LANES>: SupportedLaneCount,
            Simd<$unsigned, LANES>:
                WideningMultiply<Output = (Simd<$unsigned, LANES>, Simd<$unsigned, LANES>)>,
            StandardUniform: Distribution<Simd<$unsigned, LANES>>,
        {
            type X = Simd<$ty, LANES>;

            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
                where B1: SampleBorrow<Self::X> + Sized,
                      B2: SampleBorrow<Self::X> + Sized
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                // Every lane must satisfy low < high.
                if !(low.simd_lt(high).all()) {
                    return Err(Error::EmptyRange);
                }
                UniformSampler::new_inclusive(low, high - Simd::splat(1))
            }

            #[inline] // if the range is constant, this helps LLVM to do the
                      // calculations at compile-time.
            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
                where B1: SampleBorrow<Self::X> + Sized,
                      B2: SampleBorrow<Self::X> + Sized
            {
                let low = *low_b.borrow();
                let high = *high_b.borrow();
                if !(low.simd_le(high).all()) {
                    return Err(Error::EmptyRange);
                }

                // NOTE: all `Simd` operations are inherently wrapping,
                // see https://doc.rust-lang.org/std/simd/struct.Simd.html
                let range: Simd<$unsigned, LANES> = ((high - low) + Simd::splat(1)).cast();
                // We must avoid divide-by-zero by using 0 % 1 == 0.
                let not_full_range = range.simd_gt(Simd::splat(0));
                let modulo = not_full_range.select(range, Simd::splat(1));
                let ints_to_reject = range.wrapping_neg() % modulo;

                Ok(UniformInt {
                    low,
                    // These are really $unsigned values, but store as $ty:
                    range: range.cast(),
                    thresh: ints_to_reject.cast(),
                })
            }

            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
                let range: Simd<$unsigned, LANES> = self.range.cast();
                let thresh: Simd<$unsigned, LANES> = self.thresh.cast();

                // This might seem very slow, generating a whole new
                // SIMD vector for every sample rejection. For most uses
                // though, the chance of rejection is small and provides good
                // general performance. With multiple lanes, that chance is
                // multiplied. To mitigate this, we replace only the lanes of
                // the vector which fail, iteratively reducing the chance of
                // rejection. The replacement method does however add a little
                // overhead. Benchmarking or calculating probabilities might
                // reveal contexts where this replacement method is slower.
                let mut v: Simd<$unsigned, LANES> = rng.random();
                loop {
                    let (hi, lo) = v.wmul(range);
                    let mask = lo.simd_ge(thresh);
                    if mask.all() {
                        let hi: Simd<$ty, LANES> = hi.cast();
                        // wrapping addition
                        let result = self.low + hi;
                        // `select` here compiles to a blend operation
                        // When `range.eq(0).none()` the compare and blend
                        // operations are avoided.
                        let v: Simd<$ty, LANES> = v.cast();
                        return range.simd_gt(Simd::splat(0)).select(result, v);
                    }
                    // Replace only the failing lanes
                    v = mask.select(v, rng.random());
                }
            }
        }
    };

    // bulk implementation
    ($(($unsigned:ident, $signed:ident)),+) => {
        $(
            uniform_simd_int_impl!($unsigned, $unsigned);
            uniform_simd_int_impl!($signed, $unsigned);
        )+
    };
}

#[cfg(feature = "simd_support")]
uniform_simd_int_impl! { (u8, i8), (u16, i16), (u32, i32), (u64, i64) }
/// The back-end implementing [`UniformSampler`] for `usize`.
///
/// # Implementation notes
///
/// Sampling a `usize` value is usually used in relation to the length of an
/// array or other memory structure, thus it is reasonable to assume that the
/// vast majority of use-cases will have a maximum size under [`u32::MAX`].
/// In part to optimise for this use-case, but mostly to ensure that results
/// are portable across 32-bit and 64-bit architectures (as far as is possible),
/// this implementation will use 32-bit sampling when possible.
#[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(all(feature = "serde"), derive(Serialize))]
// To be able to deserialize on 32-bit we need to replace this with a custom
// implementation of the Deserialize trait, to be able to:
// - panic when `mode64` is `true` on 32-bit,
// - assign the default value to `mode64` when it's missing on 64-bit,
// - panic when the `usize` fields are greater than `u32::MAX` on 32-bit.
#[cfg_attr(
    all(feature = "serde", target_pointer_width = "64"),
    derive(Deserialize)
)]
// Fields mirror `UniformInt` (low/range/thresh) plus a width selector.
pub struct UniformUsize {
    /// The lowest possible value.
    low: usize,
    /// The number of possible values. `0` has a special meaning: all.
    range: usize,
    /// Threshold used when sampling to obtain a uniform distribution.
    thresh: usize,
    /// Whether the largest possible value is greater than `u32::MAX`.
    #[cfg(target_pointer_width = "64")]
    // Handle missing field when deserializing on 64-bit an object serialized
    // on 32-bit. Can be removed when switching to a custom deserializer.
    #[cfg_attr(feature = "serde", serde(default))]
    mode64: bool,
}
#[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))]
impl SampleUniform for usize {
    // Delegate `usize` range sampling to the dedicated back-end above.
    type Sampler = UniformUsize;
}
#[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))]
impl UniformSampler for UniformUsize {
    type X = usize;

    #[inline] // if the range is constant, this helps LLVM to do the
              // calculations at compile-time.
    fn new<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low < high) {
            return Err(Error::EmptyRange);
        }

        // Sampling `[low, high)` is the same as `[low, high - 1]`.
        UniformSampler::new_inclusive(low, high - 1)
    }

    #[inline] // if the range is constant, this helps LLVM to do the
              // calculations at compile-time.
    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low <= high) {
            return Err(Error::EmptyRange);
        }

        // On 64-bit targets, fall back to 64-bit sampling only when the
        // upper bound does not fit in `u32` (keeps results portable with
        // 32-bit targets in the common case).
        #[cfg(target_pointer_width = "64")]
        let mode64 = high > (u32::MAX as usize);
        #[cfg(target_pointer_width = "32")]
        let mode64 = false;

        let (range, thresh);
        if cfg!(target_pointer_width = "64") && !mode64 {
            // 32-bit path: compute range and rejection threshold in u32.
            let range32 = (high as u32).wrapping_sub(low as u32).wrapping_add(1);
            range = range32 as usize;

            thresh = if range32 > 0 {
                (range32.wrapping_neg() % range32) as usize
            } else {
                0
            };
        } else {
            // Native-width path; `range` wraps to 0 for the full range.
            range = high.wrapping_sub(low).wrapping_add(1);

            thresh = if range > 0 {
                range.wrapping_neg() % range
            } else {
                0
            };
        }

        Ok(UniformUsize {
            low,
            range,
            thresh,
            #[cfg(target_pointer_width = "64")]
            mode64,
        })
    }

    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
        #[cfg(target_pointer_width = "32")]
        let mode32 = true;
        #[cfg(target_pointer_width = "64")]
        let mode32 = !self.mode64;

        if mode32 {
            let range = self.range as u32;
            if range == 0 {
                // Full range: any 32-bit output is valid.
                return rng.random::<u32>() as usize;
            }

            let thresh = self.thresh as u32;
            // Lemire-style rejection loop (see `UniformInt::sample`).
            let hi = loop {
                let (hi, lo) = rng.random::<u32>().wmul(range);
                if lo >= thresh {
                    break hi;
                }
            };
            self.low.wrapping_add(hi as usize)
        } else {
            let range = self.range as u64;
            if range == 0 {
                // Full range: any 64-bit output is valid.
                return rng.random::<u64>() as usize;
            }

            let thresh = self.thresh as u64;
            let hi = loop {
                let (hi, lo) = rng.random::<u64>().wmul(range);
                if lo >= thresh {
                    break hi;
                }
            };
            self.low.wrapping_add(hi as usize)
        }
    }

    #[inline]
    fn sample_single<R: Rng + ?Sized, B1, B2>(
        low_b: B1,
        high_b: B2,
        rng: &mut R,
    ) -> Result<Self::X, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low < high) {
            return Err(Error::EmptyRange);
        }

        // Delegate to the 64-bit sampler only when the bound needs it.
        if cfg!(target_pointer_width = "64") && high > (u32::MAX as usize) {
            return UniformInt::<u64>::sample_single(low as u64, high as u64, rng)
                .map(|x| x as usize);
        }

        UniformInt::<u32>::sample_single(low as u32, high as u32, rng).map(|x| x as usize)
    }

    #[inline]
    fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(
        low_b: B1,
        high_b: B2,
        rng: &mut R,
    ) -> Result<Self::X, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low <= high) {
            return Err(Error::EmptyRange);
        }

        // Delegate to the 64-bit sampler only when the bound needs it.
        if cfg!(target_pointer_width = "64") && high > (u32::MAX as usize) {
            return UniformInt::<u64>::sample_single_inclusive(low as u64, high as u64, rng)
                .map(|x| x as usize);
        }

        UniformInt::<u32>::sample_single_inclusive(low as u32, high as u32, rng).map(|x| x as usize)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::distr::{Distribution, Uniform};
    use core::fmt::Debug;
    use core::ops::Add;

    #[test]
    fn test_uniform_bad_limits_equal_int() {
        // `new` is exclusive of `high`, so low == high is an empty range.
        assert_eq!(Uniform::new(10, 10), Err(Error::EmptyRange));
    }

    #[test]
    fn test_uniform_good_limits_equal_int() {
        // `new_inclusive` with low == high is a degenerate (single-value)
        // but valid distribution.
        let mut rng = crate::test::rng(804);
        let dist = Uniform::new_inclusive(10, 10).unwrap();
        for _ in 0..20 {
            assert_eq!(rng.sample(dist), 10);
        }
    }

    #[test]
    fn test_uniform_bad_limits_flipped_int() {
        assert_eq!(Uniform::new(10, 5), Err(Error::EmptyRange));
    }

    // Range checks for all samplers (`sample`, `sample_single`,
    // `sample_single_inclusive`) over scalar and SIMD integer types.
    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    fn test_integers() {
        let mut rng = crate::test::rng(251);
        macro_rules! t {
            ($ty:ident, $v:expr, $le:expr, $lt:expr) => {{
                for &(low, high) in $v.iter() {
                    let my_uniform = Uniform::new(low, high).unwrap();
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $lt(v, high));
                    }

                    let my_uniform = Uniform::new_inclusive(low, high).unwrap();
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $le(v, high));
                    }

                    let my_uniform = Uniform::new(&low, high).unwrap();
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $lt(v, high));
                    }

                    let my_uniform = Uniform::new_inclusive(&low, &high).unwrap();
                    for _ in 0..1000 {
                        let v: $ty = rng.sample(my_uniform);
                        assert!($le(low, v) && $le(v, high));
                    }

                    for _ in 0..1000 {
                        let v = <$ty as SampleUniform>::Sampler::sample_single(low, high, &mut rng).unwrap();
                        assert!($le(low, v) && $lt(v, high));
                    }

                    for _ in 0..1000 {
                        let v = <$ty as SampleUniform>::Sampler::sample_single_inclusive(low, high, &mut rng).unwrap();
                        assert!($le(low, v) && $le(v, high));
                    }
                }
            }};

            // scalar bulk
            ($($ty:ident),*) => {{
                $(t!(
                    $ty,
                    [(0, 10), (10, 127), ($ty::MIN, $ty::MAX)],
                    |x, y| x <= y,
                    |x, y| x < y
                );)*
            }};

            // simd bulk
            ($($ty:ident),* => $scalar:ident) => {{
                $(t!(
                    $ty,
                    [
                        ($ty::splat(0), $ty::splat(10)),
                        ($ty::splat(10), $ty::splat(127)),
                        ($ty::splat($scalar::MIN), $ty::splat($scalar::MAX)),
                    ],
                    |x: $ty, y| x.simd_le(y).all(),
                    |x: $ty, y| x.simd_lt(y).all()
                );)*
            }};
        }
        t!(i8, i16, i32, i64, i128, u8, u16, u32, u64, usize, u128);

        #[cfg(feature = "simd_support")]
        {
            t!(u8x4, u8x8, u8x16, u8x32, u8x64 => u8);
            t!(i8x4, i8x8, i8x16, i8x32, i8x64 => i8);
            t!(u16x2, u16x4, u16x8, u16x16, u16x32 => u16);
            t!(i16x2, i16x4, i16x8, i16x16, i16x32 => i16);
            t!(u32x2, u32x4, u32x8, u32x16 => u32);
            t!(i32x2, i32x4, i32x8, i32x16 => i32);
            t!(u64x2, u64x4, u64x8 => u64);
            t!(i64x2, i64x4, i64x8 => i64);
        }
    }

    #[test]
    fn test_uniform_from_std_range() {
        let r = Uniform::try_from(2u32..7).unwrap();
        assert_eq!(r.0.low, 2);
        assert_eq!(r.0.range, 5);
    }

    #[test]
    fn test_uniform_from_std_range_bad_limits() {
        #![allow(clippy::reversed_empty_ranges)]
        assert!(Uniform::try_from(100..10).is_err());
        assert!(Uniform::try_from(100..100).is_err());
    }

    #[test]
    fn test_uniform_from_std_range_inclusive() {
        let r = Uniform::try_from(2u32..=6).unwrap();
        assert_eq!(r.0.low, 2);
        assert_eq!(r.0.range, 5);
    }

    #[test]
    fn test_uniform_from_std_range_inclusive_bad_limits() {
        #![allow(clippy::reversed_empty_ranges)]
        assert!(Uniform::try_from(100..=10).is_err());
        assert!(Uniform::try_from(100..=99).is_err());
    }

    // Pins the exact output stream for a fixed seed; these vectors must not
    // change between releases (reproducibility guarantee).
    #[test]
    fn value_stability() {
        fn test_samples<T: SampleUniform + Copy + Debug + PartialEq + Add<T>>(
            lb: T,
            ub: T,
            ub_excl: T,
            expected: &[T],
        ) where
            Uniform<T>: Distribution<T>,
        {
            let mut rng = crate::test::rng(897);
            let mut buf = [lb; 6];

            for x in &mut buf[0..3] {
                *x = T::Sampler::sample_single_inclusive(lb, ub, &mut rng).unwrap();
            }

            let distr = Uniform::new_inclusive(lb, ub).unwrap();
            for x in &mut buf[3..6] {
                *x = rng.sample(&distr);
            }
            assert_eq!(&buf, expected);

            // The exclusive samplers must produce the same stream for the
            // equivalent exclusive bound.
            let mut rng = crate::test::rng(897);

            for x in &mut buf[0..3] {
                *x = T::Sampler::sample_single(lb, ub_excl, &mut rng).unwrap();
            }

            let distr = Uniform::new(lb, ub_excl).unwrap();
            for x in &mut buf[3..6] {
                *x = rng.sample(&distr);
            }
            assert_eq!(&buf, expected);
        }

        test_samples(-105i8, 111, 112, &[-99, -48, 107, 72, -19, 56]);
        test_samples(2i16, 1352, 1353, &[43, 361, 1325, 1109, 539, 1005]);
        test_samples(
            -313853i32,
            13513,
            13514,
            &[-303803, -226673, 6912, -45605, -183505, -70668],
        );
        test_samples(
            131521i64,
            6542165,
            6542166,
            &[1838724, 5384489, 4893692, 3712948, 3951509, 4094926],
        );
        test_samples(
            -0x8000_0000_0000_0000_0000_0000_0000_0000i128,
            -1,
            0,
            &[
                -30725222750250982319765550926688025855,
                -75088619368053423329503924805178012357,
                -64950748766625548510467638647674468829,
                -41794017901603587121582892414659436495,
                -63623852319608406524605295913876414006,
                -17404679390297612013597359206379189023,
            ],
        );
        test_samples(11u8, 218, 219, &[17, 66, 214, 181, 93, 165]);
        test_samples(11u16, 218, 219, &[17, 66, 214, 181, 93, 165]);
        test_samples(11u32, 218, 219, &[17, 66, 214, 181, 93, 165]);
        test_samples(11u64, 218, 219, &[66, 181, 165, 127, 134, 139]);
        test_samples(11u128, 218, 219, &[181, 127, 139, 167, 141, 197]);
        test_samples(11usize, 218, 219, &[17, 66, 214, 181, 93, 165]);

        #[cfg(feature = "simd_support")]
        {
            let lb = Simd::from([11u8, 0, 128, 127]);
            let ub = Simd::from([218, 254, 254, 254]);
            let ub_excl = ub + Simd::splat(1);
            test_samples(
                lb,
                ub,
                ub_excl,
                &[
                    Simd::from([13, 5, 237, 130]),
                    Simd::from([126, 186, 149, 161]),
                    Simd::from([103, 86, 234, 252]),
                    Simd::from([35, 18, 225, 231]),
                    Simd::from([106, 153, 246, 177]),
                    Simd::from([195, 168, 149, 222]),
                ],
            );
        }
    }

    #[test]
    fn test_uniform_usize_empty_range() {
        assert_eq!(UniformUsize::new(10, 10), Err(Error::EmptyRange));
        assert!(UniformUsize::new(10, 11).is_ok());
        assert_eq!(UniformUsize::new_inclusive(10, 9), Err(Error::EmptyRange));
        assert!(UniformUsize::new_inclusive(10, 10).is_ok());
    }

    // Checks the computed (low, range, thresh, mode64) fields, including the
    // `range == 0` encoding of a full 32-bit range.
    #[test]
    fn test_uniform_usize_constructors() {
        assert_eq!(
            UniformUsize::new_inclusive(u32::MAX as usize, u32::MAX as usize),
            Ok(UniformUsize {
                low: u32::MAX as usize,
                range: 1,
                thresh: 0,
                #[cfg(target_pointer_width = "64")]
                mode64: false
            })
        );
        assert_eq!(
            UniformUsize::new_inclusive(0, u32::MAX as usize),
            Ok(UniformUsize {
                low: 0,
                range: 0,
                thresh: 0,
                #[cfg(target_pointer_width = "64")]
                mode64: false
            })
        );
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            UniformUsize::new_inclusive(0, u32::MAX as usize + 1),
            Ok(UniformUsize {
                low: 0,
                range: u32::MAX as usize + 2,
                thresh: 1,
                mode64: true
            })
        );
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            UniformUsize::new_inclusive(u32::MAX as usize, u64::MAX as usize),
            Ok(UniformUsize {
                low: u32::MAX as usize,
                range: u64::MAX as usize - u32::MAX as usize + 1,
                thresh: u32::MAX as usize,
                mode64: true
            })
        );
    }

    // This could be run also on 32-bit when deserialization is implemented.
    #[cfg(all(feature = "serde", target_pointer_width = "64"))]
    #[test]
    fn test_uniform_usize_deserialization() {
        use serde_json;

        let original = UniformUsize::new_inclusive(10, 100).expect("creation");
        let serialized = serde_json::to_string(&original).expect("serialization");
        let deserialized: UniformUsize =
            serde_json::from_str(&serialized).expect("deserialization");

        assert_eq!(deserialized, original);
    }

    // A 32-bit serialization has no `mode64` field; `serde(default)` must
    // fill it in on 64-bit.
    #[cfg(all(feature = "serde", target_pointer_width = "64"))]
    #[test]
    fn test_uniform_usize_deserialization_from_32bit() {
        use serde_json;

        let serialized_on_32bit = r#"{"low":10,"range":91,"thresh":74}"#;
        let deserialized: UniformUsize =
            serde_json::from_str(&serialized_on_32bit).expect("deserialization");

        assert_eq!(
            deserialized,
            UniformUsize::new_inclusive(10, 100).expect("creation")
        );
    }

    #[cfg(all(feature = "serde", target_pointer_width = "64"))]
    #[test]
    fn test_uniform_usize_deserialization_64bit() {
        use serde_json;

        let original = UniformUsize::new_inclusive(1, u64::MAX as usize - 1).expect("creation");
        assert!(original.mode64);
        let serialized = serde_json::to_string(&original).expect("serialization");
        let deserialized: UniformUsize =
            serde_json::from_str(&serialized).expect("deserialization");

        assert_eq!(deserialized, original);
    }
}

319
vendor/rand/src/distr/uniform_other.rs vendored Normal file
View File

@@ -0,0 +1,319 @@
// Copyright 2018-2020 Developers of the Rand project.
// Copyright 2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! `UniformChar`, `UniformDuration` implementations
use super::{Error, SampleBorrow, SampleUniform, Uniform, UniformInt, UniformSampler};
use crate::distr::Distribution;
use crate::Rng;
use core::time::Duration;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
impl SampleUniform for char {
    // Delegate `char` range sampling to the dedicated back-end below.
    type Sampler = UniformChar;
}
/// The back-end implementing [`UniformSampler`] for `char`.
///
/// Unless you are implementing [`UniformSampler`] for your own type, this type
/// should not be used directly, use [`Uniform`] instead.
///
/// This differs from integer range sampling since the range `0xD800..=0xDFFF`
/// are used for surrogate pairs in UCS and UTF-16, and consequently are not
/// valid Unicode code points. We must therefore avoid sampling values in this
/// range.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct UniformChar {
    // Samples code points in a "compressed" space with the surrogate gap
    // removed (see `char_to_comp_u32`); `sample` re-inserts the gap.
    sampler: UniformInt<u32>,
}
/// UTF-16 surrogate range start
const CHAR_SURROGATE_START: u32 = 0xD800;
/// UTF-16 surrogate range size
const CHAR_SURROGATE_LEN: u32 = 0xE000 - CHAR_SURROGATE_START; // = 0x800
/// Map a `char` onto the "compressed" `u32` code-point space in which the
/// UTF-16 surrogate gap has been removed: code points at or above the gap
/// are shifted down by the gap's length, all others pass through unchanged.
fn char_to_comp_u32(c: char) -> u32 {
    let code = c as u32;
    if code >= CHAR_SURROGATE_START {
        code - CHAR_SURROGATE_LEN
    } else {
        code
    }
}
impl UniformSampler for UniformChar {
    type X = char;

    #[inline] // if the range is constant, this helps LLVM to do the
              // calculations at compile-time.
    fn new<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        // Convert bounds to the compressed space, then sample integers there.
        let low = char_to_comp_u32(*low_b.borrow());
        let high = char_to_comp_u32(*high_b.borrow());
        let sampler = UniformInt::<u32>::new(low, high);
        sampler.map(|sampler| UniformChar { sampler })
    }

    #[inline] // if the range is constant, this helps LLVM to do the
              // calculations at compile-time.
    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = char_to_comp_u32(*low_b.borrow());
        let high = char_to_comp_u32(*high_b.borrow());
        let sampler = UniformInt::<u32>::new_inclusive(low, high);
        sampler.map(|sampler| UniformChar { sampler })
    }

    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
        let mut x = self.sampler.sample(rng);
        // Undo the compression: shift values past the surrogate gap back up.
        if x >= CHAR_SURROGATE_START {
            x += CHAR_SURROGATE_LEN;
        }

        // SAFETY: x must not be in surrogate range or greater than char::MAX.
        // This relies on range constructors which accept char arguments.
        // Validity of input char values is assumed.
        unsafe { core::char::from_u32_unchecked(x) }
    }
}
#[cfg(feature = "alloc")]
impl crate::distr::SampleString for Uniform<char> {
    /// Append `len` chars sampled from this distribution to `string`,
    /// reserving capacity up-front based on the widest possible char.
    fn append_string<R: Rng + ?Sized>(
        &self,
        rng: &mut R,
        string: &mut alloc::string::String,
        len: usize,
    ) {
        // Getting the hi value to assume the required length to reserve in string.
        let mut hi = self.0.sampler.low + self.0.sampler.range - 1;
        if hi >= CHAR_SURROGATE_START {
            hi += CHAR_SURROGATE_LEN;
        }
        // Get the utf8 length of hi to minimize extra space.
        let max_char_len = char::from_u32(hi).map(char::len_utf8).unwrap_or(4);
        string.reserve(max_char_len * len);
        string.extend(self.sample_iter(rng).take(len))
    }
}
/// The back-end implementing [`UniformSampler`] for `Duration`.
///
/// Unless you are implementing [`UniformSampler`] for your own types, this type
/// should not be used directly, use [`Uniform`] instead.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct UniformDuration {
    // Sampling strategy, chosen by `new_inclusive` from the bounds' size.
    mode: UniformDurationMode,
    // Nanosecond part of the lower bound; added back in `Large` mode.
    offset: u32,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
enum UniformDurationMode {
    // Both bounds share the same whole second: sample nanoseconds only.
    Small {
        secs: u64,
        nanos: Uniform<u32>,
    },
    // Upper bound expressed in total nanoseconds fits in `u64`: sample the
    // total nanosecond count directly.
    Medium {
        nanos: Uniform<u64>,
    },
    // Total nanoseconds would overflow `u64`: sample seconds and nanoseconds
    // separately, rejecting combinations past the upper bound.
    Large {
        max_secs: u64,
        max_nanos: u32,
        secs: Uniform<u64>,
    },
}
impl SampleUniform for Duration {
    // Delegate `Duration` range sampling to the dedicated back-end below.
    type Sampler = UniformDuration;
}
impl UniformSampler for UniformDuration {
    type X = Duration;

    #[inline]
    fn new<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low < high) {
            return Err(Error::EmptyRange);
        }
        // `[low, high)` equals `[low, high - 1ns]`.
        UniformDuration::new_inclusive(low, high - Duration::new(0, 1))
    }

    #[inline]
    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Result<Self, Error>
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = *low_b.borrow();
        let high = *high_b.borrow();
        if !(low <= high) {
            return Err(Error::EmptyRange);
        }

        let low_s = low.as_secs();
        let low_n = low.subsec_nanos();
        let mut high_s = high.as_secs();
        let mut high_n = high.subsec_nanos();

        // Borrow one second from the upper bound so that high_n >= low_n.
        if high_n < low_n {
            high_s -= 1;
            high_n += 1_000_000_000;
        }

        let mode = if low_s == high_s {
            UniformDurationMode::Small {
                secs: low_s,
                nanos: Uniform::new_inclusive(low_n, high_n)?,
            }
        } else {
            // Try to express the upper bound as total nanoseconds in u64.
            let max = high_s
                .checked_mul(1_000_000_000)
                .and_then(|n| n.checked_add(u64::from(high_n)));

            if let Some(higher_bound) = max {
                let lower_bound = low_s * 1_000_000_000 + u64::from(low_n);
                UniformDurationMode::Medium {
                    nanos: Uniform::new_inclusive(lower_bound, higher_bound)?,
                }
            } else {
                // An offset is applied to simplify generation of nanoseconds
                let max_nanos = high_n - low_n;
                UniformDurationMode::Large {
                    max_secs: high_s,
                    max_nanos,
                    secs: Uniform::new_inclusive(low_s, high_s)?,
                }
            }
        };
        Ok(UniformDuration {
            mode,
            offset: low_n,
        })
    }

    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Duration {
        match self.mode {
            UniformDurationMode::Small { secs, nanos } => {
                let n = nanos.sample(rng);
                Duration::new(secs, n)
            }
            UniformDurationMode::Medium { nanos } => {
                let nanos = nanos.sample(rng);
                Duration::new(nanos / 1_000_000_000, (nanos % 1_000_000_000) as u32)
            }
            UniformDurationMode::Large {
                max_secs,
                max_nanos,
                secs,
            } => {
                // constant folding means this is at least as fast as `Rng::sample(Range)`
                let nano_range = Uniform::new(0, 1_000_000_000).unwrap();
                // Rejection sampling: retry when the pair overshoots the
                // upper bound in its final second.
                loop {
                    let s = secs.sample(rng);
                    let n = nano_range.sample(rng);
                    if !(s == max_secs && n > max_nanos) {
                        let sum = n + self.offset;
                        break Duration::new(s, sum);
                    }
                }
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    #[cfg(feature = "serde")]
    fn test_serialization_uniform_duration() {
        // Round-trip through bincode must preserve the sampler exactly.
        let distr = UniformDuration::new(Duration::from_secs(10), Duration::from_secs(60)).unwrap();
        let de_distr: UniformDuration =
            bincode::deserialize(&bincode::serialize(&distr).unwrap()).unwrap();
        assert_eq!(distr, de_distr);
    }

    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    fn test_char() {
        let mut rng = crate::test::rng(891);
        let mut max = core::char::from_u32(0).unwrap();
        for _ in 0..100 {
            let c = rng.random_range('A'..='Z');
            assert!(c.is_ascii_uppercase());
            max = max.max(c);
        }
        assert_eq!(max, 'Z');
        // A range straddling the surrogate gap must never yield a surrogate.
        let d = Uniform::new(
            core::char::from_u32(0xD7F0).unwrap(),
            core::char::from_u32(0xE010).unwrap(),
        )
        .unwrap();
        for _ in 0..100 {
            let c = d.sample(&mut rng);
            assert!((c as u32) < 0xD800 || (c as u32) > 0xDFFF);
        }
        #[cfg(feature = "alloc")]
        {
            use crate::distr::SampleString;
            // Capacity checks confirm `append_string`'s reserve heuristic.
            let string1 = d.sample_string(&mut rng, 100);
            assert_eq!(string1.capacity(), 300);
            let string2 = Uniform::new(
                core::char::from_u32(0x0000).unwrap(),
                core::char::from_u32(0x0080).unwrap(),
            )
            .unwrap()
            .sample_string(&mut rng, 100);
            assert_eq!(string2.capacity(), 100);
            let string3 = Uniform::new_inclusive(
                core::char::from_u32(0x0000).unwrap(),
                core::char::from_u32(0x0080).unwrap(),
            )
            .unwrap()
            .sample_string(&mut rng, 100);
            assert_eq!(string3.capacity(), 200);
        }
    }

    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    fn test_durations() {
        let mut rng = crate::test::rng(253);

        // Ranges chosen to exercise the Small, Medium and Large modes.
        let v = &[
            (Duration::new(10, 50000), Duration::new(100, 1234)),
            (Duration::new(0, 100), Duration::new(1, 50)),
            (Duration::new(0, 0), Duration::new(u64::MAX, 999_999_999)),
        ];
        for &(low, high) in v.iter() {
            let my_uniform = Uniform::new(low, high).unwrap();
            for _ in 0..1000 {
                let v = rng.sample(my_uniform);
                assert!(low <= v && v < high);
            }
        }
    }
}

408
vendor/rand/src/distr/utils.rs vendored Normal file
View File

@@ -0,0 +1,408 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Math helper functions
#[cfg(feature = "simd_support")]
use core::simd::prelude::*;
#[cfg(feature = "simd_support")]
use core::simd::{LaneCount, SimdElement, SupportedLaneCount};
/// Helper trait: widening multiply.
///
/// Implementations compute the full double-width product of `self * x` and
/// return it as a `(high, low)` pair of words.
pub(crate) trait WideningMultiply<RHS = Self> {
    type Output;

    fn wmul(self, x: RHS) -> Self::Output;
}
/// Implements `WideningMultiply` by promoting to the next-wider type `$wide`,
/// multiplying, and splitting the product at `$shift` bits.
macro_rules! wmul_impl {
    ($ty:ty, $wide:ty, $shift:expr) => {
        impl WideningMultiply for $ty {
            type Output = ($ty, $ty);

            #[inline(always)]
            fn wmul(self, x: $ty) -> Self::Output {
                let tmp = (self as $wide) * (x as $wide);
                // (high word, low word)
                ((tmp >> $shift) as $ty, tmp as $ty)
            }
        }
    };

    // simd bulk implementation
    ($(($ty:ident, $wide:ty),)+, $shift:expr) => {
        $(
            impl WideningMultiply for $ty {
                type Output = ($ty, $ty);

                #[inline(always)]
                fn wmul(self, x: $ty) -> Self::Output {
                    // For supported vectors, this should compile to a couple
                    // supported multiply & swizzle instructions (no actual
                    // casting).
                    // TODO: optimize
                    let y: $wide = self.cast();
                    let x: $wide = x.cast();
                    let tmp = y * x;
                    let hi: $ty = (tmp >> Simd::splat($shift)).cast();
                    let lo: $ty = tmp.cast();
                    (hi, lo)
                }
            }
        )+
    };
}
// Scalar widening-multiply impls via promotion to the next-wider type.
wmul_impl! { u8, u16, 8 }
wmul_impl! { u16, u32, 16 }
wmul_impl! { u32, u64, 32 }
wmul_impl! { u64, u128, 64 }
// This code is a translation of the __mulddi3 function in LLVM's
// compiler-rt. It is an optimised variant of the common method
// `(a + b) * (c + d) = ac + ad + bc + bd`.
//
// For some reason LLVM can optimise the C version very well, but
// keeps shuffling registers in this Rust translation.
macro_rules! wmul_impl_large {
    ($ty:ty, $half:expr) => {
        impl WideningMultiply for $ty {
            type Output = ($ty, $ty);

            #[inline(always)]
            fn wmul(self, b: $ty) -> Self::Output {
                // Split both operands into $half-bit halves and accumulate
                // the four partial products with manual carry propagation.
                const LOWER_MASK: $ty = !0 >> $half;
                let mut low = (self & LOWER_MASK).wrapping_mul(b & LOWER_MASK);
                let mut t = low >> $half;
                low &= LOWER_MASK;
                t += (self >> $half).wrapping_mul(b & LOWER_MASK);
                low += (t & LOWER_MASK) << $half;
                let mut high = t >> $half;
                t = low >> $half;
                low &= LOWER_MASK;
                t += (b >> $half).wrapping_mul(self & LOWER_MASK);
                low += (t & LOWER_MASK) << $half;
                high += t >> $half;
                high += (self >> $half).wrapping_mul(b >> $half);

                (high, low)
            }
        }
    };

    // simd bulk implementation
    (($($ty:ty,)+) $scalar:ty, $half:expr) => {
        $(
            impl WideningMultiply for $ty {
                type Output = ($ty, $ty);

                #[inline(always)]
                fn wmul(self, b: $ty) -> Self::Output {
                    // needs wrapping multiplication
                    let lower_mask = <$ty>::splat(!0 >> $half);
                    let half = <$ty>::splat($half);
                    let mut low = (self & lower_mask) * (b & lower_mask);
                    let mut t = low >> half;
                    low &= lower_mask;
                    t += (self >> half) * (b & lower_mask);
                    low += (t & lower_mask) << half;
                    let mut high = t >> half;
                    t = low >> half;
                    low &= lower_mask;
                    t += (b >> half) * (self & lower_mask);
                    low += (t & lower_mask) << half;
                    high += t >> half;
                    high += (self >> half) * (b >> half);

                    (high, low)
                }
            }
        )+
    };
}

// u128 has no wider type to promote to, so use the split-and-carry variant.
wmul_impl_large! { u128, 64 }
// `usize` delegates to the fixed-width type matching the target's pointer
// width, selected by the `cfg` attributes on the invocations below.
macro_rules! wmul_impl_usize {
    ($ty:ty) => {
        impl WideningMultiply for usize {
            type Output = (usize, usize);
            #[inline(always)]
            fn wmul(self, x: usize) -> Self::Output {
                // Casts are lossless: `$ty` has the same width as `usize`
                // on the targets selected by the cfg below.
                let (high, low) = (self as $ty).wmul(x as $ty);
                (high as usize, low as usize)
            }
        }
    };
}
#[cfg(target_pointer_width = "16")]
wmul_impl_usize! { u16 }
#[cfg(target_pointer_width = "32")]
wmul_impl_usize! { u32 }
#[cfg(target_pointer_width = "64")]
wmul_impl_usize! { u64 }
// SIMD widening-multiply implementations. Vector types that fit in a wider
// vector use the cast-up `wmul_impl!` arm; where x86 `mulhi`/`mullo`
// instructions are available they are used instead (see `wmul_impl_16`).
#[cfg(feature = "simd_support")]
mod simd_wmul {
    use super::*;
    #[cfg(target_arch = "x86")]
    use core::arch::x86::*;
    #[cfg(target_arch = "x86_64")]
    use core::arch::x86_64::*;
    wmul_impl! {
        (u8x4, u16x4),
        (u8x8, u16x8),
        (u8x16, u16x16),
        (u8x32, u16x32),
        (u8x64, Simd<u16, 64>),,
        8
    }
    wmul_impl! { (u16x2, u32x2),, 16 }
    wmul_impl! { (u16x4, u32x4),, 16 }
    // Generic fallbacks, only compiled when the corresponding `mulhi`
    // instruction set (used by `wmul_impl_16!` below) is unavailable.
    #[cfg(not(target_feature = "sse2"))]
    wmul_impl! { (u16x8, u32x8),, 16 }
    #[cfg(not(target_feature = "avx2"))]
    wmul_impl! { (u16x16, u32x16),, 16 }
    #[cfg(not(target_feature = "avx512bw"))]
    wmul_impl! { (u16x32, Simd<u32, 32>),, 16 }
    // 16-bit lane widths allow use of the x86 `mulhi` instructions, which
    // means `wmul` can be implemented with only two instructions.
    #[allow(unused_macros)]
    macro_rules! wmul_impl_16 {
        ($ty:ident, $mulhi:ident, $mullo:ident) => {
            impl WideningMultiply for $ty {
                type Output = ($ty, $ty);
                #[inline(always)]
                fn wmul(self, x: $ty) -> Self::Output {
                    // SAFETY: each invocation below is gated on the matching
                    // `target_feature` cfg, so the intrinsic is available.
                    // Note: `mullo` (low half) is sign-agnostic, so the
                    // `epi16` (signed) variant is correct for unsigned lanes.
                    let hi = unsafe { $mulhi(self.into(), x.into()) }.into();
                    let lo = unsafe { $mullo(self.into(), x.into()) }.into();
                    (hi, lo)
                }
            }
        };
    }
    #[cfg(target_feature = "sse2")]
    wmul_impl_16! { u16x8, _mm_mulhi_epu16, _mm_mullo_epi16 }
    #[cfg(target_feature = "avx2")]
    wmul_impl_16! { u16x16, _mm256_mulhi_epu16, _mm256_mullo_epi16 }
    #[cfg(target_feature = "avx512bw")]
    wmul_impl_16! { u16x32, _mm512_mulhi_epu16, _mm512_mullo_epi16 }
    wmul_impl! {
        (u32x2, u64x2),
        (u32x4, u64x4),
        (u32x8, u64x8),
        (u32x16, Simd<u64, 16>),,
        32
    }
    // 64-bit lanes have no wider vector element type; use split-and-combine.
    wmul_impl_large! { (u64x2, u64x4, u64x8,) u64, 32 }
}
/// Helper trait when dealing with scalar and SIMD floating point types.
pub(crate) trait FloatSIMDUtils {
    // `PartialOrd` for vectors compares lexicographically. We want to compare all
    // the individual SIMD lanes instead, and get the combined result over all
    // lanes. This is possible using something like `a.lt(b).all()`, but we
    // implement it as a trait so we can write the same code for `f32` and `f64`.
    // Only the comparison functions we need are implemented.
    /// `true` iff every lane of `self` is strictly less than `other`.
    fn all_lt(self, other: Self) -> bool;
    /// `true` iff every lane of `self` is less than or equal to `other`.
    fn all_le(self, other: Self) -> bool;
    /// `true` iff every lane is finite (neither infinite nor NaN).
    fn all_finite(self) -> bool;
    /// Per-lane boolean result type (`bool` for scalars, a SIMD mask for vectors).
    type Mask;
    fn gt_mask(self, other: Self) -> Self::Mask;
    // Decrease all lanes where the mask is `true` to the next lower value
    // representable by the floating-point type. At least one of the lanes
    // must be set.
    fn decrease_masked(self, mask: Self::Mask) -> Self;
    // Convert from int value. Conversion is done while retaining the numerical
    // value, not by retaining the binary representation.
    /// Unsigned integer type of the same lane width as `Self`.
    type UInt;
    fn cast_from_int(i: Self::UInt) -> Self;
}
/// Test-only extension: per-lane access (read / write a single lane) so the
/// same test code can inspect scalars and SIMD vectors uniformly.
#[cfg(test)]
pub(crate) trait FloatSIMDScalarUtils: FloatSIMDUtils {
    /// Element type of one lane (`Self` itself for scalar floats).
    type Scalar;
    fn replace(self, index: usize, new_value: Self::Scalar) -> Self;
    fn extract_lane(self, index: usize) -> Self::Scalar;
}
/// Implement functions on f32/f64 to give them APIs similar to SIMD types
pub(crate) trait FloatAsSIMD: Sized {
    // A scalar behaves like a one-lane vector.
    #[cfg(test)]
    const LEN: usize = 1;
    /// "Broadcast" for scalars: the identity function.
    #[inline(always)]
    fn splat(scalar: Self) -> Self {
        scalar
    }
}
/// `splat` for scalar integers, mirroring the SIMD `Simd::splat` API.
pub(crate) trait IntAsSIMD: Sized {
    #[inline(always)]
    fn splat(scalar: Self) -> Self {
        scalar
    }
}
impl IntAsSIMD for u32 {}
impl IntAsSIMD for u64 {}
/// Gives plain `bool` the `any()` method of SIMD masks, so mask-handling code
/// can be written once for scalars and vectors.
pub(crate) trait BoolAsSIMD: Sized {
    fn any(self) -> bool;
}
impl BoolAsSIMD for bool {
    // A single "lane" is set iff the bool itself is true.
    #[inline(always)]
    fn any(self) -> bool {
        self
    }
}
// Implements the SIMD-style helper traits for scalar floats, treating each
// value as a one-lane vector. `$uty` is the same-width unsigned integer type.
macro_rules! scalar_float_impl {
    ($ty:ident, $uty:ident) => {
        impl FloatSIMDUtils for $ty {
            type Mask = bool;
            type UInt = $uty;
            #[inline(always)]
            fn all_lt(self, other: Self) -> bool {
                self < other
            }
            #[inline(always)]
            fn all_le(self, other: Self) -> bool {
                self <= other
            }
            #[inline(always)]
            fn all_finite(self) -> bool {
                self.is_finite()
            }
            #[inline(always)]
            fn gt_mask(self, other: Self) -> Self::Mask {
                self > other
            }
            #[inline(always)]
            fn decrease_masked(self, mask: Self::Mask) -> Self {
                debug_assert!(mask, "At least one lane must be set");
                // Subtracting one from the bit representation steps to the
                // next lower representable float (for the values this is
                // used on; compare the SIMD version's comment below).
                <$ty>::from_bits(self.to_bits() - 1)
            }
            #[inline]
            fn cast_from_int(i: Self::UInt) -> Self {
                // Numeric conversion (value-preserving), not a bit reinterpret.
                i as $ty
            }
        }
        #[cfg(test)]
        impl FloatSIMDScalarUtils for $ty {
            type Scalar = $ty;
            #[inline]
            fn replace(self, index: usize, new_value: Self::Scalar) -> Self {
                // A scalar has exactly one "lane".
                debug_assert_eq!(index, 0);
                new_value
            }
            #[inline]
            fn extract_lane(self, index: usize) -> Self::Scalar {
                debug_assert_eq!(index, 0);
                self
            }
        }
        impl FloatAsSIMD for $ty {}
    };
}
scalar_float_impl!(f32, u32);
scalar_float_impl!(f64, u64);
// Implements the helper traits for portable-SIMD float vectors of any
// supported lane count. `$uty` is the unsigned integer lane type matching
// `$fty`'s width.
#[cfg(feature = "simd_support")]
macro_rules! simd_impl {
    ($fty:ident, $uty:ident) => {
        impl<const LANES: usize> FloatSIMDUtils for Simd<$fty, LANES>
        where
            LaneCount<LANES>: SupportedLaneCount,
        {
            type Mask = Mask<<$fty as SimdElement>::Mask, LANES>;
            type UInt = Simd<$uty, LANES>;
            #[inline(always)]
            fn all_lt(self, other: Self) -> bool {
                self.simd_lt(other).all()
            }
            #[inline(always)]
            fn all_le(self, other: Self) -> bool {
                self.simd_le(other).all()
            }
            #[inline(always)]
            fn all_finite(self) -> bool {
                self.is_finite().all()
            }
            #[inline(always)]
            fn gt_mask(self, other: Self) -> Self::Mask {
                self.simd_gt(other)
            }
            #[inline(always)]
            fn decrease_masked(self, mask: Self::Mask) -> Self {
                // Casting a mask into ints will produce all bits set for
                // true, and 0 for false. Adding that to the binary
                // representation of a float means subtracting one from
                // the binary representation, resulting in the next lower
                // value representable by $fty. This works even when the
                // current value is infinity.
                debug_assert!(mask.any(), "At least one lane must be set");
                Self::from_bits(self.to_bits() + mask.to_int().cast())
            }
            #[inline]
            fn cast_from_int(i: Self::UInt) -> Self {
                // Per-lane numeric conversion (value-preserving).
                i.cast()
            }
        }
        #[cfg(test)]
        impl<const LANES: usize> FloatSIMDScalarUtils for Simd<$fty, LANES>
        where
            LaneCount<LANES>: SupportedLaneCount,
        {
            type Scalar = $fty;
            #[inline]
            fn replace(mut self, index: usize, new_value: Self::Scalar) -> Self {
                self.as_mut_array()[index] = new_value;
                self
            }
            #[inline]
            fn extract_lane(self, index: usize) -> Self::Scalar {
                self.as_array()[index]
            }
        }
    };
}
#[cfg(feature = "simd_support")]
simd_impl!(f32, u32);
#[cfg(feature = "simd_support")]
simd_impl!(f64, u64);

115
vendor/rand/src/distr/weighted/mod.rs vendored Normal file
View File

@@ -0,0 +1,115 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Weighted (index) sampling
//!
//! Primarily, this module houses the [`WeightedIndex`] distribution.
//! See also [`rand_distr::weighted`] for alternative implementations supporting
//! potentially-faster sampling or a more easily modifiable tree structure.
//!
//! [`rand_distr::weighted`]: https://docs.rs/rand_distr/latest/rand_distr/weighted/index.html
use core::fmt;
mod weighted_index;
pub use weighted_index::WeightedIndex;
/// Bounds on a weight
///
/// See usage in [`WeightedIndex`].
pub trait Weight: Clone {
    /// Representation of 0
    const ZERO: Self;
    /// Checked addition
    ///
    /// - `Result::Ok`: On success, `v` is added to `self`
    /// - `Result::Err`: Returns an error when `Self` cannot represent the
    ///   result of `self + v` (i.e. overflow). The value of `self` should be
    ///   discarded.
    // Uses `Result<(), ()>` rather than `bool` so callers must handle failure.
    #[allow(clippy::result_unit_err)]
    fn checked_add_assign(&mut self, v: &Self) -> Result<(), ()>;
}
// Implements `Weight` for integer types via the built-in `checked_add`,
// which detects overflow exactly.
macro_rules! impl_weight_int {
    ($t:ty) => {
        impl Weight for $t {
            const ZERO: Self = 0;
            fn checked_add_assign(&mut self, v: &Self) -> Result<(), ()> {
                match self.checked_add(*v) {
                    Some(sum) => {
                        *self = sum;
                        Ok(())
                    }
                    None => Err(()),
                }
            }
        }
    };
    // Variadic arm: recursively expand a comma-separated list of types.
    ($t:ty, $($tt:ty),*) => {
        impl_weight_int!($t);
        impl_weight_int!($($tt),*);
    }
}
impl_weight_int!(i8, i16, i32, i64, i128, isize);
impl_weight_int!(u8, u16, u32, u64, u128, usize);
// Implements `Weight` for float types. Addition never reports an error:
// floats saturate to infinity instead of wrapping.
macro_rules! impl_weight_float {
    ($t:ty) => {
        impl Weight for $t {
            const ZERO: Self = 0.0;
            fn checked_add_assign(&mut self, v: &Self) -> Result<(), ()> {
                // Floats have an explicit representation for overflow
                *self += *v;
                Ok(())
            }
        }
    };
}
impl_weight_float!(f32);
impl_weight_float!(f64);
/// Invalid weight errors
///
/// This type represents errors from [`WeightedIndex::new`],
/// [`WeightedIndex::update_weights`] and other weighted distributions.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
// Marked non_exhaustive to allow a new error code in the solution to #1476.
#[non_exhaustive]
pub enum Error {
    /// The input weight sequence is empty, too long, or wrongly ordered
    InvalidInput,
    /// A weight is negative, too large for the distribution, or not a valid number
    InvalidWeight,
    /// Not enough non-zero weights are available to sample values
    ///
    /// When attempting to sample a single value this implies that all weights
    /// are zero. When attempting to sample `amount` values this implies that
    /// less than `amount` weights are greater than zero.
    InsufficientNonZero,
    /// Overflow when calculating the sum of weights
    Overflow,
}
#[cfg(feature = "std")]
impl std::error::Error for Error {}
impl fmt::Display for Error {
    /// Write a short, human-readable description of the error.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let description = match self {
            Error::InvalidInput => "Weights sequence is empty/too long/unordered",
            Error::InvalidWeight => "A weight is negative, too large or not a valid number",
            Error::InsufficientNonZero => "Not enough weights > zero",
            Error::Overflow => "Overflow when summing weights",
        };
        f.write_str(description)
    }
}

View File

@@ -0,0 +1,631 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::{Error, Weight};
use crate::distr::uniform::{SampleBorrow, SampleUniform, UniformSampler};
use crate::distr::Distribution;
use crate::Rng;
// Note that this whole module is only imported if feature="alloc" is enabled.
use alloc::vec::Vec;
use core::fmt::{self, Debug};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// A distribution using weighted sampling of discrete items.
///
/// Sampling a `WeightedIndex` distribution returns the index of a randomly
/// selected element from the iterator used when the `WeightedIndex` was
/// created. The chance of a given element being picked is proportional to the
/// weight of the element. The weights can use any type `X` for which an
/// implementation of [`Uniform<X>`] exists. The implementation guarantees that
/// elements with zero weight are never picked, even when the weights are
/// floating point numbers.
///
/// # Performance
///
/// Time complexity of sampling from `WeightedIndex` is `O(log N)` where
/// `N` is the number of weights.
/// See also [`rand_distr::weighted`] for alternative implementations supporting
/// potentially-faster sampling or a more easily modifiable tree structure.
///
/// A `WeightedIndex<X>` contains a `Vec<X>` and a [`Uniform<X>`] and so its
/// size is the sum of the size of those objects, possibly plus some alignment.
///
/// Creating a `WeightedIndex<X>` will allocate enough space to hold `N - 1`
/// weights of type `X`, where `N` is the number of weights. However, since
/// `Vec` doesn't guarantee a particular growth strategy, additional memory
/// might be allocated but not used. Since the `WeightedIndex` object also
/// contains an instance of `X::Sampler`, this might cause additional allocations,
/// though for primitive types, [`Uniform<X>`] doesn't allocate any memory.
///
/// Sampling from `WeightedIndex` will result in a single call to
/// `Uniform<X>::sample` (method of the [`Distribution`] trait), which typically
/// will request a single value from the underlying [`RngCore`], though the
/// exact number depends on the implementation of `Uniform<X>::sample`.
///
/// # Example
///
/// ```
/// use rand::prelude::*;
/// use rand::distr::weighted::WeightedIndex;
///
/// let choices = ['a', 'b', 'c'];
/// let weights = [2, 1, 1];
/// let dist = WeightedIndex::new(&weights).unwrap();
/// let mut rng = rand::rng();
/// for _ in 0..100 {
/// // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c'
/// println!("{}", choices[dist.sample(&mut rng)]);
/// }
///
/// let items = [('a', 0.0), ('b', 3.0), ('c', 7.0)];
/// let dist2 = WeightedIndex::new(items.iter().map(|item| item.1)).unwrap();
/// for _ in 0..100 {
/// // 0% chance to print 'a', 30% chance to print 'b', 70% chance to print 'c'
/// println!("{}", items[dist2.sample(&mut rng)].0);
/// }
/// ```
///
/// [`Uniform<X>`]: crate::distr::Uniform
/// [`RngCore`]: crate::RngCore
/// [`rand_distr::weighted`]: https://docs.rs/rand_distr/latest/rand_distr/weighted/index.html
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct WeightedIndex<X: SampleUniform + PartialOrd> {
    // Partial sums: entry `i` is the sum of weights `0..i` (the total is not
    // stored here — see `total_weight`). Built in `new`.
    cumulative_weights: Vec<X>,
    // Sum of all weights; exclusive upper bound of the sampled range.
    total_weight: X,
    // Uniform sampler over `[ZERO, total_weight)`.
    weight_distribution: X::Sampler,
}
impl<X: SampleUniform + PartialOrd> WeightedIndex<X> {
    /// Creates a new a `WeightedIndex` [`Distribution`] using the values
    /// in `weights`. The weights can use any type `X` for which an
    /// implementation of [`Uniform<X>`] exists.
    ///
    /// Error cases:
    /// - [`Error::InvalidInput`] when the iterator `weights` is empty.
    /// - [`Error::InvalidWeight`] when a weight is not-a-number or negative.
    /// - [`Error::InsufficientNonZero`] when the sum of all weights is zero.
    /// - [`Error::Overflow`] when the sum of all weights overflows.
    ///
    /// [`Uniform<X>`]: crate::distr::uniform::Uniform
    pub fn new<I>(weights: I) -> Result<WeightedIndex<X>, Error>
    where
        I: IntoIterator,
        I::Item: SampleBorrow<X>,
        X: Weight,
    {
        let mut iter = weights.into_iter();
        let mut total_weight: X = iter.next().ok_or(Error::InvalidInput)?.borrow().clone();
        let zero = X::ZERO;
        // `!(x >= 0)` rather than `x < 0` so that NaN is also rejected.
        if !(total_weight >= zero) {
            return Err(Error::InvalidWeight);
        }
        let mut weights = Vec::<X>::with_capacity(iter.size_hint().0);
        for w in iter {
            // Note that `!(w >= x)` is not equivalent to `w < x` for partially
            // ordered types due to NaNs which are equal to nothing.
            if !(w.borrow() >= &zero) {
                return Err(Error::InvalidWeight);
            }
            // Push the running sum *before* adding `w`, so entry `i` holds
            // the sum of weights `0..i`.
            weights.push(total_weight.clone());
            if let Err(()) = total_weight.checked_add_assign(w.borrow()) {
                return Err(Error::Overflow);
            }
        }
        if total_weight == zero {
            return Err(Error::InsufficientNonZero);
        }
        // `total_weight > zero` was just verified, so the sampler range is
        // non-empty and construction cannot fail.
        let distr = X::Sampler::new(zero, total_weight.clone()).unwrap();
        Ok(WeightedIndex {
            cumulative_weights: weights,
            total_weight,
            weight_distribution: distr,
        })
    }
    /// Update a subset of weights, without changing the number of weights.
    ///
    /// `new_weights` must be sorted by the index.
    ///
    /// Using this method instead of `new` might be more efficient if only a small number of
    /// weights is modified. No allocations are performed, unless the weight type `X` uses
    /// allocation internally.
    ///
    /// In case of error, `self` is not modified. Error cases:
    /// - [`Error::InvalidInput`] when `new_weights` are not ordered by
    ///   index or an index is too large.
    /// - [`Error::InvalidWeight`] when a weight is not-a-number or negative.
    /// - [`Error::InsufficientNonZero`] when the sum of all weights is zero.
    ///   Note that due to floating-point loss of precision, this case is not
    ///   always correctly detected; usage of a fixed-point weight type may be
    ///   preferred.
    ///
    /// Updates take `O(N)` time. If you need to frequently update weights, consider
    /// [`rand_distr::weighted_tree`](https://docs.rs/rand_distr/*/rand_distr/weighted_tree/index.html)
    /// as an alternative where an update is `O(log N)`.
    pub fn update_weights(&mut self, new_weights: &[(usize, &X)]) -> Result<(), Error>
    where
        X: for<'a> core::ops::AddAssign<&'a X>
            + for<'a> core::ops::SubAssign<&'a X>
            + Clone
            + Default,
    {
        if new_weights.is_empty() {
            return Ok(());
        }
        let zero = <X as Default>::default();
        let mut total_weight = self.total_weight.clone();
        // Check for errors first, so we don't modify `self` in case something
        // goes wrong.
        let mut prev_i = None;
        for &(i, w) in new_weights {
            // Indices must be strictly increasing.
            if let Some(old_i) = prev_i {
                if old_i >= i {
                    return Err(Error::InvalidInput);
                }
            }
            // Negated comparison so NaN weights are rejected too.
            if !(*w >= zero) {
                return Err(Error::InvalidWeight);
            }
            if i > self.cumulative_weights.len() {
                return Err(Error::InvalidInput);
            }
            // Recover the current weight at `i` as a difference of adjacent
            // cumulative sums (the last weight uses `total_weight` as its
            // upper cumulative sum).
            let mut old_w = if i < self.cumulative_weights.len() {
                self.cumulative_weights[i].clone()
            } else {
                self.total_weight.clone()
            };
            if i > 0 {
                old_w -= &self.cumulative_weights[i - 1];
            }
            total_weight -= &old_w;
            total_weight += w;
            prev_i = Some(i);
        }
        if total_weight <= zero {
            return Err(Error::InsufficientNonZero);
        }
        // Update the weights. Because we checked all the preconditions in the
        // previous loop, this should never panic.
        let mut iter = new_weights.iter();
        let mut prev_weight = zero.clone();
        let mut next_new_weight = iter.next();
        let &(first_new_index, _) = next_new_weight.unwrap();
        // Cumulative sums before the first updated index are unchanged;
        // resume accumulation from there.
        let mut cumulative_weight = if first_new_index > 0 {
            self.cumulative_weights[first_new_index - 1].clone()
        } else {
            zero.clone()
        };
        for i in first_new_index..self.cumulative_weights.len() {
            match next_new_weight {
                Some(&(j, w)) if i == j => {
                    // Replaced weight: add the new value.
                    cumulative_weight += w;
                    next_new_weight = iter.next();
                }
                _ => {
                    // Unchanged weight: recover it from the old cumulative
                    // sums and add it to the new running total.
                    let mut tmp = self.cumulative_weights[i].clone();
                    tmp -= &prev_weight; // We know this is positive.
                    cumulative_weight += &tmp;
                }
            }
            // Store the new cumulative sum while keeping the *old* one in
            // `prev_weight` for recovering the next unchanged weight.
            prev_weight = cumulative_weight.clone();
            core::mem::swap(&mut prev_weight, &mut self.cumulative_weights[i]);
        }
        self.total_weight = total_weight;
        self.weight_distribution = X::Sampler::new(zero, self.total_weight.clone()).unwrap();
        Ok(())
    }
}
/// A lazy-loading iterator over the weights of a `WeightedIndex` distribution.
/// This is returned by [`WeightedIndex::weights`].
pub struct WeightedIndexIter<'a, X: SampleUniform + PartialOrd> {
    // Distribution whose weights are being iterated (borrowed, not copied).
    weighted_index: &'a WeightedIndex<X>,
    // Index of the next weight to yield.
    index: usize,
}
// Manual impl: a derive would require bounds on `X` only, while the borrowed
// `WeightedIndex<X>` needs `X::Sampler: Debug` as well.
impl<X> Debug for WeightedIndexIter<'_, X>
where
    X: SampleUniform + PartialOrd + Debug,
    X::Sampler: Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("WeightedIndexIter")
            .field("weighted_index", &self.weighted_index)
            .field("index", &self.index)
            .finish()
    }
}
// Manual impl: derived `Clone` would needlessly require `X: Clone`; the
// iterator only holds a shared reference plus a position.
impl<X> Clone for WeightedIndexIter<'_, X>
where
    X: SampleUniform + PartialOrd,
{
    fn clone(&self) -> Self {
        Self {
            weighted_index: self.weighted_index,
            index: self.index,
        }
    }
}
impl<X> Iterator for WeightedIndexIter<'_, X>
where
    X: for<'b> core::ops::SubAssign<&'b X> + SampleUniform + PartialOrd + Clone,
{
    type Item = X;
    /// Yield the next weight, advancing the cursor. `weight` returns `None`
    /// once `index` moves past the last element, ending the iteration.
    fn next(&mut self) -> Option<Self::Item> {
        let w = self.weighted_index.weight(self.index)?;
        self.index += 1;
        Some(w)
    }
}
impl<X: SampleUniform + PartialOrd + Clone> WeightedIndex<X> {
    /// Returns the weight at the given index, if it exists.
    ///
    /// If the index is out of bounds, this will return `None`.
    ///
    /// # Example
    ///
    /// ```
    /// use rand::distr::weighted::WeightedIndex;
    ///
    /// let weights = [0, 1, 2];
    /// let dist = WeightedIndex::new(&weights).unwrap();
    /// assert_eq!(dist.weight(0), Some(0));
    /// assert_eq!(dist.weight(1), Some(1));
    /// assert_eq!(dist.weight(2), Some(2));
    /// assert_eq!(dist.weight(3), None);
    /// ```
    pub fn weight(&self, index: usize) -> Option<X>
    where
        X: for<'a> core::ops::SubAssign<&'a X>,
    {
        let len = self.cumulative_weights.len();
        if index > len {
            return None;
        }
        // `cumulative_weights[i]` is the sum of weights `0..i`, so a single
        // weight is the difference of two adjacent cumulative sums; the last
        // weight's upper sum is `total_weight`.
        let mut weight = if index < len {
            self.cumulative_weights[index].clone()
        } else {
            self.total_weight.clone()
        };
        if let Some(prev) = index.checked_sub(1) {
            weight -= &self.cumulative_weights[prev];
        }
        Some(weight)
    }
    /// Returns a lazy-loading iterator containing the current weights of this distribution.
    ///
    /// If this distribution has not been updated since its creation, this will return the
    /// same weights as were passed to `new`.
    ///
    /// # Example
    ///
    /// ```
    /// use rand::distr::weighted::WeightedIndex;
    ///
    /// let weights = [1, 2, 3];
    /// let mut dist = WeightedIndex::new(&weights).unwrap();
    /// assert_eq!(dist.weights().collect::<Vec<_>>(), vec![1, 2, 3]);
    /// dist.update_weights(&[(0, &2)]).unwrap();
    /// assert_eq!(dist.weights().collect::<Vec<_>>(), vec![2, 2, 3]);
    /// ```
    pub fn weights(&self) -> WeightedIndexIter<'_, X>
    where
        X: for<'a> core::ops::SubAssign<&'a X>,
    {
        // Start iteration at the first weight.
        WeightedIndexIter {
            weighted_index: self,
            index: 0,
        }
    }
    /// Returns the sum of all weights in this distribution.
    pub fn total_weight(&self) -> X {
        self.total_weight.clone()
    }
}
impl<X> Distribution<usize> for WeightedIndex<X>
where
    X: SampleUniform + PartialOrd,
{
    /// Sample an index with probability proportional to its weight: draw a
    /// value in `[0, total_weight)` and binary-search for the first
    /// cumulative weight strictly greater than it.
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
        let drawn = self.weight_distribution.sample(rng);
        self.cumulative_weights.partition_point(|w| *w <= drawn)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // Round-trip through serde and check the deserialized fields match.
    #[cfg(feature = "serde")]
    #[test]
    fn test_weightedindex_serde() {
        let weighted_index = WeightedIndex::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).unwrap();
        let ser_weighted_index = bincode::serialize(&weighted_index).unwrap();
        let de_weighted_index: WeightedIndex<i32> =
            bincode::deserialize(&ser_weighted_index).unwrap();
        assert_eq!(
            de_weighted_index.cumulative_weights,
            weighted_index.cumulative_weights
        );
        assert_eq!(de_weighted_index.total_weight, weighted_index.total_weight);
    }
    // NaN weights must be rejected in any position, and by `update_weights`.
    #[test]
    fn test_accepting_nan() {
        assert_eq!(
            WeightedIndex::new([f32::NAN, 0.5]).unwrap_err(),
            Error::InvalidWeight,
        );
        assert_eq!(
            WeightedIndex::new([f32::NAN]).unwrap_err(),
            Error::InvalidWeight,
        );
        assert_eq!(
            WeightedIndex::new([0.5, f32::NAN]).unwrap_err(),
            Error::InvalidWeight,
        );
        assert_eq!(
            WeightedIndex::new([0.5, 7.0])
                .unwrap()
                .update_weights(&[(0, &f32::NAN)])
                .unwrap_err(),
            Error::InvalidWeight,
        )
    }
    // Statistical check: sampled frequencies stay within 25% relative error
    // of the expected proportions, plus error-path checks.
    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    fn test_weightedindex() {
        let mut r = crate::test::rng(700);
        const N_REPS: u32 = 5000;
        let weights = [1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7];
        let total_weight = weights.iter().sum::<u32>() as f32;
        let verify = |result: [i32; 14]| {
            for (i, count) in result.iter().enumerate() {
                let exp = (weights[i] * N_REPS) as f32 / total_weight;
                let mut err = (*count as f32 - exp).abs();
                if err != 0.0 {
                    err /= exp;
                }
                assert!(err <= 0.25);
            }
        };
        // WeightedIndex from vec
        let mut chosen = [0i32; 14];
        let distr = WeightedIndex::new(weights.to_vec()).unwrap();
        for _ in 0..N_REPS {
            chosen[distr.sample(&mut r)] += 1;
        }
        verify(chosen);
        // WeightedIndex from slice
        chosen = [0i32; 14];
        let distr = WeightedIndex::new(&weights[..]).unwrap();
        for _ in 0..N_REPS {
            chosen[distr.sample(&mut r)] += 1;
        }
        verify(chosen);
        // WeightedIndex from iterator
        chosen = [0i32; 14];
        let distr = WeightedIndex::new(weights.iter()).unwrap();
        for _ in 0..N_REPS {
            chosen[distr.sample(&mut r)] += 1;
        }
        verify(chosen);
        // Zero-weight entries must never be sampled.
        for _ in 0..5 {
            assert_eq!(WeightedIndex::new([0, 1]).unwrap().sample(&mut r), 1);
            assert_eq!(WeightedIndex::new([1, 0]).unwrap().sample(&mut r), 0);
            assert_eq!(
                WeightedIndex::new([0, 0, 0, 0, 10, 0])
                    .unwrap()
                    .sample(&mut r),
                4
            );
        }
        assert_eq!(
            WeightedIndex::new(&[10][0..0]).unwrap_err(),
            Error::InvalidInput
        );
        assert_eq!(
            WeightedIndex::new([0]).unwrap_err(),
            Error::InsufficientNonZero
        );
        assert_eq!(
            WeightedIndex::new([10, 20, -1, 30]).unwrap_err(),
            Error::InvalidWeight
        );
        assert_eq!(
            WeightedIndex::new([-10, 20, 1, 30]).unwrap_err(),
            Error::InvalidWeight
        );
        assert_eq!(WeightedIndex::new([-10]).unwrap_err(), Error::InvalidWeight);
    }
    // `update_weights` must produce the same state as constructing a fresh
    // distribution from the expected weights.
    #[test]
    fn test_update_weights() {
        let data = [
            (
                &[10u32, 2, 3, 4][..],
                &[(1, &100), (2, &4)][..], // positive change
                &[10, 100, 4, 4][..],
            ),
            (
                &[1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7][..],
                &[(2, &1), (5, &1), (13, &100)][..], // negative change and last element
                &[1u32, 2, 1, 0, 5, 1, 7, 1, 2, 3, 4, 5, 6, 100][..],
            ),
        ];
        for (weights, update, expected_weights) in data.iter() {
            let total_weight = weights.iter().sum::<u32>();
            let mut distr = WeightedIndex::new(weights.to_vec()).unwrap();
            assert_eq!(distr.total_weight, total_weight);
            distr.update_weights(update).unwrap();
            let expected_total_weight = expected_weights.iter().sum::<u32>();
            let expected_distr = WeightedIndex::new(expected_weights.to_vec()).unwrap();
            assert_eq!(distr.total_weight, expected_total_weight);
            assert_eq!(distr.total_weight, expected_distr.total_weight);
            assert_eq!(distr.cumulative_weights, expected_distr.cumulative_weights);
        }
    }
    // Each invalid update must return the matching error (and, per the
    // `update_weights` contract, leave the distribution unmodified).
    #[test]
    fn test_update_weights_errors() {
        let data = [
            (
                &[1i32, 0, 0][..],
                &[(0, &0)][..],
                Error::InsufficientNonZero,
            ),
            (
                &[10, 10, 10, 10][..],
                &[(1, &-11)][..],
                Error::InvalidWeight, // A weight is negative
            ),
            (
                &[1, 2, 3, 4, 5][..],
                &[(1, &5), (0, &5)][..], // Wrong order
                Error::InvalidInput,
            ),
            (
                &[1][..],
                &[(1, &1)][..], // Index too large
                Error::InvalidInput,
            ),
        ];
        for (weights, update, err) in data.iter() {
            let total_weight = weights.iter().sum::<i32>();
            let mut distr = WeightedIndex::new(weights.to_vec()).unwrap();
            assert_eq!(distr.total_weight, total_weight);
            match distr.update_weights(update) {
                Ok(_) => panic!("Expected update_weights to fail, but it succeeded"),
                Err(e) => assert_eq!(e, *err),
            }
        }
    }
    // `weight(i)` must recover each original weight; out-of-range is None.
    #[test]
    fn test_weight_at() {
        let data = [
            &[1][..],
            &[10, 2, 3, 4][..],
            &[1, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7][..],
            &[u32::MAX][..],
        ];
        for weights in data.iter() {
            let distr = WeightedIndex::new(weights.to_vec()).unwrap();
            for (i, weight) in weights.iter().enumerate() {
                assert_eq!(distr.weight(i), Some(*weight));
            }
            assert_eq!(distr.weight(weights.len()), None);
        }
    }
    // `weights()` must reproduce the input sequence exactly.
    #[test]
    fn test_weights() {
        let data = [
            &[1][..],
            &[10, 2, 3, 4][..],
            &[1, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7][..],
            &[u32::MAX][..],
        ];
        for weights in data.iter() {
            let distr = WeightedIndex::new(weights.to_vec()).unwrap();
            assert_eq!(distr.weights().collect::<Vec<_>>(), weights.to_vec());
        }
    }
    // Pin the exact sample sequence for a fixed seed, guarding against
    // accidental changes to the value stream across releases.
    #[test]
    fn value_stability() {
        fn test_samples<X: Weight + SampleUniform + PartialOrd, I>(
            weights: I,
            buf: &mut [usize],
            expected: &[usize],
        ) where
            I: IntoIterator,
            I::Item: SampleBorrow<X>,
        {
            assert_eq!(buf.len(), expected.len());
            let distr = WeightedIndex::new(weights).unwrap();
            let mut rng = crate::test::rng(701);
            for r in buf.iter_mut() {
                *r = rng.sample(&distr);
            }
            assert_eq!(buf, expected);
        }
        let mut buf = [0; 10];
        test_samples(
            [1i32, 1, 1, 1, 1, 1, 1, 1, 1],
            &mut buf,
            &[0, 6, 2, 6, 3, 4, 7, 8, 2, 5],
        );
        test_samples(
            [0.7f32, 0.1, 0.1, 0.1],
            &mut buf,
            &[0, 0, 0, 1, 0, 0, 2, 3, 0, 0],
        );
        test_samples(
            [1.0f64, 0.999, 0.998, 0.997],
            &mut buf,
            &[2, 2, 1, 3, 2, 1, 3, 3, 2, 1],
        );
    }
    #[test]
    fn weighted_index_distributions_can_be_compared() {
        assert_eq!(WeightedIndex::new([1, 2]), WeightedIndex::new([1, 2]));
    }
    // Summing `2 + usize::MAX` must be reported as overflow, not wrap.
    #[test]
    fn overflow() {
        assert_eq!(WeightedIndex::new([2, usize::MAX]), Err(Error::Overflow));
    }
}

360
vendor/rand/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,360 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Utilities for random number generation
//!
//! Rand provides utilities to generate random numbers, to convert them to
//! useful types and distributions, and some randomness-related algorithms.
//!
//! # Quick Start
//!
//! ```
//! // The prelude import enables methods we use below, specifically
//! // Rng::random, Rng::sample, SliceRandom::shuffle and IndexedRandom::choose.
//! use rand::prelude::*;
//!
//! // Get an RNG:
//! let mut rng = rand::rng();
//!
//! // Try printing a random unicode code point (probably a bad idea)!
//! println!("char: '{}'", rng.random::<char>());
//! // Try printing a random alphanumeric value instead!
//! println!("alpha: '{}'", rng.sample(rand::distr::Alphanumeric) as char);
//!
//! // Generate and shuffle a sequence:
//! let mut nums: Vec<i32> = (1..100).collect();
//! nums.shuffle(&mut rng);
//! // And take a random pick (yes, we didn't need to shuffle first!):
//! let _ = nums.choose(&mut rng);
//! ```
//!
//! # The Book
//!
//! For the user guide and further documentation, please read
//! [The Rust Rand Book](https://rust-random.github.io/book).
#![doc(
html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://rust-random.github.io/rand/"
)]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
#![no_std]
#![cfg_attr(feature = "simd_support", feature(portable_simd))]
#![cfg_attr(
all(feature = "simd_support", target_feature = "avx512bw"),
feature(stdarch_x86_avx512)
)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![allow(
clippy::float_cmp,
clippy::neg_cmp_op_on_partial_ord,
clippy::nonminimal_bool
)]
#![deny(clippy::undocumented_unsafe_blocks)]
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
// Logging shims: forward to the `log` crate when the "log" feature is
// enabled, and expand to nothing otherwise, so call sites need no cfg.
#[allow(unused)]
macro_rules! trace { ($($x:tt)*) => (
    #[cfg(feature = "log")] {
        log::trace!($($x)*)
    }
) }
#[allow(unused)]
macro_rules! debug { ($($x:tt)*) => (
    #[cfg(feature = "log")] {
        log::debug!($($x)*)
    }
) }
#[allow(unused)]
macro_rules! info { ($($x:tt)*) => (
    #[cfg(feature = "log")] {
        log::info!($($x)*)
    }
) }
#[allow(unused)]
macro_rules! warn { ($($x:tt)*) => (
    #[cfg(feature = "log")] {
        log::warn!($($x)*)
    }
) }
#[allow(unused)]
macro_rules! error { ($($x:tt)*) => (
    #[cfg(feature = "log")] {
        log::error!($($x)*)
    }
) }
// Re-export rand_core itself
pub use rand_core;
// Re-exports from rand_core
pub use rand_core::{CryptoRng, RngCore, SeedableRng, TryCryptoRng, TryRngCore};
// Public modules
pub mod distr;
pub mod prelude;
mod rng;
pub mod rngs;
pub mod seq;
// Public exports
#[cfg(feature = "thread_rng")]
pub use crate::rngs::thread::rng;
/// Access the thread-local generator
///
/// Use [`rand::rng()`](rng()) instead.
#[cfg(feature = "thread_rng")]
#[deprecated(since = "0.9.0", note = "Renamed to `rng`")]
#[inline]
pub fn thread_rng() -> crate::rngs::ThreadRng {
    // Pure alias kept for backward compatibility with the pre-0.9 name.
    rng()
}
pub use rng::{Fill, Rng};
#[cfg(feature = "thread_rng")]
use crate::distr::{Distribution, StandardUniform};
/// Generate a random value using the thread-local random number generator.
///
/// Equivalent to <code>[rng()].[random()](Rng::random)</code>:
///
/// - See [`ThreadRng`] for documentation of the generator and its security
/// - See [`StandardUniform`] for the supported types and distributions
///
/// # Examples
///
/// ```
/// let x = rand::random::<u8>();
/// println!("{}", x);
///
/// let y = rand::random::<f64>();
/// println!("{}", y);
///
/// if rand::random() { // generates a boolean
///     println!("Better lucky than good!");
/// }
/// ```
///
/// When calling `random()` repeatedly, prefer a local handle obtained from
/// [`rng()`], which avoids an initialization check on every call:
///
/// ```
/// use rand::Rng; // provides the `random` method
///
/// let mut rng = rand::rng(); // a local handle to the generator
///
/// let mut v = vec![1, 2, 3];
///
/// for x in v.iter_mut() {
///     *x = rng.random();
/// }
/// ```
///
/// [`StandardUniform`]: distr::StandardUniform
/// [`ThreadRng`]: rngs::ThreadRng
#[cfg(feature = "thread_rng")]
#[inline]
pub fn random<T>() -> T
where
    StandardUniform: Distribution<T>,
{
    // Same as `rng().random()`: sample the StandardUniform distribution
    // with a fresh thread-local handle.
    StandardUniform.sample(&mut rng())
}
/// Return an iterator over [`random()`] variates
///
/// Equivalent to
/// <code>[rng()].[random_iter](Rng::random_iter)()</code>.
///
/// # Example
///
/// ```
/// let v: Vec<i32> = rand::random_iter().take(5).collect();
/// println!("{v:?}");
/// ```
#[cfg(feature = "thread_rng")]
#[inline]
pub fn random_iter<T>() -> distr::Iter<StandardUniform, rngs::ThreadRng, T>
where
    StandardUniform: Distribution<T>,
{
    // Same as `rng().random_iter()`: the iterator takes ownership of a
    // fresh thread-local handle.
    StandardUniform.sample_iter(rng())
}
/// Generate a random value in the given range using the thread-local random number generator.
///
/// Equivalent to
/// <code>[rng()].[random_range](Rng::random_range)(<var>range</var>)</code>.
///
/// # Example
///
/// ```
/// let y: f32 = rand::random_range(0.0..=1e9);
/// println!("{}", y);
///
/// let words: Vec<&str> = "Mary had a little lamb".split(' ').collect();
/// println!("{}", words[rand::random_range(..words.len())]);
/// ```
/// Random selection of a word, as above, can also be achieved (without
/// `collect`'ing to a `Vec`) using [`seq::IteratorRandom::choose`].
#[cfg(feature = "thread_rng")]
#[inline]
pub fn random_range<T, R>(range: R) -> T
where
    T: distr::uniform::SampleUniform,
    R: distr::uniform::SampleRange<T>,
{
    let mut thread_rng = rng();
    thread_rng.random_range(range)
}
/// Return a bool with a probability `p` of being true.
///
/// Equivalent to
/// <code>[rng()].[random_bool](Rng::random_bool)(<var>p</var>)</code>.
///
/// # Example
///
/// ```
/// println!("{}", rand::random_bool(1.0 / 3.0));
/// ```
///
/// # Panics
///
/// If `p < 0` or `p > 1`.
#[cfg(feature = "thread_rng")]
#[inline]
#[track_caller]
pub fn random_bool(p: f64) -> bool {
    let mut thread_rng = rng();
    thread_rng.random_bool(p)
}
/// Return a bool with a probability of `numerator/denominator` of being
/// true.
///
/// That is, `random_ratio(2, 3)` has chance of 2 in 3, or about 67%, of
/// returning true. If `numerator == denominator`, then the returned value
/// is guaranteed to be `true`. If `numerator == 0`, then the returned
/// value is guaranteed to be `false`.
///
/// See also the [`Bernoulli`] distribution, which may be faster if
/// sampling from the same `numerator` and `denominator` repeatedly.
///
/// Equivalent to
/// <code>[rng()].[random_ratio](Rng::random_ratio)(<var>numerator</var>, <var>denominator</var>)</code>.
///
/// # Panics
///
/// If `denominator == 0` or `numerator > denominator`.
///
/// # Example
///
/// ```
/// println!("{}", rand::random_ratio(2, 3));
/// ```
///
/// [`Bernoulli`]: distr::Bernoulli
#[cfg(feature = "thread_rng")]
#[inline]
#[track_caller]
pub fn random_ratio(numerator: u32, denominator: u32) -> bool {
    let mut thread_rng = rng();
    thread_rng.random_ratio(numerator, denominator)
}
/// Fill any type implementing [`Fill`] with random data
///
/// Equivalent to
/// <code>[rng()].[fill](Rng::fill)(<var>dest</var>)</code>.
///
/// # Example
///
/// ```
/// let mut arr = [0i8; 20];
/// rand::fill(&mut arr[..]);
/// ```
///
/// Note that you can instead use [`random()`] to generate an array of random
/// data, though this is slower for small elements (smaller than the RNG word
/// size).
#[cfg(feature = "thread_rng")]
#[inline]
#[track_caller]
pub fn fill<T: Fill + ?Sized>(dest: &mut T) {
    // `Rng::fill` simply forwards to the destination's `Fill` impl, so this
    // is the same as `dest.fill(&mut rng())`.
    rng().fill(dest)
}
#[cfg(test)]
mod test {
    use super::*;

    /// Construct a deterministic RNG with the given seed
    pub fn rng(seed: u64) -> impl RngCore {
        // For tests, we want a statistically good, fast, reproducible RNG.
        // PCG32 will do fine, and will be easy to embed if we ever need to.
        const INC: u64 = 11634580027462260723;
        rand_pcg::Pcg32::new(seed, INC)
    }

    /// Construct a generator yielding a constant value
    pub fn const_rng(x: u64) -> StepRng {
        // Increment of zero: every output equals `x`.
        StepRng(x, 0)
    }

    /// Construct a generator yielding an arithmetic sequence
    pub fn step_rng(x: u64, increment: u64) -> StepRng {
        StepRng(x, increment)
    }

    /// Minimal test generator. Field 0 is the next output; field 1 is the
    /// (wrapping) increment applied after each `next_u64` call.
    #[derive(Clone)]
    pub struct StepRng(u64, u64);

    impl RngCore for StepRng {
        fn next_u32(&mut self) -> u32 {
            // Truncate the 64-bit output to its low 32 bits.
            self.next_u64() as u32
        }

        fn next_u64(&mut self) -> u64 {
            let res = self.0;
            self.0 = self.0.wrapping_add(self.1);
            res
        }

        fn fill_bytes(&mut self, dst: &mut [u8]) {
            // Derive bytes from successive `next_u64` outputs via the
            // rand_core helper.
            rand_core::impls::fill_bytes_via_next(self, dst)
        }
    }

    #[test]
    #[cfg(feature = "thread_rng")]
    fn test_random() {
        // Smoke-test `random()` over scalars, arrays and nested tuples.
        let _n: u64 = random();
        let _f: f32 = random();
        #[allow(clippy::type_complexity)]
        let _many: (
            (),
            [(u32, bool); 3],
            (u8, i8, u16, i16, u32, i32, u64, i64),
            (f32, (f64, (f64,))),
        ) = random();
    }

    #[test]
    #[cfg(feature = "thread_rng")]
    fn test_range() {
        // Smoke-test `random_range` with inclusive and half-open ranges.
        let _n: usize = random_range(42..=43);
        let _f: f32 = random_range(42.0..43.0);
    }
}

35
vendor/rand/src/prelude.rs vendored Normal file
View File

@@ -0,0 +1,35 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Convenience re-export of common members
//!
//! Like the standard library's prelude, this module simplifies importing of
//! common items. Unlike the standard prelude, the contents of this module must
//! be imported manually:
//!
//! ```
//! use rand::prelude::*;
//! # let mut r = StdRng::from_rng(&mut rand::rng());
//! # let _: f32 = r.random();
//! ```
#[doc(no_inline)]
pub use crate::distr::Distribution;
#[cfg(feature = "small_rng")]
#[doc(no_inline)]
pub use crate::rngs::SmallRng;
#[cfg(feature = "std_rng")]
#[doc(no_inline)]
pub use crate::rngs::StdRng;
#[doc(no_inline)]
#[cfg(feature = "thread_rng")]
pub use crate::rngs::ThreadRng;
#[doc(no_inline)]
pub use crate::seq::{IndexedMutRandom, IndexedRandom, IteratorRandom, SliceRandom};
#[doc(no_inline)]
pub use crate::{CryptoRng, Rng, RngCore, SeedableRng};

657
vendor/rand/src/rng.rs vendored Normal file
View File

@@ -0,0 +1,657 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! [`Rng`] trait
use crate::distr::uniform::{SampleRange, SampleUniform};
use crate::distr::{self, Distribution, StandardUniform};
use core::num::Wrapping;
use core::{mem, slice};
use rand_core::RngCore;
/// User-level interface for RNGs
///
/// [`RngCore`] is the `dyn`-safe implementation-level interface for Random
/// (Number) Generators. This trait, `Rng`, provides a user-level interface on
/// RNGs. It is implemented automatically for any `R: RngCore`.
///
/// This trait must usually be brought into scope via `use rand::Rng;` or
/// `use rand::prelude::*;`.
///
/// # Generic usage
///
/// The basic pattern is `fn foo<R: Rng + ?Sized>(rng: &mut R)`. Some
/// things are worth noting here:
///
/// - Since `Rng: RngCore` and every `RngCore` implements `Rng`, it makes no
/// difference whether we use `R: Rng` or `R: RngCore`.
/// - The `+ ?Sized` un-bounding allows functions to be called directly on
/// type-erased references; i.e. `foo(r)` where `r: &mut dyn RngCore`. Without
/// this it would be necessary to write `foo(&mut r)`.
///
/// An alternative pattern is possible: `fn foo<R: Rng>(rng: R)`. This has some
/// trade-offs. It allows the argument to be consumed directly without a `&mut`
/// (which is how `from_rng(rand::rng())` works); also it still works directly
/// on references (including type-erased references). Unfortunately within the
/// function `foo` it is not known whether `rng` is a reference type or not,
/// hence many uses of `rng` require an extra reference, either explicitly
/// (`distr.sample(&mut rng)`) or implicitly (`rng.random()`); one may hope the
/// optimiser can remove redundant references later.
///
/// Example:
///
/// ```
/// use rand::Rng;
///
/// fn foo<R: Rng + ?Sized>(rng: &mut R) -> f32 {
/// rng.random()
/// }
///
/// # let v = foo(&mut rand::rng());
/// ```
pub trait Rng: RngCore {
    /// Return a random value via the [`StandardUniform`] distribution.
    ///
    /// # Example
    ///
    /// ```
    /// use rand::Rng;
    ///
    /// let mut rng = rand::rng();
    /// let x: u32 = rng.random();
    /// println!("{}", x);
    /// println!("{:?}", rng.random::<(f64, bool)>());
    /// ```
    ///
    /// # Arrays and tuples
    ///
    /// The `rng.random()` method is able to generate arrays
    /// and tuples (up to 12 elements), so long as all element types can be
    /// generated.
    ///
    /// For arrays of integers, especially for those with small element types
    /// (< 64 bit), it will likely be faster to instead use [`Rng::fill`],
    /// though note that generated values will differ.
    ///
    /// ```
    /// use rand::Rng;
    ///
    /// let mut rng = rand::rng();
    /// let tuple: (u8, i32, char) = rng.random(); // arbitrary tuple support
    ///
    /// let arr1: [f32; 32] = rng.random(); // array construction
    /// let mut arr2 = [0u8; 128];
    /// rng.fill(&mut arr2); // array fill
    /// ```
    ///
    /// [`StandardUniform`]: distr::StandardUniform
    #[inline]
    fn random<T>(&mut self) -> T
    where
        StandardUniform: Distribution<T>,
    {
        // Delegate to the StandardUniform distribution for type T.
        StandardUniform.sample(self)
    }

    /// Return an iterator over [`random`](Self::random) variates
    ///
    /// This is a just a wrapper over [`Rng::sample_iter`] using
    /// [`distr::StandardUniform`].
    ///
    /// Note: this method consumes its argument. Use
    /// `(&mut rng).random_iter()` to avoid consuming the RNG.
    ///
    /// # Example
    ///
    /// ```
    /// use rand::{rngs::SmallRng, Rng, SeedableRng};
    ///
    /// let rng = SmallRng::seed_from_u64(0);
    /// let v: Vec<i32> = rng.random_iter().take(5).collect();
    /// assert_eq!(v.len(), 5);
    /// ```
    #[inline]
    fn random_iter<T>(self) -> distr::Iter<StandardUniform, Self, T>
    where
        Self: Sized,
        StandardUniform: Distribution<T>,
    {
        StandardUniform.sample_iter(self)
    }

    /// Generate a random value in the given range.
    ///
    /// This function is optimised for the case that only a single sample is
    /// made from the given range. See also the [`Uniform`] distribution
    /// type which may be faster if sampling from the same range repeatedly.
    ///
    /// All types support `low..high_exclusive` and `low..=high` range syntax.
    /// Unsigned integer types also support `..high_exclusive` and `..=high` syntax.
    ///
    /// # Panics
    ///
    /// Panics if the range is empty, or if `high - low` overflows for floats.
    ///
    /// # Example
    ///
    /// ```
    /// use rand::Rng;
    ///
    /// let mut rng = rand::rng();
    ///
    /// // Exclusive range
    /// let n: u32 = rng.random_range(..10);
    /// println!("{}", n);
    /// let m: f64 = rng.random_range(-40.0..1.3e5);
    /// println!("{}", m);
    ///
    /// // Inclusive range
    /// let n: u32 = rng.random_range(..=10);
    /// println!("{}", n);
    /// ```
    ///
    /// [`Uniform`]: distr::uniform::Uniform
    #[track_caller]
    fn random_range<T, R>(&mut self, range: R) -> T
    where
        T: SampleUniform,
        R: SampleRange<T>,
    {
        // Reject empty ranges up-front with a clear panic message.
        assert!(!range.is_empty(), "cannot sample empty range");
        // NOTE(review): the `unwrap` assumes `sample_single` only errors for
        // empty ranges, which the assert above has excluded — confirm against
        // `SampleRange::sample_single`'s contract.
        range.sample_single(self).unwrap()
    }

    /// Return a bool with a probability `p` of being true.
    ///
    /// See also the [`Bernoulli`] distribution, which may be faster if
    /// sampling from the same probability repeatedly.
    ///
    /// # Example
    ///
    /// ```
    /// use rand::Rng;
    ///
    /// let mut rng = rand::rng();
    /// println!("{}", rng.random_bool(1.0 / 3.0));
    /// ```
    ///
    /// # Panics
    ///
    /// If `p < 0` or `p > 1`.
    ///
    /// [`Bernoulli`]: distr::Bernoulli
    #[inline]
    #[track_caller]
    fn random_bool(&mut self, p: f64) -> bool {
        // `Bernoulli::new` rejects p outside [0, 1]; surface that here as a
        // panic (documented above) rather than returning the error.
        match distr::Bernoulli::new(p) {
            Ok(d) => self.sample(d),
            Err(_) => panic!("p={:?} is outside range [0.0, 1.0]", p),
        }
    }

    /// Return a bool with a probability of `numerator/denominator` of being
    /// true.
    ///
    /// That is, `random_ratio(2, 3)` has chance of 2 in 3, or about 67%, of
    /// returning true. If `numerator == denominator`, then the returned value
    /// is guaranteed to be `true`. If `numerator == 0`, then the returned
    /// value is guaranteed to be `false`.
    ///
    /// See also the [`Bernoulli`] distribution, which may be faster if
    /// sampling from the same `numerator` and `denominator` repeatedly.
    ///
    /// # Panics
    ///
    /// If `denominator == 0` or `numerator > denominator`.
    ///
    /// # Example
    ///
    /// ```
    /// use rand::Rng;
    ///
    /// let mut rng = rand::rng();
    /// println!("{}", rng.random_ratio(2, 3));
    /// ```
    ///
    /// [`Bernoulli`]: distr::Bernoulli
    #[inline]
    #[track_caller]
    fn random_ratio(&mut self, numerator: u32, denominator: u32) -> bool {
        // As with `random_bool`: invalid ratios become a panic.
        match distr::Bernoulli::from_ratio(numerator, denominator) {
            Ok(d) => self.sample(d),
            Err(_) => panic!(
                "p={}/{} is outside range [0.0, 1.0]",
                numerator, denominator
            ),
        }
    }

    /// Sample a new value, using the given distribution.
    ///
    /// ### Example
    ///
    /// ```
    /// use rand::Rng;
    /// use rand::distr::Uniform;
    ///
    /// let mut rng = rand::rng();
    /// let x = rng.sample(Uniform::new(10u32, 15).unwrap());
    /// // Type annotation requires two types, the type and distribution; the
    /// // distribution can be inferred.
    /// let y = rng.sample::<u16, _>(Uniform::new(10, 15).unwrap());
    /// ```
    fn sample<T, D: Distribution<T>>(&mut self, distr: D) -> T {
        distr.sample(self)
    }

    /// Create an iterator that generates values using the given distribution.
    ///
    /// Note: this method consumes its arguments. Use
    /// `(&mut rng).sample_iter(..)` to avoid consuming the RNG.
    ///
    /// # Example
    ///
    /// ```
    /// use rand::Rng;
    /// use rand::distr::{Alphanumeric, Uniform, StandardUniform};
    ///
    /// let mut rng = rand::rng();
    ///
    /// // Vec of 16 x f32:
    /// let v: Vec<f32> = (&mut rng).sample_iter(StandardUniform).take(16).collect();
    ///
    /// // String:
    /// let s: String = (&mut rng).sample_iter(Alphanumeric)
    ///     .take(7)
    ///     .map(char::from)
    ///     .collect();
    ///
    /// // Combined values
    /// println!("{:?}", (&mut rng).sample_iter(StandardUniform).take(5)
    ///     .collect::<Vec<(f64, bool)>>());
    ///
    /// // Dice-rolling:
    /// let die_range = Uniform::new_inclusive(1, 6).unwrap();
    /// let mut roll_die = (&mut rng).sample_iter(die_range);
    /// while roll_die.next().unwrap() != 6 {
    ///     println!("Not a 6; rolling again!");
    /// }
    /// ```
    fn sample_iter<T, D>(self, distr: D) -> distr::Iter<D, Self, T>
    where
        D: Distribution<T>,
        Self: Sized,
    {
        distr.sample_iter(self)
    }

    /// Fill any type implementing [`Fill`] with random data
    ///
    /// This method is implemented for types which may be safely reinterpreted
    /// as an (aligned) `[u8]` slice then filled with random data. It is often
    /// faster than using [`Rng::random`] but not value-equivalent.
    ///
    /// The distribution is expected to be uniform with portable results, but
    /// this cannot be guaranteed for third-party implementations.
    ///
    /// # Example
    ///
    /// ```
    /// use rand::Rng;
    ///
    /// let mut arr = [0i8; 20];
    /// rand::rng().fill(&mut arr[..]);
    /// ```
    ///
    /// [`fill_bytes`]: RngCore::fill_bytes
    #[track_caller]
    fn fill<T: Fill + ?Sized>(&mut self, dest: &mut T) {
        // Dispatch on the destination's `Fill` implementation.
        dest.fill(self)
    }

    // Pre-0.9 method names, kept as deprecated aliases for compatibility.

    /// Alias for [`Rng::random`].
    #[inline]
    #[deprecated(
        since = "0.9.0",
        note = "Renamed to `random` to avoid conflict with the new `gen` keyword in Rust 2024."
    )]
    fn r#gen<T>(&mut self) -> T
    where
        StandardUniform: Distribution<T>,
    {
        self.random()
    }

    /// Alias for [`Rng::random_range`].
    #[inline]
    #[deprecated(since = "0.9.0", note = "Renamed to `random_range`")]
    fn gen_range<T, R>(&mut self, range: R) -> T
    where
        T: SampleUniform,
        R: SampleRange<T>,
    {
        self.random_range(range)
    }

    /// Alias for [`Rng::random_bool`].
    #[inline]
    #[deprecated(since = "0.9.0", note = "Renamed to `random_bool`")]
    fn gen_bool(&mut self, p: f64) -> bool {
        self.random_bool(p)
    }

    /// Alias for [`Rng::random_ratio`].
    #[inline]
    #[deprecated(since = "0.9.0", note = "Renamed to `random_ratio`")]
    fn gen_ratio(&mut self, numerator: u32, denominator: u32) -> bool {
        self.random_ratio(numerator, denominator)
    }
}
// Blanket implementation: every `RngCore` automatically gets the user-level
// `Rng` API, since all `Rng` methods have default implementations.
impl<R: RngCore + ?Sized> Rng for R {}

/// Types which may be filled with random data
///
/// This trait allows arrays to be efficiently filled with random data.
///
/// Implementations are expected to be portable across machines unless
/// clearly documented otherwise (see the
/// [Chapter on Portability](https://rust-random.github.io/book/portability.html)).
pub trait Fill {
    /// Fill self with random data
    fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R);
}
// Implement `Fill` for slices of each listed element type by sampling every
// element individually via `rng.random()` (the StandardUniform distribution).
// The recursive arm lets the macro accept a comma-separated list of types.
macro_rules! impl_fill_each {
    () => {};
    ($t:ty) => {
        impl Fill for [$t] {
            fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R) {
                for elt in self.iter_mut() {
                    *elt = rng.random();
                }
            }
        }
    };
    ($t:ty, $($tt:ty,)*) => {
        impl_fill_each!($t);
        impl_fill_each!($($tt,)*);
    };
}

// Element-wise sampling for types where a raw byte-copy would not produce a
// valid or correctly-distributed value (`bool`, `char`, floats).
impl_fill_each!(bool, char, f32, f64,);

// Byte slices can be filled directly from the RNG's byte output.
impl Fill for [u8] {
    fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R) {
        rng.fill_bytes(self)
    }
}
/// Call target for unsafe macros
// Expanding a call to this `const unsafe fn` forces invocations of the macro
// below to appear inside an `unsafe` context, making the macro's safety
// requirement explicit at the call site.
const unsafe fn __unsafe() {}

/// Implement `Fill` for given type `$t`.
///
/// # Safety
/// All bit patterns of `[u8; size_of::<$t>()]` must represent values of `$t`.
macro_rules! impl_fill {
    () => {};
    ($t:ty) => {{
        // Force caller to wrap with an `unsafe` block
        __unsafe();
        impl Fill for [$t] {
            fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R) {
                if self.len() > 0 {
                    let size = mem::size_of_val(self);
                    rng.fill_bytes(
                        // SAFETY: `self` non-null and valid for reads and writes within its `size`
                        // bytes. `self` meets the alignment requirements of `&mut [u8]`.
                        // The contents of `self` are initialized. Both `[u8]` and `[$t]` are valid
                        // for all bit-patterns of their contents (note that the SAFETY requirement
                        // on callers of this macro). `self` is not borrowed.
                        unsafe {
                            slice::from_raw_parts_mut(self.as_mut_ptr()
                                as *mut u8,
                                size
                            )
                        }
                    );
                    // Reinterpret the filled bytes as little-endian values:
                    // `to_le` is a no-op on LE targets and byte-swaps on BE,
                    // keeping results identical across platforms.
                    for x in self {
                        *x = x.to_le();
                    }
                }
            }
        }
        impl Fill for [Wrapping<$t>] {
            fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R) {
                if self.len() > 0 {
                    let size = self.len() * mem::size_of::<$t>();
                    rng.fill_bytes(
                        // SAFETY: `self` non-null and valid for reads and writes within its `size`
                        // bytes. `self` meets the alignment requirements of `&mut [u8]`.
                        // The contents of `self` are initialized. Both `[u8]` and `[$t]` are valid
                        // for all bit-patterns of their contents (note that the SAFETY requirement
                        // on callers of this macro). `self` is not borrowed.
                        unsafe {
                            slice::from_raw_parts_mut(self.as_mut_ptr()
                                as *mut u8,
                                size
                            )
                        }
                    );
                    // Same little-endian normalization as above, unwrapping
                    // and re-wrapping the `Wrapping` newtype.
                    for x in self {
                        *x = Wrapping(x.0.to_le());
                    }
                }
            }
        }
    }};
    ($t:ty, $($tt:ty,)*) => {{
        impl_fill!($t);
        // TODO: this could replace above impl once Rust #32463 is fixed
        // impl_fill!(Wrapping<$t>);
        impl_fill!($($tt,)*);
    }}
}

// SAFETY: All bit patterns of `[u8; size_of::<$t>()]` represent values of `u*`.
const _: () = unsafe { impl_fill!(u16, u32, u64, u128,) };
// SAFETY: All bit patterns of `[u8; size_of::<$t>()]` represent values of `i*`.
const _: () = unsafe { impl_fill!(i8, i16, i32, i64, i128,) };
// Arrays delegate to the corresponding slice implementation.
impl<T, const N: usize> Fill for [T; N]
where
    [T]: Fill,
{
    fn fill<R: Rng + ?Sized>(&mut self, rng: &mut R) {
        // Fully-qualified call: plain method syntax on the array receiver
        // would re-select this same impl (infinite recursion) instead of
        // forwarding to the unsized `[T]` impl.
        <[T] as Fill>::fill(self, rng)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::test::{const_rng, rng};
    #[cfg(feature = "alloc")]
    use alloc::boxed::Box;

    /// The default `fill_bytes` path must fill every requested byte, for
    /// lengths covering every remainder mod 8.
    #[test]
    fn test_fill_bytes_default() {
        let mut r = const_rng(0x11_22_33_44_55_66_77_88);
        // check every remainder mod 8, both in small and big vectors.
        let lengths = [0, 1, 2, 3, 4, 5, 6, 7, 80, 81, 82, 83, 84, 85, 86, 87];
        for &n in lengths.iter() {
            let mut buffer = [0u8; 87];
            let v = &mut buffer[0..n];
            r.fill_bytes(v);
            // use this to get nicer error messages.
            for (i, &byte) in v.iter().enumerate() {
                if byte == 0 {
                    panic!("byte {} of {} is zero", i, n)
                }
            }
        }
    }

    /// `fill` on integer/float slices must agree with the documented
    /// little-endian byte interpretation and with `random()` for floats.
    #[test]
    fn test_fill() {
        let x = 9041086907909331047; // a random u64
        let mut rng = const_rng(x);

        // Convert to byte sequence and back to u64; byte-swap twice if BE.
        let mut array = [0u64; 2];
        rng.fill(&mut array[..]);
        assert_eq!(array, [x, x]);
        assert_eq!(rng.next_u64(), x);

        // Convert to bytes then u32 in LE order
        let mut array = [0u32; 2];
        rng.fill(&mut array[..]);
        assert_eq!(array, [x as u32, (x >> 32) as u32]);
        assert_eq!(rng.next_u32(), x as u32);

        // Check equivalence using wrapped arrays
        let mut warray = [Wrapping(0u32); 2];
        rng.fill(&mut warray[..]);
        assert_eq!(array[0], warray[0].0);
        assert_eq!(array[1], warray[1].0);

        // Check equivalence for generated floats
        let mut array = [0f32; 2];
        rng.fill(&mut array);
        let arr2: [f32; 2] = rng.random();
        assert_eq!(array, arr2);
    }

    /// Filling an empty slice (or array) must be a no-op, not a panic.
    #[test]
    fn test_fill_empty() {
        let mut array = [0u32; 0];
        let mut rng = rng(1);
        rng.fill(&mut array);
        rng.fill(&mut array[..]);
    }

    /// `random_range` over integers: results stay in range; single-value
    /// ranges yield that value.
    #[test]
    fn test_random_range_int() {
        let mut r = rng(101);
        for _ in 0..1000 {
            let a = r.random_range(-4711..17);
            assert!((-4711..17).contains(&a));
            let a: i8 = r.random_range(-3..42);
            assert!((-3..42).contains(&a));
            let a: u16 = r.random_range(10..99);
            assert!((10..99).contains(&a));
            let a: i32 = r.random_range(-100..2000);
            assert!((-100..2000).contains(&a));
            let a: u32 = r.random_range(12..=24);
            assert!((12..=24).contains(&a));

            assert_eq!(r.random_range(..1u32), 0u32);
            assert_eq!(r.random_range(-12i64..-11), -12i64);
            assert_eq!(r.random_range(3_000_000..3_000_001), 3_000_000);
        }
    }

    /// `random_range` over floats: results stay in range; degenerate
    /// (single-point) inclusive ranges yield exactly that point.
    #[test]
    fn test_random_range_float() {
        let mut r = rng(101);
        for _ in 0..1000 {
            let a = r.random_range(-4.5..1.7);
            assert!((-4.5..1.7).contains(&a));
            let a = r.random_range(-1.1..=-0.3);
            assert!((-1.1..=-0.3).contains(&a));

            assert_eq!(r.random_range(0.0f32..=0.0), 0.);
            assert_eq!(r.random_range(-11.0..=-11.0), -11.);
            assert_eq!(r.random_range(3_000_000.0..=3_000_000.0), 3_000_000.);
        }
    }

    /// Empty (reversed) integer range must panic, per `random_range` docs.
    #[test]
    #[should_panic]
    #[allow(clippy::reversed_empty_ranges)]
    fn test_random_range_panic_int() {
        let mut r = rng(102);
        r.random_range(5..-2);
    }

    /// Empty (reversed) usize range must panic as well.
    #[test]
    #[should_panic]
    #[allow(clippy::reversed_empty_ranges)]
    fn test_random_range_panic_usize() {
        let mut r = rng(103);
        r.random_range(5..2);
    }

    /// Probabilities 0 and 1 are exact: always false / always true.
    #[test]
    #[allow(clippy::bool_assert_comparison)]
    fn test_random_bool() {
        let mut r = rng(105);
        for _ in 0..5 {
            assert_eq!(r.random_bool(0.0), false);
            assert_eq!(r.random_bool(1.0), true);
        }
    }

    /// `impl Rng` arguments accept `&mut R` (mutable reference) callers.
    #[test]
    fn test_rng_mut_ref() {
        fn use_rng(mut r: impl Rng) {
            let _ = r.next_u32();
        }

        let mut rng = rng(109);
        use_rng(&mut rng);
    }

    /// `Rng` methods work through a `&mut dyn RngCore` trait object.
    #[test]
    fn test_rng_trait_object() {
        use crate::distr::{Distribution, StandardUniform};
        let mut rng = rng(109);
        let mut r = &mut rng as &mut dyn RngCore;
        r.next_u32();
        r.random::<i32>();
        assert_eq!(r.random_range(0..1), 0);
        let _c: u8 = StandardUniform.sample(&mut r);
    }

    /// `Rng` methods work through a `Box<dyn RngCore>` trait object.
    #[test]
    #[cfg(feature = "alloc")]
    fn test_rng_boxed_trait() {
        use crate::distr::{Distribution, StandardUniform};
        let rng = rng(110);
        let mut r = Box::new(rng) as Box<dyn RngCore>;
        r.next_u32();
        r.random::<i32>();
        assert_eq!(r.random_range(0..1), 0);
        let _c: u8 = StandardUniform.sample(&mut r);
    }

    /// The observed frequency of `random_ratio(3, 10)` over many samples
    /// should be close to 3/10.
    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    fn test_gen_ratio_average() {
        const NUM: u32 = 3;
        const DENOM: u32 = 10;
        const N: u32 = 100_000;
        let mut sum: u32 = 0;
        let mut rng = rng(111);
        for _ in 0..N {
            if rng.random_ratio(NUM, DENOM) {
                sum += 1;
            }
        }
        // Have Binomial(N, NUM/DENOM) distribution
        let expected = (NUM * N) / DENOM; // exact integer
        // Use `abs_diff`: the former `(sum - expected) as i32` underflows in
        // u32 (a debug-build overflow panic) whenever `sum < expected`.
        assert!(sum.abs_diff(expected) < 500);
    }
}

80
vendor/rand/src/rngs/mock.rs vendored Normal file
View File

@@ -0,0 +1,80 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Mock random number generator
#![allow(deprecated)]
use rand_core::{impls, RngCore};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// A mock generator yielding very predictable output
///
/// This generates an arithmetic sequence (i.e. adds a constant each step)
/// over a `u64` number, using wrapping arithmetic. If the increment is 0
/// the generator yields a constant.
///
/// Other integer types (64-bit and smaller) are produced via cast from `u64`.
///
/// Other types are produced via their implementation of [`Rng`](crate::Rng) or
/// [`Distribution`](crate::distr::Distribution).
/// Output values may not be intuitive and may change in future releases but
/// are considered
/// [portable](https://rust-random.github.io/book/portability.html).
/// (`bool` output is true when bit `1u64 << 31` is set.)
///
/// # Example
///
/// ```
/// # #![allow(deprecated)]
/// use rand::Rng;
/// use rand::rngs::mock::StepRng;
///
/// let mut my_rng = StepRng::new(2, 1);
/// let sample: [u64; 3] = my_rng.random();
/// assert_eq!(sample, [2, 3, 4]);
/// ```
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[deprecated(since = "0.9.2", note = "Deprecated without replacement")]
pub struct StepRng {
    // Next value to be returned by `next_u64`.
    v: u64,
    // Amount added (wrapping) to `v` after each `next_u64` call; zero
    // yields a constant generator.
    a: u64,
}

impl StepRng {
    /// Create a `StepRng`, yielding an arithmetic sequence starting with
    /// `initial` and incremented by `increment` each time.
    pub fn new(initial: u64, increment: u64) -> Self {
        StepRng {
            v: initial,
            a: increment,
        }
    }
}
impl RngCore for StepRng {
    #[inline]
    fn next_u32(&mut self) -> u32 {
        // Truncate the 64-bit output to its low 32 bits.
        self.next_u64() as u32
    }

    #[inline]
    fn next_u64(&mut self) -> u64 {
        // Emit the current value, then advance by the (wrapping) increment.
        let current = self.v;
        self.v = current.wrapping_add(self.a);
        current
    }

    #[inline]
    fn fill_bytes(&mut self, dst: &mut [u8]) {
        // Derive bytes from successive `next_u64` outputs via the
        // rand_core helper.
        impls::fill_bytes_via_next(self, dst)
    }
}

110
vendor/rand/src/rngs/mod.rs vendored Normal file
View File

@@ -0,0 +1,110 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Random number generators and adapters
//!
//! This crate provides a small selection of non-[portable] generators.
//! See also [Types of generators] and [Our RNGs] in the book.
//!
//! ## Generators
//!
//! This crate provides a small selection of non-[portable] random number generators:
//!
//! - [`OsRng`] is a stateless interface over the operating system's random number
//! source. This is typically secure with some form of periodic re-seeding.
//! - [`ThreadRng`], provided by [`crate::rng()`], is a handle to a
//! thread-local generator with periodic seeding from [`OsRng`]. Because this
//! is local, it is typically much faster than [`OsRng`]. It should be
//! secure, but see documentation on [`ThreadRng`].
//! - [`StdRng`] is a CSPRNG chosen for good performance and trust of security
//! (based on reviews, maturity and usage). The current algorithm is ChaCha12,
//! which is well established and rigorously analysed.
//! [`StdRng`] is the deterministic generator used by [`ThreadRng`] but
//! without the periodic reseeding or thread-local management.
//! - [`SmallRng`] is a relatively simple, insecure generator designed to be
//! fast, use little memory, and pass various statistical tests of
//! randomness quality.
//!
//! The algorithms selected for [`StdRng`] and [`SmallRng`] may change in any
//! release and may be platform-dependent, therefore they are not
//! [reproducible][portable].
//!
//! ### Additional generators
//!
//! - The [`rdrand`] crate provides an interface to the RDRAND and RDSEED
//! instructions available in modern Intel and AMD CPUs.
//! - The [`rand_jitter`] crate provides a user-space implementation of
//! entropy harvesting from CPU timer jitter, but is very slow and has
//! [security issues](https://github.com/rust-random/rand/issues/699).
//! - The [`rand_chacha`] crate provides [portable] implementations of
//! generators derived from the [ChaCha] family of stream ciphers
//! - The [`rand_pcg`] crate provides [portable] implementations of a subset
//! of the [PCG] family of small, insecure generators
//! - The [`rand_xoshiro`] crate provides [portable] implementations of the
//! [xoshiro] family of small, insecure generators
//!
//! For more, search [crates with the `rng` tag].
//!
//! ## Traits and functionality
//!
//! All generators implement [`RngCore`] and thus also [`Rng`][crate::Rng].
//! See also the [Random Values] chapter in the book.
//!
//! Secure RNGs may additionally implement the [`CryptoRng`] trait.
//!
//! Use the [`rand_core`] crate when implementing your own RNGs.
//!
//! [portable]: https://rust-random.github.io/book/crate-reprod.html
//! [Types of generators]: https://rust-random.github.io/book/guide-gen.html
//! [Our RNGs]: https://rust-random.github.io/book/guide-rngs.html
//! [Random Values]: https://rust-random.github.io/book/guide-values.html
//! [`Rng`]: crate::Rng
//! [`RngCore`]: crate::RngCore
//! [`CryptoRng`]: crate::CryptoRng
//! [`SeedableRng`]: crate::SeedableRng
//! [`rdrand`]: https://crates.io/crates/rdrand
//! [`rand_jitter`]: https://crates.io/crates/rand_jitter
//! [`rand_chacha`]: https://crates.io/crates/rand_chacha
//! [`rand_pcg`]: https://crates.io/crates/rand_pcg
//! [`rand_xoshiro`]: https://crates.io/crates/rand_xoshiro
//! [crates with the `rng` tag]: https://crates.io/keywords/rng
//! [chacha]: https://cr.yp.to/chacha.html
//! [PCG]: https://www.pcg-random.org/
//! [xoshiro]: https://prng.di.unimi.it/
mod reseeding;
pub use reseeding::ReseedingRng;
#[deprecated(since = "0.9.2")]
pub mod mock; // Public so we don't export `StepRng` directly, making it a bit
// more clear it is intended for testing.
#[cfg(feature = "small_rng")]
mod small;
#[cfg(all(
feature = "small_rng",
any(target_pointer_width = "32", target_pointer_width = "16")
))]
mod xoshiro128plusplus;
#[cfg(all(feature = "small_rng", target_pointer_width = "64"))]
mod xoshiro256plusplus;
#[cfg(feature = "std_rng")]
mod std;
#[cfg(feature = "thread_rng")]
pub(crate) mod thread;
#[cfg(feature = "small_rng")]
pub use self::small::SmallRng;
#[cfg(feature = "std_rng")]
pub use self::std::StdRng;
#[cfg(feature = "thread_rng")]
pub use self::thread::ThreadRng;
#[cfg(feature = "os_rng")]
pub use rand_core::OsRng;

295
vendor/rand/src/rngs/reseeding.rs vendored Normal file
View File

@@ -0,0 +1,295 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A wrapper around another PRNG that reseeds it after it
//! generates a certain number of random bytes.
use core::mem::size_of_val;
use rand_core::block::{BlockRng, BlockRngCore, CryptoBlockRng};
use rand_core::{CryptoRng, RngCore, SeedableRng, TryCryptoRng, TryRngCore};
/// A wrapper around any PRNG that implements [`BlockRngCore`], that adds the
/// ability to reseed it.
///
/// `ReseedingRng` reseeds the underlying PRNG in the following cases:
///
/// - On a manual call to [`reseed()`].
/// - After `clone()`, the clone will be reseeded on first use.
/// - After the PRNG has generated a configurable number of random bytes.
///
/// # When should reseeding after a fixed number of generated bytes be used?
///
/// Reseeding after a fixed number of generated bytes is never strictly
/// *necessary*. Cryptographic PRNGs don't have a limited number of bytes they
/// can output, or at least not a limit reachable in any practical way. There is
/// no such thing as 'running out of entropy'.
///
/// Occasionally reseeding can be seen as some form of 'security in depth'. Even
/// if in the future a cryptographic weakness is found in the CSPRNG being used,
/// or a flaw in the implementation, occasionally reseeding should make
/// exploiting it much more difficult or even impossible.
///
/// Use [`ReseedingRng::new`] with a `threshold` of `0` to disable reseeding
/// after a fixed number of generated bytes.
///
/// # Error handling
///
/// Although unlikely, reseeding the wrapped PRNG can fail. `ReseedingRng` will
/// never panic but try to handle the error intelligently through some
/// combination of retrying and delaying reseeding until later.
/// If handling the source error fails `ReseedingRng` will continue generating
/// data from the wrapped PRNG without reseeding.
///
/// Manually calling [`reseed()`] will not have this retry or delay logic, but
/// reports the error.
///
/// # Example
///
/// ```
/// use rand::prelude::*;
/// use rand_chacha::ChaCha20Core; // Internal part of ChaChaRng that
/// // implements BlockRngCore
/// use rand::rngs::OsRng;
/// use rand::rngs::ReseedingRng;
///
/// let mut reseeding_rng = ReseedingRng::<ChaCha20Core, _>::new(0, OsRng).unwrap();
///
/// println!("{}", reseeding_rng.random::<u64>());
///
/// let mut cloned_rng = reseeding_rng.clone();
/// assert!(reseeding_rng.random::<u64>() != cloned_rng.random::<u64>());
/// ```
///
/// [`BlockRngCore`]: rand_core::block::BlockRngCore
/// [`ReseedingRng::new`]: ReseedingRng::new
/// [`reseed()`]: ReseedingRng::reseed
// Newtype over `BlockRng<ReseedingCore<..>>`: `BlockRng` buffers block output
// while `ReseedingCore` counts down the bytes remaining until the next reseed.
#[derive(Debug)]
pub struct ReseedingRng<R, Rsdr>(BlockRng<ReseedingCore<R, Rsdr>>)
where
    R: BlockRngCore + SeedableRng,
    Rsdr: TryRngCore;
impl<R, Rsdr> ReseedingRng<R, Rsdr>
where
    R: BlockRngCore + SeedableRng,
    Rsdr: TryRngCore,
{
    /// Create a new `ReseedingRng` from an existing PRNG, combined with a RNG
    /// to use as reseeder.
    ///
    /// `threshold` sets the number of generated bytes after which to reseed the
    /// PRNG. Set it to zero to never reseed based on the number of generated
    /// values.
    pub fn new(threshold: u64, reseeder: Rsdr) -> Result<Self, Rsdr::Error> {
        let core = ReseedingCore::new(threshold, reseeder)?;
        Ok(Self(BlockRng::new(core)))
    }

    /// Immediately reseed the generator
    ///
    /// This discards any remaining random data in the cache.
    pub fn reseed(&mut self) -> Result<(), Rsdr::Error> {
        // Drop buffered output first so the next value comes from the
        // freshly-seeded PRNG.
        self.0.reset();
        self.0.core.reseed()
    }
}
// TODO: this should be implemented for any type where the inner type
// implements RngCore, but we can't specify that because ReseedingCore is private
// All methods forward to the inner `BlockRng`, which serves values from its
// buffer and calls `ReseedingCore::generate` to refill (and maybe reseed).
impl<R, Rsdr> RngCore for ReseedingRng<R, Rsdr>
where
    R: BlockRngCore<Item = u32> + SeedableRng,
    Rsdr: TryRngCore,
{
    #[inline(always)]
    fn next_u32(&mut self) -> u32 {
        self.0.next_u32()
    }
    #[inline(always)]
    fn next_u64(&mut self) -> u64 {
        self.0.next_u64()
    }
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        self.0.fill_bytes(dest)
    }
}
impl<R, Rsdr> Clone for ReseedingRng<R, Rsdr>
where
    R: BlockRngCore + SeedableRng + Clone,
    Rsdr: TryRngCore + Clone,
{
    fn clone(&self) -> ReseedingRng<R, Rsdr> {
        // A fresh `BlockRng` starts with an empty results buffer — exactly
        // the state we want, and simpler than cloning + resetting the index.
        let core = self.0.core.clone();
        ReseedingRng(BlockRng::new(core))
    }
}
// Marker impl: the composite is cryptographically secure only when both the
// wrapped block core and the reseeding source are (hence the extra bounds).
impl<R, Rsdr> CryptoRng for ReseedingRng<R, Rsdr>
where
    R: BlockRngCore<Item = u32> + SeedableRng + CryptoBlockRng,
    Rsdr: TryCryptoRng,
{
}
#[derive(Debug)]
struct ReseedingCore<R, Rsdr> {
    /// The wrapped PRNG core.
    inner: R,
    /// Source of fresh seeds.
    reseeder: Rsdr,
    /// Reseed interval in bytes; `i64::MAX` encodes "never" (threshold 0).
    threshold: i64,
    /// Counts down as bytes are generated; a reseed is attempted at <= 0.
    bytes_until_reseed: i64,
}
impl<R, Rsdr> BlockRngCore for ReseedingCore<R, Rsdr>
where
    R: BlockRngCore + SeedableRng,
    Rsdr: TryRngCore,
{
    type Item = <R as BlockRngCore>::Item;
    type Results = <R as BlockRngCore>::Results;
    // Generate one block of output, reseeding first if the byte budget is
    // exhausted.
    fn generate(&mut self, results: &mut Self::Results) {
        if self.bytes_until_reseed <= 0 {
            // Hot-path optimization: the (rare) reseed branch lives in a
            // separate `#[inline(never)]` function, and returning directly
            // from it produces better code than falling through here.
            return self.reseed_and_generate(results);
        }
        let num_bytes = size_of_val(results.as_ref());
        self.bytes_until_reseed -= num_bytes as i64;
        self.inner.generate(results);
    }
}
impl<R, Rsdr> ReseedingCore<R, Rsdr>
where
    R: BlockRngCore + SeedableRng,
    Rsdr: TryRngCore,
{
    /// Create a new `ReseedingCore`.
    ///
    /// `threshold` is the maximum number of bytes produced by
    /// [`BlockRngCore::generate`] before attempting reseeding; zero disables
    /// byte-count based reseeding.
    fn new(threshold: u64, mut reseeder: Rsdr) -> Result<Self, Rsdr::Error> {
        // We count internally with an `i64`. Both a threshold of 0 ("no
        // limit") and any value above `i64::MAX` (generating that much takes
        // centuries on current hardware) are mapped to `i64::MAX`.
        let threshold = match threshold {
            0 => i64::MAX,
            t if t <= i64::MAX as u64 => t as i64,
            _ => i64::MAX,
        };
        let inner = R::try_from_rng(&mut reseeder)?;
        Ok(ReseedingCore {
            inner,
            reseeder,
            threshold,
            bytes_until_reseed: threshold,
        })
    }
    /// Reseed the internal PRNG, resetting the byte countdown on success.
    fn reseed(&mut self) -> Result<(), Rsdr::Error> {
        let fresh = R::try_from_rng(&mut self.reseeder)?;
        self.inner = fresh;
        self.bytes_until_reseed = self.threshold;
        Ok(())
    }
    /// Reseed (best-effort), then generate one block of output.
    ///
    /// Kept out-of-line so the hot path in `generate` stays small.
    #[inline(never)]
    fn reseed_and_generate(&mut self, results: &mut <Self as BlockRngCore>::Results) {
        trace!("Reseeding RNG (periodic reseed)");
        let num_bytes = size_of_val(results.as_ref());
        // On failure, keep the old PRNG state and simply retry after the
        // next full threshold's worth of output.
        if let Err(e) = self.reseed() {
            warn!("Reseeding RNG failed: {}", e);
            let _ = e;
        }
        self.bytes_until_reseed = self.threshold - num_bytes as i64;
        self.inner.generate(results);
    }
}
impl<R, Rsdr> Clone for ReseedingCore<R, Rsdr>
where
    R: BlockRngCore + SeedableRng + Clone,
    Rsdr: TryRngCore + Clone,
{
    fn clone(&self) -> ReseedingCore<R, Rsdr> {
        ReseedingCore {
            // Zero budget forces the clone to reseed before producing any
            // output, so it does not mirror the original's stream.
            bytes_until_reseed: 0,
            threshold: self.threshold,
            inner: self.inner.clone(),
            reseeder: self.reseeder.clone(),
        }
    }
}
// Marker impl mirroring the `CryptoRng` impl above: secure only when both
// the wrapped core and the reseeding source are.
impl<R, Rsdr> CryptoBlockRng for ReseedingCore<R, Rsdr>
where
    R: BlockRngCore<Item = u32> + SeedableRng + CryptoBlockRng,
    Rsdr: TryCryptoRng,
{
}
#[cfg(feature = "std_rng")]
#[cfg(test)]
mod test {
    use crate::rngs::std::Core;
    use crate::test::const_rng;
    use crate::Rng;
    use super::ReseedingRng;
    // With a constant reseeding source, every reseed restores the same
    // state, so output must repeat with the buffer period.
    #[test]
    fn test_reseeding() {
        let zero = const_rng(0);
        let thresh = 1; // reseed every time the buffer is exhausted
        let mut reseeding = ReseedingRng::<Core, _>::new(thresh, zero).unwrap();
        // RNG buffer size is [u32; 64]
        // Debug is only implemented up to length 32 so use two arrays
        let mut buf = ([0u32; 32], [0u32; 32]);
        reseeding.fill(&mut buf.0);
        reseeding.fill(&mut buf.1);
        let seq = buf;
        for _ in 0..10 {
            reseeding.fill(&mut buf.0);
            reseeding.fill(&mut buf.1);
            assert_eq!(buf, seq);
        }
    }
    // A clone reseeds on first use; with a constant reseeder this reproduces
    // the original's post-seed stream from its very first value.
    #[test]
    #[allow(clippy::redundant_clone)]
    fn test_clone_reseeding() {
        let zero = const_rng(0);
        let mut rng1 = ReseedingRng::<Core, _>::new(32 * 4, zero).unwrap();
        let first: u32 = rng1.random();
        for _ in 0..10 {
            let _ = rng1.random::<u32>();
        }
        let mut rng2 = rng1.clone();
        assert_eq!(first, rng2.random::<u32>());
    }
}

120
vendor/rand/src/rngs/small.rs vendored Normal file
View File

@@ -0,0 +1,120 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A small fast RNG
use rand_core::{RngCore, SeedableRng};
// The concrete algorithm behind `SmallRng`, chosen per pointer width:
// xoshiro128++ (u32 state words) on 16/32-bit targets, xoshiro256++ on
// 64-bit targets.
#[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))]
type Rng = super::xoshiro128plusplus::Xoshiro128PlusPlus;
#[cfg(target_pointer_width = "64")]
type Rng = super::xoshiro256plusplus::Xoshiro256PlusPlus;
/// A small-state, fast, non-crypto, non-portable PRNG
///
/// This is the "standard small" RNG, a generator with the following properties:
///
/// - Non-[portable]: any future library version may replace the algorithm
/// and results may be platform-dependent.
/// (For a small portable generator, use the [rand_pcg] or [rand_xoshiro] crate.)
/// - Non-cryptographic: output is easy to predict (insecure)
/// - [Quality]: statistically good quality
/// - Fast: the RNG is fast for both bulk generation and single values, with
/// consistent cost of method calls
/// - Fast initialization
/// - Small state: little memory usage (current state size is 16-32 bytes
/// depending on platform)
///
/// The current algorithm is
/// `Xoshiro256PlusPlus` on 64-bit platforms and `Xoshiro128PlusPlus` on 32-bit
/// platforms. Both are also implemented by the [rand_xoshiro] crate.
///
/// ## Seeding (construction)
///
/// This generator implements the [`SeedableRng`] trait. All methods are
/// suitable for seeding, but note that, even with a fixed seed, output is not
/// [portable]. Some suggestions:
///
/// 1. To automatically seed with a unique seed, use [`SeedableRng::from_rng`]:
/// ```
/// use rand::SeedableRng;
/// use rand::rngs::SmallRng;
/// let rng = SmallRng::from_rng(&mut rand::rng());
/// # let _: SmallRng = rng;
/// ```
/// or [`SeedableRng::from_os_rng`]:
/// ```
/// # use rand::SeedableRng;
/// # use rand::rngs::SmallRng;
/// let rng = SmallRng::from_os_rng();
/// # let _: SmallRng = rng;
/// ```
/// 2. To use a deterministic integral seed, use `seed_from_u64`. This uses a
/// hash function internally to yield a (typically) good seed from any
/// input.
/// ```
/// # use rand::{SeedableRng, rngs::SmallRng};
/// let rng = SmallRng::seed_from_u64(1);
/// # let _: SmallRng = rng;
/// ```
/// 3. To seed deterministically from text or other input, use [`rand_seeder`].
///
/// See also [Seeding RNGs] in the book.
///
/// ## Generation
///
/// The generators implements [`RngCore`] and thus also [`Rng`][crate::Rng].
/// See also the [Random Values] chapter in the book.
///
/// [portable]: https://rust-random.github.io/book/crate-reprod.html
/// [Seeding RNGs]: https://rust-random.github.io/book/guide-seeding.html
/// [Random Values]: https://rust-random.github.io/book/guide-values.html
/// [Quality]: https://rust-random.github.io/book/guide-rngs.html#quality
/// [`StdRng`]: crate::rngs::StdRng
/// [rand_pcg]: https://crates.io/crates/rand_pcg
/// [rand_xoshiro]: https://crates.io/crates/rand_xoshiro
/// [`rand_chacha::ChaCha8Rng`]: https://docs.rs/rand_chacha/latest/rand_chacha/struct.ChaCha8Rng.html
/// [`rand_seeder`]: https://docs.rs/rand_seeder/latest/rand_seeder/
// Newtype wrapper keeping the platform-dependent algorithm (the `Rng` alias)
// out of the public API, so it can change without an API break.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SmallRng(Rng);
impl SeedableRng for SmallRng {
    // Fix to 256 bits. Changing this is a breaking change!
    type Seed = [u8; 32];

    #[inline(always)]
    fn from_seed(seed: Self::Seed) -> Self {
        // The inner generator's seed is 16 bytes on 16/32-bit targets and
        // 32 bytes on 64-bit targets; take the prefix of matching length.
        // With MSRV >= 1.77: let seed = *seed.first_chunk().unwrap()
        const LEN: usize = core::mem::size_of::<<Rng as SeedableRng>::Seed>();
        let truncated: <Rng as SeedableRng>::Seed = (&seed[..LEN]).try_into().unwrap();
        SmallRng(Rng::from_seed(truncated))
    }

    #[inline(always)]
    fn seed_from_u64(state: u64) -> Self {
        // The inner generators hash a u64 seed themselves; just forward.
        SmallRng(Rng::seed_from_u64(state))
    }
}
// Forwarding impl: `SmallRng` produces exactly the output of the inner
// platform-selected generator.
impl RngCore for SmallRng {
    #[inline(always)]
    fn next_u32(&mut self) -> u32 {
        self.0.next_u32()
    }
    #[inline(always)]
    fn next_u64(&mut self) -> u64 {
        self.0.next_u64()
    }
    #[inline(always)]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        self.0.fill_bytes(dest)
    }
}

124
vendor/rand/src/rngs/std.rs vendored Normal file
View File

@@ -0,0 +1,124 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The standard RNG
use rand_core::{CryptoRng, RngCore, SeedableRng};
#[cfg(any(test, feature = "os_rng"))]
pub(crate) use rand_chacha::ChaCha12Core as Core;
use rand_chacha::ChaCha12Rng as Rng;
/// A strong, fast (amortized), non-portable RNG
///
/// This is the "standard" RNG, a generator with the following properties:
///
/// - Non-[portable]: any future library version may replace the algorithm
/// and results may be platform-dependent.
/// (For a portable version, use the [rand_chacha] crate directly.)
/// - [CSPRNG]: statistically good quality of randomness and [unpredictable]
/// - Fast ([amortized](https://en.wikipedia.org/wiki/Amortized_analysis)):
/// the RNG is fast for bulk generation, but the cost of method calls is not
/// consistent due to usage of an output buffer.
///
/// The current algorithm used is the ChaCha block cipher with 12 rounds. Please
/// see this relevant [rand issue] for the discussion. This may change as new
/// evidence of cipher security and performance becomes available.
///
/// ## Seeding (construction)
///
/// This generator implements the [`SeedableRng`] trait. Any method may be used,
/// but note that `seed_from_u64` is not suitable for usage where security is
/// important. Also note that, even with a fixed seed, output is not [portable].
///
/// Using a fresh seed **direct from the OS** is the most secure option:
/// ```
/// # use rand::{SeedableRng, rngs::StdRng};
/// let rng = StdRng::from_os_rng();
/// # let _: StdRng = rng;
/// ```
///
/// Seeding via [`rand::rng()`](crate::rng()) may be faster:
/// ```
/// # use rand::{SeedableRng, rngs::StdRng};
/// let rng = StdRng::from_rng(&mut rand::rng());
/// # let _: StdRng = rng;
/// ```
///
/// Any [`SeedableRng`] method may be used, but note that `seed_from_u64` is not
/// suitable where security is required. See also [Seeding RNGs] in the book.
///
/// ## Generation
///
/// The generators implements [`RngCore`] and thus also [`Rng`][crate::Rng].
/// See also the [Random Values] chapter in the book.
///
/// [portable]: https://rust-random.github.io/book/crate-reprod.html
/// [Seeding RNGs]: https://rust-random.github.io/book/guide-seeding.html
/// [unpredictable]: https://rust-random.github.io/book/guide-rngs.html#security
/// [Random Values]: https://rust-random.github.io/book/guide-values.html
/// [CSPRNG]: https://rust-random.github.io/book/guide-gen.html#cryptographically-secure-pseudo-random-number-generator
/// [rand_chacha]: https://crates.io/crates/rand_chacha
/// [rand issue]: https://github.com/rust-random/rand/issues/932
// Newtype over ChaCha12 (`Rng` alias above): keeps the concrete algorithm a
// private detail so it can be swapped without an API break (output would
// change, as documented — StdRng is non-portable).
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct StdRng(Rng);
// Forwarding impl: all output comes directly from the inner ChaCha12 RNG.
impl RngCore for StdRng {
    #[inline(always)]
    fn next_u32(&mut self) -> u32 {
        self.0.next_u32()
    }
    #[inline(always)]
    fn next_u64(&mut self) -> u64 {
        self.0.next_u64()
    }
    #[inline(always)]
    fn fill_bytes(&mut self, dst: &mut [u8]) {
        self.0.fill_bytes(dst)
    }
}
impl SeedableRng for StdRng {
    // Fix to 256 bits. Changing this is a breaking change!
    type Seed = [u8; 32];
    /// Create a `StdRng` directly from a 256-bit seed (passed through to the
    /// inner generator unmodified).
    #[inline(always)]
    fn from_seed(seed: Self::Seed) -> Self {
        StdRng(Rng::from_seed(seed))
    }
}
// Marker: the inner ChaCha12 generator is a CSPRNG, so `StdRng` may be used
// where `CryptoRng` is required.
impl CryptoRng for StdRng {}
#[cfg(test)]
mod test {
    use crate::rngs::StdRng;
    use crate::{RngCore, SeedableRng};
    // Pins the first outputs for a fixed seed (and for an RNG seeded from
    // that RNG) to detect unintended algorithm changes.
    #[test]
    fn test_stdrng_construction() {
        // Test value-stability of StdRng. This is expected to break any time
        // the algorithm is changed.
        #[rustfmt::skip]
        let seed = [1,0,0,0, 23,0,0,0, 200,1,0,0, 210,30,0,0,
                    0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0];
        let target = [10719222850664546238, 14064965282130556830];
        let mut rng0 = StdRng::from_seed(seed);
        let x0 = rng0.next_u64();
        let mut rng1 = StdRng::from_rng(&mut rng0);
        let x1 = rng1.next_u64();
        assert_eq!([x0, x1], target);
    }
}

212
vendor/rand/src/rngs/thread.rs vendored Normal file
View File

@@ -0,0 +1,212 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Thread-local random number generator
use core::cell::UnsafeCell;
use std::fmt;
use std::rc::Rc;
use std::thread_local;
use rand_core::{CryptoRng, RngCore};
use super::std::Core;
use crate::rngs::OsRng;
use crate::rngs::ReseedingRng;
// Rationale for using `UnsafeCell` in `ThreadRng`:
//
// Previously we used a `RefCell`, with an overhead of ~15%. There will only
// ever be one mutable reference to the interior of the `UnsafeCell`, because
// we only have such a reference inside `next_u32`, `next_u64`, etc. Within a
// single thread (which is the definition of `ThreadRng`), there will only ever
// be one of these methods active at a time.
//
// A possible scenario where there could be multiple mutable references is if
// `ThreadRng` is used inside `next_u32` and co. But the implementation is
// completely under our control. We just have to ensure none of them use
// `ThreadRng` internally, which is nonsensical anyway. We should also never run
// `ThreadRng` in destructors of its implementation, which is also nonsensical.
// Number of generated bytes after which to reseed `ThreadRng`.
// According to benchmarks, reseeding has a noticeable impact with thresholds
// of 32 kB and less. We choose 64 kB to avoid significant overhead.
const THREAD_RNG_RESEED_THRESHOLD: u64 = 1024 * 64;
/// A reference to the thread-local generator
///
/// This type is a reference to a lazily-initialized thread-local generator.
/// An instance can be obtained via [`rand::rng()`][crate::rng()] or via
/// [`ThreadRng::default()`].
/// The handle cannot be passed between threads (is not `Send` or `Sync`).
///
/// # Security
///
/// Security must be considered relative to a threat model and validation
/// requirements. The Rand project can provide no guarantee of fitness for
/// purpose. The design criteria for `ThreadRng` are as follows:
///
/// - Automatic seeding via [`OsRng`] and periodically thereafter (see
///   [`ReseedingRng`] documentation). Limitation: there is no automatic
/// reseeding on process fork (see [below](#fork)).
/// - A rigorously analyzed, unpredictable (cryptographic) pseudo-random generator
/// (see [the book on security](https://rust-random.github.io/book/guide-rngs.html#security)).
/// The currently selected algorithm is ChaCha (12-rounds).
/// See also [`StdRng`] documentation.
/// - Not to leak internal state through [`Debug`] or serialization
/// implementations.
/// - No further protections exist to in-memory state. In particular, the
/// implementation is not required to zero memory on exit (of the process or
/// thread). (This may change in the future.)
/// - Be fast enough for general-purpose usage. Note in particular that
/// `ThreadRng` is designed to be a "fast, reasonably secure generator"
/// (where "reasonably secure" implies the above criteria).
///
/// We leave it to the user to determine whether this generator meets their
/// security requirements. For an alternative, see [`OsRng`].
///
/// # Fork
///
/// `ThreadRng` is not automatically reseeded on fork. It is recommended to
/// explicitly call [`ThreadRng::reseed`] immediately after a fork, for example:
/// ```ignore
/// fn do_fork() {
/// let pid = unsafe { libc::fork() };
/// if pid == 0 {
/// // Reseed ThreadRng in child processes:
/// rand::rng().reseed();
/// }
/// }
/// ```
///
/// Methods on `ThreadRng` are not reentrant-safe and thus should not be called
/// from an interrupt (e.g. a fork handler) unless it can be guaranteed that no
/// other method on the same `ThreadRng` is currently executing.
///
/// [`ReseedingRng`]: crate::rngs::ReseedingRng
/// [`StdRng`]: crate::rngs::StdRng
#[derive(Clone)]
pub struct ThreadRng {
    // Rc is explicitly !Send and !Sync
    // Cloning the handle only bumps the Rc refcount: all `ThreadRng` handles
    // on one thread share the same underlying generator state.
    rng: Rc<UnsafeCell<ReseedingRng<Core, OsRng>>>,
}
impl ThreadRng {
    /// Immediately reseed the generator
    ///
    /// This discards any remaining random data in the cache.
    pub fn reseed(&mut self) -> Result<(), rand_core::OsError> {
        // SAFETY: We must make sure to stop using `rng` before anyone else
        // creates another mutable reference
        // (the borrow ends when this method returns; see the `UnsafeCell`
        // rationale at the top of this file).
        let rng = unsafe { &mut *self.rng.get() };
        rng.reseed()
    }
}
/// Debug implementation does not leak internal state
impl fmt::Debug for ThreadRng {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Deliberately opaque: printing CSPRNG state would undermine the
        // security goals documented on `ThreadRng`.
        fmt.write_str("ThreadRng { .. }")
    }
}
thread_local!(
    // We require Rc<..> to avoid premature freeing when ThreadRng is used
    // within thread-local destructors. See #968.
    static THREAD_RNG_KEY: Rc<UnsafeCell<ReseedingRng<Core, OsRng>>> = {
        // Initialization panics (rather than returning an error) if OsRng
        // fails: there is no way to surface an error from here.
        let rng = ReseedingRng::new(THREAD_RNG_RESEED_THRESHOLD,
            OsRng).unwrap_or_else(|err|
            panic!("could not initialize ThreadRng: {}", err));
        Rc::new(UnsafeCell::new(rng))
    }
);
/// Access a fast, pre-initialized generator
///
/// This is a handle to the local [`ThreadRng`].
///
/// See also [`crate::rngs`] for alternatives.
///
/// # Example
///
/// ```
/// use rand::prelude::*;
///
/// # fn main() {
///
/// let mut numbers = [1, 2, 3, 4, 5];
/// numbers.shuffle(&mut rand::rng());
/// println!("Numbers: {numbers:?}");
///
/// // Using a local binding avoids an initialization-check on each usage:
/// let mut rng = rand::rng();
///
/// println!("True or false: {}", rng.random::<bool>());
/// println!("A simulated die roll: {}", rng.random_range(1..=6));
/// # }
/// ```
///
/// # Security
///
/// Refer to [`ThreadRng#Security`].
pub fn rng() -> ThreadRng {
    // Clone the thread-local `Rc` handle; the underlying generator is
    // lazily initialized on the first access from each thread.
    THREAD_RNG_KEY.with(|t| ThreadRng { rng: t.clone() })
}
impl Default for ThreadRng {
    /// Equivalent to [`rng()`]: returns a handle to the calling thread's
    /// generator.
    fn default() -> ThreadRng {
        rng()
    }
}
// Each method takes a short-lived exclusive borrow of the thread-local
// state; see the `UnsafeCell` rationale at the top of this file.
impl RngCore for ThreadRng {
    #[inline(always)]
    fn next_u32(&mut self) -> u32 {
        // SAFETY: We must make sure to stop using `rng` before anyone else
        // creates another mutable reference
        let rng = unsafe { &mut *self.rng.get() };
        rng.next_u32()
    }
    #[inline(always)]
    fn next_u64(&mut self) -> u64 {
        // SAFETY: We must make sure to stop using `rng` before anyone else
        // creates another mutable reference
        let rng = unsafe { &mut *self.rng.get() };
        rng.next_u64()
    }
    #[inline(always)]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        // SAFETY: We must make sure to stop using `rng` before anyone else
        // creates another mutable reference
        let rng = unsafe { &mut *self.rng.get() };
        rng.fill_bytes(dest)
    }
}
// Marker: forwards to `ReseedingRng<Core, OsRng>`, which is itself `CryptoRng`.
impl CryptoRng for ThreadRng {}
#[cfg(test)]
mod test {
    // Smoke test: the thread-local generator initializes and produces values.
    #[test]
    fn test_thread_rng() {
        use crate::Rng;
        let mut r = crate::rng();
        r.random::<i32>();
        assert_eq!(r.random_range(0..1), 0);
    }
    #[test]
    fn test_debug_output() {
        // We don't care about the exact output here, but it must not include
        // private CSPRNG state or the cache stored by BlockRng!
        assert_eq!(std::format!("{:?}", crate::rng()), "ThreadRng { .. }");
    }
}

View File

@@ -0,0 +1,138 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rand_core::impls::{fill_bytes_via_next, next_u64_via_u32};
use rand_core::le::read_u32_into;
use rand_core::{RngCore, SeedableRng};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// A xoshiro128++ random number generator.
///
/// The xoshiro128++ algorithm is not suitable for cryptographic purposes, but
/// is very fast and has excellent statistical properties.
///
/// The algorithm used here is translated from [the `xoshiro128plusplus.c`
/// reference source code](http://xoshiro.di.unimi.it/xoshiro128plusplus.c) by
/// David Blackman and Sebastiano Vigna.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Xoshiro128PlusPlus {
    /// 128 bits of generator state; seeding remaps the all-zero case.
    s: [u32; 4],
}
impl SeedableRng for Xoshiro128PlusPlus {
    type Seed = [u8; 16];
    /// Create a new `Xoshiro128PlusPlus`. If `seed` is entirely 0, it will be
    /// mapped to a different seed.
    #[inline]
    fn from_seed(seed: [u8; 16]) -> Xoshiro128PlusPlus {
        // Interpret the 16 seed bytes as four little-endian u32 state words.
        let mut state = [0; 4];
        read_u32_into(&seed, &mut state);
        // Remap the all-zero seed (see doc comment above).
        // Check for zero on aligned integers for better code generation.
        // Furthermore, seed_from_u64(0) will expand to a constant when optimized.
        if state.iter().all(|&x| x == 0) {
            return Self::seed_from_u64(0);
        }
        Xoshiro128PlusPlus { s: state }
    }
    /// Create a new `Xoshiro128PlusPlus` from a `u64` seed.
    ///
    /// This uses the SplitMix64 generator internally.
    #[inline]
    fn seed_from_u64(mut state: u64) -> Self {
        // SplitMix64's golden-ratio increment.
        const PHI: u64 = 0x9e3779b97f4a7c15;
        let mut s = [0; 4];
        // Each SplitMix64 step yields 64 bits, filling two u32 state words.
        for i in s.chunks_exact_mut(2) {
            state = state.wrapping_add(PHI);
            let mut z = state;
            // SplitMix64 output mixing function.
            z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9);
            z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb);
            z = z ^ (z >> 31);
            i[0] = z as u32;
            i[1] = (z >> 32) as u32;
        }
        // By using a non-zero PHI we are guaranteed to generate a non-zero state
        // Thus preventing a recursion between from_seed and seed_from_u64.
        debug_assert_ne!(s, [0; 4]);
        Xoshiro128PlusPlus { s }
    }
}
impl RngCore for Xoshiro128PlusPlus {
    #[inline]
    fn next_u32(&mut self) -> u32 {
        // "++" output scrambler: rotl(s0 + s3, 7) + s0.
        let res = self.s[0]
            .wrapping_add(self.s[3])
            .rotate_left(7)
            .wrapping_add(self.s[0]);
        // xoshiro128 state transition; the statement order below is part of
        // the algorithm and must not be rearranged.
        let t = self.s[1] << 9;
        self.s[2] ^= self.s[0];
        self.s[3] ^= self.s[1];
        self.s[1] ^= self.s[2];
        self.s[0] ^= self.s[3];
        self.s[2] ^= t;
        self.s[3] = self.s[3].rotate_left(11);
        res
    }
    #[inline]
    fn next_u64(&mut self) -> u64 {
        // Compose two successive 32-bit outputs.
        next_u64_via_u32(self)
    }
    #[inline]
    fn fill_bytes(&mut self, dst: &mut [u8]) {
        fill_bytes_via_next(self, dst)
    }
}
#[cfg(test)]
mod tests {
    use super::Xoshiro128PlusPlus;
    use rand_core::{RngCore, SeedableRng};
    // Value-stability check against the upstream C implementation.
    #[test]
    fn reference() {
        let mut rng =
            Xoshiro128PlusPlus::from_seed([1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0]);
        // These values were produced with the reference implementation:
        // http://xoshiro.di.unimi.it/xoshiro128plusplus.c
        let expected = [
            641, 1573767, 3222811527, 3517856514, 836907274, 4247214768, 3867114732, 1355841295,
            495546011, 621204420,
        ];
        for &e in &expected {
            assert_eq!(rng.next_u32(), e);
        }
    }
    #[test]
    fn stable_seed_from_u64_and_from_seed() {
        // We don't guarantee value-stability for SmallRng but this
        // could influence keeping stability whenever possible (e.g. after optimizations).
        let mut rng = Xoshiro128PlusPlus::seed_from_u64(0);
        // from_seed([0; 16]) should produce the same state as seed_from_u64(0).
        let mut rng_from_seed_0 = Xoshiro128PlusPlus::from_seed([0; 16]);
        let expected = [
            1179900579, 1938959192, 3089844957, 3657088315, 1015453891, 479942911, 3433842246,
            669252886, 3985671746, 2737205563,
        ];
        for &e in &expected {
            assert_eq!(rng.next_u32(), e);
            assert_eq!(rng_from_seed_0.next_u32(), e);
        }
    }
}

View File

@@ -0,0 +1,158 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rand_core::impls::fill_bytes_via_next;
use rand_core::le::read_u64_into;
use rand_core::{RngCore, SeedableRng};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// A xoshiro256++ random number generator.
///
/// The xoshiro256++ algorithm is not suitable for cryptographic purposes, but
/// is very fast and has excellent statistical properties.
///
/// The algorithm used here is translated from [the `xoshiro256plusplus.c`
/// reference source code](http://xoshiro.di.unimi.it/xoshiro256plusplus.c) by
/// David Blackman and Sebastiano Vigna.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Xoshiro256PlusPlus {
    /// 256 bits of generator state; seeding remaps the all-zero case.
    s: [u64; 4],
}
impl SeedableRng for Xoshiro256PlusPlus {
    type Seed = [u8; 32];
    /// Create a new `Xoshiro256PlusPlus`. If `seed` is entirely 0, it will be
    /// mapped to a different seed.
    #[inline]
    fn from_seed(seed: [u8; 32]) -> Xoshiro256PlusPlus {
        // Interpret the 32 seed bytes as four little-endian u64 state words.
        let mut state = [0; 4];
        read_u64_into(&seed, &mut state);
        // Remap the all-zero seed (see doc comment above).
        // Check for zero on aligned integers for better code generation.
        // Furthermore, seed_from_u64(0) will expand to a constant when optimized.
        if state.iter().all(|&x| x == 0) {
            return Self::seed_from_u64(0);
        }
        Xoshiro256PlusPlus { s: state }
    }
    /// Create a new `Xoshiro256PlusPlus` from a `u64` seed.
    ///
    /// This uses the SplitMix64 generator internally.
    #[inline]
    fn seed_from_u64(mut state: u64) -> Self {
        // SplitMix64's golden-ratio increment.
        const PHI: u64 = 0x9e3779b97f4a7c15;
        let mut s = [0; 4];
        // Each SplitMix64 step fills one u64 state word.
        for i in s.iter_mut() {
            state = state.wrapping_add(PHI);
            let mut z = state;
            // SplitMix64 output mixing function.
            z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9);
            z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb);
            z = z ^ (z >> 31);
            *i = z;
        }
        // By using a non-zero PHI we are guaranteed to generate a non-zero state
        // Thus preventing a recursion between from_seed and seed_from_u64.
        debug_assert_ne!(s, [0; 4]);
        Xoshiro256PlusPlus { s }
    }
}
impl RngCore for Xoshiro256PlusPlus {
    #[inline]
    fn next_u32(&mut self) -> u32 {
        // The lowest bits have some linear dependencies, so we use the
        // upper bits instead.
        let val = self.next_u64();
        (val >> 32) as u32
    }
    #[inline]
    fn next_u64(&mut self) -> u64 {
        // "++" output scrambler: rotl(s0 + s3, 23) + s0.
        let res = self.s[0]
            .wrapping_add(self.s[3])
            .rotate_left(23)
            .wrapping_add(self.s[0]);
        // xoshiro256 state transition; the statement order below is part of
        // the algorithm and must not be rearranged.
        let t = self.s[1] << 17;
        self.s[2] ^= self.s[0];
        self.s[3] ^= self.s[1];
        self.s[1] ^= self.s[2];
        self.s[0] ^= self.s[3];
        self.s[2] ^= t;
        self.s[3] = self.s[3].rotate_left(45);
        res
    }
    #[inline]
    fn fill_bytes(&mut self, dst: &mut [u8]) {
        fill_bytes_via_next(self, dst)
    }
}
#[cfg(test)]
mod tests {
    use super::Xoshiro256PlusPlus;
    use rand_core::{RngCore, SeedableRng};
    // Value-stability check against the upstream C implementation.
    #[test]
    fn reference() {
        let mut rng = Xoshiro256PlusPlus::from_seed([
            1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0,
            0, 0, 0,
        ]);
        // These values were produced with the reference implementation:
        // http://xoshiro.di.unimi.it/xoshiro256plusplus.c
        let expected = [
            41943041,
            58720359,
            3588806011781223,
            3591011842654386,
            9228616714210784205,
            9973669472204895162,
            14011001112246962877,
            12406186145184390807,
            15849039046786891736,
            10450023813501588000,
        ];
        for &e in &expected {
            assert_eq!(rng.next_u64(), e);
        }
    }
    #[test]
    fn stable_seed_from_u64_and_from_seed() {
        // We don't guarantee value-stability for SmallRng but this
        // could influence keeping stability whenever possible (e.g. after optimizations).
        let mut rng = Xoshiro256PlusPlus::seed_from_u64(0);
        // from_seed([0; 32]) should produce the same state as seed_from_u64(0).
        let mut rng_from_seed_0 = Xoshiro256PlusPlus::from_seed([0; 32]);
        let expected = [
            5987356902031041503,
            7051070477665621255,
            6633766593972829180,
            211316841551650330,
            9136120204379184874,
            379361710973160858,
            15813423377499357806,
            15596884590815070553,
            5439680534584881407,
            1369371744833522710,
        ];
        for &e in &expected {
            assert_eq!(rng.next_u64(), e);
            assert_eq!(rng_from_seed_0.next_u64(), e);
        }
    }
}

160
vendor/rand/src/seq/coin_flipper.rs vendored Normal file
View File

@@ -0,0 +1,160 @@
// Copyright 2018-2023 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::RngCore;
pub(crate) struct CoinFlipper<R: RngCore> {
    pub rng: R,
    // Cache of random bits not yet consumed as "coin flips"; refilled from
    // `rng` in 32-bit chunks (see `flip_c_heads`).
    chunk: u32, // TODO(opt): this should depend on RNG word size
    // How many bits of `chunk` are still unused.
    chunk_remaining: u32,
}
impl<R: RngCore> CoinFlipper<R> {
    /// Wrap `rng` with an empty bit cache (bits are drawn lazily).
    pub fn new(rng: R) -> Self {
        Self {
            rng,
            chunk: 0,
            chunk_remaining: 0,
        }
    }
    #[inline]
    /// Returns true with a probability of 1 / d
    /// Uses an expected two bits of randomness
    /// Panics if d == 0
    pub fn random_ratio_one_over(&mut self, d: usize) -> bool {
        debug_assert_ne!(d, 0);
        // This uses the same logic as `random_ratio` but is optimized for the case that
        // the starting numerator is one (which it always is for `Sequence::Choose()`)
        // In this case (but not `random_ratio`), this way of calculating c is always accurate
        // c = floor(log2(d)) capped at 32: the most flips such that 2^c <= d.
        let c = (usize::BITS - 1 - d.leading_zeros()).min(32);
        if self.flip_c_heads(c) {
            // All c flips were heads (probability 2^-c); finish with
            // probability (2^c) / d so the product is exactly 1 / d.
            let numerator = 1 << c;
            self.random_ratio(numerator, d)
        } else {
            false
        }
    }
    #[inline]
    /// Returns true with a probability of n / d
    /// Uses an expected two bits of randomness
    fn random_ratio(&mut self, mut n: usize, d: usize) -> bool {
        // Explanation:
        // We are trying to return true with a probability of n / d
        // If n >= d, we can just return true
        // Otherwise there are two possibilities 2n < d and 2n >= d
        // In either case we flip a coin.
        // If 2n < d
        //  If it comes up tails, return false
        //  If it comes up heads, double n and start again
        //  This is fair because (0.5 * 0) + (0.5 * 2n / d) = n / d and 2n is less than d
        //  (if 2n was greater than d we would effectively round it down to 1
        //  by returning true)
        // If 2n >= d
        //   If it comes up tails, set n to 2n - d and start again
        //   If it comes up heads, return true
        //   This is fair because (0.5 * 1) + (0.5 * (2n - d) / d) = n / d
        //   Note that if 2n = d and the coin comes up tails, n will be set to 0
        //   before restarting which is equivalent to returning false.
        // As a performance optimization we can flip multiple coins at once
        // This is efficient because we can use the `lzcnt` intrinsic
        // We can check up to 32 flips at once but we only receive one bit of information
        // - all heads or at least one tail.
        // Let c be the number of coins to flip. 1 <= c <= 32
        // If 2n < d, n * 2^c < d
        // If the result is all heads, then set n to n * 2^c
        // If there was at least one tail, return false
        // If 2n >= d, the order of results matters so we flip one coin at a time so c = 1
        // Ideally, c will be as high as possible within these constraints
        while n < d {
            // Find a good value for c by counting leading zeros
            // This will either give the highest possible c, or 1 less than that
            let c = n
                .leading_zeros()
                .saturating_sub(d.leading_zeros() + 1)
                .clamp(1, 32);
            if self.flip_c_heads(c) {
                // All heads
                // Set n to n * 2^c
                // If 2n >= d, the while loop will exit and we will return `true`
                // If n * 2^c > `usize::MAX` we always return `true` anyway
                n = n.saturating_mul(2_usize.pow(c));
            } else {
                // At least one tail
                if c == 1 {
                    // Calculate 2n - d.
                    // We need to use wrapping as 2n might be greater than `usize::MAX`
                    let next_n = n.wrapping_add(n).wrapping_sub(d);
                    // `next_n > n` detects wrap-around of the subtraction.
                    if next_n == 0 || next_n > n {
                        // This will happen if 2n < d
                        return false;
                    }
                    n = next_n;
                } else {
                    // c > 1 so 2n < d so we can return false
                    return false;
                }
            }
        }
        true
    }
/// If the next `c` bits of randomness all represent heads, consume them, return true
/// Otherwise return false and consume the number of heads plus one.
/// Generates new bits of randomness when necessary (in 32 bit chunks)
/// Has a 1 in 2 to the `c` chance of returning true
/// `c` must be less than or equal to 32
fn flip_c_heads(&mut self, mut c: u32) -> bool {
debug_assert!(c <= 32);
// Note that zeros on the left of the chunk represent heads.
// It needs to be this way round because zeros are filled in when left shifting
loop {
let zeros = self.chunk.leading_zeros();
if zeros < c {
// The happy path - we found a 1 and can return false
// Note that because a 1 bit was detected,
// We cannot have run out of random bits so we don't need to check
// First consume all of the bits read
// Using shl seems to give worse performance for size-hinted iterators
self.chunk = self.chunk.wrapping_shl(zeros + 1);
self.chunk_remaining = self.chunk_remaining.saturating_sub(zeros + 1);
return false;
} else {
// The number of zeros is larger than `c`
// There are two possibilities
if let Some(new_remaining) = self.chunk_remaining.checked_sub(c) {
// Those zeroes were all part of our random chunk,
// throw away `c` bits of randomness and return true
self.chunk_remaining = new_remaining;
self.chunk <<= c;
return true;
} else {
// Some of those zeroes were part of the random chunk
// and some were part of the space behind it
// We need to take into account only the zeroes that were random
c -= self.chunk_remaining;
// Generate a new chunk
self.chunk = self.rng.next_u32();
self.chunk_remaining = 32;
// Go back to start of loop
}
}
}
}
}

View File

@@ -0,0 +1,108 @@
// Copyright 2018-2023 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::{Rng, RngCore};
/// Similar to a Uniform distribution,
/// but after returning a number in the range [0,n], n is increased by 1.
/// Similar to a Uniform distribution,
/// but after returning a number in the range [0,n], n is increased by 1.
pub(crate) struct IncreasingUniform<R: RngCore> {
    pub rng: R,
    // Upper bound (inclusive) of the next sample.
    n: u32,
    // Chunk is a random number in [0, (n + 1) * (n + 2) *..* (n + chunk_remaining) )
    chunk: u32,
    // How many more samples can still be extracted from `chunk` before a refill.
    chunk_remaining: u8,
}
impl<R: RngCore> IncreasingUniform<R> {
    /// Create a dice roller.
    /// The next item returned will be a random number in the range [0,n]
    pub fn new(rng: R, n: u32) -> Self {
        // If n = 0, the first number returned will always be 0
        // so we don't need to generate a random number
        let chunk_remaining = if n == 0 { 1 } else { 0 };
        Self {
            rng,
            n,
            chunk: 0,
            chunk_remaining,
        }
    }

    /// Returns a number in [0,n] and increments n by 1.
    /// Generates new random bits as needed
    /// Panics if `n >= u32::MAX`
    #[inline]
    pub fn next_index(&mut self) -> usize {
        let next_n = self.n + 1;
        // There's room for further optimisation here:
        // random_range uses rejection sampling (or other method; see #1196) to avoid bias.
        // When the initial sample is biased for range 0..bound
        // it may still be viable to use for a smaller bound
        // (especially if small biases are considered acceptable).
        let next_chunk_remaining = self.chunk_remaining.checked_sub(1).unwrap_or_else(|| {
            // If the chunk is empty, generate a new chunk
            let (bound, remaining) = calculate_bound_u32(next_n);
            // bound = (n + 1) * (n + 2) *..* (n + remaining)
            self.chunk = self.rng.random_range(..bound);
            // Chunk is a random number in
            // [0, (n + 1) * (n + 2) *..* (n + remaining) )
            remaining - 1
        });
        let result = if next_chunk_remaining == 0 {
            // `chunk` is a random number in the range [0..n+1)
            // Because `chunk_remaining` is about to be set to zero
            // we do not need to clear the chunk here
            self.chunk as usize
        } else {
            // `chunk` is a random number in a range that is a multiple of n+1
            // so r will be a random number in [0..n+1)
            let r = self.chunk % next_n;
            // Dividing removes the factor just consumed, keeping the invariant
            // on `chunk` for the remaining extractions.
            self.chunk /= next_n;
            r as usize
        };
        self.chunk_remaining = next_chunk_remaining;
        self.n = next_n;
        result
    }
}
#[inline]
/// Computes `(bound, count)` where
/// `bound = m * (m + 1) * .. * (m + count - 1)` is the largest such
/// product representable in a `u32`.
fn calculate_bound_u32(m: u32) -> (u32, u8) {
    debug_assert!(m > 0);
    #[inline]
    const fn inner(m: u32) -> (u32, u8) {
        let mut product = m;
        let mut next_factor = m + 1;
        // Fold in successive factors until the next one would overflow.
        loop {
            match product.checked_mul(next_factor) {
                Some(p) => {
                    product = p;
                    next_factor += 1;
                }
                // The factor count peaks at 12 (when m is 1), so it
                // always fits in a `u8`.
                None => return (product, (next_factor - m) as u8),
            }
        }
    }
    // `m == 2` is the common case for small shuffles; returning a
    // precomputed constant instead of recalculating gives a significant
    // (~50%) performance boost there.
    const RESULT2: (u32, u8) = inner(2);
    if m == 2 {
        return RESULT2;
    }
    inner(m)
}

696
vendor/rand/src/seq/index.rs vendored Normal file
View File

@@ -0,0 +1,696 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Low-level API for sampling indices
use alloc::vec::{self, Vec};
use core::slice;
use core::{hash::Hash, ops::AddAssign};
// BTreeMap is not as fast in tests, but better than nothing.
#[cfg(feature = "std")]
use super::WeightError;
use crate::distr::uniform::SampleUniform;
use crate::distr::{Distribution, Uniform};
use crate::Rng;
#[cfg(not(feature = "std"))]
use alloc::collections::BTreeSet;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::collections::HashSet;
#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
compile_error!("unsupported pointer width");
/// A vector of indices.
///
/// Multiple internal representations are possible.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum IndexVec {
    // `u32` is the preferred representation; `u64` is only produced when
    // indices may exceed `u32::MAX` (64-bit targets only).
    #[doc(hidden)]
    U32(Vec<u32>),
    #[cfg(target_pointer_width = "64")]
    #[doc(hidden)]
    U64(Vec<u64>),
}
impl IndexVec {
    /// Returns the number of indices
    #[inline]
    pub fn len(&self) -> usize {
        match self {
            Self::U32(indices) => indices.len(),
            #[cfg(target_pointer_width = "64")]
            Self::U64(indices) => indices.len(),
        }
    }

    /// Returns `true` if the length is 0.
    #[inline]
    pub fn is_empty(&self) -> bool {
        match self {
            Self::U32(indices) => indices.is_empty(),
            #[cfg(target_pointer_width = "64")]
            Self::U64(indices) => indices.is_empty(),
        }
    }

    /// Return the value at the given `index`.
    ///
    /// (Note: we cannot implement [`std::ops::Index`] because of lifetime
    /// restrictions.)
    #[inline]
    pub fn index(&self, index: usize) -> usize {
        match self {
            Self::U32(indices) => indices[index] as usize,
            #[cfg(target_pointer_width = "64")]
            Self::U64(indices) => indices[index] as usize,
        }
    }

    /// Return result as a `Vec<usize>`. Conversion may or may not be trivial.
    #[inline]
    pub fn into_vec(self) -> Vec<usize> {
        match self {
            Self::U32(indices) => indices.into_iter().map(|i| i as usize).collect(),
            #[cfg(target_pointer_width = "64")]
            Self::U64(indices) => indices.into_iter().map(|i| i as usize).collect(),
        }
    }

    /// Iterate over the indices as a sequence of `usize` values
    #[inline]
    pub fn iter(&self) -> IndexVecIter<'_> {
        match self {
            Self::U32(indices) => IndexVecIter::U32(indices.iter()),
            #[cfg(target_pointer_width = "64")]
            Self::U64(indices) => IndexVecIter::U64(indices.iter()),
        }
    }
}
impl IntoIterator for IndexVec {
    type IntoIter = IndexVecIntoIter;
    type Item = usize;

    /// Convert into an iterator over the indices as a sequence of `usize` values
    #[inline]
    fn into_iter(self) -> IndexVecIntoIter {
        // Delegate to the matching owned-iterator variant.
        match self {
            IndexVec::U32(v) => IndexVecIntoIter::U32(v.into_iter()),
            #[cfg(target_pointer_width = "64")]
            IndexVec::U64(v) => IndexVecIntoIter::U64(v.into_iter()),
        }
    }
}
impl PartialEq for IndexVec {
    /// Representation-independent equality: a `U32` and a `U64` vector
    /// compare equal when they contain the same index values.
    fn eq(&self, other: &IndexVec) -> bool {
        use self::IndexVec::*;
        match (self, other) {
            (U32(a), U32(b)) => a == b,
            #[cfg(target_pointer_width = "64")]
            (U64(a), U64(b)) => a == b,
            #[cfg(target_pointer_width = "64")]
            (U32(a), U64(b)) => {
                a.len() == b.len() && a.iter().zip(b.iter()).all(|(x, y)| u64::from(*x) == *y)
            }
            #[cfg(target_pointer_width = "64")]
            (U64(a), U32(b)) => {
                a.len() == b.len() && a.iter().zip(b.iter()).all(|(x, y)| *x == u64::from(*y))
            }
        }
    }
}
/// Construct from `u32` indices (the preferred, faster representation).
impl From<Vec<u32>> for IndexVec {
    #[inline]
    fn from(v: Vec<u32>) -> Self {
        IndexVec::U32(v)
    }
}
/// Construct from `u64` indices (used when indices may exceed `u32::MAX`).
#[cfg(target_pointer_width = "64")]
impl From<Vec<u64>> for IndexVec {
    #[inline]
    fn from(v: Vec<u64>) -> Self {
        IndexVec::U64(v)
    }
}
/// Return type of `IndexVec::iter`.
///
/// Borrowing iterator; mirrors the representations of [`IndexVec`].
#[derive(Debug)]
pub enum IndexVecIter<'a> {
    #[doc(hidden)]
    U32(slice::Iter<'a, u32>),
    #[cfg(target_pointer_width = "64")]
    #[doc(hidden)]
    U64(slice::Iter<'a, u64>),
}
impl Iterator for IndexVecIter<'_> {
    type Item = usize;

    #[inline]
    fn next(&mut self) -> Option<usize> {
        use self::IndexVecIter::*;
        // Widen each element to `usize` regardless of representation.
        match self {
            U32(iter) => iter.next().map(|i| *i as usize),
            #[cfg(target_pointer_width = "64")]
            U64(iter) => iter.next().map(|i| *i as usize),
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Slice iterators report exact sizes; forward them unchanged.
        match self {
            IndexVecIter::U32(v) => v.size_hint(),
            #[cfg(target_pointer_width = "64")]
            IndexVecIter::U64(v) => v.size_hint(),
        }
    }
}

// Exact sizes are guaranteed by the underlying slice iterators.
impl ExactSizeIterator for IndexVecIter<'_> {}
/// Return type of `IndexVec::into_iter`.
///
/// Owning iterator; mirrors the representations of [`IndexVec`].
#[derive(Clone, Debug)]
pub enum IndexVecIntoIter {
    #[doc(hidden)]
    U32(vec::IntoIter<u32>),
    #[cfg(target_pointer_width = "64")]
    #[doc(hidden)]
    U64(vec::IntoIter<u64>),
}
impl Iterator for IndexVecIntoIter {
    type Item = usize;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        use self::IndexVecIntoIter::*;
        // Widen each element to `usize` regardless of representation.
        match self {
            U32(v) => v.next().map(|i| i as usize),
            #[cfg(target_pointer_width = "64")]
            U64(v) => v.next().map(|i| i as usize),
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        use self::IndexVecIntoIter::*;
        // `vec::IntoIter` reports exact sizes; forward them unchanged.
        match self {
            U32(v) => v.size_hint(),
            #[cfg(target_pointer_width = "64")]
            U64(v) => v.size_hint(),
        }
    }
}

// Exact sizes are guaranteed by the underlying `vec::IntoIter`s.
impl ExactSizeIterator for IndexVecIntoIter {}
/// Randomly sample exactly `amount` distinct indices from `0..length`, and
/// return them in random order (fully shuffled).
///
/// This method is used internally by the slice sampling methods, but it can
/// sometimes be useful to have the indices themselves so this is provided as
/// an alternative.
///
/// The implementation used is not specified; we automatically select the
/// fastest available algorithm for the `length` and `amount` parameters
/// (based on detailed profiling on an Intel Haswell CPU). Roughly speaking,
/// complexity is `O(amount)`, except that when `amount` is small, performance
/// is closer to `O(amount^2)`, and when `length` is close to `amount` then
/// `O(length)`.
///
/// Note that performance is significantly better over `u32` indices than over
/// `u64` indices. Because of this we hide the underlying type behind an
/// abstraction, `IndexVec`.
///
/// If an allocation-free `no_std` function is required, it is suggested
/// to adapt the internal `sample_floyd` implementation.
///
/// Panics if `amount > length`.
#[track_caller]
pub fn sample<R>(rng: &mut R, length: usize, amount: usize) -> IndexVec
where
    R: Rng + ?Sized,
{
    if amount > length {
        panic!("`amount` of samples must be less than or equal to `length`");
    }
    if length > (u32::MAX as usize) {
        #[cfg(target_pointer_width = "32")]
        unreachable!();

        // We never want to use inplace here, but could use floyd's alg
        // Lazy version: always use the cache alg.
        #[cfg(target_pointer_width = "64")]
        return sample_rejection(rng, length as u64, amount as u64);
    }
    let amount = amount as u32;
    let length = length as u32;

    // Choice of algorithm here depends on both length and amount. See:
    // https://github.com/rust-random/rand/pull/479
    // We do some calculations with f32. Accuracy is not very important.
    // NOTE: the constants below are empirically tuned cost-model
    // coefficients from the benchmarks in that PR; do not adjust them
    // without re-profiling.
    if amount < 163 {
        const C: [[f32; 2]; 2] = [[1.6, 8.0 / 45.0], [10.0, 70.0 / 9.0]];
        let j = usize::from(length >= 500_000);
        let amount_fp = amount as f32;
        let m4 = C[0][j] * amount_fp;
        // Short-cut: when amount < 12, floyd's is always faster
        if amount > 11 && (length as f32) < (C[1][j] + m4) * amount_fp {
            sample_inplace(rng, length, amount)
        } else {
            sample_floyd(rng, length, amount)
        }
    } else {
        const C: [f32; 2] = [270.0, 330.0 / 9.0];
        let j = usize::from(length >= 500_000);
        if (length as f32) < C[j] * (amount as f32) {
            sample_inplace(rng, length, amount)
        } else {
            sample_rejection(rng, length, amount)
        }
    }
}
/// Randomly sample `amount` distinct indices from `0..length`
///
/// The result may contain less than `amount` indices if insufficient non-zero
/// weights are available. Results are returned in an arbitrary order (there is
/// no guarantee of shuffling or ordering).
///
/// Function `weight` is called once for each index to provide weights.
///
/// This method is used internally by the slice sampling methods, but it can
/// sometimes be useful to have the indices themselves so this is provided as
/// an alternative.
///
/// Error cases:
/// -   [`WeightError::InvalidWeight`] when a weight is not-a-number or negative.
///
/// This implementation uses `O(length + amount)` space and `O(length)` time.
#[cfg(feature = "std")]
pub fn sample_weighted<R, F, X>(
    rng: &mut R,
    length: usize,
    weight: F,
    amount: usize,
) -> Result<IndexVec, WeightError>
where
    R: Rng + ?Sized,
    F: Fn(usize) -> X,
    X: Into<f64>,
{
    // Dispatch on index width, matching the strategy used by `sample`.
    if length > (u32::MAX as usize) {
        #[cfg(target_pointer_width = "32")]
        unreachable!();

        #[cfg(target_pointer_width = "64")]
        {
            let amount = amount as u64;
            let length = length as u64;
            sample_efraimidis_spirakis(rng, length, weight, amount)
        }
    } else {
        assert!(amount <= u32::MAX as usize);
        let amount = amount as u32;
        let length = length as u32;
        sample_efraimidis_spirakis(rng, length, weight, amount)
    }
}
/// Randomly sample `amount` distinct indices from `0..length`
///
/// The result may contain less than `amount` indices if insufficient non-zero
/// weights are available. Results are returned in an arbitrary order (there is
/// no guarantee of shuffling or ordering).
///
/// Function `weight` is called once for each index to provide weights.
///
/// This implementation is based on the algorithm A-ExpJ as found in
/// [Efraimidis and Spirakis, 2005](https://doi.org/10.1016/j.ipl.2005.11.003).
/// It uses `O(length + amount)` space and `O(length)` time.
///
/// Error cases:
/// -   [`WeightError::InvalidWeight`] when a weight is not-a-number or negative.
#[cfg(feature = "std")]
fn sample_efraimidis_spirakis<R, F, X, N>(
    rng: &mut R,
    length: N,
    weight: F,
    amount: N,
) -> Result<IndexVec, WeightError>
where
    R: Rng + ?Sized,
    F: Fn(usize) -> X,
    X: Into<f64>,
    N: UInt,
    IndexVec: From<Vec<N>>,
{
    use std::{cmp::Ordering, collections::BinaryHeap};

    if amount == N::zero() {
        return Ok(IndexVec::U32(Vec::new()));
    }

    // Candidate entry: an index paired with its (log-space) A-ExpJ key.
    struct Element<N> {
        index: N,
        key: f64,
    }

    impl<N> PartialOrd for Element<N> {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other))
        }
    }

    impl<N> Ord for Element<N> {
        fn cmp(&self, other: &Self) -> Ordering {
            // unwrap() should not panic since weights should not be NaN
            // We reverse so that BinaryHeap::peek shows the smallest item
            self.key.partial_cmp(&other.key).unwrap().reverse()
        }
    }

    impl<N> PartialEq for Element<N> {
        fn eq(&self, other: &Self) -> bool {
            self.key == other.key
        }
    }

    impl<N> Eq for Element<N> {}

    // Phase 1: fill the reservoir with the first `amount` positive-weight items.
    let mut candidates = BinaryHeap::with_capacity(amount.as_usize());
    let mut index = N::zero();
    while index < length && candidates.len() < amount.as_usize() {
        let weight = weight(index.as_usize()).into();
        if weight > 0.0 {
            // We use the log of the key used in A-ExpJ to improve precision
            // for small weights:
            let key = rng.random::<f64>().ln() / weight;
            candidates.push(Element { index, key });
        } else if !(weight >= 0.0) {
            // NaN or negative weight — reject the whole operation.
            return Err(WeightError::InvalidWeight);
        }

        index += N::one();
    }

    // Phase 2 (the "ExpJ" jump): skip ahead by an exponentially-distributed
    // amount of total weight between reservoir replacements.
    if index < length {
        let mut x = rng.random::<f64>().ln() / candidates.peek().unwrap().key;
        while index < length {
            let weight = weight(index.as_usize()).into();
            if weight > 0.0 {
                x -= weight;
                if x <= 0.0 {
                    // Replace the current minimum with this index, drawing its
                    // new key from the truncated distribution.
                    let min_candidate = candidates.pop().unwrap();
                    let t = (min_candidate.key * weight).exp();
                    let key = rng.random_range(t..1.0).ln() / weight;
                    candidates.push(Element { index, key });

                    x = rng.random::<f64>().ln() / candidates.peek().unwrap().key;
                }
            } else if !(weight >= 0.0) {
                return Err(WeightError::InvalidWeight);
            }

            index += N::one();
        }
    }

    Ok(IndexVec::from(
        candidates.iter().map(|elt| elt.index).collect(),
    ))
}
/// Randomly sample exactly `amount` indices from `0..length`, using Floyd's
/// combination algorithm.
///
/// The output values are fully shuffled. (Overhead is under 50%.)
///
/// This implementation uses `O(amount)` memory and `O(amount^2)` time.
fn sample_floyd<R>(rng: &mut R, length: u32, amount: u32) -> IndexVec
where
    R: Rng + ?Sized,
{
    // Note that the values returned by `rng.random_range()` can be
    // inferred from the returned vector by working backwards from
    // the last entry. This bijection proves the algorithm fair.
    debug_assert!(amount <= length);
    let mut indices = Vec::with_capacity(amount as usize);
    for j in length - amount..length {
        let t = rng.random_range(..=j);
        if let Some(pos) = indices.iter().position(|&x| x == t) {
            // Duplicate draw: replace the earlier occurrence with `j` (this
            // is the shuffle-producing variant of Floyd's algorithm) ...
            indices[pos] = j;
        }
        // ... and always append `t` itself.
        indices.push(t);
    }
    IndexVec::from(indices)
}
/// Randomly sample exactly `amount` indices from `0..length`, using an inplace
/// partial Fisher-Yates method.
/// Sample an amount of indices using an inplace partial fisher yates method.
///
/// This allocates the entire `length` of indices and randomizes only the first `amount`.
/// It then truncates to `amount` and returns.
///
/// This method is not appropriate for large `length` and potentially uses a lot
/// of memory; because of this we only implement for `u32` index (which improves
/// performance in all cases).
///
/// Set-up is `O(length)` time and memory and shuffling is `O(amount)` time.
fn sample_inplace<R>(rng: &mut R, length: u32, amount: u32) -> IndexVec
where
    R: Rng + ?Sized,
{
    debug_assert!(amount <= length);
    // Initialise `indices` to the identity permutation 0..length.
    let mut indices: Vec<u32> = Vec::with_capacity(length as usize);
    indices.extend(0..length);
    // Partial Fisher-Yates: only the first `amount` positions are shuffled.
    for i in 0..amount {
        let j: u32 = rng.random_range(i..length);
        indices.swap(i as usize, j as usize);
    }
    indices.truncate(amount as usize);
    debug_assert_eq!(indices.len(), amount as usize);
    IndexVec::from(indices)
}
/// Minimal unsigned-integer abstraction over `u32`/`u64`, used so the
/// sampling algorithms are value-stable across 32-bit and 64-bit platforms.
trait UInt: Copy + PartialOrd + Ord + PartialEq + Eq + SampleUniform + Hash + AddAssign {
    fn zero() -> Self;
    #[cfg_attr(feature = "alloc", allow(dead_code))]
    fn one() -> Self;
    // Widening conversion; lossless for both implementing types on
    // the supported pointer widths.
    fn as_usize(self) -> usize;
}
// Trivial `UInt` implementation for the preferred 32-bit index type.
impl UInt for u32 {
    #[inline]
    fn zero() -> Self {
        0
    }

    #[inline]
    fn one() -> Self {
        1
    }

    #[inline]
    fn as_usize(self) -> usize {
        self as usize
    }
}
// `u64` indices are only needed (and only compiled) on 64-bit targets.
#[cfg(target_pointer_width = "64")]
impl UInt for u64 {
    #[inline]
    fn zero() -> Self {
        0
    }

    #[inline]
    fn one() -> Self {
        1
    }

    #[inline]
    fn as_usize(self) -> usize {
        self as usize
    }
}
/// Randomly sample exactly `amount` indices from `0..length`, using rejection
/// sampling.
///
/// Since `amount <<< length` there is a low chance of a random sample in
/// `0..length` being a duplicate. We test for duplicates and resample where
/// necessary. The algorithm is `O(amount)` time and memory.
///
/// This function is generic over X primarily so that results are value-stable
/// over 32-bit and 64-bit platforms.
fn sample_rejection<X: UInt, R>(rng: &mut R, length: X, amount: X) -> IndexVec
where
    R: Rng + ?Sized,
    IndexVec: From<Vec<X>>,
{
    debug_assert!(amount < length);
    // `cache` tracks indices already emitted, for duplicate detection.
    #[cfg(feature = "std")]
    let mut cache = HashSet::with_capacity(amount.as_usize());
    #[cfg(not(feature = "std"))]
    let mut cache = BTreeSet::new();
    let distr = Uniform::new(X::zero(), length).unwrap();
    let mut indices = Vec::with_capacity(amount.as_usize());
    for _ in 0..amount.as_usize() {
        let mut pos = distr.sample(rng);
        // Resample until we draw an index not seen before.
        while !cache.insert(pos) {
            pos = distr.sample(rng);
        }
        indices.push(pos);
    }

    debug_assert_eq!(indices.len(), amount.as_usize());
    IndexVec::from(indices)
}
#[cfg(test)]
mod test {
    use super::*;
    use alloc::vec;

    #[test]
    #[cfg(feature = "serde")]
    fn test_serialization_index_vec() {
        // Round-trip an `IndexVec` through bincode and check equality.
        let some_index_vec = IndexVec::from(vec![254_u32, 234, 2, 1]);
        let de_some_index_vec: IndexVec =
            bincode::deserialize(&bincode::serialize(&some_index_vec).unwrap()).unwrap();
        assert_eq!(some_index_vec, de_some_index_vec);
    }

    #[test]
    fn test_sample_boundaries() {
        let mut r = crate::test::rng(404);

        // Degenerate cases: empty range and/or zero samples.
        assert_eq!(sample_inplace(&mut r, 0, 0).len(), 0);
        assert_eq!(sample_inplace(&mut r, 1, 0).len(), 0);
        assert_eq!(sample_inplace(&mut r, 1, 1).into_vec(), vec![0]);

        assert_eq!(sample_rejection(&mut r, 1u32, 0).len(), 0);

        assert_eq!(sample_floyd(&mut r, 0, 0).len(), 0);
        assert_eq!(sample_floyd(&mut r, 1, 0).len(), 0);
        assert_eq!(sample_floyd(&mut r, 1, 1).into_vec(), vec![0]);

        // These algorithms should be fast with big numbers. Test average.
        let sum: usize = sample_rejection(&mut r, 1 << 25, 10u32).into_iter().sum();
        assert!(1 << 25 < sum && sum < (1 << 25) * 25);

        let sum: usize = sample_floyd(&mut r, 1 << 25, 10).into_iter().sum();
        assert!(1 << 25 < sum && sum < (1 << 25) * 25);
    }

    #[test]
    #[cfg_attr(miri, ignore)] // Miri is too slow
    fn test_sample_alg() {
        let seed_rng = crate::test::rng;
        // We can't test which algorithm is used directly, but Floyd's alg
        // should produce different results from the others. (Also, `inplace`
        // and `cached` currently use different sizes thus produce different results.)

        // A small length and relatively large amount should use inplace
        let (length, amount): (usize, usize) = (100, 50);
        let v1 = sample(&mut seed_rng(420), length, amount);
        let v2 = sample_inplace(&mut seed_rng(420), length as u32, amount as u32);
        assert!(v1.iter().all(|e| e < length));
        assert_eq!(v1, v2);

        // Test Floyd's alg does produce different results
        let v3 = sample_floyd(&mut seed_rng(420), length as u32, amount as u32);
        assert!(v1 != v3);

        // A large length and small amount should use Floyd
        let (length, amount): (usize, usize) = (1 << 20, 50);
        let v1 = sample(&mut seed_rng(421), length, amount);
        let v2 = sample_floyd(&mut seed_rng(421), length as u32, amount as u32);
        assert!(v1.iter().all(|e| e < length));
        assert_eq!(v1, v2);

        // A large length and larger amount should use cache
        let (length, amount): (usize, usize) = (1 << 20, 600);
        let v1 = sample(&mut seed_rng(422), length, amount);
        let v2 = sample_rejection(&mut seed_rng(422), length as u32, amount as u32);
        assert!(v1.iter().all(|e| e < length));
        assert_eq!(v1, v2);
    }

    #[cfg(feature = "std")]
    #[test]
    fn test_sample_weighted() {
        let seed_rng = crate::test::rng;
        for &(amount, len) in &[(0, 10), (5, 10), (9, 10)] {
            let v = sample_weighted(&mut seed_rng(423), len, |i| i as f64, amount).unwrap();
            match v {
                IndexVec::U32(mut indices) => {
                    // All indices must be distinct and within range.
                    assert_eq!(indices.len(), amount);
                    indices.sort_unstable();
                    indices.dedup();
                    assert_eq!(indices.len(), amount);
                    for &i in &indices {
                        assert!((i as usize) < len);
                    }
                }
                #[cfg(target_pointer_width = "64")]
                _ => panic!("expected `IndexVec::U32`"),
            }
        }

        // Index 0 has zero weight, so only 9 of the 10 requested can be drawn.
        let r = sample_weighted(&mut seed_rng(423), 10, |i| i as f64, 10);
        assert_eq!(r.unwrap().len(), 9);
    }

    #[test]
    fn value_stability_sample() {
        // Pin exact outputs for fixed seeds: any change here is a
        // value-stability break and must be treated as a breaking change.
        let do_test = |length, amount, values: &[u32]| {
            let mut buf = [0u32; 8];
            let mut rng = crate::test::rng(410);

            let res = sample(&mut rng, length, amount);
            let len = res.len().min(buf.len());
            for (x, y) in res.into_iter().zip(buf.iter_mut()) {
                *y = x as u32;
            }
            assert_eq!(
                &buf[0..len],
                values,
                "failed sampling {}, {}",
                length,
                amount
            );
        };

        do_test(10, 6, &[0, 9, 5, 4, 6, 8]); // floyd
        do_test(25, 10, &[24, 20, 19, 9, 22, 16, 0, 14]); // floyd
        do_test(300, 8, &[30, 283, 243, 150, 218, 240, 1, 189]); // floyd
        do_test(300, 80, &[31, 289, 248, 154, 221, 243, 7, 192]); // inplace
        do_test(300, 180, &[31, 289, 248, 154, 221, 243, 7, 192]); // inplace

        do_test(
            1_000_000,
            8,
            &[103717, 963485, 826422, 509101, 736394, 807035, 5327, 632573],
        ); // floyd
        do_test(
            1_000_000,
            180,
            &[103718, 963490, 826426, 509103, 736396, 807036, 5327, 632573],
        ); // rejection
    }
}

672
vendor/rand/src/seq/iterator.rs vendored Normal file
View File

@@ -0,0 +1,672 @@
// Copyright 2018-2024 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! `IteratorRandom`
use super::coin_flipper::CoinFlipper;
#[allow(unused)]
use super::IndexedRandom;
use crate::Rng;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
/// Extension trait on iterators, providing random sampling methods.
///
/// This trait is implemented on all iterators `I` where `I: Iterator + Sized`
/// and provides methods for
/// choosing one or more elements. You must `use` this trait:
///
/// ```
/// use rand::seq::IteratorRandom;
///
/// let faces = "😀😎😐😕😠😢";
/// println!("I am {}!", faces.chars().choose(&mut rand::rng()).unwrap());
/// ```
/// Example output (non-deterministic):
/// ```none
/// I am 😀!
/// ```
/// Extension trait on iterators, providing random sampling methods.
///
/// This trait is implemented on all iterators `I` where `I: Iterator + Sized`
/// and provides methods for
/// choosing one or more elements. You must `use` this trait:
///
/// ```
/// use rand::seq::IteratorRandom;
///
/// let faces = "😀😎😐😕😠😢";
/// println!("I am {}!", faces.chars().choose(&mut rand::rng()).unwrap());
/// ```
/// Example output (non-deterministic):
/// ```none
/// I am 😀!
/// ```
pub trait IteratorRandom: Iterator + Sized {
    /// Uniformly sample one element
    ///
    /// Assuming that the [`Iterator::size_hint`] is correct, this method
    /// returns one uniformly-sampled random element of the slice, or `None`
    /// only if the slice is empty. Incorrect bounds on the `size_hint` may
    /// cause this method to incorrectly return `None` if fewer elements than
    /// the advertised `lower` bound are present and may prevent sampling of
    /// elements beyond an advertised `upper` bound (i.e. incorrect `size_hint`
    /// is memory-safe, but may result in unexpected `None` result and
    /// non-uniform distribution).
    ///
    /// With an accurate [`Iterator::size_hint`] and where [`Iterator::nth`] is
    /// a constant-time operation, this method can offer `O(1)` performance.
    /// Where no size hint is
    /// available, complexity is `O(n)` where `n` is the iterator length.
    /// Partial hints (where `lower > 0`) also improve performance.
    ///
    /// Note further that [`Iterator::size_hint`] may affect the number of RNG
    /// samples used as well as the result (while remaining uniform sampling).
    /// Consider instead using [`IteratorRandom::choose_stable`] to avoid
    /// [`Iterator`] combinators which only change size hints from affecting the
    /// results.
    ///
    /// # Example
    ///
    /// ```
    /// use rand::seq::IteratorRandom;
    ///
    /// let words = "Mary had a little lamb".split(' ');
    /// println!("{}", words.choose(&mut rand::rng()).unwrap());
    /// ```
    fn choose<R>(mut self, rng: &mut R) -> Option<Self::Item>
    where
        R: Rng + ?Sized,
    {
        let (mut lower, mut upper) = self.size_hint();
        let mut result = None;

        // Handling for this condition outside the loop allows the optimizer to eliminate the loop
        // when the Iterator is an ExactSizeIterator. This has a large performance impact on e.g.
        // seq_iter_choose_from_1000.
        if upper == Some(lower) {
            return match lower {
                0 => None,
                1 => self.next(),
                _ => self.nth(rng.random_range(..lower)),
            };
        }

        let mut coin_flipper = CoinFlipper::new(rng);
        let mut consumed = 0;

        // Continue until the iterator is exhausted
        loop {
            if lower > 1 {
                // Known run of `lower` elements: pick among all elements seen
                // so far (`consumed + lower`) in one draw.
                let ix = coin_flipper.rng.random_range(..lower + consumed);
                let skip = if ix < lower {
                    result = self.nth(ix);
                    lower - (ix + 1)
                } else {
                    lower
                };
                if upper == Some(lower) {
                    return result;
                }
                consumed += lower;
                if skip > 0 {
                    self.nth(skip - 1);
                }
            } else {
                // No useful hint: classic reservoir sampling — keep the k-th
                // element with probability 1/k.
                let elem = self.next();
                if elem.is_none() {
                    return result;
                }
                consumed += 1;
                if coin_flipper.random_ratio_one_over(consumed) {
                    result = elem;
                }
            }
            let hint = self.size_hint();
            lower = hint.0;
            upper = hint.1;
        }
    }

    /// Uniformly sample one element (stable)
    ///
    /// This method is very similar to [`choose`] except that the result
    /// only depends on the length of the iterator and the values produced by
    /// `rng`. Notably for any iterator of a given length this will make the
    /// same requests to `rng` and if the same sequence of values are produced
    /// the same index will be selected from `self`. This may be useful if you
    /// need consistent results no matter what type of iterator you are working
    /// with. If you do not need this stability prefer [`choose`].
    ///
    /// Note that this method still uses [`Iterator::size_hint`] to skip
    /// constructing elements where possible, however the selection and `rng`
    /// calls are the same in the face of this optimization. If you want to
    /// force every element to be created regardless call `.inspect(|e| ())`.
    ///
    /// [`choose`]: IteratorRandom::choose
    //
    // Clippy is wrong here: we need to iterate over all entries with the RNG to
    // ensure that choosing is *stable*.
    // "allow(unknown_lints)" can be removed when switching to at least
    // rust-version 1.86.0, see:
    // https://rust-lang.github.io/rust-clippy/master/index.html#double_ended_iterator_last
    #[allow(unknown_lints)]
    #[allow(clippy::double_ended_iterator_last)]
    fn choose_stable<R>(mut self, rng: &mut R) -> Option<Self::Item>
    where
        R: Rng + ?Sized,
    {
        let mut consumed = 0;
        let mut result = None;
        let mut coin_flipper = CoinFlipper::new(rng);

        loop {
            // Currently the only way to skip elements is `nth()`. So we need to
            // store what index to access next here.
            // This should be replaced by `advance_by()` once it is stable:
            // https://github.com/rust-lang/rust/issues/77404
            let mut next = 0;

            let (lower, _) = self.size_hint();
            if lower >= 2 {
                // Run every reservoir coin flip for the hinted run, but only
                // materialize the last selected element — this keeps the RNG
                // call sequence identical to the unhinted path.
                let highest_selected = (0..lower)
                    .filter(|ix| coin_flipper.random_ratio_one_over(consumed + ix + 1))
                    .last();

                consumed += lower;
                next = lower;

                if let Some(ix) = highest_selected {
                    result = self.nth(ix);
                    next -= ix + 1;
                    debug_assert!(result.is_some(), "iterator shorter than size_hint().0");
                }
            }

            let elem = self.nth(next);
            if elem.is_none() {
                return result;
            }

            if coin_flipper.random_ratio_one_over(consumed + 1) {
                result = elem;
            }
            consumed += 1;
        }
    }

    /// Uniformly sample `amount` distinct elements into a buffer
    ///
    /// Collects values at random from the iterator into a supplied buffer
    /// until that buffer is filled.
    ///
    /// Although the elements are selected randomly, the order of elements in
    /// the buffer is neither stable nor fully random. If random ordering is
    /// desired, shuffle the result.
    ///
    /// Returns the number of elements added to the buffer. This equals the length
    /// of the buffer unless the iterator contains insufficient elements, in which
    /// case this equals the number of elements available.
    ///
    /// Complexity is `O(n)` where `n` is the length of the iterator.
    /// For slices, prefer [`IndexedRandom::choose_multiple`].
    fn choose_multiple_fill<R>(mut self, rng: &mut R, buf: &mut [Self::Item]) -> usize
    where
        R: Rng + ?Sized,
    {
        let amount = buf.len();
        let mut len = 0;
        while len < amount {
            if let Some(elem) = self.next() {
                buf[len] = elem;
                len += 1;
            } else {
                // Iterator exhausted; stop early
                return len;
            }
        }

        // Continue, since the iterator was not exhausted
        // (reservoir sampling: element i+amount replaces a random slot with
        // probability amount/(i+1+amount))
        for (i, elem) in self.enumerate() {
            let k = rng.random_range(..i + 1 + amount);
            if let Some(slot) = buf.get_mut(k) {
                *slot = elem;
            }
        }
        len
    }

    /// Uniformly sample `amount` distinct elements into a [`Vec`]
    ///
    /// This is equivalent to `choose_multiple_fill` except for the result type.
    ///
    /// Although the elements are selected randomly, the order of elements in
    /// the buffer is neither stable nor fully random. If random ordering is
    /// desired, shuffle the result.
    ///
    /// The length of the returned vector equals `amount` unless the iterator
    /// contains insufficient elements, in which case it equals the number of
    /// elements available.
    ///
    /// Complexity is `O(n)` where `n` is the length of the iterator.
    /// For slices, prefer [`IndexedRandom::choose_multiple`].
    #[cfg(feature = "alloc")]
    fn choose_multiple<R>(mut self, rng: &mut R, amount: usize) -> Vec<Self::Item>
    where
        R: Rng + ?Sized,
    {
        let mut reservoir = Vec::with_capacity(amount);
        reservoir.extend(self.by_ref().take(amount));

        // Continue unless the iterator was exhausted
        //
        // note: this prevents iterators that "restart" from causing problems.
        // If the iterator stops once, then so do we.
        if reservoir.len() == amount {
            for (i, elem) in self.enumerate() {
                let k = rng.random_range(..i + 1 + amount);
                if let Some(slot) = reservoir.get_mut(k) {
                    *slot = elem;
                }
            }
        } else {
            // Don't hang onto extra memory. There is a corner case where
            // `amount` was much less than `self.len()`.
            reservoir.shrink_to_fit();
        }
        reservoir
    }
}
// Blanket implementation: every sized `Iterator` automatically gains the
// `IteratorRandom` sampling methods.
impl<I> IteratorRandom for I where I: Iterator + Sized {}
#[cfg(test)]
mod test {
use super::*;
#[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::vec::Vec;
#[derive(Clone)]
struct UnhintedIterator<I: Iterator + Clone> {
iter: I,
}
impl<I: Iterator + Clone> Iterator for UnhintedIterator<I> {
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
}
#[derive(Clone)]
struct ChunkHintedIterator<I: ExactSizeIterator + Iterator + Clone> {
iter: I,
chunk_remaining: usize,
chunk_size: usize,
hint_total_size: bool,
}
impl<I: ExactSizeIterator + Iterator + Clone> Iterator for ChunkHintedIterator<I> {
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.chunk_remaining == 0 {
self.chunk_remaining = core::cmp::min(self.chunk_size, self.iter.len());
}
self.chunk_remaining = self.chunk_remaining.saturating_sub(1);
self.iter.next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(
self.chunk_remaining,
if self.hint_total_size {
Some(self.iter.len())
} else {
None
},
)
}
}
#[derive(Clone)]
struct WindowHintedIterator<I: ExactSizeIterator + Iterator + Clone> {
iter: I,
window_size: usize,
hint_total_size: bool,
}
impl<I: ExactSizeIterator + Iterator + Clone> Iterator for WindowHintedIterator<I> {
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(
core::cmp::min(self.iter.len(), self.window_size),
if self.hint_total_size {
Some(self.iter.len())
} else {
None
},
)
}
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_iterator_choose() {
let r = &mut crate::test::rng(109);
fn test_iter<R: Rng + ?Sized, Iter: Iterator<Item = usize> + Clone>(r: &mut R, iter: Iter) {
let mut chosen = [0i32; 9];
for _ in 0..1000 {
let picked = iter.clone().choose(r).unwrap();
chosen[picked] += 1;
}
for count in chosen.iter() {
// Samples should follow Binomial(1000, 1/9)
// Octave: binopdf(x, 1000, 1/9) gives the prob of *count == x
// Note: have seen 153, which is unlikely but not impossible.
assert!(
72 < *count && *count < 154,
"count not close to 1000/9: {}",
count
);
}
}
test_iter(r, 0..9);
test_iter(r, [0, 1, 2, 3, 4, 5, 6, 7, 8].iter().cloned());
#[cfg(feature = "alloc")]
test_iter(r, (0..9).collect::<Vec<_>>().into_iter());
test_iter(r, UnhintedIterator { iter: 0..9 });
test_iter(
r,
ChunkHintedIterator {
iter: 0..9,
chunk_size: 4,
chunk_remaining: 4,
hint_total_size: false,
},
);
test_iter(
r,
ChunkHintedIterator {
iter: 0..9,
chunk_size: 4,
chunk_remaining: 4,
hint_total_size: true,
},
);
test_iter(
r,
WindowHintedIterator {
iter: 0..9,
window_size: 2,
hint_total_size: false,
},
);
test_iter(
r,
WindowHintedIterator {
iter: 0..9,
window_size: 2,
hint_total_size: true,
},
);
assert_eq!((0..0).choose(r), None);
assert_eq!(UnhintedIterator { iter: 0..0 }.choose(r), None);
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_iterator_choose_stable() {
let r = &mut crate::test::rng(109);
fn test_iter<R: Rng + ?Sized, Iter: Iterator<Item = usize> + Clone>(r: &mut R, iter: Iter) {
let mut chosen = [0i32; 9];
for _ in 0..1000 {
let picked = iter.clone().choose_stable(r).unwrap();
chosen[picked] += 1;
}
for count in chosen.iter() {
// Samples should follow Binomial(1000, 1/9)
// Octave: binopdf(x, 1000, 1/9) gives the prob of *count == x
// Note: have seen 153, which is unlikely but not impossible.
assert!(
72 < *count && *count < 154,
"count not close to 1000/9: {}",
count
);
}
}
test_iter(r, 0..9);
test_iter(r, [0, 1, 2, 3, 4, 5, 6, 7, 8].iter().cloned());
#[cfg(feature = "alloc")]
test_iter(r, (0..9).collect::<Vec<_>>().into_iter());
test_iter(r, UnhintedIterator { iter: 0..9 });
test_iter(
r,
ChunkHintedIterator {
iter: 0..9,
chunk_size: 4,
chunk_remaining: 4,
hint_total_size: false,
},
);
test_iter(
r,
ChunkHintedIterator {
iter: 0..9,
chunk_size: 4,
chunk_remaining: 4,
hint_total_size: true,
},
);
test_iter(
r,
WindowHintedIterator {
iter: 0..9,
window_size: 2,
hint_total_size: false,
},
);
test_iter(
r,
WindowHintedIterator {
iter: 0..9,
window_size: 2,
hint_total_size: true,
},
);
assert_eq!((0..0).choose(r), None);
assert_eq!(UnhintedIterator { iter: 0..0 }.choose(r), None);
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_iterator_choose_stable_stability() {
fn test_iter(iter: impl Iterator<Item = usize> + Clone) -> [i32; 9] {
let r = &mut crate::test::rng(109);
let mut chosen = [0i32; 9];
for _ in 0..1000 {
let picked = iter.clone().choose_stable(r).unwrap();
chosen[picked] += 1;
}
chosen
}
let reference = test_iter(0..9);
assert_eq!(
test_iter([0, 1, 2, 3, 4, 5, 6, 7, 8].iter().cloned()),
reference
);
#[cfg(feature = "alloc")]
assert_eq!(test_iter((0..9).collect::<Vec<_>>().into_iter()), reference);
assert_eq!(test_iter(UnhintedIterator { iter: 0..9 }), reference);
assert_eq!(
test_iter(ChunkHintedIterator {
iter: 0..9,
chunk_size: 4,
chunk_remaining: 4,
hint_total_size: false,
}),
reference
);
assert_eq!(
test_iter(ChunkHintedIterator {
iter: 0..9,
chunk_size: 4,
chunk_remaining: 4,
hint_total_size: true,
}),
reference
);
assert_eq!(
test_iter(WindowHintedIterator {
iter: 0..9,
window_size: 2,
hint_total_size: false,
}),
reference
);
assert_eq!(
test_iter(WindowHintedIterator {
iter: 0..9,
window_size: 2,
hint_total_size: true,
}),
reference
);
}
#[test]
#[cfg(feature = "alloc")]
fn test_sample_iter() {
let min_val = 1;
let max_val = 100;
let mut r = crate::test::rng(401);
let vals = (min_val..max_val).collect::<Vec<i32>>();
let small_sample = vals.iter().choose_multiple(&mut r, 5);
let large_sample = vals.iter().choose_multiple(&mut r, vals.len() + 5);
assert_eq!(small_sample.len(), 5);
assert_eq!(large_sample.len(), vals.len());
// no randomization happens when amount >= len
assert_eq!(large_sample, vals.iter().collect::<Vec<_>>());
assert!(small_sample
.iter()
.all(|e| { **e >= min_val && **e <= max_val }));
}
#[test]
fn value_stability_choose() {
fn choose<I: Iterator<Item = u32>>(iter: I) -> Option<u32> {
let mut rng = crate::test::rng(411);
iter.choose(&mut rng)
}
assert_eq!(choose([].iter().cloned()), None);
assert_eq!(choose(0..100), Some(33));
assert_eq!(choose(UnhintedIterator { iter: 0..100 }), Some(27));
assert_eq!(
choose(ChunkHintedIterator {
iter: 0..100,
chunk_size: 32,
chunk_remaining: 32,
hint_total_size: false,
}),
Some(91)
);
assert_eq!(
choose(ChunkHintedIterator {
iter: 0..100,
chunk_size: 32,
chunk_remaining: 32,
hint_total_size: true,
}),
Some(91)
);
assert_eq!(
choose(WindowHintedIterator {
iter: 0..100,
window_size: 32,
hint_total_size: false,
}),
Some(34)
);
assert_eq!(
choose(WindowHintedIterator {
iter: 0..100,
window_size: 32,
hint_total_size: true,
}),
Some(34)
);
}
#[test]
fn value_stability_choose_stable() {
fn choose<I: Iterator<Item = u32>>(iter: I) -> Option<u32> {
let mut rng = crate::test::rng(411);
iter.choose_stable(&mut rng)
}
assert_eq!(choose([].iter().cloned()), None);
assert_eq!(choose(0..100), Some(27));
assert_eq!(choose(UnhintedIterator { iter: 0..100 }), Some(27));
assert_eq!(
choose(ChunkHintedIterator {
iter: 0..100,
chunk_size: 32,
chunk_remaining: 32,
hint_total_size: false,
}),
Some(27)
);
assert_eq!(
choose(ChunkHintedIterator {
iter: 0..100,
chunk_size: 32,
chunk_remaining: 32,
hint_total_size: true,
}),
Some(27)
);
assert_eq!(
choose(WindowHintedIterator {
iter: 0..100,
window_size: 32,
hint_total_size: false,
}),
Some(27)
);
assert_eq!(
choose(WindowHintedIterator {
iter: 0..100,
window_size: 32,
hint_total_size: true,
}),
Some(27)
);
}
#[test]
fn value_stability_choose_multiple() {
fn do_test<I: Clone + Iterator<Item = u32>>(iter: I, v: &[u32]) {
let mut rng = crate::test::rng(412);
let mut buf = [0u32; 8];
assert_eq!(
iter.clone().choose_multiple_fill(&mut rng, &mut buf),
v.len()
);
assert_eq!(&buf[0..v.len()], v);
#[cfg(feature = "alloc")]
{
let mut rng = crate::test::rng(412);
assert_eq!(iter.choose_multiple(&mut rng, v.len()), v);
}
}
do_test(0..4, &[0, 1, 2, 3]);
do_test(0..8, &[0, 1, 2, 3, 4, 5, 6, 7]);
do_test(0..100, &[77, 95, 38, 23, 25, 8, 58, 40]);
}
}

80
vendor/rand/src/seq/mod.rs vendored Normal file
View File

@@ -0,0 +1,80 @@
// Copyright 2018-2023 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Sequence-related functionality
//!
//! This module provides:
//!
//! * [`IndexedRandom`] for sampling slices and other indexable lists
//! * [`IndexedMutRandom`] for sampling slices and other mutably indexable lists
//! * [`SliceRandom`] for mutating slices
//! * [`IteratorRandom`] for sampling iterators
//! * [`index::sample`] low-level API to choose multiple indices from
//! `0..length`
//!
//! Also see:
//!
//! * [`crate::distr::weighted::WeightedIndex`] distribution which provides
//! weighted index sampling.
//!
//! In order to make results reproducible across 32-64 bit architectures, all
//! `usize` indices are sampled as a `u32` where possible (also providing a
//! small performance boost in some cases).
mod coin_flipper;
mod increasing_uniform;
mod iterator;
mod slice;
// `index.rs` is mounted under the private name `index_` so that the public
// `pub mod index` wrapper below can re-export its contents alongside
// `sample_array`.
#[cfg(feature = "alloc")]
#[path = "index.rs"]
mod index_;
// Re-export the weighted-sampling error type here for convenience.
#[cfg(feature = "alloc")]
#[doc(no_inline)]
pub use crate::distr::weighted::Error as WeightError;
pub use iterator::IteratorRandom;
#[cfg(feature = "alloc")]
pub use slice::SliceChooseIter;
pub use slice::{IndexedMutRandom, IndexedRandom, SliceRandom};
/// Low-level API for sampling indices
pub mod index {
    use crate::Rng;
    #[cfg(feature = "alloc")]
    #[doc(inline)]
    pub use super::index_::*;
    /// Randomly sample exactly `N` distinct indices from `0..len`, and
    /// return them in random order (fully shuffled).
    ///
    /// This is implemented via Floyd's algorithm. Time complexity is `O(N^2)`
    /// and memory complexity is `O(N)`.
    ///
    /// Returns `None` if (and only if) `N > len`.
    pub fn sample_array<R, const N: usize>(rng: &mut R, len: usize) -> Option<[usize; N]>
    where
        R: Rng + ?Sized,
    {
        if N > len {
            return None;
        }
        // Floyd's algorithm
        // `j` walks the last N candidate values; `t` is drawn from `0..=j`.
        let mut indices = [0; N];
        for (i, j) in (len - N..len).enumerate() {
            let t = rng.random_range(..j + 1);
            // Classic Floyd's sampling would insert `j` on a duplicate draw.
            // This variant instead overwrites the earlier occurrence of `t`
            // with `j` and still writes `t` at position `i`, which also
            // shuffles the output (hence "fully shuffled" above).
            if let Some(pos) = indices[0..i].iter().position(|&x| x == t) {
                indices[pos] = j;
            }
            indices[i] = t;
        }
        Some(indices)
    }
}

770
vendor/rand/src/seq/slice.rs vendored Normal file
View File

@@ -0,0 +1,770 @@
// Copyright 2018-2023 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! `IndexedRandom`, `IndexedMutRandom`, `SliceRandom`
use super::increasing_uniform::IncreasingUniform;
use super::index;
#[cfg(feature = "alloc")]
use crate::distr::uniform::{SampleBorrow, SampleUniform};
#[cfg(feature = "alloc")]
use crate::distr::weighted::{Error as WeightError, Weight};
use crate::Rng;
use core::ops::{Index, IndexMut};
/// Extension trait on indexable lists, providing random sampling methods.
///
/// This trait is implemented on `[T]` slice types. Other types supporting
/// [`std::ops::Index<usize>`] may implement this (only [`Self::len`] must be
/// specified).
pub trait IndexedRandom: Index<usize> {
    /// The length (number of elements addressable via `Index<usize>`)
    fn len(&self) -> usize;
    /// True when the length is zero
    #[inline]
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
/// Uniformly sample one element
///
/// Returns a reference to one uniformly-sampled random element of
/// the slice, or `None` if the slice is empty.
///
/// For slices, complexity is `O(1)`.
///
/// # Example
///
/// ```
/// use rand::seq::IndexedRandom;
///
/// let choices = [1, 2, 4, 8, 16, 32];
/// let mut rng = rand::rng();
/// println!("{:?}", choices.choose(&mut rng));
/// assert_eq!(choices[..0].choose(&mut rng), None);
/// ```
fn choose<R>(&self, rng: &mut R) -> Option<&Self::Output>
where
R: Rng + ?Sized,
{
if self.is_empty() {
None
} else {
Some(&self[rng.random_range(..self.len())])
}
}
/// Uniformly sample `amount` distinct elements from self
///
/// Chooses `amount` elements from the slice at random, without repetition,
/// and in random order. The returned iterator is appropriate both for
/// collection into a `Vec` and filling an existing buffer (see example).
///
/// In case this API is not sufficiently flexible, use [`index::sample`].
///
/// For slices, complexity is the same as [`index::sample`].
///
/// # Example
/// ```
/// use rand::seq::IndexedRandom;
///
/// let mut rng = &mut rand::rng();
/// let sample = "Hello, audience!".as_bytes();
///
/// // collect the results into a vector:
/// let v: Vec<u8> = sample.choose_multiple(&mut rng, 3).cloned().collect();
///
/// // store in a buffer:
/// let mut buf = [0u8; 5];
/// for (b, slot) in sample.choose_multiple(&mut rng, buf.len()).zip(buf.iter_mut()) {
/// *slot = *b;
/// }
/// ```
#[cfg(feature = "alloc")]
fn choose_multiple<R>(
&self,
rng: &mut R,
amount: usize,
) -> SliceChooseIter<'_, Self, Self::Output>
where
Self::Output: Sized,
R: Rng + ?Sized,
{
let amount = core::cmp::min(amount, self.len());
SliceChooseIter {
slice: self,
_phantom: Default::default(),
indices: index::sample(rng, self.len(), amount).into_iter(),
}
}
/// Uniformly sample a fixed-size array of distinct elements from self
///
/// Chooses `N` elements from the slice at random, without repetition,
/// and in random order.
///
/// For slices, complexity is the same as [`index::sample_array`].
///
/// # Example
/// ```
/// use rand::seq::IndexedRandom;
///
/// let mut rng = &mut rand::rng();
/// let sample = "Hello, audience!".as_bytes();
///
/// let a: [u8; 3] = sample.choose_multiple_array(&mut rng).unwrap();
/// ```
fn choose_multiple_array<R, const N: usize>(&self, rng: &mut R) -> Option<[Self::Output; N]>
where
Self::Output: Clone + Sized,
R: Rng + ?Sized,
{
let indices = index::sample_array(rng, self.len())?;
Some(indices.map(|index| self[index].clone()))
}
/// Biased sampling for one element
///
/// Returns a reference to one element of the slice, sampled according
/// to the provided weights. Returns `None` only if the slice is empty.
///
/// The specified function `weight` maps each item `x` to a relative
/// likelihood `weight(x)`. The probability of each item being selected is
/// therefore `weight(x) / s`, where `s` is the sum of all `weight(x)`.
///
/// For slices of length `n`, complexity is `O(n)`.
/// For more information about the underlying algorithm,
/// see the [`WeightedIndex`] distribution.
///
/// See also [`choose_weighted_mut`].
///
/// # Example
///
/// ```
/// use rand::prelude::*;
///
/// let choices = [('a', 2), ('b', 1), ('c', 1), ('d', 0)];
/// let mut rng = rand::rng();
/// // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c',
/// // and 'd' will never be printed
/// println!("{:?}", choices.choose_weighted(&mut rng, |item| item.1).unwrap().0);
/// ```
/// [`choose`]: IndexedRandom::choose
/// [`choose_weighted_mut`]: IndexedMutRandom::choose_weighted_mut
/// [`WeightedIndex`]: crate::distr::weighted::WeightedIndex
#[cfg(feature = "alloc")]
fn choose_weighted<R, F, B, X>(
&self,
rng: &mut R,
weight: F,
) -> Result<&Self::Output, WeightError>
where
R: Rng + ?Sized,
F: Fn(&Self::Output) -> B,
B: SampleBorrow<X>,
X: SampleUniform + Weight + PartialOrd<X>,
{
use crate::distr::{weighted::WeightedIndex, Distribution};
let distr = WeightedIndex::new((0..self.len()).map(|idx| weight(&self[idx])))?;
Ok(&self[distr.sample(rng)])
}
/// Biased sampling of `amount` distinct elements
///
/// Similar to [`choose_multiple`], but where the likelihood of each
/// element's inclusion in the output may be specified. Zero-weighted
/// elements are never returned; the result may therefore contain fewer
/// elements than `amount` even when `self.len() >= amount`. The elements
/// are returned in an arbitrary, unspecified order.
///
/// The specified function `weight` maps each item `x` to a relative
/// likelihood `weight(x)`. The probability of each item being selected is
/// therefore `weight(x) / s`, where `s` is the sum of all `weight(x)`.
///
/// This implementation uses `O(length + amount)` space and `O(length)` time.
/// See [`index::sample_weighted`] for details.
///
/// # Example
///
/// ```
/// use rand::prelude::*;
///
/// let choices = [('a', 2), ('b', 1), ('c', 1)];
/// let mut rng = rand::rng();
/// // First Draw * Second Draw = total odds
/// // -----------------------
/// // (50% * 50%) + (25% * 67%) = 41.7% chance that the output is `['a', 'b']` in some order.
/// // (50% * 50%) + (25% * 67%) = 41.7% chance that the output is `['a', 'c']` in some order.
/// // (25% * 33%) + (25% * 33%) = 16.6% chance that the output is `['b', 'c']` in some order.
/// println!("{:?}", choices.choose_multiple_weighted(&mut rng, 2, |item| item.1).unwrap().collect::<Vec<_>>());
/// ```
/// [`choose_multiple`]: IndexedRandom::choose_multiple
// Note: this is feature-gated on std due to usage of f64::powf.
// If necessary, we may use alloc+libm as an alternative (see PR #1089).
#[cfg(feature = "std")]
fn choose_multiple_weighted<R, F, X>(
&self,
rng: &mut R,
amount: usize,
weight: F,
) -> Result<SliceChooseIter<'_, Self, Self::Output>, WeightError>
where
Self::Output: Sized,
R: Rng + ?Sized,
F: Fn(&Self::Output) -> X,
X: Into<f64>,
{
let amount = core::cmp::min(amount, self.len());
Ok(SliceChooseIter {
slice: self,
_phantom: Default::default(),
indices: index::sample_weighted(
rng,
self.len(),
|idx| weight(&self[idx]).into(),
amount,
)?
.into_iter(),
})
}
}
/// Extension trait on indexable lists, providing random sampling methods.
///
/// This trait is implemented automatically for every type implementing
/// [`IndexedRandom`] and [`std::ops::IndexMut<usize>`].
pub trait IndexedMutRandom: IndexedRandom + IndexMut<usize> {
/// Uniformly sample one element (mut)
///
/// Returns a mutable reference to one uniformly-sampled random element of
/// the slice, or `None` if the slice is empty.
///
/// For slices, complexity is `O(1)`.
fn choose_mut<R>(&mut self, rng: &mut R) -> Option<&mut Self::Output>
where
R: Rng + ?Sized,
{
if self.is_empty() {
None
} else {
let len = self.len();
Some(&mut self[rng.random_range(..len)])
}
}
/// Biased sampling for one element (mut)
///
/// Returns a mutable reference to one element of the slice, sampled according
/// to the provided weights. Returns `None` only if the slice is empty.
///
/// The specified function `weight` maps each item `x` to a relative
/// likelihood `weight(x)`. The probability of each item being selected is
/// therefore `weight(x) / s`, where `s` is the sum of all `weight(x)`.
///
/// For slices of length `n`, complexity is `O(n)`.
/// For more information about the underlying algorithm,
/// see the [`WeightedIndex`] distribution.
///
/// See also [`choose_weighted`].
///
/// [`choose_mut`]: IndexedMutRandom::choose_mut
/// [`choose_weighted`]: IndexedRandom::choose_weighted
/// [`WeightedIndex`]: crate::distr::weighted::WeightedIndex
#[cfg(feature = "alloc")]
fn choose_weighted_mut<R, F, B, X>(
&mut self,
rng: &mut R,
weight: F,
) -> Result<&mut Self::Output, WeightError>
where
R: Rng + ?Sized,
F: Fn(&Self::Output) -> B,
B: SampleBorrow<X>,
X: SampleUniform + Weight + PartialOrd<X>,
{
use crate::distr::{weighted::WeightedIndex, Distribution};
let distr = WeightedIndex::new((0..self.len()).map(|idx| weight(&self[idx])))?;
let index = distr.sample(rng);
Ok(&mut self[index])
}
}
/// Extension trait on slices, providing shuffling methods.
///
/// This trait is implemented on all `[T]` slice types, providing several
/// methods for choosing and shuffling elements. You must `use` this trait:
///
/// ```
/// use rand::seq::SliceRandom;
///
/// let mut rng = rand::rng();
/// let mut bytes = "Hello, random!".to_string().into_bytes();
/// bytes.shuffle(&mut rng);
/// let str = String::from_utf8(bytes).unwrap();
/// println!("{}", str);
/// ```
/// Example output (non-deterministic):
/// ```none
/// l,nmroHado !le
/// ```
pub trait SliceRandom: IndexedMutRandom {
    // Both methods are required; the canonical implementation for `[T]`
    // appears later in this file.
    /// Shuffle a mutable slice in place.
    ///
    /// For slices of length `n`, complexity is `O(n)`.
    /// The resulting permutation is picked uniformly from the set of all possible permutations.
    ///
    /// # Example
    ///
    /// ```
    /// use rand::seq::SliceRandom;
    ///
    /// let mut rng = rand::rng();
    /// let mut y = [1, 2, 3, 4, 5];
    /// println!("Unshuffled: {:?}", y);
    /// y.shuffle(&mut rng);
    /// println!("Shuffled: {:?}", y);
    /// ```
    fn shuffle<R>(&mut self, rng: &mut R)
    where
        R: Rng + ?Sized;
    /// Shuffle a slice in place, but exit early.
    ///
    /// Returns two mutable slices from the source slice. The first contains
    /// `amount` elements randomly permuted. The second has the remaining
    /// elements that are not fully shuffled.
    ///
    /// This is an efficient method to select `amount` elements at random from
    /// the slice, provided the slice may be mutated.
    ///
    /// If you only need to choose elements randomly and `amount > self.len()/2`
    /// then you may improve performance by taking
    /// `amount = self.len() - amount` and using only the second slice.
    ///
    /// If `amount` is greater than the number of elements in the slice, this
    /// will perform a full shuffle.
    ///
    /// For slices, complexity is `O(m)` where `m = amount`.
    fn partial_shuffle<R>(
        &mut self,
        rng: &mut R,
        amount: usize,
    ) -> (&mut [Self::Output], &mut [Self::Output])
    where
        Self::Output: Sized,
        R: Rng + ?Sized;
}
impl<T> IndexedRandom for [T] {
    fn len(&self) -> usize {
        // Resolves to the inherent `[T]::len` (inherent methods take priority
        // over trait methods), so this does not recurse.
        self.len()
    }
}
// Blanket impl: any `IndexedRandom` type that is also mutably indexable
// automatically provides the `IndexedMutRandom` methods.
impl<IR: IndexedRandom + IndexMut<usize> + ?Sized> IndexedMutRandom for IR {}
impl<T> SliceRandom for [T] {
    fn shuffle<R>(&mut self, rng: &mut R)
    where
        R: Rng + ?Sized,
    {
        if self.len() <= 1 {
            // There is no need to shuffle an empty or single element slice
            return;
        }
        // A full shuffle is a partial shuffle over the entire length.
        self.partial_shuffle(rng, self.len());
    }
    fn partial_shuffle<R>(&mut self, rng: &mut R, amount: usize) -> (&mut [T], &mut [T])
    where
        R: Rng + ?Sized,
    {
        // `m` is the number of elements excluded from the selection;
        // `saturating_sub` handles `amount > len` (i.e. a full shuffle).
        let m = self.len().saturating_sub(amount);
        // The algorithm below is based on Durstenfeld's algorithm for the
        // [Fisher-Yates shuffle](https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle#The_modern_algorithm)
        // for an unbiased permutation.
        // It ensures that the last `amount` elements of the slice
        // are randomly selected from the whole slice.
        // `IncreasingUniform::next_index()` is faster than `Rng::random_range`
        // but only works for 32 bit integers
        // So we must use the slow method if the slice is longer than that.
        if self.len() < (u32::MAX as usize) {
            let mut chooser = IncreasingUniform::new(rng, m as u32);
            for i in m..self.len() {
                let index = chooser.next_index();
                self.swap(i, index);
            }
        } else {
            for i in m..self.len() {
                let index = rng.random_range(..i + 1);
                self.swap(i, index);
            }
        }
        // Split at `m`: the tail `m..` holds the shuffled selection and is
        // returned first; the not-fully-shuffled front part comes second.
        let r = self.split_at_mut(m);
        (r.1, r.0)
    }
}
/// An iterator over multiple slice elements.
///
/// This struct is created by
/// [`IndexedRandom::choose_multiple`](trait.IndexedRandom.html#tymethod.choose_multiple).
#[cfg(feature = "alloc")]
#[derive(Debug)]
pub struct SliceChooseIter<'a, S: ?Sized + 'a, T: 'a> {
    // The source collection being sampled from.
    slice: &'a S,
    // Marks the element type `T` without storing a value of it.
    _phantom: core::marker::PhantomData<T>,
    // Pre-sampled distinct indices, yielded one at a time.
    indices: index::IndexVecIntoIter,
}
#[cfg(feature = "alloc")]
impl<'a, S: Index<usize, Output = T> + ?Sized + 'a, T: 'a> Iterator for SliceChooseIter<'a, S, T> {
    type Item = &'a T;
    fn next(&mut self) -> Option<Self::Item> {
        // TODO: investigate using SliceIndex::get_unchecked when stable
        self.indices.next().map(|i| &self.slice[i])
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact hint: the count of remaining pre-sampled indices is known.
        (self.indices.len(), Some(self.indices.len()))
    }
}
#[cfg(feature = "alloc")]
impl<'a, S: Index<usize, Output = T> + ?Sized + 'a, T: 'a> ExactSizeIterator
    for SliceChooseIter<'a, S, T>
{
    fn len(&self) -> usize {
        self.indices.len()
    }
}
#[cfg(test)]
mod test {
use super::*;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[test]
fn test_slice_choose() {
let mut r = crate::test::rng(107);
let chars = [
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
];
let mut chosen = [0i32; 14];
// The below all use a binomial distribution with n=1000, p=1/14.
// binocdf(40, 1000, 1/14) ~= 2e-5; 1-binocdf(106, ..) ~= 2e-5
for _ in 0..1000 {
let picked = *chars.choose(&mut r).unwrap();
chosen[(picked as usize) - ('a' as usize)] += 1;
}
for count in chosen.iter() {
assert!(40 < *count && *count < 106);
}
chosen.iter_mut().for_each(|x| *x = 0);
for _ in 0..1000 {
*chosen.choose_mut(&mut r).unwrap() += 1;
}
for count in chosen.iter() {
assert!(40 < *count && *count < 106);
}
let mut v: [isize; 0] = [];
assert_eq!(v.choose(&mut r), None);
assert_eq!(v.choose_mut(&mut r), None);
}
#[test]
fn value_stability_slice() {
let mut r = crate::test::rng(413);
let chars = [
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
];
let mut nums = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
assert_eq!(chars.choose(&mut r), Some(&'l'));
assert_eq!(nums.choose_mut(&mut r), Some(&mut 3));
assert_eq!(
&chars.choose_multiple_array(&mut r),
&Some(['f', 'i', 'd', 'b', 'c', 'm', 'j', 'k'])
);
#[cfg(feature = "alloc")]
assert_eq!(
&chars
.choose_multiple(&mut r, 8)
.cloned()
.collect::<Vec<char>>(),
&['h', 'm', 'd', 'b', 'c', 'e', 'n', 'f']
);
#[cfg(feature = "alloc")]
assert_eq!(chars.choose_weighted(&mut r, |_| 1), Ok(&'i'));
#[cfg(feature = "alloc")]
assert_eq!(nums.choose_weighted_mut(&mut r, |_| 1), Ok(&mut 2));
let mut r = crate::test::rng(414);
nums.shuffle(&mut r);
assert_eq!(nums, [5, 11, 0, 8, 7, 12, 6, 4, 9, 3, 1, 2, 10]);
nums = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let res = nums.partial_shuffle(&mut r, 6);
assert_eq!(res.0, &mut [7, 12, 6, 8, 1, 9]);
assert_eq!(res.1, &mut [0, 11, 2, 3, 4, 5, 10]);
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_shuffle() {
let mut r = crate::test::rng(108);
let empty: &mut [isize] = &mut [];
empty.shuffle(&mut r);
let mut one = [1];
one.shuffle(&mut r);
let b: &[_] = &[1];
assert_eq!(one, b);
let mut two = [1, 2];
two.shuffle(&mut r);
assert!(two == [1, 2] || two == [2, 1]);
fn move_last(slice: &mut [usize], pos: usize) {
// use slice[pos..].rotate_left(1); once we can use that
let last_val = slice[pos];
for i in pos..slice.len() - 1 {
slice[i] = slice[i + 1];
}
*slice.last_mut().unwrap() = last_val;
}
let mut counts = [0i32; 24];
for _ in 0..10000 {
let mut arr: [usize; 4] = [0, 1, 2, 3];
arr.shuffle(&mut r);
let mut permutation = 0usize;
let mut pos_value = counts.len();
for i in 0..4 {
pos_value /= 4 - i;
let pos = arr.iter().position(|&x| x == i).unwrap();
assert!(pos < (4 - i));
permutation += pos * pos_value;
move_last(&mut arr, pos);
assert_eq!(arr[3], i);
}
for (i, &a) in arr.iter().enumerate() {
assert_eq!(a, i);
}
counts[permutation] += 1;
}
for count in counts.iter() {
// Binomial(10000, 1/24) with average 416.667
// Octave: binocdf(n, 10000, 1/24)
// 99.9% chance samples lie within this range:
assert!(352 <= *count && *count <= 483, "count: {}", count);
}
}
#[test]
fn test_partial_shuffle() {
let mut r = crate::test::rng(118);
let mut empty: [u32; 0] = [];
let res = empty.partial_shuffle(&mut r, 10);
assert_eq!((res.0.len(), res.1.len()), (0, 0));
let mut v = [1, 2, 3, 4, 5];
let res = v.partial_shuffle(&mut r, 2);
assert_eq!((res.0.len(), res.1.len()), (2, 3));
assert!(res.0[0] != res.0[1]);
// First elements are only modified if selected, so at least one isn't modified:
assert!(res.1[0] == 1 || res.1[1] == 2 || res.1[2] == 3);
}
#[test]
#[cfg(feature = "alloc")]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_weighted() {
let mut r = crate::test::rng(406);
const N_REPS: u32 = 3000;
let weights = [1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7];
let total_weight = weights.iter().sum::<u32>() as f32;
let verify = |result: [i32; 14]| {
for (i, count) in result.iter().enumerate() {
let exp = (weights[i] * N_REPS) as f32 / total_weight;
let mut err = (*count as f32 - exp).abs();
if err != 0.0 {
err /= exp;
}
assert!(err <= 0.25);
}
};
// choose_weighted
fn get_weight<T>(item: &(u32, T)) -> u32 {
item.0
}
let mut chosen = [0i32; 14];
let mut items = [(0u32, 0usize); 14]; // (weight, index)
for (i, item) in items.iter_mut().enumerate() {
*item = (weights[i], i);
}
for _ in 0..N_REPS {
let item = items.choose_weighted(&mut r, get_weight).unwrap();
chosen[item.1] += 1;
}
verify(chosen);
// choose_weighted_mut
let mut items = [(0u32, 0i32); 14]; // (weight, count)
for (i, item) in items.iter_mut().enumerate() {
*item = (weights[i], 0);
}
for _ in 0..N_REPS {
items.choose_weighted_mut(&mut r, get_weight).unwrap().1 += 1;
}
for (ch, item) in chosen.iter_mut().zip(items.iter()) {
*ch = item.1;
}
verify(chosen);
// Check error cases
let empty_slice = &mut [10][0..0];
assert_eq!(
empty_slice.choose_weighted(&mut r, |_| 1),
Err(WeightError::InvalidInput)
);
assert_eq!(
empty_slice.choose_weighted_mut(&mut r, |_| 1),
Err(WeightError::InvalidInput)
);
assert_eq!(
['x'].choose_weighted_mut(&mut r, |_| 0),
Err(WeightError::InsufficientNonZero)
);
assert_eq!(
[0, -1].choose_weighted_mut(&mut r, |x| *x),
Err(WeightError::InvalidWeight)
);
assert_eq!(
[-1, 0].choose_weighted_mut(&mut r, |x| *x),
Err(WeightError::InvalidWeight)
);
}
#[test]
#[cfg(feature = "std")]
fn test_multiple_weighted_edge_cases() {
use super::*;
let mut rng = crate::test::rng(413);
// Case 1: One of the weights is 0
let choices = [('a', 2), ('b', 1), ('c', 0)];
for _ in 0..100 {
let result = choices
.choose_multiple_weighted(&mut rng, 2, |item| item.1)
.unwrap()
.collect::<Vec<_>>();
assert_eq!(result.len(), 2);
assert!(!result.iter().any(|val| val.0 == 'c'));
}
// Case 2: All of the weights are 0
let choices = [('a', 0), ('b', 0), ('c', 0)];
let r = choices.choose_multiple_weighted(&mut rng, 2, |item| item.1);
assert_eq!(r.unwrap().len(), 0);
// Case 3: Negative weights
let choices = [('a', -1), ('b', 1), ('c', 1)];
let r = choices.choose_multiple_weighted(&mut rng, 2, |item| item.1);
assert_eq!(r.unwrap_err(), WeightError::InvalidWeight);
// Case 4: Empty list
let choices = [];
let r = choices.choose_multiple_weighted(&mut rng, 0, |_: &()| 0);
assert_eq!(r.unwrap().count(), 0);
// Case 5: NaN weights
let choices = [('a', f64::NAN), ('b', 1.0), ('c', 1.0)];
let r = choices.choose_multiple_weighted(&mut rng, 2, |item| item.1);
assert_eq!(r.unwrap_err(), WeightError::InvalidWeight);
// Case 6: +infinity weights
let choices = [('a', f64::INFINITY), ('b', 1.0), ('c', 1.0)];
for _ in 0..100 {
let result = choices
.choose_multiple_weighted(&mut rng, 2, |item| item.1)
.unwrap()
.collect::<Vec<_>>();
assert_eq!(result.len(), 2);
assert!(result.iter().any(|val| val.0 == 'a'));
}
// Case 7: -infinity weights
let choices = [('a', f64::NEG_INFINITY), ('b', 1.0), ('c', 1.0)];
let r = choices.choose_multiple_weighted(&mut rng, 2, |item| item.1);
assert_eq!(r.unwrap_err(), WeightError::InvalidWeight);
// Case 8: -0 weights
let choices = [('a', -0.0), ('b', 1.0), ('c', 1.0)];
let r = choices.choose_multiple_weighted(&mut rng, 2, |item| item.1);
assert!(r.is_ok());
}
#[test]
#[cfg(feature = "std")]
fn test_multiple_weighted_distributions() {
use super::*;
// The theoretical probabilities of the different outcomes are:
// AB: 0.5 * 0.667 = 0.3333
// AC: 0.5 * 0.333 = 0.1667
// BA: 0.333 * 0.75 = 0.25
// BC: 0.333 * 0.25 = 0.0833
// CA: 0.167 * 0.6 = 0.1
// CB: 0.167 * 0.4 = 0.0667
let choices = [('a', 3), ('b', 2), ('c', 1)];
let mut rng = crate::test::rng(414);
let mut results = [0i32; 3];
let expected_results = [5833, 2667, 1500];
for _ in 0..10000 {
let result = choices
.choose_multiple_weighted(&mut rng, 2, |item| item.1)
.unwrap()
.collect::<Vec<_>>();
assert_eq!(result.len(), 2);
match (result[0].0, result[1].0) {
('a', 'b') | ('b', 'a') => {
results[0] += 1;
}
('a', 'c') | ('c', 'a') => {
results[1] += 1;
}
('b', 'c') | ('c', 'b') => {
results[2] += 1;
}
(_, _) => panic!("unexpected result"),
}
}
let mut diffs = results
.iter()
.zip(&expected_results)
.map(|(a, b)| (a - b).abs());
assert!(!diffs.any(|deviation| deviation > 100));
}
}