chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

15
vendor/poly1305/src/backend.rs vendored Normal file
View File

@@ -0,0 +1,15 @@
//! Poly1305 backends
// On x86/x86_64 targets (unless the `poly1305_force_soft` cfg flag is set),
// compile the AVX2 backend together with the `autodetect` shim that selects
// between the AVX2 and portable backends at runtime.
#[cfg(all(
    any(target_arch = "x86", target_arch = "x86_64"),
    not(poly1305_force_soft)
))]
pub(crate) mod avx2;
#[cfg(all(
    any(target_arch = "x86", target_arch = "x86_64"),
    not(poly1305_force_soft)
))]
pub(crate) mod autodetect;
// Portable software backend; always compiled and used as the fallback.
pub(crate) mod soft;

View File

@@ -0,0 +1,105 @@
//! Autodetection support for AVX2 CPU intrinsics on x86 CPUs, with fallback
//! to the "soft" backend when it's unavailable.
use universal_hash::{consts::U16, crypto_common::BlockSizeUser, UniversalHash};
use crate::{backend, Block, Key, Tag};
use core::mem::ManuallyDrop;
// Generates the `avx2_cpuid` module, providing runtime detection of the
// `avx2` CPU feature.
cpufeatures::new!(avx2_cpuid, "avx2");

/// Poly1305 state that dispatches at runtime between the AVX2 and "soft"
/// backend implementations.
pub struct State {
    // Backend state; which union field is live is determined by `token`.
    inner: Inner,
    // CPU feature token: `token.get()` reports whether AVX2 was detected.
    token: avx2_cpuid::InitToken,
}

/// Backend storage. Exactly one field is initialized, chosen at construction
/// time based on AVX2 availability. `ManuallyDrop` suppresses the implicit
/// destructor a union cannot have; cleanup happens in the `Drop` impl below
/// (when the `zeroize` feature is enabled).
union Inner {
    avx2: ManuallyDrop<backend::avx2::State>,
    soft: ManuallyDrop<backend::soft::State>,
}

impl BlockSizeUser for State {
    /// Poly1305 operates on 16-byte blocks.
    type BlockSize = U16;
}
impl State {
    /// Initialize Poly1305 [`State`] with the given key
    #[inline]
    pub(crate) fn new(key: &Key) -> State {
        // Probe for AVX2 once; the returned token is stored so later calls
        // branch on the same answer without re-probing.
        let (token, avx2_present) = avx2_cpuid::init_get();

        // Initialize whichever union field matches the detected CPU.
        let inner = if avx2_present {
            Inner {
                avx2: ManuallyDrop::new(backend::avx2::State::new(key)),
            }
        } else {
            Inner {
                soft: ManuallyDrop::new(backend::soft::State::new(key)),
            }
        };

        Self { inner, token }
    }

    /// Compute a Poly1305 block
    #[inline]
    pub(crate) fn compute_block(&mut self, block: &Block, partial: bool) {
        // SAFETY: `token` records which union field was initialized in `new`,
        // so each branch only reads the live field; the AVX2 branch is only
        // taken when AVX2 was detected.
        if self.token.get() {
            unsafe { (*self.inner.avx2).compute_block(block, partial) }
        } else {
            unsafe { (*self.inner.soft).compute_block(block, partial) }
        }
    }
}
impl UniversalHash for State {
    fn update_with_backend(
        &mut self,
        f: impl universal_hash::UhfClosure<BlockSize = Self::BlockSize>,
    ) {
        // SAFETY: `token` selects the union field that was initialized in
        // `new`, so only the live field is handed to the closure.
        if self.token.get() {
            unsafe { f.call(&mut *self.inner.avx2) }
        } else {
            unsafe { f.call(&mut *self.inner.soft) }
        }
    }

    /// Finalize output producing a [`Tag`]
    #[inline]
    fn finalize(mut self) -> Tag {
        // SAFETY: as above, `token` guarantees only the live field is used.
        if self.token.get() {
            unsafe { (*self.inner.avx2).finalize() }
        } else {
            unsafe { (*self.inner.soft).finalize_mut() }
        }
    }
}
impl Clone for State {
    fn clone(&self) -> Self {
        // SAFETY: `token` tells us which union field is live; only that field
        // is cloned. The token itself is `Copy`.
        let inner = if self.token.get() {
            Inner {
                avx2: ManuallyDrop::new(unsafe { (*self.inner.avx2).clone() }),
            }
        } else {
            Inner {
                soft: ManuallyDrop::new(unsafe { (*self.inner.soft).clone() }),
            }
        };

        Self {
            inner,
            token: self.token,
        }
    }
}
#[cfg(feature = "zeroize")]
impl Drop for State {
    /// Wipe all key-dependent state on drop by overwriting the whole `State`
    /// allocation with zeros, covering whichever union variant is live.
    fn drop(&mut self) {
        use zeroize::Zeroize;

        const SIZE: usize = core::mem::size_of::<State>();

        // Reinterpret `self` as a raw byte array so both possible union
        // variants (and the token) are zeroized in a single pass.
        let state = unsafe { &mut *(self as *mut State as *mut [u8; SIZE]) };
        state.zeroize();
    }
}

209
vendor/poly1305/src/backend/avx2.rs vendored Normal file
View File

@@ -0,0 +1,209 @@
//! AVX2 implementation of the Poly1305 state machine.
// The State struct and its logic was originally derived from Goll and Gueron's AVX2 C
// code:
// [Vectorization of Poly1305 message authentication code](https://ieeexplore.ieee.org/document/7113463)
//
// which was sourced from Bhattacharyya and Sarkar's modified variant:
// [Improved SIMD Implementation of Poly1305](https://eprint.iacr.org/2019/842)
// https://github.com/Sreyosi/Improved-SIMD-Implementation-of-Poly1305
//
// The logic has been extensively rewritten and documented, and several bugs in the
// original C code were fixed.
//
// Note that State only implements the original Goll-Gueron algorithm, not the
// optimisations provided by Bhattacharyya and Sarkar. The latter require the message
// length to be known, which is incompatible with the streaming API of UniversalHash.
use universal_hash::{
consts::{U16, U4},
crypto_common::{BlockSizeUser, ParBlocksSizeUser},
generic_array::GenericArray,
UhfBackend,
};
use crate::{Block, Key, Tag};
mod helpers;
use self::helpers::*;
/// Four Poly1305 blocks (64-bytes)
type ParBlocks = universal_hash::ParBlocks<State>;

/// Polynomial state and multipliers that only exist once at least one batch
/// of four blocks has been processed.
#[derive(Copy, Clone)]
struct Initialized {
    /// Running polynomial accumulator (four lanes of 130-bit values).
    p: Aligned4x130,
    /// Multiplier used to merge the four lanes during finalization.
    m: SpacedMultiplier4x130,
    /// Precomputed R^4, applied to `p` once per four-block batch.
    r4: PrecomputedMultiplier,
}

#[derive(Clone)]
pub(crate) struct State {
    /// Addition key, added into the accumulator during finalization.
    k: AdditionKey,
    /// Precomputed multiplier for R (from `prepare_keys`).
    r1: PrecomputedMultiplier,
    /// Precomputed multiplier for R^2.
    r2: PrecomputedMultiplier,
    /// Lazily-initialized polynomial state; set on the first full batch.
    initialized: Option<Initialized>,
    /// Buffer collecting up to four blocks so they are processed as a batch.
    cached_blocks: [Block; 4],
    /// Number of valid blocks currently held in `cached_blocks`.
    num_cached_blocks: usize,
    /// At most one trailing partial block, folded in at finalization.
    partial_block: Option<Block>,
}
impl State {
    /// Initialize Poly1305 [`State`] with the given key
    pub(crate) fn new(key: &Key) -> Self {
        // Prepare addition key and polynomial key.
        let (k, r1) = unsafe { prepare_keys(key) };

        // Precompute R^2.
        let r2 = (r1 * r1).reduce();

        State {
            k,
            r1,
            r2: r2.into(),
            initialized: None,
            cached_blocks: [Block::default(); 4],
            num_cached_blocks: 0,
            partial_block: None,
        }
    }

    /// Process four Poly1305 blocks at once.
    ///
    /// # Safety
    /// Caller must ensure AVX2 is available (enforced by the callers in this
    /// crate via autodetection or the `avx2` target feature).
    #[target_feature(enable = "avx2")]
    pub(crate) unsafe fn compute_par_blocks(&mut self, blocks: &ParBlocks) {
        // The fast path is only valid when nothing is buffered, otherwise
        // block ordering would be lost.
        assert!(self.partial_block.is_none());
        assert_eq!(self.num_cached_blocks, 0);

        self.process_blocks(Aligned4x130::from_par_blocks(blocks));
    }

    /// Compute a Poly1305 block
    #[target_feature(enable = "avx2")]
    pub(crate) unsafe fn compute_block(&mut self, block: &Block, partial: bool) {
        // We can cache a single partial block.
        if partial {
            assert!(self.partial_block.is_none());
            self.partial_block = Some(*block);
            return;
        }

        // Buffer full blocks until four have accumulated, then process the
        // whole batch with the vectorized path.
        self.cached_blocks[self.num_cached_blocks].copy_from_slice(block);
        if self.num_cached_blocks < 3 {
            self.num_cached_blocks += 1;
            return;
        } else {
            self.num_cached_blocks = 0;
        }

        self.process_blocks(Aligned4x130::from_blocks(&self.cached_blocks));
    }

    /// Fold a batch of four blocks into the polynomial accumulator.
    #[target_feature(enable = "avx2")]
    unsafe fn process_blocks(&mut self, blocks: Aligned4x130) {
        if let Some(inner) = &mut self.initialized {
            // P <-- R^4 * P + blocks
            inner.p = (&inner.p * inner.r4).reduce() + blocks;
        } else {
            // Initialize the polynomial.
            let p = blocks;

            // Initialize the multiplier (used to merge down the polynomial during
            // finalization).
            let (m, r4) = SpacedMultiplier4x130::new(self.r1, self.r2);

            self.initialized = Some(Initialized { p, m, r4 })
        }
    }

    /// Finalize output producing a [`Tag`]
    ///
    /// Merges the four polynomial lanes, then folds in any leftover cached
    /// blocks (0–3 full blocks plus an optional partial block).
    #[target_feature(enable = "avx2")]
    pub(crate) unsafe fn finalize(&mut self) -> Tag {
        assert!(self.num_cached_blocks < 4);
        let mut data = &self.cached_blocks[..];

        // T ← R◦T
        // P = T_0 + T_1 + T_2 + T_3
        let mut p = self
            .initialized
            .take()
            .map(|inner| (inner.p * inner.m).sum().reduce());

        if self.num_cached_blocks >= 2 {
            // Compute 32 byte block (remaining data < 64 bytes)
            let mut c = Aligned2x130::from_blocks(data[..2].try_into().unwrap());
            if let Some(p) = p {
                c = c + p;
            }
            p = Some(c.mul_and_sum(self.r1, self.r2).reduce());
            data = &data[2..];
            self.num_cached_blocks -= 2;
        }

        if self.num_cached_blocks == 1 {
            // Compute 16 byte block (remaining data < 32 bytes)
            let mut c = Aligned130::from_block(&data[0]);
            if let Some(p) = p {
                c = c + p;
            }
            p = Some((c * self.r1).reduce());
            self.num_cached_blocks -= 1;
        }

        if let Some(block) = &self.partial_block {
            // Compute last block (remaining data < 16 bytes)
            let mut c = Aligned130::from_partial_block(block);
            if let Some(p) = p {
                c = c + p;
            }
            p = Some((c * self.r1).reduce());
        }

        // Compute tag: p + k mod 2^128
        let mut tag = GenericArray::<u8, _>::default();
        let tag_int = if let Some(p) = p {
            self.k + p
        } else {
            // Empty message: the tag is just the addition key.
            self.k.into()
        };
        tag_int.write(tag.as_mut_slice());

        tag
    }
}
impl BlockSizeUser for State {
    /// Poly1305 operates on 16-byte blocks.
    type BlockSize = U16;
}

impl ParBlocksSizeUser for State {
    /// This backend ingests four blocks per parallel call.
    type ParBlocksSize = U4;
}
impl UhfBackend for State {
    fn proc_block(&mut self, block: &Block) {
        // SAFETY: this `State` is only constructed when AVX2 is available —
        // either via the autodetect module, or (for fuzzing) when the crate
        // is compiled with the `avx2` target feature (see cfg in lib.rs).
        unsafe { self.compute_block(block, false) };
    }

    fn proc_par_blocks(&mut self, blocks: &ParBlocks) {
        if self.num_cached_blocks == 0 {
            // Fast path.
            // SAFETY: AVX2 availability — same argument as `proc_block`.
            unsafe { self.compute_par_blocks(blocks) };
        } else {
            // We are unaligned; use the slow fallback.
            for block in blocks {
                self.proc_block(block);
            }
        }
    }

    fn blocks_needed_to_align(&self) -> usize {
        if self.num_cached_blocks == 0 {
            // There are no cached blocks; fast path is available.
            0
        } else {
            // There are cached blocks; report how many more we need.
            self.cached_blocks.len() - self.num_cached_blocks
        }
    }
}

File diff suppressed because it is too large Load Diff

271
vendor/poly1305/src/backend/soft.rs vendored Normal file
View File

@@ -0,0 +1,271 @@
//! Software implementation of the Poly1305 state machine.
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// This code originates from the rust-crypto project:
// <https://github.com/DaGenix/rust-crypto>
//
// ...and was originally a port of Andrew Moons poly1305-donna
// https://github.com/floodyberry/poly1305-donna
use universal_hash::{
consts::{U1, U16},
crypto_common::{BlockSizeUser, ParBlocksSizeUser},
UhfBackend, UniversalHash,
};
use crate::{Block, Key, Tag};
/// Poly1305 state for the portable backend, using 26-bit limbs (radix 2^26).
#[derive(Clone, Default)]
pub(crate) struct State {
    /// Clamped polynomial key `r`, split into five 26-bit limbs.
    r: [u32; 5],
    /// Accumulator `h`, five 26-bit limbs (may carry a partial reduction).
    h: [u32; 5],
    /// Second key half `s` (four 32-bit words), added during finalization.
    pad: [u32; 4],
}
impl State {
    /// Initialize Poly1305 [`State`] with the given key
    pub(crate) fn new(key: &Key) -> State {
        let mut poly = State::default();

        // Clamp r per the Poly1305 spec while splitting it into five 26-bit
        // limbs (the masks combine the 2^26 radix split with the clamp):
        // r &= 0xffffffc0ffffffc0ffffffc0fffffff
        poly.r[0] = (u32::from_le_bytes(key[0..4].try_into().unwrap())) & 0x3ff_ffff;
        poly.r[1] = (u32::from_le_bytes(key[3..7].try_into().unwrap()) >> 2) & 0x3ff_ff03;
        poly.r[2] = (u32::from_le_bytes(key[6..10].try_into().unwrap()) >> 4) & 0x3ff_c0ff;
        poly.r[3] = (u32::from_le_bytes(key[9..13].try_into().unwrap()) >> 6) & 0x3f0_3fff;
        poly.r[4] = (u32::from_le_bytes(key[12..16].try_into().unwrap()) >> 8) & 0x00f_ffff;

        // The second key half is stored verbatim; it is only added to the
        // accumulator at finalization.
        poly.pad[0] = u32::from_le_bytes(key[16..20].try_into().unwrap());
        poly.pad[1] = u32::from_le_bytes(key[20..24].try_into().unwrap());
        poly.pad[2] = u32::from_le_bytes(key[24..28].try_into().unwrap());
        poly.pad[3] = u32::from_le_bytes(key[28..32].try_into().unwrap());

        poly
    }

    /// Compute a Poly1305 block
    ///
    /// For a full block the high bit (`hibit`) is set above the 128 message
    /// bits; a `partial` block is expected to arrive already terminated by
    /// the caller (see `compute_unpadded` in lib.rs), so no high bit is added.
    pub(crate) fn compute_block(&mut self, block: &Block, partial: bool) {
        let hibit = if partial { 0 } else { 1 << 24 };

        let r0 = self.r[0];
        let r1 = self.r[1];
        let r2 = self.r[2];
        let r3 = self.r[3];
        let r4 = self.r[4];

        // Precomputed 5*r_i terms used by the modular reduction (2^130 ≡ 5).
        let s1 = r1 * 5;
        let s2 = r2 * 5;
        let s3 = r3 * 5;
        let s4 = r4 * 5;

        let mut h0 = self.h[0];
        let mut h1 = self.h[1];
        let mut h2 = self.h[2];
        let mut h3 = self.h[3];
        let mut h4 = self.h[4];

        // h += m
        h0 += (u32::from_le_bytes(block[0..4].try_into().unwrap())) & 0x3ff_ffff;
        h1 += (u32::from_le_bytes(block[3..7].try_into().unwrap()) >> 2) & 0x3ff_ffff;
        h2 += (u32::from_le_bytes(block[6..10].try_into().unwrap()) >> 4) & 0x3ff_ffff;
        h3 += (u32::from_le_bytes(block[9..13].try_into().unwrap()) >> 6) & 0x3ff_ffff;
        h4 += (u32::from_le_bytes(block[12..16].try_into().unwrap()) >> 8) | hibit;

        // h *= r (schoolbook multiply with the wrap-around terms folded in
        // via s1..s4)
        let d0 = (u64::from(h0) * u64::from(r0))
            + (u64::from(h1) * u64::from(s4))
            + (u64::from(h2) * u64::from(s3))
            + (u64::from(h3) * u64::from(s2))
            + (u64::from(h4) * u64::from(s1));

        let mut d1 = (u64::from(h0) * u64::from(r1))
            + (u64::from(h1) * u64::from(r0))
            + (u64::from(h2) * u64::from(s4))
            + (u64::from(h3) * u64::from(s3))
            + (u64::from(h4) * u64::from(s2));

        let mut d2 = (u64::from(h0) * u64::from(r2))
            + (u64::from(h1) * u64::from(r1))
            + (u64::from(h2) * u64::from(r0))
            + (u64::from(h3) * u64::from(s4))
            + (u64::from(h4) * u64::from(s3));

        let mut d3 = (u64::from(h0) * u64::from(r3))
            + (u64::from(h1) * u64::from(r2))
            + (u64::from(h2) * u64::from(r1))
            + (u64::from(h3) * u64::from(r0))
            + (u64::from(h4) * u64::from(s4));

        let mut d4 = (u64::from(h0) * u64::from(r4))
            + (u64::from(h1) * u64::from(r3))
            + (u64::from(h2) * u64::from(r2))
            + (u64::from(h3) * u64::from(r1))
            + (u64::from(h4) * u64::from(r0));

        // (partial) h %= p — propagate carries limb to limb, wrapping the
        // final carry back into h0 multiplied by 5.
        let mut c: u32;
        c = (d0 >> 26) as u32;
        h0 = d0 as u32 & 0x3ff_ffff;
        d1 += u64::from(c);

        c = (d1 >> 26) as u32;
        h1 = d1 as u32 & 0x3ff_ffff;
        d2 += u64::from(c);

        c = (d2 >> 26) as u32;
        h2 = d2 as u32 & 0x3ff_ffff;
        d3 += u64::from(c);

        c = (d3 >> 26) as u32;
        h3 = d3 as u32 & 0x3ff_ffff;
        d4 += u64::from(c);

        c = (d4 >> 26) as u32;
        h4 = d4 as u32 & 0x3ff_ffff;
        h0 += c * 5;

        c = h0 >> 26;
        h0 &= 0x3ff_ffff;
        h1 += c;

        self.h[0] = h0;
        self.h[1] = h1;
        self.h[2] = h2;
        self.h[3] = h3;
        self.h[4] = h4;
    }

    /// Finalize output producing a [`Tag`]
    ///
    /// Performs the final full reduction modulo p = 2^130 - 5 (in constant
    /// time, via masked selection), then adds the pad modulo 2^128.
    pub(crate) fn finalize_mut(&mut self) -> Tag {
        // fully carry h
        let mut h0 = self.h[0];
        let mut h1 = self.h[1];
        let mut h2 = self.h[2];
        let mut h3 = self.h[3];
        let mut h4 = self.h[4];

        let mut c: u32;
        c = h1 >> 26;
        h1 &= 0x3ff_ffff;
        h2 += c;

        c = h2 >> 26;
        h2 &= 0x3ff_ffff;
        h3 += c;

        c = h3 >> 26;
        h3 &= 0x3ff_ffff;
        h4 += c;

        c = h4 >> 26;
        h4 &= 0x3ff_ffff;
        h0 += c * 5;

        c = h0 >> 26;
        h0 &= 0x3ff_ffff;
        h1 += c;

        // compute h + -p
        let mut g0 = h0.wrapping_add(5);
        c = g0 >> 26;
        g0 &= 0x3ff_ffff;

        let mut g1 = h1.wrapping_add(c);
        c = g1 >> 26;
        g1 &= 0x3ff_ffff;

        let mut g2 = h2.wrapping_add(c);
        c = g2 >> 26;
        g2 &= 0x3ff_ffff;

        let mut g3 = h3.wrapping_add(c);
        c = g3 >> 26;
        g3 &= 0x3ff_ffff;

        let mut g4 = h4.wrapping_add(c).wrapping_sub(1 << 26);

        // select h if h < p, or h + -p if h >= p
        // (branch-free: the sign bit of g4 indicates whether the subtraction
        // borrowed, i.e. whether h < p)
        let mut mask = (g4 >> (32 - 1)).wrapping_sub(1);
        g0 &= mask;
        g1 &= mask;
        g2 &= mask;
        g3 &= mask;
        g4 &= mask;
        mask = !mask;
        h0 = (h0 & mask) | g0;
        h1 = (h1 & mask) | g1;
        h2 = (h2 & mask) | g2;
        h3 = (h3 & mask) | g3;
        h4 = (h4 & mask) | g4;

        // h = h % (2^128) — repack the five 26-bit limbs into four 32-bit words
        h0 |= h1 << 26;
        h1 = (h1 >> 6) | (h2 << 20);
        h2 = (h2 >> 12) | (h3 << 14);
        h3 = (h3 >> 18) | (h4 << 8);

        // h = mac = (h + pad) % (2^128)
        let mut f: u64;
        f = u64::from(h0) + u64::from(self.pad[0]);
        h0 = f as u32;

        f = u64::from(h1) + u64::from(self.pad[1]) + (f >> 32);
        h1 = f as u32;

        f = u64::from(h2) + u64::from(self.pad[2]) + (f >> 32);
        h2 = f as u32;

        f = u64::from(h3) + u64::from(self.pad[3]) + (f >> 32);
        h3 = f as u32;

        // Serialize the 128-bit result little-endian.
        let mut tag = Block::default();
        tag[0..4].copy_from_slice(&h0.to_le_bytes());
        tag[4..8].copy_from_slice(&h1.to_le_bytes());
        tag[8..12].copy_from_slice(&h2.to_le_bytes());
        tag[12..16].copy_from_slice(&h3.to_le_bytes());

        tag
    }
}
#[cfg(feature = "zeroize")]
impl Drop for State {
    /// Wipe the key material (`r`, `pad`) and accumulator (`h`) on drop.
    fn drop(&mut self) {
        use zeroize::Zeroize;
        self.r.zeroize();
        self.h.zeroize();
        self.pad.zeroize();
    }
}
impl BlockSizeUser for State {
    /// Poly1305 operates on 16-byte blocks.
    type BlockSize = U16;
}

impl ParBlocksSizeUser for State {
    /// The portable backend processes one block at a time.
    type ParBlocksSize = U1;
}
impl UhfBackend for State {
    fn proc_block(&mut self, block: &Block) {
        // Full 16-byte block (not a partial/terminated one).
        self.compute_block(block, false);
    }
}
impl UniversalHash for State {
    fn update_with_backend(
        &mut self,
        f: impl universal_hash::UhfClosure<BlockSize = Self::BlockSize>,
    ) {
        // This state is itself the (serial) backend; hand it to the closure.
        f.call(self);
    }

    /// Finalize output producing a [`Tag`]
    fn finalize(mut self) -> Tag {
        self.finalize_mut()
    }
}

190
vendor/poly1305/src/fuzz.rs vendored Normal file
View File

@@ -0,0 +1,190 @@
use universal_hash::{generic_array::GenericArray, UniversalHash};
use crate::{backend, Block, Key, BLOCK_SIZE};
/// Helper function for fuzzing the AVX2 backend.
///
/// Feeds the same key and message to both the AVX2 and soft backends and
/// asserts that they derive identical tags, panicking on any divergence.
pub fn fuzz_avx2(key: &Key, data: &[u8]) {
    let mut avx2 = backend::avx2::State::new(key);
    let mut soft = backend::soft::State::new(key);

    for (_i, chunk) in data.chunks(BLOCK_SIZE).enumerate() {
        if chunk.len() == BLOCK_SIZE {
            let block = GenericArray::from_slice(chunk);
            // SAFETY: this module is only compiled when the `avx2` target
            // feature is enabled (see the cfg gate in lib.rs).
            unsafe {
                avx2.compute_block(block, false);
            }
            soft.compute_block(block, false);
        } else {
            // Trailing short chunk: zero-pad and append the 0x01 terminator,
            // then process it as a partial block.
            let mut block = Block::default();
            block[..chunk.len()].copy_from_slice(chunk);
            block[chunk.len()] = 1;
            unsafe {
                avx2.compute_block(&block, true);
            }
            soft.compute_block(&block, true);
        }

        // Check that the same tag would be derived after each chunk.
        // We add the chunk number to the assertion for debugging.
        // When fuzzing, we skip this check, and just look at the end.
        #[cfg(test)]
        assert_eq!(
            (_i + 1, unsafe { avx2.clone().finalize() }),
            (_i + 1, soft.clone().finalize()),
        );
    }

    assert_eq!(unsafe { avx2.finalize() }, soft.finalize());
}
/// Split a fuzz-corpus entry into key (first 32 bytes) and message (rest).
///
/// NOTE(review): panics if `data` is shorter than 32 bytes — presumably all
/// checked-in corpus files satisfy this; verify when adding new entries.
fn avx2_fuzzer_test_case(data: &[u8]) {
    fuzz_avx2(data[0..32].into(), &data[32..]);
}
// Regression tests: each corpus file below is a fuzzer-found input (raw key
// followed by message bytes) that historically exposed a divergence between
// the AVX2 and soft backends.

#[test]
fn crash_0() {
    avx2_fuzzer_test_case(include_bytes!(
        "fuzz/id=000000,sig=06,src=000014,op=flip4,pos=11"
    ));
}

#[test]
fn crash_1() {
    avx2_fuzzer_test_case(include_bytes!(
        "fuzz/id=000001,sig=06,src=000006+000014,op=splice,rep=64"
    ));
}

#[test]
fn crash_2() {
    avx2_fuzzer_test_case(include_bytes!(
        "fuzz/id=000002,sig=06,src=000008+000014,op=splice,rep=32"
    ));
}

#[test]
fn crash_3() {
    // This input corresponds to a key of:
    //     r = 0x0f245bfc0f7fe5fc0fffff3400fb1c2b
    //     s = 0xffffff000001000040f6fff5ffffffff
    //
    // and input blocks:
    //     [0x01ea0010000a00ff108b72ffffffffffff, 0x01ffffffff245b74ff7fe5ffffff0040ff,
    //      0x01000a00ff108b7200ff04000002ffffff, 0x01ffffffffffffffffffff0000ffea0010,
    //      0x0180ffffffffffffffffffffffe3ffffff, 0x01ffffffffffffffffffffffffffffffff,
    //      0x01ffffffffffffffffffdfffff03ffffff, 0x01ffffffffff245b74ff7fe5ffffe4ffff,
    //      0x0112118b7d00ffeaffffffffffffffffff, 0x010e40eb10ffffffff1edd7f0010000a00]
    //
    // When this crash occurred, the software and AVX2 backends would generate the same
    // tags given the first seven blocks as input. Given the first eight blocks, the
    // following tags were generated:
    //
    //      |                              tag | low 128 bits of final accumulator
    // soft | 0x0004d01b9168ded528a9b541cc461988 - s = 0x0004d11b9167ded4e7b2b54bcc461989
    // avx2 | 0x0004d01b9168ded528a9b540cc461988 - s = 0x0004d11b9167ded4e7b2b54acc461989
    //                         difference = 0x0100000000
    //
    // This discrepancy was due to Unreduced130::reduce (as called during finalization)
    // not correctly reducing. During the reduction step, the upper limb's upper bits
    // (beyond 2^130) are added into the lower limb multiplied by 5 (for reduction modulo
    // 2^130 - 5). This is computed like so:
    //
    //     b = t_4 >> 26
    //     t_0 += b + (b << 2)
    //
    // It is possible for the upper limb to be 57+ bits; thus b << 2 can be 33+ bits.
    // However, the original reduction code was using _mm256_slli_epi32, which shifts
    // packed 32-bit integers; this was causing the upper bits of b to be lost. Switching
    // to _mm256_slli_epi64 (correctly treating b as a 64-bit field) solves the problem.
    avx2_fuzzer_test_case(include_bytes!(
        "fuzz/id=000003,sig=06,src=000003,op=havoc,rep=64"
    ));
}

#[test]
fn crash_4() {
    avx2_fuzzer_test_case(include_bytes!(
        "fuzz/id=000004,sig=06,src=000022+000005,op=splice,rep=32"
    ));
}

#[test]
fn crash_5() {
    avx2_fuzzer_test_case(include_bytes!(
        "fuzz/id=000005,sig=06,src=000008+000007,op=splice,rep=128"
    ));
}

#[test]
fn crash_6() {
    // This input corresponds to a key of:
    //     r = 0x04040404040404040404040404040404
    //     s = 0x0404040403ef04040404040404040404
    //
    // and input:
    //     [0x04, 0x04, 0x04, 0xf2]
    //
    // The input fits into a single short block:
    //     m = 0x01f2040404
    //
    // and we should have the following computation:
    //     tag = ((m * r) % p) + s
    //         = ((0x01f2040404 * 0x04040404040404040404040404040404) % p) + s
    //         = (0x7cfdfeffffffffffffffffffffffffff8302010 % ((1 << 130) - 5)) + s
    //         = 0x1f3f7fc + 0x0404040403ef04040404040404040404
    //         = 0x0404040403ef04040404040405f7fc00
    //
    // or in bytes:
    //     tag = [
    //         0x00, 0xfc, 0xf7, 0x05, 0x04, 0x04, 0x04, 0x04,
    //         0x04, 0x04, 0xef, 0x03, 0x04, 0x04, 0x04, 0x04,
    //     ];
    //
    // The crash was caused by the final modular reduction (in the `addkey` method of the
    // Goll-Gueron implementation, and `impl Add<Aligned130> for AdditionKey` here) not
    // fully carrying all bits. `Aligned130` is guaranteed to be a 130-bit integer, but is
    // not guaranteed to be an integer modulo 2^130 - 5.
    avx2_fuzzer_test_case(include_bytes!(
        "fuzz/id=000006,sig=06,src=000005,op=havoc,rep=8"
    ));
}

#[test]
fn crash_7() {
    avx2_fuzzer_test_case(include_bytes!(
        "fuzz/id=000007,sig=06,src=000024+000000,op=splice,rep=64"
    ));
}

#[test]
fn crash_8() {
    // This input corresponds to a key of:
    //     r = 0x0fff00fc0000000000000000006f91ab
    //     s = 0xffffffffffffffffffffffffffffffff
    //
    // and a single input block:
    //     0x01d4d4ffffffffffffffffffffffffffff
    //
    // We should have the following computation:
    //     tag = ((m * r) % p) + s
    //         = ((0x01d4d4ffffffffffffffffffffffffffff * 0x0fff00fc0000000000000000006f91ab) % p) + s
    //         = (0x1d4b7cf881ac00000000000000cc5320bf47ff03ffffffffffffffffff906e55 % ((1 << 130) - 5)) + s
    //         = 0xe3e65b3aa217000000000000008fd63d + 0xffffffffffffffffffffffffffffffff
    //         = 0x01e3e65b3aa217000000000000008fd63c mod 128
    //
    // or in bytes:
    //     tag = [
    //         0x3c, 0xd6, 0x8f, 0x00, 0x00, 0x00, 0x00, 0x00,
    //         0x00, 0x00, 0x17, 0xa2, 0x3a, 0x5b, 0xe6, 0xe3,
    //     ];
    //
    // The crash was caused by the final modular reduction (in the `addkey` method of the
    // Goll-Gueron implementation, and `impl Add<Aligned130> for AdditionKey` here). After
    // adding s, limbs 0 and 2 have carries, while limb 1 is 0xffffffff. The original
    // implementation only carried once, after which limb 1 has a carry, which was then
    // discarded. The fix was to always carry three times, to ensure that all potential
    // carry bits are carried.
    avx2_fuzzer_test_case(include_bytes!(
        "fuzz/id=000008,sig=06,src=000019,time=165655+000011,op=splice,rep=128"
    ));
}

View File

@@ -0,0 +1 @@
<01><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><E
FBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><E
FBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><E
FBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><E
FBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>

View File

@@ -0,0 +1 @@
ος

171
vendor/poly1305/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,171 @@
//! The Poly1305 universal hash function and message authentication code.
//!
//! # About
//!
//! Poly1305 is a universal hash function suitable for use as a one-time
//! authenticator and, when combined with a cipher, a message authentication
//! code (MAC).
//!
//! It takes a 32-byte one-time key and a message and produces a 16-byte tag,
//! which can be used to authenticate the message.
//!
//! Poly1305 is primarily notable for its use in the [`ChaCha20Poly1305`] and
//! [`XSalsa20Poly1305`] authenticated encryption algorithms.
//!
//! # Minimum Supported Rust Version
//!
//! Rust **1.56** or higher.
//!
//! Minimum supported Rust version may be changed in the future, but such
//! changes will be accompanied with a minor version bump.
//!
//! # Security Notes
//!
//! This crate has received one [security audit by NCC Group][audit], with no
//! significant findings. We would like to thank [MobileCoin] for funding the
//! audit.
//!
//! NOTE: the audit predates the AVX2 backend, which has not yet been audited.
//!
//! All implementations contained in the crate are designed to execute in constant
//! time, either by relying on hardware intrinsics (e.g. AVX2 on x86/x86_64), or
//! using a portable implementation which is only constant time on processors which
//! implement constant-time multiplication.
//!
//! It is not suitable for use on processors with a variable-time multiplication
//! operation (e.g. short circuit on multiply-by-zero / multiply-by-one, such as
//! certain 32-bit PowerPC CPUs and some non-ARM microcontrollers).
//!
//! [`ChaCha20Poly1305`]: https://docs.rs/chacha20poly1305
//! [`XSalsa20Poly1305`]: https://docs.rs/xsalsa20poly1305
//! [audit]: https://research.nccgroup.com/2020/02/26/public-report-rustcrypto-aes-gcm-and-chacha20poly1305-implementation-review/
//! [MobileCoin]: https://mobilecoin.com
#![no_std]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/8f1a9894/logo.svg",
html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/8f1a9894/logo.svg"
)]
#![warn(missing_docs, rust_2018_idioms)]
#[cfg(feature = "std")]
extern crate std;
pub use universal_hash;
use universal_hash::{
consts::{U16, U32},
crypto_common::{BlockSizeUser, KeySizeUser},
generic_array::GenericArray,
KeyInit, UniversalHash,
};
mod backend;
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
not(poly1305_force_soft),
target_feature = "avx2", // Fuzz tests bypass AVX2 autodetection code
any(fuzzing, test)
))]
mod fuzz;
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
not(poly1305_force_soft)
))]
use crate::backend::autodetect::State;
#[cfg(not(all(
any(target_arch = "x86", target_arch = "x86_64"),
not(poly1305_force_soft)
)))]
use crate::backend::soft::State;
/// Size of a Poly1305 key
pub const KEY_SIZE: usize = 32;

/// Size of the blocks Poly1305 acts upon
pub const BLOCK_SIZE: usize = 16;

/// Poly1305 keys (32-bytes)
pub type Key = universal_hash::Key<Poly1305>;

/// Poly1305 blocks (16-bytes)
pub type Block = universal_hash::Block<Poly1305>;

/// Poly1305 tags (16-bytes).
///
/// Same representation as [`Block`]: a 16-byte array.
pub type Tag = universal_hash::Block<Poly1305>;
/// The Poly1305 universal hash function.
///
/// Note that Poly1305 is not a traditional MAC and is single-use only
/// (a.k.a. "one-time authenticator").
///
/// For this reason it doesn't impl the `crypto_mac::Mac` trait.
#[derive(Clone)]
pub struct Poly1305 {
    // Backend state: `autodetect::State` on x86/x86_64 (unless
    // `poly1305_force_soft` is set), `soft::State` otherwise — selected by
    // the cfg'd `use` statements earlier in this file.
    state: State,
}
impl KeySizeUser for Poly1305 {
    /// Poly1305 takes a 32-byte one-time key.
    type KeySize = U32;
}

impl KeyInit for Poly1305 {
    /// Initialize Poly1305 with the given key
    fn new(key: &Key) -> Poly1305 {
        Poly1305 {
            state: State::new(key),
        }
    }
}

impl BlockSizeUser for Poly1305 {
    /// Poly1305 operates on 16-byte blocks.
    type BlockSize = U16;
}
impl UniversalHash for Poly1305 {
    fn update_with_backend(
        &mut self,
        f: impl universal_hash::UhfClosure<BlockSize = Self::BlockSize>,
    ) {
        // Delegate to the selected backend state.
        self.state.update_with_backend(f);
    }

    /// Get the hashed output
    fn finalize(self) -> Tag {
        self.state.finalize()
    }
}
impl Poly1305 {
    /// Compute unpadded Poly1305 for the given input data.
    ///
    /// The main use case for this is XSalsa20Poly1305.
    pub fn compute_unpadded(mut self, data: &[u8]) -> Tag {
        // Stream every complete 16-byte block straight into the state.
        let mut full_blocks = data.chunks_exact(BLOCK_SIZE);
        for full in &mut full_blocks {
            self.state.compute_block(GenericArray::from_slice(full), false);
        }

        // Any trailing bytes form one final short block: zero-padded to the
        // block size with a 0x01 terminator byte appended after the data.
        let trailing = full_blocks.remainder();
        if !trailing.is_empty() {
            let mut padded = Block::default();
            padded[..trailing.len()].copy_from_slice(trailing);
            padded[trailing.len()] = 1;
            self.state.compute_block(&padded, true);
        }

        self.state.finalize()
    }
}
// Opaque `Debug` impl for `Poly1305` (prints the type name only, so no
// key-dependent state can leak into logs).
opaque_debug::implement!(Poly1305);
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
not(poly1305_force_soft),
target_feature = "avx2", // Fuzz tests bypass AVX2 autodetection code
any(fuzzing, test)
))]
pub use crate::fuzz::fuzz_avx2;