chore: checkpoint before Python removal

commit e568ddf82a
parent 683cec9307
Date: 2026-03-26 22:33:59 +00:00
29972 changed files with 11269302 additions and 2 deletions

vendor/chacha20/src/backends/avx2.rs vendored Normal file

@@ -0,0 +1,251 @@
use crate::{Block, StreamClosure, Unsigned, STATE_WORDS};
use cipher::{
consts::{U4, U64},
BlockSizeUser, ParBlocks, ParBlocksSizeUser, StreamBackend,
};
use core::marker::PhantomData;
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
/// Number of blocks processed in parallel.
const PAR_BLOCKS: usize = 4;
/// Number of `__m256i` needed to hold the counter rows for `PAR_BLOCKS` blocks
/// (two blocks per 256-bit vector).
const N: usize = PAR_BLOCKS / 2;
#[inline]
#[target_feature(enable = "avx2")]
pub(crate) unsafe fn inner<R, F>(state: &mut [u32; STATE_WORDS], f: F)
where
R: Unsigned,
F: StreamClosure<BlockSize = U64>,
{
let state_ptr = state.as_ptr() as *const __m128i;
let v = [
_mm256_broadcastsi128_si256(_mm_loadu_si128(state_ptr.add(0))),
_mm256_broadcastsi128_si256(_mm_loadu_si128(state_ptr.add(1))),
_mm256_broadcastsi128_si256(_mm_loadu_si128(state_ptr.add(2))),
];
let mut c = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_ptr.add(3)));
c = _mm256_add_epi32(c, _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 0));
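    // After the broadcast both 128-bit lanes of `c` hold the same counter row;
    // the add above bumps the counter word (word 12) of the high lane only, so
    // lane 0 covers block n and lane 1 block n + 1. The loop below advances
    // both lanes by two, leaving `ctr[i]` with the rows for blocks 2i, 2i + 1.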
let mut ctr = [c; N];
for i in 0..N {
ctr[i] = c;
c = _mm256_add_epi32(c, _mm256_set_epi32(0, 0, 0, 2, 0, 0, 0, 2));
}
let mut backend = Backend::<R> {
v,
ctr,
_pd: PhantomData,
};
f.call(&mut backend);
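    // Word 12 of the low lane of `ctr[0]` is the next unused block counter;
    // write it back so the core state stays in sync.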
state[12] = _mm256_extract_epi32(backend.ctr[0], 0) as u32;
}
struct Backend<R: Unsigned> {
v: [__m256i; 3],
ctr: [__m256i; N],
_pd: PhantomData<R>,
}
impl<R: Unsigned> BlockSizeUser for Backend<R> {
type BlockSize = U64;
}
impl<R: Unsigned> ParBlocksSizeUser for Backend<R> {
type ParBlocksSize = U4;
}
impl<R: Unsigned> StreamBackend for Backend<R> {
#[inline(always)]
fn gen_ks_block(&mut self, block: &mut Block) {
unsafe {
let res = rounds::<R>(&self.v, &self.ctr);
for c in self.ctr.iter_mut() {
*c = _mm256_add_epi32(*c, _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 1));
}
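            // `res[0]` interleaves two blocks: the even 128-bit halves belong
            // to the low-lane block, which is the single block requested here.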
let res0: [__m128i; 8] = core::mem::transmute(res[0]);
let block_ptr = block.as_mut_ptr() as *mut __m128i;
for i in 0..4 {
_mm_storeu_si128(block_ptr.add(i), res0[2 * i]);
}
}
}
#[inline(always)]
fn gen_par_ks_blocks(&mut self, blocks: &mut ParBlocks<Self>) {
unsafe {
let vs = rounds::<R>(&self.v, &self.ctr);
let pb = PAR_BLOCKS as i32;
for c in self.ctr.iter_mut() {
*c = _mm256_add_epi32(*c, _mm256_set_epi32(0, 0, 0, pb, 0, 0, 0, pb));
}
let mut block_ptr = blocks.as_mut_ptr() as *mut __m128i;
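            // De-interleave: the low halves (`t[2 * i]`) of each 256-bit row
            // form one 64-byte block and the high halves (`t[2 * i + 1]`) the
            // next, so each result row is split across two adjacent blocks.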
for v in vs {
let t: [__m128i; 8] = core::mem::transmute(v);
for i in 0..4 {
_mm_storeu_si128(block_ptr.add(i), t[2 * i]);
_mm_storeu_si128(block_ptr.add(4 + i), t[2 * i + 1]);
}
block_ptr = block_ptr.add(8);
}
}
}
}
#[inline]
#[target_feature(enable = "avx2")]
unsafe fn rounds<R: Unsigned>(v: &[__m256i; 3], c: &[__m256i; N]) -> [[__m256i; 4]; N] {
let mut vs: [[__m256i; 4]; N] = [[_mm256_setzero_si256(); 4]; N];
for i in 0..N {
vs[i] = [v[0], v[1], v[2], c[i]];
}
for _ in 0..R::USIZE {
double_quarter_round(&mut vs);
}
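    // Feed-forward: add the original input words (and per-vector counters)
    // back into the round output, as the ChaCha construction requires.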
for i in 0..N {
for j in 0..3 {
vs[i][j] = _mm256_add_epi32(vs[i][j], v[j]);
}
vs[i][3] = _mm256_add_epi32(vs[i][3], c[i]);
}
vs
}
#[inline]
#[target_feature(enable = "avx2")]
unsafe fn double_quarter_round(v: &mut [[__m256i; 4]; N]) {
add_xor_rot(v);
rows_to_cols(v);
add_xor_rot(v);
cols_to_rows(v);
}
/// The goal of this function is to transform the state words from:
/// ```text
/// [a0, a1, a2, a3] [ 0, 1, 2, 3]
/// [b0, b1, b2, b3] == [ 4, 5, 6, 7]
/// [c0, c1, c2, c3] [ 8, 9, 10, 11]
/// [d0, d1, d2, d3] [12, 13, 14, 15]
/// ```
///
/// to:
/// ```text
/// [a0, a1, a2, a3] [ 0, 1, 2, 3]
/// [b1, b2, b3, b0] == [ 5, 6, 7, 4]
/// [c2, c3, c0, c1] [10, 11, 8, 9]
/// [d3, d0, d1, d2] [15, 12, 13, 14]
/// ```
///
/// so that we can apply [`add_xor_rot`] to the resulting columns, and have it compute the
/// "diagonal rounds" (as defined in RFC 7539) in parallel. In practice, this shuffle is
/// non-optimal: the last state word to be altered in `add_xor_rot` is `b`, so the shuffle
/// blocks on the result of `b` being calculated.
///
/// We can optimize this by observing that the four quarter rounds in `add_xor_rot` are
/// data-independent: they only access a single column of the state, and thus the order of
/// the columns does not matter. We therefore instead shuffle the other three state words,
/// to obtain the following equivalent layout:
/// ```text
/// [a3, a0, a1, a2] [ 3, 0, 1, 2]
/// [b0, b1, b2, b3] == [ 4, 5, 6, 7]
/// [c1, c2, c3, c0] [ 9, 10, 11, 8]
/// [d2, d3, d0, d1] [14, 15, 12, 13]
/// ```
///
/// See https://github.com/sneves/blake2-avx2/pull/4 for additional details. The earliest
/// known occurrence of this optimization is in floodyberry's SSE4 ChaCha code from 2014:
/// - https://github.com/floodyberry/chacha-opt/blob/0ab65cb99f5016633b652edebaf3691ceb4ff753/chacha_blocks_ssse3-64.S#L639-L643
#[inline]
#[target_feature(enable = "avx2")]
unsafe fn rows_to_cols(vs: &mut [[__m256i; 4]; N]) {
// c >>>= 32; d >>>= 64; a >>>= 96;
for [a, _, c, d] in vs {
*c = _mm256_shuffle_epi32(*c, 0b_00_11_10_01); // _MM_SHUFFLE(0, 3, 2, 1)
*d = _mm256_shuffle_epi32(*d, 0b_01_00_11_10); // _MM_SHUFFLE(1, 0, 3, 2)
*a = _mm256_shuffle_epi32(*a, 0b_10_01_00_11); // _MM_SHUFFLE(2, 1, 0, 3)
}
}
/// The goal of this function is to transform the state words from:
/// ```text
/// [a3, a0, a1, a2] [ 3, 0, 1, 2]
/// [b0, b1, b2, b3] == [ 4, 5, 6, 7]
/// [c1, c2, c3, c0] [ 9, 10, 11, 8]
/// [d2, d3, d0, d1] [14, 15, 12, 13]
/// ```
///
/// to:
/// ```text
/// [a0, a1, a2, a3] [ 0, 1, 2, 3]
/// [b0, b1, b2, b3] == [ 4, 5, 6, 7]
/// [c0, c1, c2, c3] [ 8, 9, 10, 11]
/// [d0, d1, d2, d3] [12, 13, 14, 15]
/// ```
///
/// reversing the transformation of [`rows_to_cols`].
#[inline]
#[target_feature(enable = "avx2")]
unsafe fn cols_to_rows(vs: &mut [[__m256i; 4]; N]) {
// c <<<= 32; d <<<= 64; a <<<= 96;
for [a, _, c, d] in vs {
*c = _mm256_shuffle_epi32(*c, 0b_10_01_00_11); // _MM_SHUFFLE(2, 1, 0, 3)
*d = _mm256_shuffle_epi32(*d, 0b_01_00_11_10); // _MM_SHUFFLE(1, 0, 3, 2)
*a = _mm256_shuffle_epi32(*a, 0b_00_11_10_01); // _MM_SHUFFLE(0, 3, 2, 1)
}
}
#[inline]
#[target_feature(enable = "avx2")]
unsafe fn add_xor_rot(vs: &mut [[__m256i; 4]; N]) {
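    // The 16- and 8-bit rotations move whole bytes, so each is a single byte
    // shuffle (`_mm256_shuffle_epi8`) using the masks below; the 12- and 7-bit
    // rotations fall back to the generic shift-shift-xor pattern.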
let rol16_mask = _mm256_set_epi64x(
0x0d0c_0f0e_0908_0b0a,
0x0504_0706_0100_0302,
0x0d0c_0f0e_0908_0b0a,
0x0504_0706_0100_0302,
);
let rol8_mask = _mm256_set_epi64x(
0x0e0d_0c0f_0a09_080b,
0x0605_0407_0201_0003,
0x0e0d_0c0f_0a09_080b,
0x0605_0407_0201_0003,
);
// a += b; d ^= a; d <<<= (16, 16, 16, 16);
for [a, b, _, d] in vs.iter_mut() {
*a = _mm256_add_epi32(*a, *b);
*d = _mm256_xor_si256(*d, *a);
*d = _mm256_shuffle_epi8(*d, rol16_mask);
}
// c += d; b ^= c; b <<<= (12, 12, 12, 12);
for [_, b, c, d] in vs.iter_mut() {
*c = _mm256_add_epi32(*c, *d);
*b = _mm256_xor_si256(*b, *c);
*b = _mm256_xor_si256(_mm256_slli_epi32(*b, 12), _mm256_srli_epi32(*b, 20));
}
// a += b; d ^= a; d <<<= (8, 8, 8, 8);
for [a, b, _, d] in vs.iter_mut() {
*a = _mm256_add_epi32(*a, *b);
*d = _mm256_xor_si256(*d, *a);
*d = _mm256_shuffle_epi8(*d, rol8_mask);
}
// c += d; b ^= c; b <<<= (7, 7, 7, 7);
for [_, b, c, d] in vs.iter_mut() {
*c = _mm256_add_epi32(*c, *d);
*b = _mm256_xor_si256(*b, *c);
*b = _mm256_xor_si256(_mm256_slli_epi32(*b, 7), _mm256_srli_epi32(*b, 25));
}
}

vendor/chacha20/src/backends/neon.rs vendored Normal file

@@ -0,0 +1,356 @@
//! NEON-optimized implementation for aarch64 CPUs.
//!
//! Adapted from the Crypto++ `chacha_simd` implementation by Jack Lloyd and
//! Jeffrey Walton (public domain).
use crate::{Block, StreamClosure, Unsigned, STATE_WORDS};
use cipher::{
consts::{U4, U64},
BlockSizeUser, ParBlocks, ParBlocksSizeUser, StreamBackend,
};
use core::{arch::aarch64::*, marker::PhantomData};
#[inline]
#[target_feature(enable = "neon")]
pub(crate) unsafe fn inner<R, F>(state: &mut [u32; STATE_WORDS], f: F)
where
R: Unsigned,
F: StreamClosure<BlockSize = U64>,
{
let mut backend = Backend::<R> {
state: [
vld1q_u32(state.as_ptr().offset(0)),
vld1q_u32(state.as_ptr().offset(4)),
vld1q_u32(state.as_ptr().offset(8)),
vld1q_u32(state.as_ptr().offset(12)),
],
_pd: PhantomData,
};
f.call(&mut backend);
vst1q_u32(state.as_mut_ptr().offset(12), backend.state[3]);
}
struct Backend<R: Unsigned> {
state: [uint32x4_t; 4],
_pd: PhantomData<R>,
}
impl<R: Unsigned> BlockSizeUser for Backend<R> {
type BlockSize = U64;
}
impl<R: Unsigned> ParBlocksSizeUser for Backend<R> {
type ParBlocksSize = U4;
}
macro_rules! add64 {
($a:expr, $b:expr) => {
vreinterpretq_u32_u64(vaddq_u64(
vreinterpretq_u64_u32($a),
vreinterpretq_u64_u32($b),
))
};
}
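// `add64!` adds with 64-bit lanes so that incrementing word 12 carries into
// word 13, treating the low half of row 3 as a little-endian 64-bit counter.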
impl<R: Unsigned> StreamBackend for Backend<R> {
#[inline(always)]
fn gen_ks_block(&mut self, block: &mut Block) {
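        // Generate four blocks (which advances the counter by four), keep the
        // first, then rewind the counter to the saved row plus one block.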
let state3 = self.state[3];
let mut par = ParBlocks::<Self>::default();
self.gen_par_ks_blocks(&mut par);
*block = par[0];
unsafe {
self.state[3] = add64!(state3, vld1q_u32([1, 0, 0, 0].as_ptr()));
}
}
#[inline(always)]
fn gen_par_ks_blocks(&mut self, blocks: &mut ParBlocks<Self>) {
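        // In `rotate_left!`, the 8-bit arm uses a byte-table lookup and the
        // 16-bit arm a halfword reversal (both whole-byte moves); other
        // amounts use the generic shift-shift-or pattern. `extract!` rotates
        // the lanes of a vector so that lane `$s` moves to lane 0.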
macro_rules! rotate_left {
($v:ident, 8) => {{
let maskb = [3u8, 0, 1, 2, 7, 4, 5, 6, 11, 8, 9, 10, 15, 12, 13, 14];
let mask = vld1q_u8(maskb.as_ptr());
vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32($v), mask))
}};
($v:ident, 16) => {
vreinterpretq_u32_u16(vrev32q_u16(vreinterpretq_u16_u32($v)))
};
($v:ident, $r:literal) => {
vorrq_u32(vshlq_n_u32($v, $r), vshrq_n_u32($v, 32 - $r))
};
}
macro_rules! extract {
($v:ident, $s:literal) => {
vextq_u32($v, $v, $s)
};
}
unsafe {
let ctrs = [
vld1q_u32([1, 0, 0, 0].as_ptr()),
vld1q_u32([2, 0, 0, 0].as_ptr()),
vld1q_u32([3, 0, 0, 0].as_ptr()),
vld1q_u32([4, 0, 0, 0].as_ptr()),
];
let mut r0_0 = self.state[0];
let mut r0_1 = self.state[1];
let mut r0_2 = self.state[2];
let mut r0_3 = self.state[3];
let mut r1_0 = self.state[0];
let mut r1_1 = self.state[1];
let mut r1_2 = self.state[2];
let mut r1_3 = add64!(r0_3, ctrs[0]);
let mut r2_0 = self.state[0];
let mut r2_1 = self.state[1];
let mut r2_2 = self.state[2];
let mut r2_3 = add64!(r0_3, ctrs[1]);
let mut r3_0 = self.state[0];
let mut r3_1 = self.state[1];
let mut r3_2 = self.state[2];
let mut r3_3 = add64!(r0_3, ctrs[2]);
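            // Four blocks are processed in interleaved fashion: `rK_0..rK_3`
            // are the four state rows of block K, with the counter row
            // pre-incremented per block. Each iteration below is one double
            // round: a column quarter round, a shuffle onto the diagonals, a
            // diagonal quarter round, and the inverse shuffle.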
for _ in 0..R::USIZE {
r0_0 = vaddq_u32(r0_0, r0_1);
r1_0 = vaddq_u32(r1_0, r1_1);
r2_0 = vaddq_u32(r2_0, r2_1);
r3_0 = vaddq_u32(r3_0, r3_1);
r0_3 = veorq_u32(r0_3, r0_0);
r1_3 = veorq_u32(r1_3, r1_0);
r2_3 = veorq_u32(r2_3, r2_0);
r3_3 = veorq_u32(r3_3, r3_0);
r0_3 = rotate_left!(r0_3, 16);
r1_3 = rotate_left!(r1_3, 16);
r2_3 = rotate_left!(r2_3, 16);
r3_3 = rotate_left!(r3_3, 16);
r0_2 = vaddq_u32(r0_2, r0_3);
r1_2 = vaddq_u32(r1_2, r1_3);
r2_2 = vaddq_u32(r2_2, r2_3);
r3_2 = vaddq_u32(r3_2, r3_3);
r0_1 = veorq_u32(r0_1, r0_2);
r1_1 = veorq_u32(r1_1, r1_2);
r2_1 = veorq_u32(r2_1, r2_2);
r3_1 = veorq_u32(r3_1, r3_2);
r0_1 = rotate_left!(r0_1, 12);
r1_1 = rotate_left!(r1_1, 12);
r2_1 = rotate_left!(r2_1, 12);
r3_1 = rotate_left!(r3_1, 12);
r0_0 = vaddq_u32(r0_0, r0_1);
r1_0 = vaddq_u32(r1_0, r1_1);
r2_0 = vaddq_u32(r2_0, r2_1);
r3_0 = vaddq_u32(r3_0, r3_1);
r0_3 = veorq_u32(r0_3, r0_0);
r1_3 = veorq_u32(r1_3, r1_0);
r2_3 = veorq_u32(r2_3, r2_0);
r3_3 = veorq_u32(r3_3, r3_0);
r0_3 = rotate_left!(r0_3, 8);
r1_3 = rotate_left!(r1_3, 8);
r2_3 = rotate_left!(r2_3, 8);
r3_3 = rotate_left!(r3_3, 8);
r0_2 = vaddq_u32(r0_2, r0_3);
r1_2 = vaddq_u32(r1_2, r1_3);
r2_2 = vaddq_u32(r2_2, r2_3);
r3_2 = vaddq_u32(r3_2, r3_3);
r0_1 = veorq_u32(r0_1, r0_2);
r1_1 = veorq_u32(r1_1, r1_2);
r2_1 = veorq_u32(r2_1, r2_2);
r3_1 = veorq_u32(r3_1, r3_2);
r0_1 = rotate_left!(r0_1, 7);
r1_1 = rotate_left!(r1_1, 7);
r2_1 = rotate_left!(r2_1, 7);
r3_1 = rotate_left!(r3_1, 7);
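                // Shuffle rows so the next quarter round acts on the diagonals.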
r0_1 = extract!(r0_1, 1);
r0_2 = extract!(r0_2, 2);
r0_3 = extract!(r0_3, 3);
r1_1 = extract!(r1_1, 1);
r1_2 = extract!(r1_2, 2);
r1_3 = extract!(r1_3, 3);
r2_1 = extract!(r2_1, 1);
r2_2 = extract!(r2_2, 2);
r2_3 = extract!(r2_3, 3);
r3_1 = extract!(r3_1, 1);
r3_2 = extract!(r3_2, 2);
r3_3 = extract!(r3_3, 3);
r0_0 = vaddq_u32(r0_0, r0_1);
r1_0 = vaddq_u32(r1_0, r1_1);
r2_0 = vaddq_u32(r2_0, r2_1);
r3_0 = vaddq_u32(r3_0, r3_1);
r0_3 = veorq_u32(r0_3, r0_0);
r1_3 = veorq_u32(r1_3, r1_0);
r2_3 = veorq_u32(r2_3, r2_0);
r3_3 = veorq_u32(r3_3, r3_0);
r0_3 = rotate_left!(r0_3, 16);
r1_3 = rotate_left!(r1_3, 16);
r2_3 = rotate_left!(r2_3, 16);
r3_3 = rotate_left!(r3_3, 16);
r0_2 = vaddq_u32(r0_2, r0_3);
r1_2 = vaddq_u32(r1_2, r1_3);
r2_2 = vaddq_u32(r2_2, r2_3);
r3_2 = vaddq_u32(r3_2, r3_3);
r0_1 = veorq_u32(r0_1, r0_2);
r1_1 = veorq_u32(r1_1, r1_2);
r2_1 = veorq_u32(r2_1, r2_2);
r3_1 = veorq_u32(r3_1, r3_2);
r0_1 = rotate_left!(r0_1, 12);
r1_1 = rotate_left!(r1_1, 12);
r2_1 = rotate_left!(r2_1, 12);
r3_1 = rotate_left!(r3_1, 12);
r0_0 = vaddq_u32(r0_0, r0_1);
r1_0 = vaddq_u32(r1_0, r1_1);
r2_0 = vaddq_u32(r2_0, r2_1);
r3_0 = vaddq_u32(r3_0, r3_1);
r0_3 = veorq_u32(r0_3, r0_0);
r1_3 = veorq_u32(r1_3, r1_0);
r2_3 = veorq_u32(r2_3, r2_0);
r3_3 = veorq_u32(r3_3, r3_0);
r0_3 = rotate_left!(r0_3, 8);
r1_3 = rotate_left!(r1_3, 8);
r2_3 = rotate_left!(r2_3, 8);
r3_3 = rotate_left!(r3_3, 8);
r0_2 = vaddq_u32(r0_2, r0_3);
r1_2 = vaddq_u32(r1_2, r1_3);
r2_2 = vaddq_u32(r2_2, r2_3);
r3_2 = vaddq_u32(r3_2, r3_3);
r0_1 = veorq_u32(r0_1, r0_2);
r1_1 = veorq_u32(r1_1, r1_2);
r2_1 = veorq_u32(r2_1, r2_2);
r3_1 = veorq_u32(r3_1, r3_2);
r0_1 = rotate_left!(r0_1, 7);
r1_1 = rotate_left!(r1_1, 7);
r2_1 = rotate_left!(r2_1, 7);
r3_1 = rotate_left!(r3_1, 7);
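                // Shuffle the diagonals back into row order.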
r0_1 = extract!(r0_1, 3);
r0_2 = extract!(r0_2, 2);
r0_3 = extract!(r0_3, 1);
r1_1 = extract!(r1_1, 3);
r1_2 = extract!(r1_2, 2);
r1_3 = extract!(r1_3, 1);
r2_1 = extract!(r2_1, 3);
r2_2 = extract!(r2_2, 2);
r2_3 = extract!(r2_3, 1);
r3_1 = extract!(r3_1, 3);
r3_2 = extract!(r3_2, 2);
r3_3 = extract!(r3_3, 1);
}
r0_0 = vaddq_u32(r0_0, self.state[0]);
r0_1 = vaddq_u32(r0_1, self.state[1]);
r0_2 = vaddq_u32(r0_2, self.state[2]);
r0_3 = vaddq_u32(r0_3, self.state[3]);
r1_0 = vaddq_u32(r1_0, self.state[0]);
r1_1 = vaddq_u32(r1_1, self.state[1]);
r1_2 = vaddq_u32(r1_2, self.state[2]);
r1_3 = vaddq_u32(r1_3, self.state[3]);
r1_3 = add64!(r1_3, ctrs[0]);
r2_0 = vaddq_u32(r2_0, self.state[0]);
r2_1 = vaddq_u32(r2_1, self.state[1]);
r2_2 = vaddq_u32(r2_2, self.state[2]);
r2_3 = vaddq_u32(r2_3, self.state[3]);
r2_3 = add64!(r2_3, ctrs[1]);
r3_0 = vaddq_u32(r3_0, self.state[0]);
r3_1 = vaddq_u32(r3_1, self.state[1]);
r3_2 = vaddq_u32(r3_2, self.state[2]);
r3_3 = vaddq_u32(r3_3, self.state[3]);
r3_3 = add64!(r3_3, ctrs[2]);
vst1q_u8(blocks[0].as_mut_ptr().offset(0), vreinterpretq_u8_u32(r0_0));
vst1q_u8(
blocks[0].as_mut_ptr().offset(16),
vreinterpretq_u8_u32(r0_1),
);
vst1q_u8(
blocks[0].as_mut_ptr().offset(2 * 16),
vreinterpretq_u8_u32(r0_2),
);
vst1q_u8(
blocks[0].as_mut_ptr().offset(3 * 16),
vreinterpretq_u8_u32(r0_3),
);
vst1q_u8(blocks[1].as_mut_ptr().offset(0), vreinterpretq_u8_u32(r1_0));
vst1q_u8(
blocks[1].as_mut_ptr().offset(16),
vreinterpretq_u8_u32(r1_1),
);
vst1q_u8(
blocks[1].as_mut_ptr().offset(2 * 16),
vreinterpretq_u8_u32(r1_2),
);
vst1q_u8(
blocks[1].as_mut_ptr().offset(3 * 16),
vreinterpretq_u8_u32(r1_3),
);
vst1q_u8(blocks[2].as_mut_ptr().offset(0), vreinterpretq_u8_u32(r2_0));
vst1q_u8(
blocks[2].as_mut_ptr().offset(16),
vreinterpretq_u8_u32(r2_1),
);
vst1q_u8(
blocks[2].as_mut_ptr().offset(2 * 16),
vreinterpretq_u8_u32(r2_2),
);
vst1q_u8(
blocks[2].as_mut_ptr().offset(3 * 16),
vreinterpretq_u8_u32(r2_3),
);
vst1q_u8(blocks[3].as_mut_ptr().offset(0), vreinterpretq_u8_u32(r3_0));
vst1q_u8(
blocks[3].as_mut_ptr().offset(16),
vreinterpretq_u8_u32(r3_1),
);
vst1q_u8(
blocks[3].as_mut_ptr().offset(2 * 16),
vreinterpretq_u8_u32(r3_2),
);
vst1q_u8(
blocks[3].as_mut_ptr().offset(3 * 16),
vreinterpretq_u8_u32(r3_3),
);
self.state[3] = add64!(self.state[3], ctrs[3]);
}
}
}

vendor/chacha20/src/backends/soft.rs vendored Normal file

@@ -0,0 +1,73 @@
//! Portable implementation which does not rely on architecture-specific
//! intrinsics.
use crate::{Block, ChaChaCore, Unsigned, STATE_WORDS};
use cipher::{
consts::{U1, U64},
BlockSizeUser, ParBlocksSizeUser, StreamBackend,
};
pub(crate) struct Backend<'a, R: Unsigned>(pub(crate) &'a mut ChaChaCore<R>);
impl<'a, R: Unsigned> BlockSizeUser for Backend<'a, R> {
type BlockSize = U64;
}
impl<'a, R: Unsigned> ParBlocksSizeUser for Backend<'a, R> {
type ParBlocksSize = U1;
}
impl<'a, R: Unsigned> StreamBackend for Backend<'a, R> {
#[inline(always)]
fn gen_ks_block(&mut self, block: &mut Block) {
let res = run_rounds::<R>(&self.0.state);
self.0.state[12] = self.0.state[12].wrapping_add(1);
for (chunk, val) in block.chunks_exact_mut(4).zip(res.iter()) {
chunk.copy_from_slice(&val.to_le_bytes());
}
}
}
#[inline(always)]
fn run_rounds<R: Unsigned>(state: &[u32; STATE_WORDS]) -> [u32; STATE_WORDS] {
let mut res = *state;
for _ in 0..R::USIZE {
// column rounds
quarter_round(0, 4, 8, 12, &mut res);
quarter_round(1, 5, 9, 13, &mut res);
quarter_round(2, 6, 10, 14, &mut res);
quarter_round(3, 7, 11, 15, &mut res);
// diagonal rounds
quarter_round(0, 5, 10, 15, &mut res);
quarter_round(1, 6, 11, 12, &mut res);
quarter_round(2, 7, 8, 13, &mut res);
quarter_round(3, 4, 9, 14, &mut res);
}
for (s1, s0) in res.iter_mut().zip(state.iter()) {
*s1 = s1.wrapping_add(*s0);
}
res
}
/// The ChaCha20 quarter round function
fn quarter_round(a: usize, b: usize, c: usize, d: usize, state: &mut [u32; STATE_WORDS]) {
state[a] = state[a].wrapping_add(state[b]);
state[d] ^= state[a];
state[d] = state[d].rotate_left(16);
state[c] = state[c].wrapping_add(state[d]);
state[b] ^= state[c];
state[b] = state[b].rotate_left(12);
state[a] = state[a].wrapping_add(state[b]);
state[d] ^= state[a];
state[d] = state[d].rotate_left(8);
state[c] = state[c].wrapping_add(state[d]);
state[b] ^= state[c];
state[b] = state[b].rotate_left(7);
}
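// A small sanity check added here for illustration (not part of the upstream
// crate): it exercises `quarter_round` against the quarter-round test vector
// from RFC 8439, Section 2.1.1.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn quarter_round_rfc8439_vector() {
        // Run the quarter round on words (0, 1, 2, 3) of an otherwise zeroed
        // state and compare against the RFC's expected output.
        let mut state = [0u32; STATE_WORDS];
        state[0] = 0x11111111;
        state[1] = 0x01020304;
        state[2] = 0x9b8d6f43;
        state[3] = 0x01234567;
        quarter_round(0, 1, 2, 3, &mut state);
        assert_eq!(state[0], 0xea2a92f4);
        assert_eq!(state[1], 0xcb1cf8ce);
        assert_eq!(state[2], 0x4581472e);
        assert_eq!(state[3], 0x5881c4bb);
    }
}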

vendor/chacha20/src/backends/sse2.rs vendored Normal file

@@ -0,0 +1,180 @@
use crate::{Block, StreamClosure, Unsigned, STATE_WORDS};
use cipher::{
consts::{U1, U64},
BlockSizeUser, ParBlocksSizeUser, StreamBackend,
};
use core::marker::PhantomData;
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
#[inline]
#[target_feature(enable = "sse2")]
pub(crate) unsafe fn inner<R, F>(state: &mut [u32; STATE_WORDS], f: F)
where
R: Unsigned,
F: StreamClosure<BlockSize = U64>,
{
let state_ptr = state.as_ptr() as *const __m128i;
let mut backend = Backend::<R> {
v: [
_mm_loadu_si128(state_ptr.add(0)),
_mm_loadu_si128(state_ptr.add(1)),
_mm_loadu_si128(state_ptr.add(2)),
_mm_loadu_si128(state_ptr.add(3)),
],
_pd: PhantomData,
};
f.call(&mut backend);
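    // The low word of `v[3]` is the next unused block counter; write it back
    // so the core state stays in sync.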
state[12] = _mm_cvtsi128_si32(backend.v[3]) as u32;
}
struct Backend<R: Unsigned> {
v: [__m128i; 4],
_pd: PhantomData<R>,
}
impl<R: Unsigned> BlockSizeUser for Backend<R> {
type BlockSize = U64;
}
impl<R: Unsigned> ParBlocksSizeUser for Backend<R> {
type ParBlocksSize = U1;
}
impl<R: Unsigned> StreamBackend for Backend<R> {
#[inline(always)]
fn gen_ks_block(&mut self, block: &mut Block) {
unsafe {
let res = rounds::<R>(&self.v);
self.v[3] = _mm_add_epi32(self.v[3], _mm_set_epi32(0, 0, 0, 1));
let block_ptr = block.as_mut_ptr() as *mut __m128i;
for i in 0..4 {
_mm_storeu_si128(block_ptr.add(i), res[i]);
}
}
}
}
#[inline]
#[target_feature(enable = "sse2")]
unsafe fn rounds<R: Unsigned>(v: &[__m128i; 4]) -> [__m128i; 4] {
let mut res = *v;
for _ in 0..R::USIZE {
double_quarter_round(&mut res);
}
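    // Feed-forward: add the original input words back into the round output.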
for i in 0..4 {
res[i] = _mm_add_epi32(res[i], v[i]);
}
res
}
#[inline]
#[target_feature(enable = "sse2")]
unsafe fn double_quarter_round(v: &mut [__m128i; 4]) {
add_xor_rot(v);
rows_to_cols(v);
add_xor_rot(v);
cols_to_rows(v);
}
/// The goal of this function is to transform the state words from:
/// ```text
/// [a0, a1, a2, a3] [ 0, 1, 2, 3]
/// [b0, b1, b2, b3] == [ 4, 5, 6, 7]
/// [c0, c1, c2, c3] [ 8, 9, 10, 11]
/// [d0, d1, d2, d3] [12, 13, 14, 15]
/// ```
///
/// to:
/// ```text
/// [a0, a1, a2, a3] [ 0, 1, 2, 3]
/// [b1, b2, b3, b0] == [ 5, 6, 7, 4]
/// [c2, c3, c0, c1] [10, 11, 8, 9]
/// [d3, d0, d1, d2] [15, 12, 13, 14]
/// ```
///
/// so that we can apply [`add_xor_rot`] to the resulting columns, and have it compute the
/// "diagonal rounds" (as defined in RFC 7539) in parallel. In practice, this shuffle is
/// non-optimal: the last state word to be altered in `add_xor_rot` is `b`, so the shuffle
/// blocks on the result of `b` being calculated.
///
/// We can optimize this by observing that the four quarter rounds in `add_xor_rot` are
/// data-independent: they only access a single column of the state, and thus the order of
/// the columns does not matter. We therefore instead shuffle the other three state words,
/// to obtain the following equivalent layout:
/// ```text
/// [a3, a0, a1, a2] [ 3, 0, 1, 2]
/// [b0, b1, b2, b3] == [ 4, 5, 6, 7]
/// [c1, c2, c3, c0] [ 9, 10, 11, 8]
/// [d2, d3, d0, d1] [14, 15, 12, 13]
/// ```
///
/// See https://github.com/sneves/blake2-avx2/pull/4 for additional details. The earliest
/// known occurrence of this optimization is in floodyberry's SSE4 ChaCha code from 2014:
/// - https://github.com/floodyberry/chacha-opt/blob/0ab65cb99f5016633b652edebaf3691ceb4ff753/chacha_blocks_ssse3-64.S#L639-L643
#[inline]
#[target_feature(enable = "sse2")]
unsafe fn rows_to_cols([a, _, c, d]: &mut [__m128i; 4]) {
// c >>>= 32; d >>>= 64; a >>>= 96;
*c = _mm_shuffle_epi32(*c, 0b_00_11_10_01); // _MM_SHUFFLE(0, 3, 2, 1)
*d = _mm_shuffle_epi32(*d, 0b_01_00_11_10); // _MM_SHUFFLE(1, 0, 3, 2)
*a = _mm_shuffle_epi32(*a, 0b_10_01_00_11); // _MM_SHUFFLE(2, 1, 0, 3)
}
/// The goal of this function is to transform the state words from:
/// ```text
/// [a3, a0, a1, a2] [ 3, 0, 1, 2]
/// [b0, b1, b2, b3] == [ 4, 5, 6, 7]
/// [c1, c2, c3, c0] [ 9, 10, 11, 8]
/// [d2, d3, d0, d1] [14, 15, 12, 13]
/// ```
///
/// to:
/// ```text
/// [a0, a1, a2, a3] [ 0, 1, 2, 3]
/// [b0, b1, b2, b3] == [ 4, 5, 6, 7]
/// [c0, c1, c2, c3] [ 8, 9, 10, 11]
/// [d0, d1, d2, d3] [12, 13, 14, 15]
/// ```
///
/// reversing the transformation of [`rows_to_cols`].
#[inline]
#[target_feature(enable = "sse2")]
unsafe fn cols_to_rows([a, _, c, d]: &mut [__m128i; 4]) {
// c <<<= 32; d <<<= 64; a <<<= 96;
*c = _mm_shuffle_epi32(*c, 0b_10_01_00_11); // _MM_SHUFFLE(2, 1, 0, 3)
*d = _mm_shuffle_epi32(*d, 0b_01_00_11_10); // _MM_SHUFFLE(1, 0, 3, 2)
*a = _mm_shuffle_epi32(*a, 0b_00_11_10_01); // _MM_SHUFFLE(0, 3, 2, 1)
}
#[inline]
#[target_feature(enable = "sse2")]
unsafe fn add_xor_rot([a, b, c, d]: &mut [__m128i; 4]) {
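    // SSE2 has no byte-shuffle instruction (`_mm_shuffle_epi8` is SSSE3), so
    // even the whole-byte rotation amounts (16 and 8) that the AVX2 backend
    // handles with byte shuffles use the shift-shift-xor pattern here.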
// a += b; d ^= a; d <<<= (16, 16, 16, 16);
*a = _mm_add_epi32(*a, *b);
*d = _mm_xor_si128(*d, *a);
*d = _mm_xor_si128(_mm_slli_epi32(*d, 16), _mm_srli_epi32(*d, 16));
// c += d; b ^= c; b <<<= (12, 12, 12, 12);
*c = _mm_add_epi32(*c, *d);
*b = _mm_xor_si128(*b, *c);
*b = _mm_xor_si128(_mm_slli_epi32(*b, 12), _mm_srli_epi32(*b, 20));
// a += b; d ^= a; d <<<= (8, 8, 8, 8);
*a = _mm_add_epi32(*a, *b);
*d = _mm_xor_si128(*d, *a);
*d = _mm_xor_si128(_mm_slli_epi32(*d, 8), _mm_srli_epi32(*d, 24));
// c += d; b ^= c; b <<<= (7, 7, 7, 7);
*c = _mm_add_epi32(*c, *d);
*b = _mm_xor_si128(*b, *c);
*b = _mm_xor_si128(_mm_slli_epi32(*b, 7), _mm_srli_epi32(*b, 25));
}