chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,52 @@
/* Copyright (c) 2014, Intel Corporation.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#include "ecp_nistz.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wconversion"
#endif
/* Fills |str| with the bytewise little-endian encoding of |scalar|, where
* |scalar| has |num_limbs| limbs. |str| is padded with zeros at the end up
* to |str_len| bytes. Actually, |str_len| must be exactly one byte more than
* needed to encode |num_limbs| losslessly, so that there is an extra byte at
* the end. The extra byte is useful because the caller will be breaking |str|
* up into windows of a number of bits (5 or 7) that isn't divisible by 8, and
* so it is useful for it to be able to read an extra zero byte. */
void little_endian_bytes_from_scalar(uint8_t str[], size_t str_len,
const Limb scalar[],
size_t num_limbs) {
debug_assert_nonsecret(str_len == (num_limbs * sizeof(Limb)) + 1);
size_t i;
for (i = 0; i < num_limbs * sizeof(Limb); i += sizeof(Limb)) {
Limb d = scalar[i / sizeof(Limb)];
str[i + 0] = d & 0xff;
str[i + 1] = (d >> 8) & 0xff;
str[i + 2] = (d >> 16) & 0xff;
str[i + 3] = (d >>= 24) & 0xff;
if (sizeof(Limb) == 8) {
d >>= 8;
str[i + 4] = d & 0xff;
str[i + 5] = (d >> 8) & 0xff;
str[i + 6] = (d >> 16) & 0xff;
str[i + 7] = (d >> 24) & 0xff;
}
}
for (; i < str_len; i++) {
str[i] = 0;
}
}

View File

@@ -0,0 +1,274 @@
/* Copyright (c) 2015, Google Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#ifndef OPENSSL_HEADER_EC_ECP_NISTZ_H
#define OPENSSL_HEADER_EC_ECP_NISTZ_H
#include <ring-core/base.h>
#include "../../limbs/limbs.h"
#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wsign-conversion"
#endif
// This function looks at `w + 1` scalar bits (`w` current, 1 adjacent less
// significant bit), and recodes them into a signed digit for use in fast point
// multiplication: the use of signed rather than unsigned digits means that
// fewer points need to be precomputed, given that point inversion is easy (a
// precomputed point dP makes -dP available as well).
//
// BACKGROUND:
//
// Signed digits for multiplication were introduced by Booth ("A signed binary
// multiplication technique", Quart. Journ. Mech. and Applied Math., vol. IV,
// pt. 2 (1951), pp. 236-240), in that case for multiplication of integers.
// Booth's original encoding did not generally improve the density of nonzero
// digits over the binary representation, and was merely meant to simplify the
// handling of signed factors given in two's complement; but it has since been
// shown to be the basis of various signed-digit representations that do have
// further advantages, including the wNAF, using the following general
// approach:
//
// (1) Given a binary representation
//
// b_k ... b_2 b_1 b_0,
//
// of a nonnegative integer (b_k in {0, 1}), rewrite it in digits 0, 1, -1
// by using bit-wise subtraction as follows:
//
// b_k b_(k-1) ... b_2 b_1 b_0
// - b_k ... b_3 b_2 b_1 b_0
// -----------------------------------------
// s_(k+1) s_k ... s_3 s_2 s_1 s_0
//
// A left-shift followed by subtraction of the original value yields a new
// representation of the same value, using signed bits s_i = b_(i-1) - b_i.
// This representation from Booth's paper has since appeared in the
// literature under a variety of different names including "reversed binary
// form", "alternating greedy expansion", "mutual opposite form", and
// "sign-alternating {+-1}-representation".
//
// An interesting property is that among the nonzero bits, values 1 and -1
// strictly alternate.
//
// (2) Various window schemes can be applied to the Booth representation of
// integers: for example, right-to-left sliding windows yield the wNAF
// (a signed-digit encoding independently discovered by various researchers
// in the 1990s), and left-to-right sliding windows yield a left-to-right
// equivalent of the wNAF (independently discovered by various researchers
// around 2004).
//
// To prevent leaking information through side channels in point multiplication,
// we need to recode the given integer into a regular pattern: sliding windows
// as in wNAFs won't do, we need their fixed-window equivalent -- which is a few
// decades older: we'll be using the so-called "modified Booth encoding" due to
// MacSorley ("High-speed arithmetic in binary computers", Proc. IRE, vol. 49
// (1961), pp. 67-91), in a radix-2**w setting. That is, we always combine `w`
// signed bits into a signed digit, e.g. (for `w == 5`):
//
// s_(5j + 4) s_(5j + 3) s_(5j + 2) s_(5j + 1) s_(5j)
//
// The sign-alternating property implies that the resulting digit values are
// integers from `-2**(w-1)` to `2**(w-1)`, e.g. -16 to 16 for `w == 5`.
//
// Of course, we don't actually need to compute the signed digits s_i as an
// intermediate step (that's just a nice way to see how this scheme relates
// to the wNAF): a direct computation obtains the recoded digit from the
// six bits b_(5j + 4) ... b_(5j - 1).
//
// This function takes those `w` bits as an integer (e.g. 0 .. 63), writing the
// recoded digit to *is_negative (zero for positive, nonzero for negative) and
// *digit (absolute value, in the range 0 .. 2**(w-1)). Note that this integer
// essentially provides
// the input bits "shifted to the left" by one position: for example, the input
// to compute the least significant recoded digit, given that there's no bit
// b_-1, has to be b_4 b_3 b_2 b_1 b_0 0.
//
// DOUBLING CASE:
//
// Point addition formulas for short Weierstrass curves are often incomplete.
// Edge cases such as P + P or P + ∞ must be handled separately. This
// complicates constant-time requirements. P + ∞ cannot be avoided (any window
// may be zero) and is handled with constant-time selects. P + P (where P is not
// ∞) usually is not. Instead, windowing strategies are chosen to avoid this
// case. Whether this happens depends on the group order.
//
// Let w be the window width (in this function, w = 5). The non-trivial doubling
// case in single-point scalar multiplication may occur if and only if the
// 2^(w-1) bit of the group order is zero.
//
// Note the above only holds if the scalar is fully reduced and the group order
// is a prime that is much larger than 2^w. It also only holds when windows
// are applied from most significant to least significant, doubling between each
// window. It does not apply to more complex table strategies such as
// |EC_nistz256_method|.
//
// PROOF:
//
// Let n be the group order. Let l be the number of bits needed to represent n.
// Assume there exists some 0 <= k < n such that signed w-bit windowed
// multiplication hits the doubling case.
//
// Windowed multiplication consists of iterating over groups of s_i (defined
// above based on k's binary representation) from most to least significant. At
// iteration i (for i = ..., 3w, 2w, w, 0, starting from the most significant
// window), we:
//
// 1. Double the accumulator A, w times. Let A_i be the value of A at this
// point.
//
// 2. Set A to T_i + A_i, where T_i is a precomputed multiple of P
// corresponding to the window s_(i+w-1) ... s_i.
//
// Let j be the index such that A_j = T_j ≠ ∞. Looking at A_i and T_i as
// multiples of P, define a_i and t_i to be scalar coefficients of A_i and T_i.
// Thus a_j = t_j ≠ 0 (mod n). Note a_i and t_i may not be reduced mod n. t_i is
// the value of the w signed bits s_(i+w-1) ... s_i. a_i is computed as a_i =
// 2^w * (a_(i+w) + t_(i+w)).
//
// t_i is bounded by -2^(w-1) <= t_i <= 2^(w-1). Additionally, we may write it
// in terms of unsigned bits b_i. t_i consists of signed bits s_(i+w-1) ... s_i.
// This is computed as:
//
// b_(i+w-2) b_(i+w-3) ... b_i b_(i-1)
// - b_(i+w-1) b_(i+w-2) ... b_(i+1) b_i
// --------------------------------------------
// t_i = s_(i+w-1) s_(i+w-2) ... s_(i+1) s_i
//
// Observe that b_(i+w-2) through b_i occur in both terms. Let x be the integer
// represented by that bit string, i.e. 2^(w-2)*b_(i+w-2) + ... + b_i.
//
// t_i = (2*x + b_(i-1)) - (2^(w-1)*b_(i+w-1) + x)
// = x - 2^(w-1)*b_(i+w-1) + b_(i-1)
//
// Or, using C notation for bit operations:
//
// t_i = (k>>i) & ((1<<(w-1)) - 1) - (k>>i) & (1<<(w-1)) + (k>>(i-1)) & 1
//
// Note b_(i-1) is added in left-shifted by one (or doubled) from its place.
// This is compensated by t_(i-w)'s subtraction term. Thus, a_i may be computed
// by adding b_l b_(l-1) ... b_(i+1) b_i and an extra copy of b_(i-1). In C
// notation, this is:
//
// a_i = (k>>(i+w)) << w + ((k>>(i+w-1)) & 1) << w
//
// Observe that, while t_i may be positive or negative, a_i is bounded by
// 0 <= a_i < n + 2^w. Additionally, a_i can only be zero if b_(i+w-1) and up
// are all zero. (Note this implies a non-trivial P + (-P) is unreachable for
// all groups. That would imply the subsequent a_i is zero, which means all
// terms thus far were zero.)
//
// Returning to our doubling position, we have a_j = t_j (mod n). We now
// determine the value of a_j - t_j, which must be divisible by n. Our bounds on
// a_j and t_j imply a_j - t_j is 0 or n. If it is 0, a_j = t_j. However, 2^w
// divides a_j and -2^(w-1) <= t_j <= 2^(w-1), so this can only happen if
// a_j = t_j = 0, which is a trivial doubling. Therefore, a_j - t_j = n.
//
// Now we determine j. Suppose j > 0. w divides j, so j >= w. Then,
//
// n = a_j - t_j = (k>>(j+w)) << w + ((k>>(j+w-1)) & 1) << w - t_j
// <= k/2^j + 2^w - t_j
// < n/2^w + 2^w + 2^(w-1)
//
// n is much larger than 2^w, so this is impossible. Thus, j = 0: only the final
// addition may hit the doubling case.
//
// Finally, we consider bit patterns for n and k. Divide k into k_H + k_M + k_L
// such that k_H is the contribution from b_(l-1) .. b_w, k_M is the
// contribution from b_(w-1), and k_L is the contribution from b_(w-2) ... b_0.
// That is:
//
// - 2^w divides k_H
// - k_M is 0 or 2^(w-1)
// - 0 <= k_L < 2^(w-1)
//
// Divide n into n_H + n_M + n_L similarly. We thus have:
//
// t_0 = (k>>0) & ((1<<(w-1)) - 1) - (k>>0) & (1<<(w-1)) + (k>>(0-1)) & 1
// = k & ((1<<(w-1)) - 1) - k & (1<<(w-1))
// = k_L - k_M
//
// a_0 = (k>>(0+w)) << w + ((k>>(0+w-1)) & 1) << w
// = (k>>w) << w + ((k>>(w-1)) & 1) << w
// = k_H + 2*k_M
//
// n = a_0 - t_0
// n_H + n_M + n_L = (k_H + 2*k_M) - (k_L - k_M)
// = k_H + 3*k_M - k_L
//
// k_H - k_L < k and k < n, so k_H - k_L ≠ n. Therefore k_M is not 0 and must be
// 2^(w-1). Now we consider k_H and n_H. We know k_H <= n_H. Suppose k_H = n_H.
// Then,
//
// n_M + n_L = 3*(2^(w-1)) - k_L
// > 3*(2^(w-1)) - 2^(w-1)
// = 2^w
//
// Contradiction (n_M + n_L is the bottom w bits of n). Thus k_H < n_H. Suppose
// k_H < n_H - 2*2^w. Then,
//
// n_H + n_M + n_L = k_H + 3*(2^(w-1)) - k_L
// < n_H - 2*2^w + 3*(2^(w-1)) - k_L
// n_M + n_L < -2^(w-1) - k_L
//
// Contradiction. Thus, k_H = n_H - 2^w. (Note 2^w divides n_H and k_H.) Thus,
//
// n_H + n_M + n_L = k_H + 3*(2^(w-1)) - k_L
// = n_H - 2^w + 3*(2^(w-1)) - k_L
// n_M + n_L = 2^(w-1) - k_L
// <= 2^(w-1)
//
// Equality would mean 2^(w-1) divides n, which is impossible if n is prime.
// Thus n_M + n_L < 2^(w-1), so n_M is zero, proving our condition.
//
// This proof constructs k, so, to show the converse, let k_H = n_H - 2^w,
// k_M = 2^(w-1), k_L = 2^(w-1) - n_L. This will result in a non-trivial point
// doubling in the final addition and is the only such scalar.
//
// COMMON CURVES:
//
// The group orders for common curves end in the following bit patterns:
//
// P-521: ...00001001; w = 4 is okay
// P-384: ...01110011; w = 2, 5, 6, 7 are okay
// P-256: ...01010001; w = 5, 7 are okay
// P-224: ...00111101; w = 3, 4, 5, 6 are okay
static inline void booth_recode(crypto_word_t *is_negative, crypto_word_t *digit,
                                crypto_word_t in, crypto_word_t w) {
  debug_assert_nonsecret(w >= 2);
  debug_assert_nonsecret(w <= 7);

  // Set all bits of `s` to MSB(in), similar to |constant_time_msb_s|,
  // but 'in' seen as (`w+1`)-bit value.
  crypto_word_t s = ~((in >> w) - 1);
  crypto_word_t d;
  // d = 2^(w+1) - in - 1, i.e. the bitwise complement of |in| within w+1
  // bits — the candidate value for the negative-digit case.
  d = ((crypto_word_t)1u << (w + 1)) - in - 1;
  // Constant-time select between the two candidates: the complement when the
  // sign mask |s| is all-ones, |in| itself otherwise.
  d = (d & s) | (in & ~s);
  // Halve, rounding up, to drop the borrowed adjacent low bit and produce the
  // absolute digit value in 0 .. 2^(w-1).
  d = (d >> 1) + (d & 1);

  *is_negative = constant_time_is_nonzero_w(s & 1);
  *digit = d;
}
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
void little_endian_bytes_from_scalar(uint8_t str[], size_t str_len,
const Limb scalar[],
size_t num_limbs);
#endif // OPENSSL_HEADER_EC_ECP_NISTZ_H

View File

@@ -0,0 +1,34 @@
/* Copyright (c) 2014, Intel Corporation.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#ifndef OPENSSL_HEADER_EC_ECP_NISTZ384_H
#define OPENSSL_HEADER_EC_ECP_NISTZ384_H
#include "../../limbs/limbs.h"
#define P384_LIMBS (384u / LIMB_BITS)

/* A P-384 point in projective (X, Y, Z) form, one field element of
 * |P384_LIMBS| limbs per coordinate. An all-zero Z coordinate encodes the
 * point at infinity (see the |is_zero| checks in the point arithmetic). */
typedef struct {
  Limb X[P384_LIMBS];
  Limb Y[P384_LIMBS];
  Limb Z[P384_LIMBS];
} P384_POINT;

/* A P-384 point in affine (X, Y) form; no Z coordinate is stored. */
typedef struct {
  Limb X[P384_LIMBS];
  Limb Y[P384_LIMBS];
} P384_POINT_AFFINE;
#endif // OPENSSL_HEADER_EC_ECP_NISTZ384_H

View File

@@ -0,0 +1,300 @@
/* Copyright (c) 2014, Intel Corporation.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
/* Developers and authors:
* Shay Gueron (1, 2), and Vlad Krasnov (1)
* (1) Intel Corporation, Israel Development Center
* (2) University of Haifa
* Reference:
* Shay Gueron and Vlad Krasnov
* "Fast Prime Field Elliptic Curve Cryptography with 256 Bit Primes"
* http://eprint.iacr.org/2013/816 */
#include "ecp_nistz.h"
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#endif
/* Point double: r = 2*a. Writing |r| aliasing |a| is how this is called from
 * |p384_point_double|, so the formula below is ordered to read each input
 * coordinate before the aliased output coordinate is written. */
static void nistz384_point_double(P384_POINT *r, const P384_POINT *a) {
  BN_ULONG S[P384_LIMBS];
  BN_ULONG M[P384_LIMBS];
  BN_ULONG Zsqr[P384_LIMBS];
  BN_ULONG tmp0[P384_LIMBS];

  const BN_ULONG *in_x = a->X;
  const BN_ULONG *in_y = a->Y;
  const BN_ULONG *in_z = a->Z;

  BN_ULONG *res_x = r->X;
  BN_ULONG *res_y = r->Y;
  BN_ULONG *res_z = r->Z;

  elem_mul_by_2(S, in_y);           /* S = 2*Y1 */
  elem_sqr_mont(Zsqr, in_z);        /* Zsqr = Z1^2 */

  elem_sqr_mont(S, S);              /* S = 4*Y1^2 */

  elem_mul_mont(res_z, in_z, in_y); /* Z3 = Y1*Z1 */
  elem_mul_by_2(res_z, res_z);      /* Z3 = 2*Y1*Z1 */

  elem_add(M, in_x, Zsqr);          /* M = X1 + Z1^2 */
  elem_sub(Zsqr, in_x, Zsqr);       /* Zsqr = X1 - Z1^2 */

  elem_sqr_mont(res_y, S);          /* res_y = 16*Y1^4 */
  elem_div_by_2(res_y, res_y);      /* res_y = 8*Y1^4 */

  elem_mul_mont(M, M, Zsqr);        /* M = (X1 + Z1^2)*(X1 - Z1^2) */
  elem_mul_by_3(M, M);              /* M = 3*(X1^2 - Z1^4) */

  elem_mul_mont(S, S, in_x);        /* S = 4*X1*Y1^2 */
  elem_mul_by_2(tmp0, S);           /* tmp0 = 2*S */

  elem_sqr_mont(res_x, M);          /* X3 = M^2 */
  elem_sub(res_x, res_x, tmp0);     /* X3 = M^2 - 2*S */

  elem_sub(S, S, res_x);            /* S = S - X3 */
  elem_mul_mont(S, S, M);           /* S = M*(S - X3) */
  elem_sub(res_y, S, res_y);        /* Y3 = M*(S - X3) - 8*Y1^4 */
}
/* Point addition: r = a+b */
static void nistz384_point_add(P384_POINT *r, const P384_POINT *a,
                               const P384_POINT *b) {
  BN_ULONG U2[P384_LIMBS], S2[P384_LIMBS];
  BN_ULONG U1[P384_LIMBS], S1[P384_LIMBS];
  BN_ULONG Z1sqr[P384_LIMBS];
  BN_ULONG Z2sqr[P384_LIMBS];
  BN_ULONG H[P384_LIMBS], R[P384_LIMBS];
  BN_ULONG Hsqr[P384_LIMBS];
  BN_ULONG Rsqr[P384_LIMBS];
  BN_ULONG Hcub[P384_LIMBS];

  BN_ULONG res_x[P384_LIMBS];
  BN_ULONG res_y[P384_LIMBS];
  BN_ULONG res_z[P384_LIMBS];

  const BN_ULONG *in1_x = a->X;
  const BN_ULONG *in1_y = a->Y;
  const BN_ULONG *in1_z = a->Z;

  const BN_ULONG *in2_x = b->X;
  const BN_ULONG *in2_y = b->Y;
  const BN_ULONG *in2_z = b->Z;

  /* All-zero Z encodes the point at infinity; these are limb masks. */
  BN_ULONG in1infty = is_zero(a->Z);
  BN_ULONG in2infty = is_zero(b->Z);

  elem_sqr_mont(Z2sqr, in2_z);     /* Z2^2 */
  elem_sqr_mont(Z1sqr, in1_z);     /* Z1^2 */

  elem_mul_mont(S1, Z2sqr, in2_z); /* S1 = Z2^3 */
  elem_mul_mont(S2, Z1sqr, in1_z); /* S2 = Z1^3 */

  elem_mul_mont(S1, S1, in1_y);    /* S1 = Y1*Z2^3 */
  elem_mul_mont(S2, S2, in2_y);    /* S2 = Y2*Z1^3 */
  elem_sub(R, S2, S1);             /* R = S2 - S1 */

  elem_mul_mont(U1, in1_x, Z2sqr); /* U1 = X1*Z2^2 */
  elem_mul_mont(U2, in2_x, Z1sqr); /* U2 = X2*Z1^2 */
  elem_sub(H, U2, U1);             /* H = U2 - U1 */

  /* U1 == U2 with both inputs finite means b == ±a, for which the generic
   * addition formula below does not hold. */
  BN_ULONG is_exceptional = is_equal(U1, U2) & ~in1infty & ~in2infty;
  if (is_exceptional) {
    if (is_equal(S1, S2)) {
      /* b == a: fall back to the doubling formula. */
      nistz384_point_double(r, a);
    } else {
      /* b == -a: the sum is the point at infinity (all-zero encoding). */
      limbs_zero(r->X, P384_LIMBS);
      limbs_zero(r->Y, P384_LIMBS);
      limbs_zero(r->Z, P384_LIMBS);
    }
    return;
  }

  elem_sqr_mont(Rsqr, R);             /* R^2 */
  elem_mul_mont(res_z, H, in1_z);     /* Z3 = H*Z1*Z2 */
  elem_sqr_mont(Hsqr, H);             /* H^2 */
  elem_mul_mont(res_z, res_z, in2_z); /* Z3 = H*Z1*Z2 */
  elem_mul_mont(Hcub, Hsqr, H);       /* H^3 */

  elem_mul_mont(U2, U1, Hsqr);        /* U1*H^2 */
  elem_mul_by_2(Hsqr, U2);            /* 2*U1*H^2 */

  elem_sub(res_x, Rsqr, Hsqr);        /* X3 = R^2 - 2*U1*H^2 */
  elem_sub(res_x, res_x, Hcub);       /* X3 = R^2 - 2*U1*H^2 - H^3 */

  elem_sub(res_y, U2, res_x);         /* U1*H^2 - X3 */

  elem_mul_mont(S2, S1, Hcub);        /* S1*H^3 */
  elem_mul_mont(res_y, R, res_y);     /* R*(U1*H^2 - X3) */
  elem_sub(res_y, res_y, S2);         /* Y3 = R*(U1*H^2 - X3) - S1*H^3 */

  /* If either input was the point at infinity, the result is the other
   * input; select in constant time. */
  copy_conditional(res_x, in2_x, in1infty);
  copy_conditional(res_y, in2_y, in1infty);
  copy_conditional(res_z, in2_z, in1infty);

  copy_conditional(res_x, in1_x, in2infty);
  copy_conditional(res_y, in1_y, in2infty);
  copy_conditional(res_z, in1_z, in2infty);

  limbs_copy(r->X, res_x, P384_LIMBS);
  limbs_copy(r->Y, res_y, P384_LIMBS);
  limbs_copy(r->Z, res_z, P384_LIMBS);
}
/* Adds the table entry selected by the (not-yet-recoded) 6-bit window
 * |wvalue| into |r|, negating the entry first when the Booth-recoded digit is
 * negative. |table| holds 1*P .. 16*P (see |nistz384_point_mul|). */
static void add_precomputed_w5(P384_POINT *r, crypto_word_t wvalue,
                               const P384_POINT table[16]) {
  crypto_word_t recoded_is_negative;
  crypto_word_t recoded;
  booth_recode(&recoded_is_negative, &recoded, wvalue, 5);

  /* h = |recoded| * P, looked up without an index-dependent access pattern. */
  alignas(64) P384_POINT h;
  p384_point_select_w5(&h, table, recoded);

  /* Negating a point flips the sign of its Y coordinate; apply the negation
   * only when the digit was negative, via a constant-time select. */
  alignas(64) BN_ULONG tmp[P384_LIMBS];
  p384_elem_neg(tmp, h.Y);
  copy_conditional(h.Y, tmp, recoded_is_negative);

  nistz384_point_add(r, r, &h);
}
/* r = p * p_scalar, using 5-bit signed (Booth-recoded) fixed windows over the
 * scalar, processed from most to least significant. */
static void nistz384_point_mul(P384_POINT *r,
                               const BN_ULONG p_scalar[P384_LIMBS],
                               const Limb p_x[P384_LIMBS],
                               const Limb p_y[P384_LIMBS]) {
  static const size_t kWindowSize = 5;
  /* Each window reads kWindowSize + 1 bits: the window itself plus the
   * adjacent lower bit needed by |booth_recode|. */
  static const crypto_word_t kMask = (1 << (5 /* kWindowSize */ + 1)) - 1;

  /* The scalar as little-endian bytes with one extra zero byte at the end
   * (see |little_endian_bytes_from_scalar|), so window reads may safely
   * straddle the top byte. */
  uint8_t p_str[(P384_LIMBS * sizeof(Limb)) + 1];
  little_endian_bytes_from_scalar(p_str, sizeof(p_str) / sizeof(p_str[0]),
                                  p_scalar, P384_LIMBS);

  /* A |P384_POINT| is (3 * 48) = 144 bytes, and the 64-byte alignment should
   * add no more than 63 bytes of overhead. Thus, |table| should require
   * ~2367 ((144 * 16) + 63) bytes of stack space. */
  alignas(64) P384_POINT table[16];

  /* table[0] is implicitly (0,0,0) (the point at infinity), therefore it is
   * not stored. All other values are actually stored with an offset of -1 in
   * table. */
  P384_POINT *row = table;

  /* Fill the table with 1*P .. 16*P via doublings and additions of already
   * computed entries. */
  limbs_copy(row[1 - 1].X, p_x, P384_LIMBS);
  limbs_copy(row[1 - 1].Y, p_y, P384_LIMBS);
  limbs_copy(row[1 - 1].Z, ONE, P384_LIMBS);

  nistz384_point_double(&row[2 - 1], &row[1 - 1]);
  nistz384_point_add(&row[3 - 1], &row[2 - 1], &row[1 - 1]);
  nistz384_point_double(&row[4 - 1], &row[2 - 1]);
  nistz384_point_double(&row[6 - 1], &row[3 - 1]);
  nistz384_point_double(&row[8 - 1], &row[4 - 1]);
  nistz384_point_double(&row[12 - 1], &row[6 - 1]);
  nistz384_point_add(&row[5 - 1], &row[4 - 1], &row[1 - 1]);
  nistz384_point_add(&row[7 - 1], &row[6 - 1], &row[1 - 1]);
  nistz384_point_add(&row[9 - 1], &row[8 - 1], &row[1 - 1]);
  nistz384_point_add(&row[13 - 1], &row[12 - 1], &row[1 - 1]);
  nistz384_point_double(&row[14 - 1], &row[7 - 1]);
  nistz384_point_double(&row[10 - 1], &row[5 - 1]);
  nistz384_point_add(&row[15 - 1], &row[14 - 1], &row[1 - 1]);
  nistz384_point_add(&row[11 - 1], &row[10 - 1], &row[1 - 1]);
  nistz384_point_double(&row[16 - 1], &row[8 - 1]);

  /* 384 % 5 == 4, so the most significant window holds only 4 bits. */
  static const size_t START_INDEX = 384 - 4;
  size_t index = START_INDEX;

  BN_ULONG recoded_is_negative;
  crypto_word_t recoded;

  crypto_word_t wvalue = p_str[(index - 1) / 8];
  wvalue = (wvalue >> ((index - 1) % 8)) & kMask;

  booth_recode(&recoded_is_negative, &recoded, wvalue, 5);
  /* The short top window cannot recode to a negative digit. */
  dev_assert_secret(!recoded_is_negative);
  p384_point_select_w5(r, table, recoded);

  /* Main loop: for each remaining window, add its precomputed multiple and
   * then shift the accumulator up by five doublings. */
  while (index >= kWindowSize) {
    if (index != START_INDEX) {
      size_t off = (index - 1) / 8;

      wvalue = p_str[off] | p_str[off + 1] << 8;
      wvalue = (wvalue >> ((index - 1) % 8)) & kMask;
      add_precomputed_w5(r, wvalue, table);
    }

    index -= kWindowSize;

    nistz384_point_double(r, r);
    nistz384_point_double(r, r);
    nistz384_point_double(r, r);
    nistz384_point_double(r, r);
    nistz384_point_double(r, r);
  }

  /* Final window; the bit below bit 0 is implicitly zero, hence the left
   * shift by one in place of a read. */
  wvalue = p_str[0];
  wvalue = (wvalue << 1) & kMask;

  add_precomputed_w5(r, wvalue, table);
}
/* p384_point_double sets r = 2*a, with each point passed as a triple of
 * X/Y/Z coordinate limb arrays rather than as a |P384_POINT|. */
void p384_point_double(Limb r[3][P384_LIMBS], const Limb a[3][P384_LIMBS])
{
  P384_POINT p;

  /* Load the coordinates into a scratch point. */
  limbs_copy(p.X, a[0], P384_LIMBS);
  limbs_copy(p.Y, a[1], P384_LIMBS);
  limbs_copy(p.Z, a[2], P384_LIMBS);

  /* Double in place (same point as source and destination). */
  nistz384_point_double(&p, &p);

  /* Store the result back as a limb triple. */
  limbs_copy(r[0], p.X, P384_LIMBS);
  limbs_copy(r[1], p.Y, P384_LIMBS);
  limbs_copy(r[2], p.Z, P384_LIMBS);
}
/* p384_point_add sets r = a + b, with each point passed as a triple of
 * X/Y/Z coordinate limb arrays rather than as a |P384_POINT|. */
void p384_point_add(Limb r[3][P384_LIMBS],
                    const Limb a[3][P384_LIMBS],
                    const Limb b[3][P384_LIMBS])
{
  P384_POINT sum;
  limbs_copy(sum.X, a[0], P384_LIMBS);
  limbs_copy(sum.Y, a[1], P384_LIMBS);
  limbs_copy(sum.Z, a[2], P384_LIMBS);

  P384_POINT addend;
  limbs_copy(addend.X, b[0], P384_LIMBS);
  limbs_copy(addend.Y, b[1], P384_LIMBS);
  limbs_copy(addend.Z, b[2], P384_LIMBS);

  /* Accumulate into |sum| (the first operand doubles as the output). */
  nistz384_point_add(&sum, &sum, &addend);

  limbs_copy(r[0], sum.X, P384_LIMBS);
  limbs_copy(r[1], sum.Y, P384_LIMBS);
  limbs_copy(r[2], sum.Z, P384_LIMBS);
}
/* p384_point_mul sets r = p_scalar * (p_x, p_y), returning the product as a
 * triple of X/Y/Z coordinate limb arrays. */
void p384_point_mul(Limb r[3][P384_LIMBS], const BN_ULONG p_scalar[P384_LIMBS],
                    const Limb p_x[P384_LIMBS], const Limb p_y[P384_LIMBS]) {
  /* Compute into an aligned scratch point, then unpack the coordinates. */
  alignas(64) P384_POINT product;
  nistz384_point_mul(&product, p_scalar, p_x, p_y);

  limbs_copy(r[0], product.X, P384_LIMBS);
  limbs_copy(r[1], product.Y, P384_LIMBS);
  limbs_copy(r[2], product.Z, P384_LIMBS);
}
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif

View File

@@ -0,0 +1,54 @@
/* Copyright 2016 Brian Smith.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#include "./p256_shared.h"
#include "../../limbs/limbs.h"
#if !defined(OPENSSL_USE_NISTZ256)
typedef Limb ScalarMont[P256_LIMBS];
typedef Limb Scalar[P256_LIMBS];

#include "../bn/internal.h"

/* The P-256 group order n — the modulus for the scalar arithmetic below —
 * in little-endian limb order. */
static const BN_ULONG N[P256_LIMBS] = {
#if defined(OPENSSL_64_BIT)
  0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000
#else
  0xfc632551, 0xf3b9cac2, 0xa7179e84, 0xbce6faad, 0xffffffff, 0xffffffff, 0,
  0xffffffff
#endif
};

/* Montgomery n0 constant for reduction mod |N| (see |BN_MONT_CTX_N0|). */
static const BN_ULONG N_N0[] = {
  BN_MONT_CTX_N0(0xccd1c8aa, 0xee00bc4f)
};
/* r = a * b (mod n), with all values in the Montgomery domain. */
void p256_scalar_mul_mont(ScalarMont r, const ScalarMont a,
                          const ScalarMont b) {
  /* XXX: Inefficient. TODO: optimize with dedicated multiplication routine. */
  bn_mul_mont_small(r, a, b, N, N_N0, P256_LIMBS);
}
/* XXX: Inefficient. TODO: optimize with dedicated squaring routine. */
/* r = a^(2^rep) (mod n): |rep| >= 1 repeated Montgomery squarings. */
void p256_scalar_sqr_rep_mont(ScalarMont r, const ScalarMont a, Limb rep) {
  dev_assert_secret(rep >= 1);
  p256_scalar_mul_mont(r, a, a);     /* r = a^2 */
  for (Limb i = 1; i < rep; ++i) {
    p256_scalar_mul_mont(r, r, r);   /* r = r^2 */
  }
}
#endif

View File

@@ -0,0 +1,246 @@
/* Copyright 2016 Brian Smith.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#include "../../limbs/limbs.h"
#include "ecp_nistz384.h"
#include "../bn/internal.h"
#include "../../internal.h"
#include "../../limbs/limbs.inl"
/* XXX: Here we assume that the conversion from |Carry| to |Limb| is
 * constant-time, but we haven't verified that assumption. TODO: Fix it so
 * we don't need to make that assumption. */

typedef Limb Elem[P384_LIMBS];
typedef Limb ScalarMont[P384_LIMBS];
typedef Limb Scalar[P384_LIMBS];

/* The P-384 field prime, q = 2**384 - 2**128 - 2**96 + 2**32 - 1 (see the
 * worked example in |elem_div_by_2|), in little-endian limb order. */
static const BN_ULONG Q[P384_LIMBS] = {
#if defined(OPENSSL_64_BIT)
  0xffffffff, 0xffffffff00000000, 0xfffffffffffffffe, 0xffffffffffffffff,
  0xffffffffffffffff, 0xffffffffffffffff
#else
  0xffffffff, 0, 0, 0xffffffff, 0xfffffffe, 0xffffffff, 0xffffffff, 0xffffffff,
  0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
#endif
};

/* The P-384 group order n, used as the modulus for scalar arithmetic. */
static const BN_ULONG N[P384_LIMBS] = {
#if defined(OPENSSL_64_BIT)
  0xecec196accc52973, 0x581a0db248b0a77a, 0xc7634d81f4372ddf, 0xffffffffffffffff,
  0xffffffffffffffff, 0xffffffffffffffff
#else
  0xccc52973, 0xecec196a, 0x48b0a77a, 0x581a0db2, 0xf4372ddf, 0xc7634d81,
  0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
#endif
};

/* The field element 1 — presumably in the Montgomery domain, matching the
 * other values fed to |bn_mul_mont_small| here (TODO confirm). Used as the Z
 * coordinate of an affine point in |nistz384_point_mul|. Note the
 * initializers list fewer entries than there are limbs; the remaining limbs
 * are implicitly zero. */
static const BN_ULONG ONE[P384_LIMBS] = {
#if defined(OPENSSL_64_BIT)
  0xffffffff00000001, 0xffffffff, 1, 0, 0
#else
  1, 0xffffffff, 0xffffffff, 0, 1, 0, 0, 0, 0, 0
#endif
};

/* (q + 1) / 2; q is odd, so q + 1 is even and this is exact. Used by
 * |elem_div_by_2|. */
static const Elem Q_PLUS_1_SHR_1 = {
#if defined(OPENSSL_64_BIT)
  0x80000000, 0x7fffffff80000000, 0xffffffffffffffff, 0xffffffffffffffff,
  0xffffffffffffffff, 0x7fffffffffffffff
#else
  0x80000000, 0, 0x80000000, 0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff,
  0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x7fffffff
#endif
};

/* Montgomery n0 constants for reduction mod |Q| and mod |N| respectively
 * (see |BN_MONT_CTX_N0|). */
static const BN_ULONG Q_N0[] = {
  BN_MONT_CTX_N0(1, 1)
};

static const BN_ULONG N_N0[] = {
  BN_MONT_CTX_N0(0x6ed46089, 0xe88fdc45)
};
/* XXX: MSVC for x86 warns when it fails to inline these functions it should
* probably inline. */
#if defined(_MSC_VER) && !defined(__clang__) && defined(OPENSSL_X86)
#define INLINE_IF_POSSIBLE __forceinline
#else
#define INLINE_IF_POSSIBLE inline
#endif
/* Returns the |LIMBS_equal| mask indicating whether a == b; the result is
 * combined bitwise with other masks by callers (see |nistz384_point_add|). */
static inline Limb is_equal(const Elem a, const Elem b) {
  return LIMBS_equal(a, b, P384_LIMBS);
}

/* Returns the |LIMBS_are_zero| mask indicating whether |a| is all-zero; an
 * all-zero Z coordinate encodes the point at infinity. */
static inline Limb is_zero(const BN_ULONG a[P384_LIMBS]) {
  return LIMBS_are_zero(a, P384_LIMBS);
}
/* Sets r = a when |condition| is set and leaves |r| unchanged otherwise,
 * limb by limb through |constant_time_select_w| so that both values are
 * always read regardless of |condition|. */
static inline void copy_conditional(Elem r, const Elem a,
                                    const Limb condition) {
  for (size_t i = 0; i < P384_LIMBS; ++i) {
    r[i] = constant_time_select_w(condition, a[i], r[i]);
  }
}
/* r = a + b (mod q). */
static inline void elem_add(Elem r, const Elem a, const Elem b) {
  LIMBS_add_mod(r, a, b, Q, P384_LIMBS);
}

/* r = a - b (mod q). */
static inline void elem_sub(Elem r, const Elem a, const Elem b) {
  LIMBS_sub_mod(r, a, b, Q, P384_LIMBS);
}
/* r = a / 2 (mod q). */
static void elem_div_by_2(Elem r, const Elem a) {
  /* Consider the case where `a` is even. Then we can shift `a` right one bit
   * and the result will still be valid because we didn't lose any bits and so
   * `(a >> 1) * 2 == a (mod q)`, which is the invariant we must satisfy.
   *
   * The remainder of this comment is considering the case where `a` is odd.
   *
   * Since `a` is odd, it isn't the case that `(a >> 1) * 2 == a (mod q)`
   * because the lowest bit is lost during the shift. For example, consider:
   *
   * ```python
   * q = 2**384 - 2**128 - 2**96 + 2**32 - 1
   * a = 2**383
   * two_a = a * 2 % q
   * assert two_a == 0x100000000ffffffffffffffff00000001
   * ```
   *
   * Notice there how `(2 * a) % q` wrapped around to a smaller odd value. When
   * we divide `two_a` by two (mod q), we need to get the value `2**383`, which
   * we obviously can't get with just a right shift.
   *
   * `q` is odd, and `a` is odd, so `a + q` is even. We could calculate
   * `(a + q) >> 1` and then reduce it mod `q`. However, then we would have to
   * keep track of an extra most significant bit. We can avoid that by instead
   * calculating `(a >> 1) + ((q + 1) >> 1)`. The `1` in `q + 1` is the least
   * significant bit of `a`. `q + 1` is even, which means it can be shifted
   * without losing any bits. Since `q` is odd, `q - 1` is even, so the largest
   * odd field element is `q - 2`. Thus we know that `a <= q - 2`. We know
   * `(q + 1) >> 1` is `(q + 1) / 2` since (`q + 1`) is even. The value of
   * `a >> 1` is `(a - 1)/2` since the shift will drop the least significant
   * bit of `a`, which is 1. Thus:
   *
   * sum  =  ((q + 1) >> 1) + (a >> 1)
   * sum  =  (q + 1)/2 + (a >> 1)       (substituting (q + 1)/2)
   *     <=  (q + 1)/2 + (q - 2 - 1)/2  (substituting a <= q - 2)
   *     <=  (q + 1)/2 + (q - 3)/2      (simplifying)
   *     <=  (q + 1 + q - 3)/2          (factoring out the common divisor)
   *     <=  (2q - 2)/2                 (simplifying)
   *     <=  q - 1                      (simplifying)
   *
   * Thus, no reduction of the sum mod `q` is necessary. */

  Limb is_odd = constant_time_is_nonzero_w(a[0] & 1);

  /* r = a >> 1, from most to least significant limb. */
  Limb carry = a[P384_LIMBS - 1] & 1;
  r[P384_LIMBS - 1] = a[P384_LIMBS - 1] >> 1;
  for (size_t i = 1; i < P384_LIMBS; ++i) {
    /* The whole limb is saved; only its low bit survives the shift below. */
    Limb new_carry = a[P384_LIMBS - i - 1];
    r[P384_LIMBS - i - 1] =
        (a[P384_LIMBS - i - 1] >> 1) | (carry << (LIMB_BITS - 1));
    carry = new_carry;
  }

  /* adjusted = r + (q + 1)/2; cannot overflow per the bound shown above. */
  Elem adjusted;
  BN_ULONG carry2 = limbs_add(adjusted, r, Q_PLUS_1_SHR_1, P384_LIMBS);
  dev_assert_secret(carry2 == 0);
  (void)carry2;
  /* Use the adjusted value only when |a| was odd, selected in constant
   * time. */
  copy_conditional(r, adjusted, is_odd);
}
/* r = a * b (mod q), Montgomery domain. */
static inline void elem_mul_mont(Elem r, const Elem a, const Elem b) {
  /* XXX: Not (clearly) constant-time; inefficient.*/
  bn_mul_mont_small(r, a, b, Q, Q_N0, P384_LIMBS);
}

/* r = 2*a (mod q). */
static inline void elem_mul_by_2(Elem r, const Elem a) {
  LIMBS_shl_mod(r, a, Q, P384_LIMBS);
}

/* r = 3*a (mod q), computed as (a + a) + a. */
static INLINE_IF_POSSIBLE void elem_mul_by_3(Elem r, const Elem a) {
  /* XXX: inefficient. TODO: Replace with an integrated shift + add. */
  Elem doubled;
  elem_add(doubled, a, a);
  elem_add(r, doubled, a);
}

/* r = a^2 (mod q), Montgomery domain. */
static inline void elem_sqr_mont(Elem r, const Elem a) {
  /* XXX: Inefficient. TODO: Add a dedicated squaring routine. */
  elem_mul_mont(r, a, a);
}
void p384_elem_sub(Elem r, const Elem a, const Elem b) {
elem_sub(r, a, b);
}
/* Public (non-static) wrapper for |elem_div_by_2|: r = a/2 (mod q). */
void p384_elem_div_by_2(Elem r, const Elem a) {
  elem_div_by_2(r, a);
}
/* Public (non-static) wrapper for |elem_mul_mont|:
 * r = a * b * R^-1 (mod q). */
void p384_elem_mul_mont(Elem r, const Elem a, const Elem b) {
  elem_mul_mont(r, a, b);
}
/* r = -a (mod q), i.e. q - a, with the result for a == 0 canonicalized to 0
 * so the output stays fully reduced. Runs in constant time. */
void p384_elem_neg(Elem r, const Elem a) {
  Limb a_is_zero = LIMBS_are_zero(a, P384_LIMBS);
  /* q - a cannot underflow because a < q. */
  Carry underflow = limbs_sub(r, Q, a, P384_LIMBS);
  dev_assert_secret(underflow == 0);
  (void)underflow;
  /* When a == 0 the subtraction produced q; select 0 instead, limb by limb,
   * without branching on the secret mask. */
  for (size_t limb = 0; limb < P384_LIMBS; ++limb) {
    r[limb] = constant_time_select_w(a_is_zero, 0, r[limb]);
  }
}
/* r = a * b * R^-1 (mod N): Montgomery multiplication in the scalar field,
 * using modulus |N| and its Montgomery constant |N_N0|. */
void p384_scalar_mul_mont(ScalarMont r, const ScalarMont a,
                          const ScalarMont b) {
  /* XXX: Inefficient. TODO: Add dedicated multiplication routine. */
  bn_mul_mont_small(r, a, b, N, N_N0, P384_LIMBS);
}
/* TODO(perf): Optimize this. */
/* Constant-time selection of |table[index - 1]|; |index| == 0 yields the
 * all-zero point (the point at infinity). */
static void p384_point_select_w5(P384_POINT *out,
                                 const P384_POINT table[16], size_t index) {
  Elem acc_x; limbs_zero(acc_x, P384_LIMBS);
  Elem acc_y; limbs_zero(acc_y, P384_LIMBS);
  Elem acc_z; limbs_zero(acc_z, P384_LIMBS);
  // TODO: Rewrite in terms of |limbs_select|.
  /* Scan the entire table unconditionally so the memory access pattern is
   * independent of the secret |index|. */
  for (size_t entry = 0; entry < 16; ++entry) {
    crypto_word_t matches =
        constant_time_eq_w(index, (crypto_word_t)entry + 1);
    for (size_t limb = 0; limb < P384_LIMBS; ++limb) {
      acc_x[limb] =
          constant_time_select_w(matches, table[entry].X[limb], acc_x[limb]);
      acc_y[limb] =
          constant_time_select_w(matches, table[entry].Y[limb], acc_y[limb]);
      acc_z[limb] =
          constant_time_select_w(matches, table[entry].Z[limb], acc_z[limb]);
    }
  }
  limbs_copy(out->X, acc_x, P384_LIMBS);
  limbs_copy(out->Y, acc_y, P384_LIMBS);
  limbs_copy(out->Z, acc_z, P384_LIMBS);
}
#include "ecp_nistz384.inl"

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,437 @@
// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
// Copyright (c) 2014, Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1)
// (1) Intel Corporation, Israel Development Center, Haifa, Israel
// (2) University of Haifa, Israel
//
// Reference:
// S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with
// 256 Bit Primes"
#include <ring-core/base.h>
#include "../../limbs/limbs.inl"
#include <stdint.h>
#include "p256-nistz.h"
#if defined(OPENSSL_USE_NISTZ256)
// A row of the precomputed generator table: 64 affine points.
typedef P256_POINT_AFFINE PRECOMP256_ROW[64];
// One converted into the Montgomery domain (used as the Z coordinate when
// promoting an affine point to Jacobian coordinates).
static const BN_ULONG ONE_MONT[P256_LIMBS] = {
    TOBN(0x00000000, 0x00000001),
    TOBN(0xffffffff, 0x00000000),
    TOBN(0xffffffff, 0xffffffff),
    TOBN(0x00000000, 0xfffffffe),
};
// Precomputed tables for the default generator
#include "p256-nistz-table.h"
// Recode window to a signed digit, see |ec_GFp_nistp_recode_scalar_bits| in
// util.c for details
static crypto_word_t booth_recode_w5(crypto_word_t in) {
  // |sign| is all-ones when bit 5 (the window's top bit) is set, i.e. when
  // the recoded digit is negative.
  crypto_word_t sign = ~((in >> 5) - 1);
  // Candidate value for the negative case: 2^6 - 1 - in.
  crypto_word_t mag = (1 << 6) - in - 1;
  // Keep the negated value when negative, the original otherwise, then halve
  // (rounding up) to obtain the digit magnitude.
  mag = (mag & sign) | (in & ~sign);
  mag = (mag >> 1) + (mag & 1);
  // Pack as (magnitude << 1) | sign_bit.
  return (mag << 1) + (sign & 1);
}
// 7-bit variant of |booth_recode_w5|; see that function for the scheme.
static crypto_word_t booth_recode_w7(crypto_word_t in) {
  // All-ones when bit 7 (the window's top bit) is set: digit is negative.
  crypto_word_t sign = ~((in >> 7) - 1);
  // Candidate value for the negative case: 2^8 - 1 - in.
  crypto_word_t mag = (1 << 8) - in - 1;
  mag = (mag & sign) | (in & ~sign);
  mag = (mag >> 1) + (mag & 1);
  // Pack as (magnitude << 1) | sign_bit.
  return (mag << 1) + (sign & 1);
}
// The `(P256_LIMBS == 8)` case is unreachable for 64-bit targets.
#if defined(OPENSSL_64_BIT) && defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunreachable-code"
#endif
// copy_conditional copies |src| to |dst| if |move| is one and leaves it as-is
// if |move| is zero.
//
// WARNING: this breaks the usual convention of constant-time functions
// returning masks.
//
// |move| is expected to be exactly 0 or 1; it is expanded into an
// all-zeros/all-ones mask below. The copy is deliberately unrolled so it is
// branch-free and constant-time with respect to |move|.
static void copy_conditional(BN_ULONG dst[P256_LIMBS],
                             const BN_ULONG src[P256_LIMBS], BN_ULONG move) {
  // mask1 = move ? ~0 : 0; mask2 is its complement.
  BN_ULONG mask1 = ((BN_ULONG)0) - move;
  BN_ULONG mask2 = ~mask1;
  dst[0] = (src[0] & mask1) ^ (dst[0] & mask2);
  dst[1] = (src[1] & mask1) ^ (dst[1] & mask2);
  dst[2] = (src[2] & mask1) ^ (dst[2] & mask2);
  dst[3] = (src[3] & mask1) ^ (dst[3] & mask2);
  // P256_LIMBS == 8 only on 32-bit targets; on 64-bit targets this branch is
  // statically dead (see the pragma above).
  if (P256_LIMBS == 8) {
    dst[4] = (src[4] & mask1) ^ (dst[4] & mask2);
    dst[5] = (src[5] & mask1) ^ (dst[5] & mask2);
    dst[6] = (src[6] & mask1) ^ (dst[6] & mask2);
    dst[7] = (src[7] & mask1) ^ (dst[7] & mask2);
  }
}
#if defined(__clang__)
#pragma GCC diagnostic pop
#endif
// is_not_zero returns one iff in != 0 and zero otherwise.
//
// WARNING: this breaks the usual convention of constant-time functions
// returning masks.
//
// (define-fun is_not_zero ((in (_ BitVec 64))) (_ BitVec 64)
// (bvlshr (bvor in (bvsub #x0000000000000000 in)) #x000000000000003f)
// )
//
// (declare-fun x () (_ BitVec 64))
//
// (assert (and (= x #x0000000000000000) (= (is_not_zero x)
// #x0000000000000001))) (check-sat)
//
// (assert (and (not (= x #x0000000000000000)) (= (is_not_zero x)
// #x0000000000000000))) (check-sat)
//
static BN_ULONG is_not_zero(BN_ULONG in) {
  // |in | (0 - in)| has its top bit set iff |in| != 0; shift that bit down
  // to position zero to produce a 0/1 result.
  return (in | (0 - in)) >> (BN_BITS2 - 1);
}
#if defined(OPENSSL_X86_64)
// Dispatch between CPU variations. The "_adx" suffixed functions use MULX in
// addition to ADCX/ADOX. MULX is part of BMI2, not ADX, so we must check both
// capabilities.
// Montgomery multiplication dispatcher: prefers the MULX/ADCX/ADOX code path
// when the CPU supports both ADX and BMI2; otherwise falls back to the
// baseline implementation.
void ecp_nistz256_mul_mont(BN_ULONG res[P256_LIMBS],
                           const BN_ULONG a[P256_LIMBS],
                           const BN_ULONG b[P256_LIMBS]) {
  if (!adx_bmi2_available) {
    ecp_nistz256_mul_mont_nohw(res, a, b);
  } else {
    ecp_nistz256_mul_mont_adx(res, a, b);
  }
}
// Montgomery squaring dispatcher; see |ecp_nistz256_mul_mont| for the
// CPU-capability rationale.
void ecp_nistz256_sqr_mont(BN_ULONG res[P256_LIMBS],
                           const BN_ULONG a[P256_LIMBS]) {
  if (!adx_bmi2_available) {
    ecp_nistz256_sqr_mont_nohw(res, a);
  } else {
    ecp_nistz256_sqr_mont_adx(res, a);
  }
}
// Scalar-field (mod N) Montgomery multiplication dispatcher between the
// ADX/BMI2 and baseline implementations.
void ecp_nistz256_ord_mul_mont(BN_ULONG res[P256_LIMBS],
                               const BN_ULONG a[P256_LIMBS],
                               const BN_ULONG b[P256_LIMBS]) {
  if (!adx_bmi2_available) {
    ecp_nistz256_ord_mul_mont_nohw(res, a, b);
  } else {
    ecp_nistz256_ord_mul_mont_adx(res, a, b);
  }
}
// Repeated scalar-field Montgomery squaring (|rep| squarings) dispatcher
// between the ADX/BMI2 and baseline implementations.
void ecp_nistz256_ord_sqr_mont(BN_ULONG res[P256_LIMBS],
                               const BN_ULONG a[P256_LIMBS],
                               BN_ULONG rep) {
  if (!adx_bmi2_available) {
    ecp_nistz256_ord_sqr_mont_nohw(res, a, rep);
  } else {
    ecp_nistz256_ord_sqr_mont_adx(res, a, rep);
  }
}
// Constant-time 5-bit table lookup dispatcher: uses the AVX2 implementation
// when available, the baseline otherwise.
static void ecp_nistz256_select_w5(P256_POINT *val, const P256_POINT in_t[16],
                                   int index) {
  if (!avx2_available) {
    ecp_nistz256_select_w5_nohw(val, in_t, index);
  } else {
    ecp_nistz256_select_w5_avx2(val, in_t, index);
  }
}
// Constant-time 7-bit table lookup dispatcher: uses the AVX2 implementation
// when available, the baseline otherwise.
static void ecp_nistz256_select_w7(P256_POINT_AFFINE *val,
                                   const P256_POINT_AFFINE in_t[64],
                                   int index) {
  if (!avx2_available) {
    ecp_nistz256_select_w7_nohw(val, in_t, index);
  } else {
    ecp_nistz256_select_w7_avx2(val, in_t, index);
  }
}
// Point doubling dispatcher between the ADX/BMI2 and baseline
// implementations.
void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a) {
  if (!adx_bmi2_available) {
    ecp_nistz256_point_double_nohw(r, a);
  } else {
    ecp_nistz256_point_double_adx(r, a);
  }
}
// Jacobian point addition dispatcher between the ADX/BMI2 and baseline
// implementations.
void ecp_nistz256_point_add(P256_POINT *r, const P256_POINT *a,
                            const P256_POINT *b) {
  if (!adx_bmi2_available) {
    ecp_nistz256_point_add_nohw(r, a, b);
  } else {
    ecp_nistz256_point_add_adx(r, a, b);
  }
}
// Mixed (Jacobian + affine) point addition dispatcher between the ADX/BMI2
// and baseline implementations.
void ecp_nistz256_point_add_affine(P256_POINT *r, const P256_POINT *a,
                                   const P256_POINT_AFFINE *b) {
  if (!adx_bmi2_available) {
    ecp_nistz256_point_add_affine_nohw(r, a, b);
  } else {
    ecp_nistz256_point_add_affine_adx(r, a, b);
  }
}
#endif // OPENSSL_X86_64
// r = p * p_scalar
//
// Scalar multiplication of an arbitrary point using 5-bit signed (Booth)
// windows and a constant-time table lookup per window. |p_x|/|p_y| are the
// affine coordinates of the point; |p_scalar| is treated as secret.
static void ecp_nistz256_windowed_mul(P256_POINT *r,
                                      const BN_ULONG p_scalar[P256_LIMBS],
                                      const BN_ULONG p_x[P256_LIMBS],
                                      const BN_ULONG p_y[P256_LIMBS]) {
  debug_assert_nonsecret(r != NULL);
  debug_assert_nonsecret(p_scalar != NULL);
  debug_assert_nonsecret(p_x != NULL);
  debug_assert_nonsecret(p_y != NULL);
  static const size_t kWindowSize = 5;
  static const crypto_word_t kMask = (1 << (5 /* kWindowSize */ + 1)) - 1;
  // A |P256_POINT| is (3 * 32) = 96 bytes, and the 64-byte alignment should
  // add no more than 63 bytes of overhead. Thus, |table| should require
  // ~1599 ((96 * 16) + 63) bytes of stack space.
  alignas(64) P256_POINT table[16];
  P256_SCALAR_BYTES p_str;
  p256_scalar_bytes_from_limbs(p_str, p_scalar);
  // table[0] is implicitly (0,0,0) (the point at infinity), therefore it is
  // not stored. All other values are actually stored with an offset of -1 in
  // table.
  P256_POINT *row = table;
  // Build table so that row[i - 1] = i * P, using doublings for even
  // multiples and single additions of P for odd ones.
  limbs_copy(row[1 - 1].X, p_x, P256_LIMBS);
  limbs_copy(row[1 - 1].Y, p_y, P256_LIMBS);
  limbs_copy(row[1 - 1].Z, ONE_MONT, P256_LIMBS);
  ecp_nistz256_point_double(&row[2 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[3 - 1], &row[2 - 1], &row[1 - 1]);
  ecp_nistz256_point_double(&row[4 - 1], &row[2 - 1]);
  ecp_nistz256_point_double(&row[6 - 1], &row[3 - 1]);
  ecp_nistz256_point_double(&row[8 - 1], &row[4 - 1]);
  ecp_nistz256_point_double(&row[12 - 1], &row[6 - 1]);
  ecp_nistz256_point_add(&row[5 - 1], &row[4 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[7 - 1], &row[6 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[9 - 1], &row[8 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[13 - 1], &row[12 - 1], &row[1 - 1]);
  ecp_nistz256_point_double(&row[14 - 1], &row[7 - 1]);
  ecp_nistz256_point_double(&row[10 - 1], &row[5 - 1]);
  ecp_nistz256_point_add(&row[15 - 1], &row[14 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[11 - 1], &row[10 - 1], &row[1 - 1]);
  ecp_nistz256_point_double(&row[16 - 1], &row[8 - 1]);
  BN_ULONG tmp[P256_LIMBS];
  alignas(32) P256_POINT h;
  // Walk the scalar from the most significant window down: five doublings
  // per window, then add or subtract the selected table entry.
  size_t index = 255;
  crypto_word_t wvalue = p_str[(index - 1) / 8];
  wvalue = (wvalue >> ((index - 1) % 8)) & kMask;
  ecp_nistz256_select_w5(r, table, (int)(booth_recode_w5(wvalue) >> 1));
  while (index >= 5) {
    if (index != 255) {
      // Read two adjacent bytes so a window can straddle a byte boundary.
      size_t off = (index - 1) / 8;
      wvalue = (crypto_word_t)p_str[off] | (crypto_word_t)p_str[off + 1] << 8;
      wvalue = (wvalue >> ((index - 1) % 8)) & kMask;
      wvalue = booth_recode_w5(wvalue);
      ecp_nistz256_select_w5(&h, table, (int)(wvalue >> 1));
      // The low bit of the recoded window is the sign: negate Y in constant
      // time to subtract instead of add.
      ecp_nistz256_neg(tmp, h.Y);
      copy_conditional(h.Y, tmp, (wvalue & 1));
      ecp_nistz256_point_add(r, r, &h);
    }
    index -= kWindowSize;
    ecp_nistz256_point_double(r, r);
    ecp_nistz256_point_double(r, r);
    ecp_nistz256_point_double(r, r);
    ecp_nistz256_point_double(r, r);
    ecp_nistz256_point_double(r, r);
  }
  // Final window
  wvalue = p_str[0];
  wvalue = (wvalue << 1) & kMask;
  wvalue = booth_recode_w5(wvalue);
  ecp_nistz256_select_w5(&h, table, (int)(wvalue >> 1));
  ecp_nistz256_neg(tmp, h.Y);
  copy_conditional(h.Y, tmp, wvalue & 1);
  ecp_nistz256_point_add(r, r, &h);
}
// Returns the Booth-recoded first (least significant) 7-bit window of
// |p_str| and initializes |*index| for subsequent |calc_wvalue| calls.
static crypto_word_t calc_first_wvalue(size_t *index, const uint8_t p_str[33]) {
  static const size_t kWindowSize = 7;
  static const crypto_word_t kMask = (1 << (7 /* kWindowSize */ + 1)) - 1;
  *index = kWindowSize;
  // The first window has an implicit zero bit below it, hence the shift.
  crypto_word_t window = ((crypto_word_t)p_str[0] << 1) & kMask;
  return booth_recode_w7(window);
}
// Returns the Booth-recoded 7-bit window of |p_str| starting at bit
// |*index - 1| and advances |*index| to the next window.
static crypto_word_t calc_wvalue(size_t *index, const uint8_t p_str[33]) {
  static const size_t kWindowSize = 7;
  static const crypto_word_t kMask = (1 << (7 /* kWindowSize */ + 1)) - 1;
  // Load two adjacent bytes so an unaligned window can straddle a byte
  // boundary, then shift the window's low bit down to position zero.
  const size_t byte_off = (*index - 1) / 8;
  crypto_word_t window =
      (crypto_word_t)p_str[byte_off] | (crypto_word_t)p_str[byte_off + 1] << 8;
  window = (window >> ((*index - 1) % 8)) & kMask;
  *index += kWindowSize;
  return booth_recode_w7(window);
}
// r = p_scalar * (p_x, p_y). Thin wrapper around the windowed ladder
// (|ecp_nistz256_windowed_mul|) that unpacks the resulting Jacobian point
// into the caller's limb arrays.
void p256_point_mul(Limb r[3][P256_LIMBS], const Limb p_scalar[P256_LIMBS],
                    const Limb p_x[P256_LIMBS],
                    const Limb p_y[P256_LIMBS]) {
  alignas(32) P256_POINT out;
  ecp_nistz256_windowed_mul(&out, p_scalar, p_x, p_y);
  limbs_copy(r[0], out.X, P256_LIMBS);
  limbs_copy(r[1], out.Y, P256_LIMBS);
  limbs_copy(r[2], out.Z, P256_LIMBS);
}
// r = scalar * G, using the precomputed generator table
// |ecp_nistz256_precomputed| with 7-bit Booth windows and constant-time
// table lookups.
void p256_point_mul_base(Limb r[3][P256_LIMBS], const Limb scalar[P256_LIMBS]) {
  P256_SCALAR_BYTES p_str;
  p256_scalar_bytes_from_limbs(p_str, scalar);
  // First window
  size_t index = 0;
  crypto_word_t wvalue = calc_first_wvalue(&index, p_str);
  alignas(32) P256_POINT_AFFINE t;
  alignas(32) P256_POINT p;
  ecp_nistz256_select_w7(&t, ecp_nistz256_precomputed[0], (int)(wvalue >> 1));
  // Low bit of |wvalue| is the Booth sign bit: conditionally negate Y.
  // |p.Z| is only used as scratch space here; it is overwritten below.
  ecp_nistz256_neg(p.Z, t.Y);
  copy_conditional(t.Y, p.Z, wvalue & 1);
  // Convert |t| from affine to Jacobian coordinates. We set Z to zero if |t|
  // is infinity and |ONE| otherwise. |t| was computed from the table, so it
  // is infinity iff |wvalue >> 1| is zero.
  limbs_copy(p.X, t.X, P256_LIMBS);
  limbs_copy(p.Y, t.Y, P256_LIMBS);
  limbs_zero(p.Z, P256_LIMBS);
  copy_conditional(p.Z, ONE_MONT, is_not_zero(wvalue >> 1));
  // Accumulate the remaining 36 windows, one precomputed row per window.
  for (int i = 1; i < 37; i++) {
    wvalue = calc_wvalue(&index, p_str);
    ecp_nistz256_select_w7(&t, ecp_nistz256_precomputed[i], (int)(wvalue >> 1));
    alignas(32) BN_ULONG neg_Y[P256_LIMBS];
    ecp_nistz256_neg(neg_Y, t.Y);
    copy_conditional(t.Y, neg_Y, wvalue & 1);
    // Note |ecp_nistz256_point_add_affine| does not work if |p| and |t| are the
    // same non-infinity point.
    ecp_nistz256_point_add_affine(&p, &p, &t);
  }
  limbs_copy(r[0], p.X, P256_LIMBS);
  limbs_copy(r[1], p.Y, P256_LIMBS);
  limbs_copy(r[2], p.Z, P256_LIMBS);
}
// r = g_scalar * G, variable-time: this function branches on |g_scalar| and
// on table indices derived from it, so it must only be used where the scalar
// is public.
void p256_point_mul_base_vartime(Limb r[3][P256_LIMBS],
                                 const Limb g_scalar[P256_LIMBS]) {
  alignas(32) P256_POINT p;
  uint8_t p_str[33];
  // NOTE(review): copying limbs directly into the byte string assumes a
  // little-endian limb layout — confirm against
  // |p256_scalar_bytes_from_limbs| used by the constant-time paths.
  OPENSSL_memcpy(p_str, g_scalar, 32);
  p_str[32] = 0;
  // First window
  size_t index = 0;
  size_t wvalue = calc_first_wvalue(&index, p_str);
  // Convert |p| from affine to Jacobian coordinates. We set Z to zero if |p|
  // is infinity and |ONE_MONT| otherwise. |p| was computed from the table, so
  // it is infinity iff |wvalue >> 1| is zero.
  if ((wvalue >> 1) != 0) {
    OPENSSL_memcpy(p.X, &ecp_nistz256_precomputed[0][(wvalue >> 1) - 1].X,
                   sizeof(p.X));
    OPENSSL_memcpy(p.Y, &ecp_nistz256_precomputed[0][(wvalue >> 1) - 1].Y,
                   sizeof(p.Y));
    OPENSSL_memcpy(p.Z, ONE_MONT, sizeof(p.Z));
  } else {
    OPENSSL_memset(p.X, 0, sizeof(p.X));
    OPENSSL_memset(p.Y, 0, sizeof(p.Y));
    OPENSSL_memset(p.Z, 0, sizeof(p.Z));
  }
  // Low Booth bit set means the digit is negative: negate Y.
  if ((wvalue & 1) == 1) {
    ecp_nistz256_neg(p.Y, p.Y);
  }
  for (int i = 1; i < 37; i++) {
    wvalue = calc_wvalue(&index, p_str);
    // Zero digit: adding infinity is a no-op, so skip the addition.
    if ((wvalue >> 1) == 0) {
      continue;
    }
    alignas(32) P256_POINT_AFFINE t;
    OPENSSL_memcpy(&t, &ecp_nistz256_precomputed[i][(wvalue >> 1) - 1],
                   sizeof(t));
    if ((wvalue & 1) == 1) {
      ecp_nistz256_neg(t.Y, t.Y);
    }
    // Note |ecp_nistz256_point_add_affine| does not work if |p| and |t| are
    // the same non-infinity point, so it is important that we compute the
    // |g_scalar| term before the |p_scalar| term.
    ecp_nistz256_point_add_affine(&p, &p, &t);
  }
  limbs_copy(r[0], p.X, P256_LIMBS);
  limbs_copy(r[1], p.Y, P256_LIMBS);
  limbs_copy(r[2], p.Z, P256_LIMBS);
}
#endif /* defined(OPENSSL_USE_NISTZ256) */

View File

@@ -0,0 +1,171 @@
// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
// Copyright (c) 2014, Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1)
// (1) Intel Corporation, Israel Development Center, Haifa, Israel
// (2) University of Haifa, Israel
//
// Reference:
// S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with
// 256 Bit Primes"
#ifndef OPENSSL_HEADER_EC_P256_X86_64_H
#define OPENSSL_HEADER_EC_P256_X86_64_H
#include <ring-core/base.h>
#include "p256_shared.h"
#include "../bn/internal.h"
#if defined(OPENSSL_USE_NISTZ256)
// ecp_nistz256_neg sets |res| to -|a| mod P.
void ecp_nistz256_neg(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS]);
// ecp_nistz256_mul_mont sets |res| to |a| * |b| * 2^-256 mod P.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_mul_mont_nohw(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS],
const BN_ULONG b[P256_LIMBS]);
void ecp_nistz256_mul_mont_adx(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS],
const BN_ULONG b[P256_LIMBS]);
#else
void ecp_nistz256_mul_mont(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS],
const BN_ULONG b[P256_LIMBS]);
#endif
// ecp_nistz256_sqr_mont sets |res| to |a| * |a| * 2^-256 mod P.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_sqr_mont_nohw(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS]);
void ecp_nistz256_sqr_mont_adx(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS]);
#else
void ecp_nistz256_sqr_mont(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS]);
#endif
// P-256 scalar operations.
//
// The following functions compute modulo N, where N is the order of P-256. They
// take fully-reduced inputs and give fully-reduced outputs.
// ecp_nistz256_ord_mul_mont sets |res| to |a| * |b| where inputs and outputs
// are in Montgomery form. That is, |res| is |a| * |b| * 2^-256 mod N.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_ord_mul_mont_nohw(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS],
const BN_ULONG b[P256_LIMBS]);
void ecp_nistz256_ord_mul_mont_adx(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS],
const BN_ULONG b[P256_LIMBS]);
#else
void ecp_nistz256_ord_mul_mont(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS],
const BN_ULONG b[P256_LIMBS]);
#endif
// ecp_nistz256_ord_sqr_mont sets |res| to |a|^(2*|rep|) where inputs and
// outputs are in Montgomery form. That is, |res| is
// (|a| * 2^-256)^(2*|rep|) * 2^256 mod N.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_ord_sqr_mont_nohw(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS], BN_ULONG rep);
void ecp_nistz256_ord_sqr_mont_adx(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS], BN_ULONG rep);
#else
void ecp_nistz256_ord_sqr_mont(BN_ULONG res[P256_LIMBS],
const BN_ULONG a[P256_LIMBS], BN_ULONG rep);
#endif
// P-256 point operations.
//
// The following functions may be used in-place. All coordinates are in the
// Montgomery domain.
// A P256_POINT_AFFINE represents a P-256 point in affine coordinates. Infinity
// is encoded as (0, 0).
typedef struct {
BN_ULONG X[P256_LIMBS];
BN_ULONG Y[P256_LIMBS];
} P256_POINT_AFFINE;
// ecp_nistz256_select_w5 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 16
// and all zeros (the point at infinity) if |index| is 0. This is done in
// constant time.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_select_w5_nohw(P256_POINT *val, const P256_POINT in_t[16],
int index);
void ecp_nistz256_select_w5_avx2(P256_POINT *val, const P256_POINT in_t[16],
int index);
#else
void ecp_nistz256_select_w5(P256_POINT *val, const P256_POINT in_t[16],
int index);
#endif
// ecp_nistz256_select_w7 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 64
// and all zeros (the point at infinity) if |index| is 0. This is done in
// constant time.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_select_w7_nohw(P256_POINT_AFFINE *val,
const P256_POINT_AFFINE in_t[64], int index);
void ecp_nistz256_select_w7_avx2(P256_POINT_AFFINE *val,
const P256_POINT_AFFINE in_t[64], int index);
#else
void ecp_nistz256_select_w7(P256_POINT_AFFINE *val,
const P256_POINT_AFFINE in_t[64], int index);
#endif
// ecp_nistz256_point_double sets |r| to |a| doubled.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_point_double_nohw(P256_POINT *r, const P256_POINT *a);
void ecp_nistz256_point_double_adx(P256_POINT *r, const P256_POINT *a);
#else
void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a);
#endif
// ecp_nistz256_point_add adds |a| to |b| and places the result in |r|.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_point_add_nohw(P256_POINT *r, const P256_POINT *a,
const P256_POINT *b);
void ecp_nistz256_point_add_adx(P256_POINT *r, const P256_POINT *a,
const P256_POINT *b);
#else
void ecp_nistz256_point_add(P256_POINT *r, const P256_POINT *a,
const P256_POINT *b);
#endif
// ecp_nistz256_point_add_affine adds |a| to |b| and places the result in
// |r|. |a| and |b| must not represent the same point unless they are both
// infinity.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_point_add_affine_adx(P256_POINT *r, const P256_POINT *a,
const P256_POINT_AFFINE *b);
void ecp_nistz256_point_add_affine_nohw(P256_POINT *r, const P256_POINT *a,
const P256_POINT_AFFINE *b);
#else
void ecp_nistz256_point_add_affine(P256_POINT *r, const P256_POINT *a,
const P256_POINT_AFFINE *b);
#endif
#endif /* defined(OPENSSL_USE_NISTZ256) */
#endif // OPENSSL_HEADER_EC_P256_X86_64_H

539
vendor/ring/crypto/fipsmodule/ec/p256.c vendored Normal file
View File

@@ -0,0 +1,539 @@
// Copyright 2020 The BoringSSL Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// An implementation of the NIST P-256 elliptic curve point multiplication.
// 256-bit Montgomery form for 64 and 32-bit. Field operations are generated by
// Fiat, which lives in //third_party/fiat.
#include <ring-core/base.h>
#include "../../limbs/limbs.h"
#include "../../limbs/limbs.inl"
#include "p256_shared.h"
#include "../../internal.h"
#include "./util.h"
#if !defined(OPENSSL_USE_NISTZ256)
#if defined(_MSC_VER) && !defined(__clang__)
// '=': conversion from 'int64_t' to 'int32_t', possible loss of data
#pragma warning(disable: 4242)
// '=': conversion from 'int32_t' to 'uint8_t', possible loss of data
#pragma warning(disable: 4244)
// 'initializing': conversion from 'size_t' to 'fiat_p256_limb_t'
#pragma warning(disable: 4267)
#endif
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wsign-conversion"
#endif
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic ignored "-Winline"
#endif
#if defined(BORINGSSL_HAS_UINT128)
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include "../../../third_party/fiat/p256_64.h"
#elif defined(OPENSSL_64_BIT)
#include "../../../third_party/fiat/p256_64_msvc.h"
#else
#include "../../../third_party/fiat/p256_32.h"
#endif
// utility functions, handwritten
#if defined(OPENSSL_64_BIT)
#define FIAT_P256_NLIMBS 4
typedef uint64_t fiat_p256_limb_t;
typedef uint64_t fiat_p256_felem[FIAT_P256_NLIMBS];
// The field element 1 in Montgomery form.
static const fiat_p256_felem fiat_p256_one = {0x1, 0xffffffff00000000,
                                              0xffffffffffffffff, 0xfffffffe};
#else  // 64BIT; else 32BIT
#define FIAT_P256_NLIMBS 8
typedef uint32_t fiat_p256_limb_t;
typedef uint32_t fiat_p256_felem[FIAT_P256_NLIMBS];
// The field element 1 in Montgomery form (same value, 32-bit limbs).
static const fiat_p256_felem fiat_p256_one = {
    0x1, 0x0, 0x0, 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe, 0x0};
#endif  // 64BIT
// fiat_p256_nz returns a nonzero limb value iff |in1| is nonzero, and zero
// otherwise, via the fiat-generated |fiat_p256_nonzero|.
static fiat_p256_limb_t fiat_p256_nz(
    const fiat_p256_limb_t in1[FIAT_P256_NLIMBS]) {
  fiat_p256_limb_t is_nonzero;
  fiat_p256_nonzero(&is_nonzero, in1);
  return is_nonzero;
}
// fiat_p256_copy copies the field element |in1| into |out|, limb by limb.
static void fiat_p256_copy(fiat_p256_limb_t out[FIAT_P256_NLIMBS],
                           const fiat_p256_limb_t in1[FIAT_P256_NLIMBS]) {
  for (size_t limb = 0; limb < FIAT_P256_NLIMBS; limb++) {
    out[limb] = in1[limb];
  }
}
// fiat_p256_cmovznz sets |out| to |z| if |t| == 0 and to |nz| otherwise.
// Selection is delegated to the fiat-generated |fiat_p256_selectznz|.
static void fiat_p256_cmovznz(fiat_p256_limb_t out[FIAT_P256_NLIMBS],
                              fiat_p256_limb_t t,
                              const fiat_p256_limb_t z[FIAT_P256_NLIMBS],
                              const fiat_p256_limb_t nz[FIAT_P256_NLIMBS]) {
  // |fiat_p256_selectznz| requires a 0/1 selector; !!t normalizes |t|.
  fiat_p256_selectznz(out, !!t, z, nz);
}
// fiat_p256_from_words loads a 32-byte field element, given as |BN_ULONG|
// limbs, into the fiat-crypto representation.
static void fiat_p256_from_words(fiat_p256_felem out,
                                 const Limb in[32 / sizeof(BN_ULONG)]) {
  // Typically, |BN_ULONG| and |fiat_p256_limb_t| will be the same type, but on
  // 64-bit platforms without |uint128_t|, they are different. However, on
  // little-endian systems, |uint64_t[4]| and |uint32_t[8]| have the same
  // layout.
  OPENSSL_memcpy(out, in, 32);
}
// fiat_p256_to_words stores a fiat-crypto field element back into |BN_ULONG|
// limbs.
static void fiat_p256_to_words(Limb out[32 / sizeof(BN_ULONG)], const fiat_p256_felem in) {
  // See |fiat_p256_from_words|.
  OPENSSL_memcpy(out, in, 32);
}
// Group operations
// ----------------
//
// Building on top of the field operations we have the operations on the
// elliptic curve group itself. Points on the curve are represented in Jacobian
// coordinates.
//
// Both operations were transcribed to Coq and proven to correspond to naive
// implementations using Affine coordinates, for all suitable fields. In the
// Coq proofs, issues of constant-time execution and memory layout (aliasing)
// conventions were not considered. Specification of affine coordinates:
// <https://github.com/mit-plv/fiat-crypto/blob/79f8b5f39ed609339f0233098dee1a3c4e6b3080/src/Spec/WeierstrassCurve.v#L28>
// As a sanity check, a proof that these points form a commutative group:
// <https://github.com/mit-plv/fiat-crypto/blob/79f8b5f39ed609339f0233098dee1a3c4e6b3080/src/Curves/Weierstrass/AffineProofs.v#L33>
// fiat_p256_point_double calculates 2*(x_in, y_in, z_in)
//
// The method is taken from:
// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
//
// Coq transcription and correctness proof:
// <https://github.com/mit-plv/fiat-crypto/blob/79f8b5f39ed609339f0233098dee1a3c4e6b3080/src/Curves/Weierstrass/Jacobian.v#L93>
// <https://github.com/mit-plv/fiat-crypto/blob/79f8b5f39ed609339f0233098dee1a3c4e6b3080/src/Curves/Weierstrass/Jacobian.v#L201>
//
// Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed.
// while x_out == y_in is not (maybe this works, but it's not tested).
// (See the formula reference and aliasing caveat in the comment above:
// outputs may equal the corresponding inputs, e.g. x_out == x_in.)
static void fiat_p256_point_double(fiat_p256_felem x_out, fiat_p256_felem y_out,
                                   fiat_p256_felem z_out,
                                   const fiat_p256_felem x_in,
                                   const fiat_p256_felem y_in,
                                   const fiat_p256_felem z_in) {
  fiat_p256_felem delta, gamma, beta, ftmp, ftmp2, tmptmp, alpha, fourbeta;
  // delta = z^2
  fiat_p256_square(delta, z_in);
  // gamma = y^2
  fiat_p256_square(gamma, y_in);
  // beta = x*gamma
  fiat_p256_mul(beta, x_in, gamma);
  // alpha = 3*(x-delta)*(x+delta)
  fiat_p256_sub(ftmp, x_in, delta);
  fiat_p256_add(ftmp2, x_in, delta);
  fiat_p256_add(tmptmp, ftmp2, ftmp2);
  fiat_p256_add(ftmp2, ftmp2, tmptmp);  // ftmp2 = 3*(x+delta)
  fiat_p256_mul(alpha, ftmp, ftmp2);
  // x' = alpha^2 - 8*beta
  fiat_p256_square(x_out, alpha);
  fiat_p256_add(fourbeta, beta, beta);
  fiat_p256_add(fourbeta, fourbeta, fourbeta);
  fiat_p256_add(tmptmp, fourbeta, fourbeta);  // tmptmp = 8*beta
  fiat_p256_sub(x_out, x_out, tmptmp);
  // z' = (y + z)^2 - gamma - delta
  fiat_p256_add(delta, gamma, delta);
  fiat_p256_add(ftmp, y_in, z_in);
  fiat_p256_square(z_out, ftmp);
  fiat_p256_sub(z_out, z_out, delta);
  // y' = alpha*(4*beta - x') - 8*gamma^2
  fiat_p256_sub(y_out, fourbeta, x_out);
  fiat_p256_add(gamma, gamma, gamma);
  fiat_p256_square(gamma, gamma);  // gamma = 4 * (original gamma)^2
  fiat_p256_mul(y_out, alpha, y_out);
  fiat_p256_add(gamma, gamma, gamma);  // gamma = 8 * (original gamma)^2
  fiat_p256_sub(y_out, y_out, gamma);
}
// fiat_p256_point_add calculates (x1, y1, z1) + (x2, y2, z2)
//
// The method is taken from:
// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl,
// adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity).
//
// Coq transcription and correctness proof:
// <https://github.com/mit-plv/fiat-crypto/blob/79f8b5f39ed609339f0233098dee1a3c4e6b3080/src/Curves/Weierstrass/Jacobian.v#L135>
// <https://github.com/mit-plv/fiat-crypto/blob/79f8b5f39ed609339f0233098dee1a3c4e6b3080/src/Curves/Weierstrass/Jacobian.v#L205>
//
// This function includes a branch for checking whether the two input points
// are equal, (while not equal to the point at infinity). This case never
// happens during single point multiplication, so there is no timing leak for
// ECDH or ECDSA signing.
// (See the formula reference in the comment above. |mixed| nonzero means z2
// is assumed to be 1; z2 == 0, the point at infinity, is still handled.)
static void fiat_p256_point_add(fiat_p256_felem x3, fiat_p256_felem y3,
                                fiat_p256_felem z3, const fiat_p256_felem x1,
                                const fiat_p256_felem y1,
                                const fiat_p256_felem z1, const int mixed,
                                const fiat_p256_felem x2,
                                const fiat_p256_felem y2,
                                const fiat_p256_felem z2) {
  fiat_p256_felem x_out, y_out, z_out;
  fiat_p256_limb_t z1nz = fiat_p256_nz(z1);
  fiat_p256_limb_t z2nz = fiat_p256_nz(z2);
  // z1z1 = z1z1 = z1**2
  fiat_p256_felem z1z1;
  fiat_p256_square(z1z1, z1);
  fiat_p256_felem u1, s1, two_z1z2;
  if (!mixed) {
    // z2z2 = z2**2
    fiat_p256_felem z2z2;
    fiat_p256_square(z2z2, z2);
    // u1 = x1*z2z2
    fiat_p256_mul(u1, x1, z2z2);
    // two_z1z2 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2
    fiat_p256_add(two_z1z2, z1, z2);
    fiat_p256_square(two_z1z2, two_z1z2);
    fiat_p256_sub(two_z1z2, two_z1z2, z1z1);
    fiat_p256_sub(two_z1z2, two_z1z2, z2z2);
    // s1 = y1 * z2**3
    fiat_p256_mul(s1, z2, z2z2);
    fiat_p256_mul(s1, s1, y1);
  } else {
    // We'll assume z2 = 1 (special case z2 = 0 is handled later).
    // u1 = x1*z2z2
    fiat_p256_copy(u1, x1);
    // two_z1z2 = 2z1z2
    fiat_p256_add(two_z1z2, z1, z1);
    // s1 = y1 * z2**3
    fiat_p256_copy(s1, y1);
  }
  // u2 = x2*z1z1
  fiat_p256_felem u2;
  fiat_p256_mul(u2, x2, z1z1);
  // h = u2 - u1
  fiat_p256_felem h;
  fiat_p256_sub(h, u2, u1);
  fiat_p256_limb_t xneq = fiat_p256_nz(h);
  // z_out = two_z1z2 * h
  fiat_p256_mul(z_out, h, two_z1z2);
  // z1z1z1 = z1 * z1z1
  fiat_p256_felem z1z1z1;
  fiat_p256_mul(z1z1z1, z1, z1z1);
  // s2 = y2 * z1**3
  fiat_p256_felem s2;
  fiat_p256_mul(s2, y2, z1z1z1);
  // r = (s2 - s1)*2
  fiat_p256_felem r;
  fiat_p256_sub(r, s2, s1);
  fiat_p256_add(r, r, r);
  fiat_p256_limb_t yneq = fiat_p256_nz(r);
  // Detect the doubling case: equal x and y with neither input at infinity.
  fiat_p256_limb_t is_nontrivial_double = constant_time_is_zero_w(xneq | yneq) &
                                          ~constant_time_is_zero_w(z1nz) &
                                          ~constant_time_is_zero_w(z2nz);
  // Per the comment above the function, this case never occurs during
  // single-point multiplication, so declassifying the flag does not leak.
  if (constant_time_declassify_w(is_nontrivial_double)) {
    fiat_p256_point_double(x3, y3, z3, x1, y1, z1);
    return;
  }
  // I = (2h)**2
  fiat_p256_felem i;
  fiat_p256_add(i, h, h);
  fiat_p256_square(i, i);
  // J = h * I
  fiat_p256_felem j;
  fiat_p256_mul(j, h, i);
  // V = U1 * I
  fiat_p256_felem v;
  fiat_p256_mul(v, u1, i);
  // x_out = r**2 - J - 2V
  fiat_p256_square(x_out, r);
  fiat_p256_sub(x_out, x_out, j);
  fiat_p256_sub(x_out, x_out, v);
  fiat_p256_sub(x_out, x_out, v);
  // y_out = r(V-x_out) - 2 * s1 * J
  fiat_p256_sub(y_out, v, x_out);
  fiat_p256_mul(y_out, y_out, r);
  fiat_p256_felem s1j;
  fiat_p256_mul(s1j, s1, j);
  fiat_p256_sub(y_out, y_out, s1j);
  fiat_p256_sub(y_out, y_out, s1j);
  // Handle infinity inputs: if z1 == 0 the result is (x2, y2, z2); if
  // z2 == 0 it is (x1, y1, z1). Selected without branching.
  fiat_p256_cmovznz(x_out, z1nz, x2, x_out);
  fiat_p256_cmovznz(x3, z2nz, x1, x_out);
  fiat_p256_cmovznz(y_out, z1nz, y2, y_out);
  fiat_p256_cmovznz(y3, z2nz, y1, y_out);
  fiat_p256_cmovznz(z_out, z1nz, z2, z_out);
  fiat_p256_cmovznz(z3, z2nz, z1, z_out);
}
#include "./p256_table.h"
// fiat_p256_select_point_affine selects the |idx-1|th point from a
// precomputation table and copies it to out. If |idx| is zero, the output is
// the point at infinity.
static void fiat_p256_select_point_affine(
    const fiat_p256_limb_t idx, size_t size,
    const fiat_p256_felem pre_comp[/*size*/][2], fiat_p256_felem out[3]) {
  OPENSSL_memset(out, 0, sizeof(fiat_p256_felem) * 3);
  // Scan the whole table regardless of |idx| so the memory access pattern
  // does not depend on the secret index.
  for (size_t i = 0; i < size; i++) {
    fiat_p256_limb_t mismatch = i ^ (idx - 1);
    fiat_p256_cmovznz(out[0], mismatch, pre_comp[i][0], out[0]);
    fiat_p256_cmovznz(out[1], mismatch, pre_comp[i][1], out[1]);
  }
  // Table entries are affine; set Z to one unless |idx| selected infinity.
  fiat_p256_cmovznz(out[2], idx, out[2], fiat_p256_one);
}
// fiat_p256_select_point selects the |idx|th point from a precomputation table
// and copies it to out.
static void fiat_p256_select_point(const fiat_p256_limb_t idx, size_t size,
                                   const fiat_p256_felem pre_comp[/*size*/][3],
                                   fiat_p256_felem out[3]) {
  OPENSSL_memset(out, 0, sizeof(fiat_p256_felem) * 3);
  // Scan the whole table regardless of |idx| so the memory access pattern
  // does not depend on the secret index.
  for (size_t i = 0; i < size; i++) {
    fiat_p256_limb_t mismatch = i ^ idx;
    fiat_p256_cmovznz(out[0], mismatch, pre_comp[i][0], out[0]);
    fiat_p256_cmovznz(out[1], mismatch, pre_comp[i][1], out[1]);
    fiat_p256_cmovznz(out[2], mismatch, pre_comp[i][2], out[2]);
  }
}
// fiat_p256_get_bit returns the |i|th bit in |in|
//
// Out-of-range indices read as zero: the ladder in |p256_point_mul| probes
// bits i-1 .. i+4 around window boundaries, which can step outside [0, 255].
static crypto_word_t fiat_p256_get_bit(const Limb in[P256_LIMBS], int i) {
  if (i < 0 || i >= 256) {
    return 0;
  }
#if defined(OPENSSL_64_BIT)
  OPENSSL_STATIC_ASSERT(sizeof(Limb) == 8, "BN_ULONG was not 64-bit");
  return (in[i >> 6] >> (i & 63)) & 1;
#else
  OPENSSL_STATIC_ASSERT(sizeof(Limb) == 4, "BN_ULONG was not 32-bit");
  return (in[i >> 5] >> (i & 31)) & 1;
#endif
}
// r = scalar * (p_x, p_y), using a signed 5-bit windowed ladder with
// constant-time table selection. The result is a Jacobian point in |r|.
void p256_point_mul(Limb r[3][P256_LIMBS], const Limb scalar[P256_LIMBS],
                    const Limb p_x[P256_LIMBS], const Limb p_y[P256_LIMBS]) {
  debug_assert_nonsecret(r != NULL);
  debug_assert_nonsecret(scalar != NULL);
  debug_assert_nonsecret(p_x != NULL);
  debug_assert_nonsecret(p_y != NULL);
  fiat_p256_felem p_pre_comp[17][3];
  OPENSSL_memset(&p_pre_comp, 0, sizeof(p_pre_comp));
  // Precompute multiples: p_pre_comp[j] = j * P in Jacobian coordinates
  // (p_pre_comp[0] stays zero, the point at infinity).
  fiat_p256_from_words(p_pre_comp[1][0], p_x);
  fiat_p256_from_words(p_pre_comp[1][1], p_y);
  fiat_p256_copy(p_pre_comp[1][2], fiat_p256_one);
  for (size_t j = 2; j <= 16; ++j) {
    if (j & 1) {
      fiat_p256_point_add(p_pre_comp[j][0], p_pre_comp[j][1], p_pre_comp[j][2],
                          p_pre_comp[1][0], p_pre_comp[1][1], p_pre_comp[1][2],
                          0, p_pre_comp[j - 1][0], p_pre_comp[j - 1][1],
                          p_pre_comp[j - 1][2]);
    } else {
      fiat_p256_point_double(p_pre_comp[j][0], p_pre_comp[j][1],
                             p_pre_comp[j][2], p_pre_comp[j / 2][0],
                             p_pre_comp[j / 2][1], p_pre_comp[j / 2][2]);
    }
  }
  // Set nq to the point at infinity.
  fiat_p256_felem nq[3] = {{0}, {0}, {0}}, ftmp, tmp[3];
  // Loop over |scalar| msb-to-lsb, incorporating |p_pre_comp| every 5th round.
  int skip = 1;  // Save two point operations in the first round.
  // |i| counts 255 down to 0; being unsigned it wraps past zero, which the
  // |i < 256| condition detects and terminates on.
  for (size_t i = 255; i < 256; i--) {
    // double
    if (!skip) {
      fiat_p256_point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
    }
    // do other additions every 5 doublings
    if (i % 5 == 0) {
      // Assemble a window from bits i-1 .. i+4 of the scalar (out-of-range
      // bits read as zero via |fiat_p256_get_bit|).
      crypto_word_t bits = fiat_p256_get_bit(scalar, i + 4) << 5;
      bits |= fiat_p256_get_bit(scalar, i + 3) << 4;
      bits |= fiat_p256_get_bit(scalar, i + 2) << 3;
      bits |= fiat_p256_get_bit(scalar, i + 1) << 2;
      bits |= fiat_p256_get_bit(scalar, i) << 1;
      bits |= fiat_p256_get_bit(scalar, i - 1);
      crypto_word_t sign, digit;
      recode_scalar_bits(&sign, &digit, bits);
      // select the point to add or subtract, in constant time.
      fiat_p256_select_point((fiat_p256_limb_t)digit, 17,
                             RING_CORE_POINTLESS_ARRAY_CONST_CAST((const fiat_p256_felem(*)[3]))p_pre_comp,
                             tmp);
      fiat_p256_opp(ftmp, tmp[1]);  // (X, -Y, Z) is the negative point.
      fiat_p256_cmovznz(tmp[1], (fiat_p256_limb_t)sign, tmp[1], ftmp);
      if (!skip) {
        fiat_p256_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2],
                            0 /* mixed */, tmp[0], tmp[1], tmp[2]);
      } else {
        // First nonzero window: load instead of add.
        fiat_p256_copy(nq[0], tmp[0]);
        fiat_p256_copy(nq[1], tmp[1]);
        fiat_p256_copy(nq[2], tmp[2]);
        skip = 0;
      }
    }
  }
  fiat_p256_to_words(r[0], nq[0]);
  fiat_p256_to_words(r[1], nq[1]);
  fiat_p256_to_words(r[2], nq[2]);
}
// p256_point_mul_base sets |r| = |scalar| * G using the precomputed table
// |fiat_p256_g_pre_comp|. Each of the 32 iterations folds in eight scalar
// bits via two 4-bit comb lookups: bits i+32, i+96, i+160, i+224 index the
// second subtable (multiples scaled by 2^32 — see p256_table.h), and bits
// i, i+64, i+128, i+192 index the first.
void p256_point_mul_base(Limb r[3][P256_LIMBS], const Limb scalar[P256_LIMBS]) {
  // Set nq to the point at infinity.
  fiat_p256_felem nq[3] = {{0}, {0}, {0}}, tmp[3];
  int skip = 1;  // Save two point operations in the first round.
  // |i| is unsigned, so decrementing past zero wraps and ends the loop.
  for (size_t i = 31; i < 32; i--) {
    if (!skip) {
      fiat_p256_point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
    }
    // First, look 32 bits upwards.
    crypto_word_t bits = fiat_p256_get_bit(scalar, i + 224) << 3;
    bits |= fiat_p256_get_bit(scalar, i + 160) << 2;
    bits |= fiat_p256_get_bit(scalar, i + 96) << 1;
    bits |= fiat_p256_get_bit(scalar, i + 32);
    // Select the point to add, in constant time.
    fiat_p256_select_point_affine((fiat_p256_limb_t)bits, 15,
                                  fiat_p256_g_pre_comp[1], tmp);
    if (!skip) {
      fiat_p256_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2],
                          1 /* mixed */, tmp[0], tmp[1], tmp[2]);
    } else {
      // First iteration: the accumulator is still infinity, so assign
      // rather than add.
      fiat_p256_copy(nq[0], tmp[0]);
      fiat_p256_copy(nq[1], tmp[1]);
      fiat_p256_copy(nq[2], tmp[2]);
      skip = 0;
    }
    // Second, look at the current position.
    bits = fiat_p256_get_bit(scalar, i + 192) << 3;
    bits |= fiat_p256_get_bit(scalar, i + 128) << 2;
    bits |= fiat_p256_get_bit(scalar, i + 64) << 1;
    bits |= fiat_p256_get_bit(scalar, i);
    // Select the point to add, in constant time.
    fiat_p256_select_point_affine((fiat_p256_limb_t)bits, 15,
                                  fiat_p256_g_pre_comp[0], tmp);
    fiat_p256_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */,
                        tmp[0], tmp[1], tmp[2]);
  }
  fiat_p256_to_words(r[0], nq[0]);
  fiat_p256_to_words(r[1], nq[1]);
  fiat_p256_to_words(r[2], nq[2]);
}
// p256_mul_mont sets |r| = |a| * |b|, operating on P256_LIMBS-limb field
// elements in the same representation that fiat_p256_from_words expects.
void p256_mul_mont(Limb r[P256_LIMBS], const Limb a[P256_LIMBS],
                   const Limb b[P256_LIMBS]) {
  fiat_p256_felem lhs, rhs;
  fiat_p256_from_words(lhs, a);
  fiat_p256_from_words(rhs, b);
  fiat_p256_mul(lhs, lhs, rhs);
  fiat_p256_to_words(r, lhs);
}
// p256_sqr_mont sets |r| = |a|^2, in the same field-element representation
// as p256_mul_mont.
void p256_sqr_mont(Limb r[P256_LIMBS], const Limb a[P256_LIMBS]) {
  fiat_p256_felem t;
  fiat_p256_from_words(t, a);
  fiat_p256_square(t, t);
  fiat_p256_to_words(r, t);
}
// p256_point_add sets |r| = |a| + |b|, where both inputs and the output are
// points in Jacobian (X, Y, Z) coordinates.
void p256_point_add(Limb r[3][P256_LIMBS], const Limb a[3][P256_LIMBS],
                    const Limb b[3][P256_LIMBS]) {
  fiat_p256_felem ax, ay, az, bx, by, bz;
  fiat_p256_from_words(ax, a[0]);
  fiat_p256_from_words(ay, a[1]);
  fiat_p256_from_words(az, a[2]);
  fiat_p256_from_words(bx, b[0]);
  fiat_p256_from_words(by, b[1]);
  fiat_p256_from_words(bz, b[2]);
  fiat_p256_point_add(ax, ay, az, ax, ay, az, 0 /* both Jacobian */, bx, by,
                      bz);
  fiat_p256_to_words(r[0], ax);
  fiat_p256_to_words(r[1], ay);
  fiat_p256_to_words(r[2], az);
}
// p256_point_double sets |r| = 2 * |a|, with input and output in Jacobian
// (X, Y, Z) coordinates.
void p256_point_double(Limb r[3][P256_LIMBS], const Limb a[3][P256_LIMBS]) {
  fiat_p256_felem px, py, pz;
  fiat_p256_from_words(px, a[0]);
  fiat_p256_from_words(py, a[1]);
  fiat_p256_from_words(pz, a[2]);
  fiat_p256_point_double(px, py, pz, px, py, pz);
  fiat_p256_to_words(r[0], px);
  fiat_p256_to_words(r[1], py);
  fiat_p256_to_words(r[2], pz);
}
// For testing only: sets |r| = |a| + |b|, where |a| is Jacobian and |b| is
// an affine (x, y) pair.
void p256_point_add_affine(Limb r[3][P256_LIMBS], const Limb a[3][P256_LIMBS],
                           const Limb b[2][P256_LIMBS]) {
  fiat_p256_felem ax, ay, az, bx, by;
  fiat_p256_from_words(ax, a[0]);
  fiat_p256_from_words(ay, a[1]);
  fiat_p256_from_words(az, a[2]);
  fiat_p256_from_words(bx, b[0]);
  fiat_p256_from_words(by, b[1]);
  // Give |b| an implicit Z coordinate: 1 normally, but left as 0 (the
  // encoding of infinity) if either affine coordinate is zero. Done with a
  // constant-time move rather than a branch.
  fiat_p256_felem bz = {0};
  fiat_p256_cmovznz(bz, fiat_p256_nz(bx) & fiat_p256_nz(by), bz, fiat_p256_one);
  fiat_p256_point_add(ax, ay, az, ax, ay, az, 1 /* mixed */, bx, by, bz);
  fiat_p256_to_words(r[0], ax);
  fiat_p256_to_words(r[1], ay);
  fiat_p256_to_words(r[2], az);
}
#endif

View File

@@ -0,0 +1,62 @@
// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
// Copyright (c) 2014, Intel Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1)
// (1) Intel Corporation, Israel Development Center, Haifa, Israel
// (2) University of Haifa, Israel
//
// Reference:
// S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with
// 256 Bit Primes"
#ifndef OPENSSL_HEADER_EC_P256_SHARED_H
#define OPENSSL_HEADER_EC_P256_SHARED_H
#include "ring-core/base.h"
#include "../bn/internal.h"
#if !defined(OPENSSL_NO_ASM) && \
(defined(OPENSSL_X86_64) || defined(OPENSSL_AARCH64)) && \
!defined(OPENSSL_SMALL)
# define OPENSSL_USE_NISTZ256
#endif
// P-256 field operations.
//
// An element mod P in P-256 is represented as a little-endian array of
// |P256_LIMBS| |BN_ULONG|s, spanning the full range of values.
//
// The following functions take fully-reduced inputs mod P and give
// fully-reduced outputs. They may be used in-place.
#define P256_LIMBS (256 / BN_BITS2)
// A P256_POINT represents a P-256 point in Jacobian coordinates.
typedef struct {
  BN_ULONG X[P256_LIMBS];
  BN_ULONG Y[P256_LIMBS];
  BN_ULONG Z[P256_LIMBS];
} P256_POINT;
// A P256_SCALAR_BYTES holds the 32-byte in-memory limb encoding of a P-256
// scalar plus one trailing zero byte (see p256_scalar_bytes_from_limbs).
typedef unsigned char P256_SCALAR_BYTES[33];
// p256_scalar_bytes_from_limbs writes the raw 32-byte encoding of |limbs|
// into |bytes_out| and appends a single zero byte, so callers that window
// the scalar can safely read one byte past the end.
static inline void p256_scalar_bytes_from_limbs(
    P256_SCALAR_BYTES bytes_out, const BN_ULONG limbs[P256_LIMBS]) {
  bytes_out[32] = 0;
  OPENSSL_memcpy(bytes_out, limbs, 32);
}
#endif /* OPENSSL_HEADER_EC_P256_SHARED_H */

View File

@@ -0,0 +1,297 @@
// Copyright 2020 The BoringSSL Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file is generated by make_tables.go.
// Base point pre computation
// --------------------------
//
// Two different sorts of precomputed tables are used in the following code.
// Each contain various points on the curve, where each point is three field
// elements (x, y, z).
//
// The base point table stores affine (x, y) pairs only; the point at
// infinity (index 0 below) is not stored, so the table holds 2 * 15
// entries. Conceptually the first 16 multiples are:
// index | bits | point
// ------+---------+------------------------------
// 0 | 0 0 0 0 | 0G
// 1 | 0 0 0 1 | 1G
// 2 | 0 0 1 0 | 2^64G
// 3 | 0 0 1 1 | (2^64 + 1)G
// 4 | 0 1 0 0 | 2^128G
// 5 | 0 1 0 1 | (2^128 + 1)G
// 6 | 0 1 1 0 | (2^128 + 2^64)G
// 7 | 0 1 1 1 | (2^128 + 2^64 + 1)G
// 8 | 1 0 0 0 | 2^192G
// 9 | 1 0 0 1 | (2^192 + 1)G
// 10 | 1 0 1 0 | (2^192 + 2^64)G
// 11 | 1 0 1 1 | (2^192 + 2^64 + 1)G
// 12 | 1 1 0 0 | (2^192 + 2^128)G
// 13 | 1 1 0 1 | (2^192 + 2^128 + 1)G
// 14 | 1 1 1 0 | (2^192 + 2^128 + 2^64)G
// 15 | 1 1 1 1 | (2^192 + 2^128 + 2^64 + 1)G
// followed by a copy of this with each element multiplied by 2^32.
//
// The reason for this is so that we can clock bits into four different
// locations when doing simple scalar multiplies against the base point,
// and then another four locations using the second 16 elements.
//
// Tables for other points have table[i] = iG for i in 0 .. 16.
// fiat_p256_g_pre_comp is the table of precomputed base points
#if defined(OPENSSL_64_BIT)
// 64-bit build: each table entry is an affine point stored as two
// fiat_p256_felem values of four 64-bit limbs each.
static const fiat_p256_felem fiat_p256_g_pre_comp[2][15][2] = {
    {{{0x79e730d418a9143c, 0x75ba95fc5fedb601, 0x79fb732b77622510,
       0x18905f76a53755c6},
      {0xddf25357ce95560a, 0x8b4ab8e4ba19e45c, 0xd2e88688dd21f325,
       0x8571ff1825885d85}},
     {{0x4f922fc516a0d2bb, 0x0d5cc16c1a623499, 0x9241cf3a57c62c8b,
       0x2f5e6961fd1b667f},
      {0x5c15c70bf5a01797, 0x3d20b44d60956192, 0x04911b37071fdb52,
       0xf648f9168d6f0f7b}},
     {{0x9e566847e137bbbc, 0xe434469e8a6a0bec, 0xb1c4276179d73463,
       0x5abe0285133d0015},
      {0x92aa837cc04c7dab, 0x573d9f4c43260c07, 0x0c93156278e6cc37,
       0x94bb725b6b6f7383}},
     {{0x62a8c244bfe20925, 0x91c19ac38fdce867, 0x5a96a5d5dd387063,
       0x61d587d421d324f6},
      {0xe87673a2a37173ea, 0x2384800853778b65, 0x10f8441e05bab43e,
       0xfa11fe124621efbe}},
     {{0x1c891f2b2cb19ffd, 0x01ba8d5bb1923c23, 0xb6d03d678ac5ca8e,
       0x586eb04c1f13bedc},
      {0x0c35c6e527e8ed09, 0x1e81a33c1819ede2, 0x278fd6c056c652fa,
       0x19d5ac0870864f11}},
     {{0x62577734d2b533d5, 0x673b8af6a1bdddc0, 0x577e7c9aa79ec293,
       0xbb6de651c3b266b1},
      {0xe7e9303ab65259b3, 0xd6a0afd3d03a7480, 0xc5ac83d19b3cfc27,
       0x60b4619a5d18b99b}},
     {{0xbd6a38e11ae5aa1c, 0xb8b7652b49e73658, 0x0b130014ee5f87ed,
       0x9d0f27b2aeebffcd},
      {0xca9246317a730a55, 0x9c955b2fddbbc83a, 0x07c1dfe0ac019a71,
       0x244a566d356ec48d}},
     {{0x56f8410ef4f8b16a, 0x97241afec47b266a, 0x0a406b8e6d9c87c1,
       0x803f3e02cd42ab1b},
      {0x7f0309a804dbec69, 0xa83b85f73bbad05f, 0xc6097273ad8e197f,
       0xc097440e5067adc1}},
     {{0x846a56f2c379ab34, 0xa8ee068b841df8d1, 0x20314459176c68ef,
       0xf1af32d5915f1f30},
      {0x99c375315d75bd50, 0x837cffbaf72f67bc, 0x0613a41848d7723f,
       0x23d0f130e2d41c8b}},
     {{0xed93e225d5be5a2b, 0x6fe799835934f3c6, 0x4314092622626ffc,
       0x50bbb4d97990216a},
      {0x378191c6e57ec63e, 0x65422c40181dcdb2, 0x41a8099b0236e0f6,
       0x2b10011801fe49c3}},
     {{0xfc68b5c59b391593, 0xc385f5a2598270fc, 0x7144f3aad19adcbb,
       0xdd55899983fbae0c},
      {0x93b88b8e74b82ff4, 0xd2e03c4071e734c9, 0x9a7a9eaf43c0322a,
       0xe6e4c551149d6041}},
     {{0x5fe14bfe80ec21fe, 0xf6ce116ac255be82, 0x98bc5a072f4a5d67,
       0xfad27148db7e63af},
      {0x90c0b6ac29ab05b3, 0x37a9a83c4e251ae6, 0x0a7dc875c2aade7d,
       0x77387de39f0e1a84}},
     {{0x1e9ecc49a56c0dd7, 0xa5cffcd846086c74, 0x8f7a1408f505aece,
       0xb37b85c0bef0c47e},
      {0x3596b6e4cc0e6a8f, 0xfd6d4bbf6b388f23, 0xaba453fac39cef4e,
       0x9c135ac8f9f628d5}},
     {{0x0a1c729495c8f8be, 0x2961c4803bf362bf, 0x9e418403df63d4ac,
       0xc109f9cb91ece900},
      {0xc2d095d058945705, 0xb9083d96ddeb85c0, 0x84692b8d7a40449b,
       0x9bc3344f2eee1ee1}},
     {{0x0d5ae35642913074, 0x55491b2748a542b1, 0x469ca665b310732a,
       0x29591d525f1a4cc1},
      {0xe76f5b6bb84f983f, 0xbe7eef419f5f84e1, 0x1200d49680baa189,
       0x6376551f18ef332c}}},
    {{{0x202886024147519a, 0xd0981eac26b372f0, 0xa9d4a7caa785ebc8,
       0xd953c50ddbdf58e9},
      {0x9d6361ccfd590f8f, 0x72e9626b44e6c917, 0x7fd9611022eb64cf,
       0x863ebb7e9eb288f3}},
     {{0x4fe7ee31b0e63d34, 0xf4600572a9e54fab, 0xc0493334d5e7b5a4,
       0x8589fb9206d54831},
      {0xaa70f5cc6583553a, 0x0879094ae25649e5, 0xcc90450710044652,
       0xebb0696d02541c4f}},
     {{0xabbaa0c03b89da99, 0xa6f2d79eb8284022, 0x27847862b81c05e8,
       0x337a4b5905e54d63},
      {0x3c67500d21f7794a, 0x207005b77d6d7f61, 0x0a5a378104cfd6e8,
       0x0d65e0d5f4c2fbd6}},
     {{0xd433e50f6d3549cf, 0x6f33696ffacd665e, 0x695bfdacce11fcb4,
       0x810ee252af7c9860},
      {0x65450fe17159bb2c, 0xf7dfbebe758b357b, 0x2b057e74d69fea72,
       0xd485717a92731745}},
     {{0xce1f69bbe83f7669, 0x09f8ae8272877d6b, 0x9548ae543244278d,
       0x207755dee3c2c19c},
      {0x87bd61d96fef1945, 0x18813cefb12d28c3, 0x9fbcd1d672df64aa,
       0x48dc5ee57154b00d}},
     {{0xef0f469ef49a3154, 0x3e85a5956e2b2e9a, 0x45aaec1eaa924a9c,
       0xaa12dfc8a09e4719},
      {0x26f272274df69f1d, 0xe0e4c82ca2ff5e73, 0xb9d8ce73b7a9dd44,
       0x6c036e73e48ca901}},
     {{0xe1e421e1a47153f0, 0xb86c3b79920418c9, 0x93bdce87705d7672,
       0xf25ae793cab79a77},
      {0x1f3194a36d869d0c, 0x9d55c8824986c264, 0x49fb5ea3096e945e,
       0x39b8e65313db0a3e}},
     {{0xe3417bc035d0b34a, 0x440b386b8327c0a7, 0x8fb7262dac0362d1,
       0x2c41114ce0cdf943},
      {0x2ba5cef1ad95a0b1, 0xc09b37a867d54362, 0x26d6cdd201e486c9,
       0x20477abf42ff9297}},
     {{0x0f121b41bc0a67d2, 0x62d4760a444d248a, 0x0e044f1d659b4737,
       0x08fde365250bb4a8},
      {0xaceec3da848bf287, 0xc2a62182d3369d6e, 0x3582dfdc92449482,
       0x2f7e2fd2565d6cd7}},
     {{0x0a0122b5178a876b, 0x51ff96ff085104b4, 0x050b31ab14f29f76,
       0x84abb28b5f87d4e6},
      {0xd5ed439f8270790a, 0x2d6cb59d85e3f46b, 0x75f55c1b6c1e2212,
       0xe5436f6717655640}},
     {{0xc2965ecc9aeb596d, 0x01ea03e7023c92b4, 0x4704b4b62e013961,
       0x0ca8fd3f905ea367},
      {0x92523a42551b2b61, 0x1eb7a89c390fcd06, 0xe7f1d2be0392a63e,
       0x96dca2644ddb0c33}},
     {{0x231c210e15339848, 0xe87a28e870778c8d, 0x9d1de6616956e170,
       0x4ac3c9382bb09c0b},
      {0x19be05516998987d, 0x8b2376c4ae09f4d6, 0x1de0b7651a3f933d,
       0x380d94c7e39705f4}},
     {{0x3685954b8c31c31d, 0x68533d005bf21a0c, 0x0bd7626e75c79ec9,
       0xca17754742c69d54},
      {0xcc6edafff6d2dbb2, 0xfd0d8cbd174a9d18, 0x875e8793aa4578e8,
       0xa976a7139cab2ce6}},
     {{0xce37ab11b43ea1db, 0x0a7ff1a95259d292, 0x851b02218f84f186,
       0xa7222beadefaad13},
      {0xa2ac78ec2b0a9144, 0x5a024051f2fa59c5, 0x91d1eca56147ce38,
       0xbe94d523bc2ac690}},
     {{0x2d8daefd79ec1a0f, 0x3bbcd6fdceb39c97, 0xf5575ffc58f61a95,
       0xdbd986c4adf7b420},
      {0x81aa881415f39eb7, 0x6ee2fcf5b98d976c, 0x5465475dcf2f717d,
       0x8e24d3c46860bbd0}}}};
#else
// 32-bit build: the same points, with each coordinate stored as eight
// 32-bit limbs.
static const fiat_p256_felem fiat_p256_g_pre_comp[2][15][2] = {
    {{{0x18a9143c, 0x79e730d4, 0x5fedb601, 0x75ba95fc, 0x77622510, 0x79fb732b,
       0xa53755c6, 0x18905f76},
      {0xce95560a, 0xddf25357, 0xba19e45c, 0x8b4ab8e4, 0xdd21f325, 0xd2e88688,
       0x25885d85, 0x8571ff18}},
     {{0x16a0d2bb, 0x4f922fc5, 0x1a623499, 0x0d5cc16c, 0x57c62c8b, 0x9241cf3a,
       0xfd1b667f, 0x2f5e6961},
      {0xf5a01797, 0x5c15c70b, 0x60956192, 0x3d20b44d, 0x071fdb52, 0x04911b37,
       0x8d6f0f7b, 0xf648f916}},
     {{0xe137bbbc, 0x9e566847, 0x8a6a0bec, 0xe434469e, 0x79d73463, 0xb1c42761,
       0x133d0015, 0x5abe0285},
      {0xc04c7dab, 0x92aa837c, 0x43260c07, 0x573d9f4c, 0x78e6cc37, 0x0c931562,
       0x6b6f7383, 0x94bb725b}},
     {{0xbfe20925, 0x62a8c244, 0x8fdce867, 0x91c19ac3, 0xdd387063, 0x5a96a5d5,
       0x21d324f6, 0x61d587d4},
      {0xa37173ea, 0xe87673a2, 0x53778b65, 0x23848008, 0x05bab43e, 0x10f8441e,
       0x4621efbe, 0xfa11fe12}},
     {{0x2cb19ffd, 0x1c891f2b, 0xb1923c23, 0x01ba8d5b, 0x8ac5ca8e, 0xb6d03d67,
       0x1f13bedc, 0x586eb04c},
      {0x27e8ed09, 0x0c35c6e5, 0x1819ede2, 0x1e81a33c, 0x56c652fa, 0x278fd6c0,
       0x70864f11, 0x19d5ac08}},
     {{0xd2b533d5, 0x62577734, 0xa1bdddc0, 0x673b8af6, 0xa79ec293, 0x577e7c9a,
       0xc3b266b1, 0xbb6de651},
      {0xb65259b3, 0xe7e9303a, 0xd03a7480, 0xd6a0afd3, 0x9b3cfc27, 0xc5ac83d1,
       0x5d18b99b, 0x60b4619a}},
     {{0x1ae5aa1c, 0xbd6a38e1, 0x49e73658, 0xb8b7652b, 0xee5f87ed, 0x0b130014,
       0xaeebffcd, 0x9d0f27b2},
      {0x7a730a55, 0xca924631, 0xddbbc83a, 0x9c955b2f, 0xac019a71, 0x07c1dfe0,
       0x356ec48d, 0x244a566d}},
     {{0xf4f8b16a, 0x56f8410e, 0xc47b266a, 0x97241afe, 0x6d9c87c1, 0x0a406b8e,
       0xcd42ab1b, 0x803f3e02},
      {0x04dbec69, 0x7f0309a8, 0x3bbad05f, 0xa83b85f7, 0xad8e197f, 0xc6097273,
       0x5067adc1, 0xc097440e}},
     {{0xc379ab34, 0x846a56f2, 0x841df8d1, 0xa8ee068b, 0x176c68ef, 0x20314459,
       0x915f1f30, 0xf1af32d5},
      {0x5d75bd50, 0x99c37531, 0xf72f67bc, 0x837cffba, 0x48d7723f, 0x0613a418,
       0xe2d41c8b, 0x23d0f130}},
     {{0xd5be5a2b, 0xed93e225, 0x5934f3c6, 0x6fe79983, 0x22626ffc, 0x43140926,
       0x7990216a, 0x50bbb4d9},
      {0xe57ec63e, 0x378191c6, 0x181dcdb2, 0x65422c40, 0x0236e0f6, 0x41a8099b,
       0x01fe49c3, 0x2b100118}},
     {{0x9b391593, 0xfc68b5c5, 0x598270fc, 0xc385f5a2, 0xd19adcbb, 0x7144f3aa,
       0x83fbae0c, 0xdd558999},
      {0x74b82ff4, 0x93b88b8e, 0x71e734c9, 0xd2e03c40, 0x43c0322a, 0x9a7a9eaf,
       0x149d6041, 0xe6e4c551}},
     {{0x80ec21fe, 0x5fe14bfe, 0xc255be82, 0xf6ce116a, 0x2f4a5d67, 0x98bc5a07,
       0xdb7e63af, 0xfad27148},
      {0x29ab05b3, 0x90c0b6ac, 0x4e251ae6, 0x37a9a83c, 0xc2aade7d, 0x0a7dc875,
       0x9f0e1a84, 0x77387de3}},
     {{0xa56c0dd7, 0x1e9ecc49, 0x46086c74, 0xa5cffcd8, 0xf505aece, 0x8f7a1408,
       0xbef0c47e, 0xb37b85c0},
      {0xcc0e6a8f, 0x3596b6e4, 0x6b388f23, 0xfd6d4bbf, 0xc39cef4e, 0xaba453fa,
       0xf9f628d5, 0x9c135ac8}},
     {{0x95c8f8be, 0x0a1c7294, 0x3bf362bf, 0x2961c480, 0xdf63d4ac, 0x9e418403,
       0x91ece900, 0xc109f9cb},
      {0x58945705, 0xc2d095d0, 0xddeb85c0, 0xb9083d96, 0x7a40449b, 0x84692b8d,
       0x2eee1ee1, 0x9bc3344f}},
     {{0x42913074, 0x0d5ae356, 0x48a542b1, 0x55491b27, 0xb310732a, 0x469ca665,
       0x5f1a4cc1, 0x29591d52},
      {0xb84f983f, 0xe76f5b6b, 0x9f5f84e1, 0xbe7eef41, 0x80baa189, 0x1200d496,
       0x18ef332c, 0x6376551f}}},
    {{{0x4147519a, 0x20288602, 0x26b372f0, 0xd0981eac, 0xa785ebc8, 0xa9d4a7ca,
       0xdbdf58e9, 0xd953c50d},
      {0xfd590f8f, 0x9d6361cc, 0x44e6c917, 0x72e9626b, 0x22eb64cf, 0x7fd96110,
       0x9eb288f3, 0x863ebb7e}},
     {{0xb0e63d34, 0x4fe7ee31, 0xa9e54fab, 0xf4600572, 0xd5e7b5a4, 0xc0493334,
       0x06d54831, 0x8589fb92},
      {0x6583553a, 0xaa70f5cc, 0xe25649e5, 0x0879094a, 0x10044652, 0xcc904507,
       0x02541c4f, 0xebb0696d}},
     {{0x3b89da99, 0xabbaa0c0, 0xb8284022, 0xa6f2d79e, 0xb81c05e8, 0x27847862,
       0x05e54d63, 0x337a4b59},
      {0x21f7794a, 0x3c67500d, 0x7d6d7f61, 0x207005b7, 0x04cfd6e8, 0x0a5a3781,
       0xf4c2fbd6, 0x0d65e0d5}},
     {{0x6d3549cf, 0xd433e50f, 0xfacd665e, 0x6f33696f, 0xce11fcb4, 0x695bfdac,
       0xaf7c9860, 0x810ee252},
      {0x7159bb2c, 0x65450fe1, 0x758b357b, 0xf7dfbebe, 0xd69fea72, 0x2b057e74,
       0x92731745, 0xd485717a}},
     {{0xe83f7669, 0xce1f69bb, 0x72877d6b, 0x09f8ae82, 0x3244278d, 0x9548ae54,
       0xe3c2c19c, 0x207755de},
      {0x6fef1945, 0x87bd61d9, 0xb12d28c3, 0x18813cef, 0x72df64aa, 0x9fbcd1d6,
       0x7154b00d, 0x48dc5ee5}},
     {{0xf49a3154, 0xef0f469e, 0x6e2b2e9a, 0x3e85a595, 0xaa924a9c, 0x45aaec1e,
       0xa09e4719, 0xaa12dfc8},
      {0x4df69f1d, 0x26f27227, 0xa2ff5e73, 0xe0e4c82c, 0xb7a9dd44, 0xb9d8ce73,
       0xe48ca901, 0x6c036e73}},
     {{0xa47153f0, 0xe1e421e1, 0x920418c9, 0xb86c3b79, 0x705d7672, 0x93bdce87,
       0xcab79a77, 0xf25ae793},
      {0x6d869d0c, 0x1f3194a3, 0x4986c264, 0x9d55c882, 0x096e945e, 0x49fb5ea3,
       0x13db0a3e, 0x39b8e653}},
     {{0x35d0b34a, 0xe3417bc0, 0x8327c0a7, 0x440b386b, 0xac0362d1, 0x8fb7262d,
       0xe0cdf943, 0x2c41114c},
      {0xad95a0b1, 0x2ba5cef1, 0x67d54362, 0xc09b37a8, 0x01e486c9, 0x26d6cdd2,
       0x42ff9297, 0x20477abf}},
     {{0xbc0a67d2, 0x0f121b41, 0x444d248a, 0x62d4760a, 0x659b4737, 0x0e044f1d,
       0x250bb4a8, 0x08fde365},
      {0x848bf287, 0xaceec3da, 0xd3369d6e, 0xc2a62182, 0x92449482, 0x3582dfdc,
       0x565d6cd7, 0x2f7e2fd2}},
     {{0x178a876b, 0x0a0122b5, 0x085104b4, 0x51ff96ff, 0x14f29f76, 0x050b31ab,
       0x5f87d4e6, 0x84abb28b},
      {0x8270790a, 0xd5ed439f, 0x85e3f46b, 0x2d6cb59d, 0x6c1e2212, 0x75f55c1b,
       0x17655640, 0xe5436f67}},
     {{0x9aeb596d, 0xc2965ecc, 0x023c92b4, 0x01ea03e7, 0x2e013961, 0x4704b4b6,
       0x905ea367, 0x0ca8fd3f},
      {0x551b2b61, 0x92523a42, 0x390fcd06, 0x1eb7a89c, 0x0392a63e, 0xe7f1d2be,
       0x4ddb0c33, 0x96dca264}},
     {{0x15339848, 0x231c210e, 0x70778c8d, 0xe87a28e8, 0x6956e170, 0x9d1de661,
       0x2bb09c0b, 0x4ac3c938},
      {0x6998987d, 0x19be0551, 0xae09f4d6, 0x8b2376c4, 0x1a3f933d, 0x1de0b765,
       0xe39705f4, 0x380d94c7}},
     {{0x8c31c31d, 0x3685954b, 0x5bf21a0c, 0x68533d00, 0x75c79ec9, 0x0bd7626e,
       0x42c69d54, 0xca177547},
      {0xf6d2dbb2, 0xcc6edaff, 0x174a9d18, 0xfd0d8cbd, 0xaa4578e8, 0x875e8793,
       0x9cab2ce6, 0xa976a713}},
     {{0xb43ea1db, 0xce37ab11, 0x5259d292, 0x0a7ff1a9, 0x8f84f186, 0x851b0221,
       0xdefaad13, 0xa7222bea},
      {0x2b0a9144, 0xa2ac78ec, 0xf2fa59c5, 0x5a024051, 0x6147ce38, 0x91d1eca5,
       0xbc2ac690, 0xbe94d523}},
     {{0x79ec1a0f, 0x2d8daefd, 0xceb39c97, 0x3bbcd6fd, 0x58f61a95, 0xf5575ffc,
       0xadf7b420, 0xdbd986c4},
      {0x15f39eb7, 0x81aa8814, 0xb98d976c, 0x6ee2fcf5, 0xcf2f717d, 0x5465475d,
       0x6860bbd0, 0x8e24d3c4}}}};
#endif

258
vendor/ring/crypto/fipsmodule/ec/util.h vendored Normal file
View File

@@ -0,0 +1,258 @@
// Copyright 2015 The BoringSSL Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ring-core/base.h>
#include "../../internal.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wsign-conversion"
#endif
// This function looks at 5+1 scalar bits (5 current, 1 adjacent less
// significant bit), and recodes them into a signed digit for use in fast point
// multiplication: the use of signed rather than unsigned digits means that
// fewer points need to be precomputed, given that point inversion is easy (a
// precomputed point dP makes -dP available as well).
//
// BACKGROUND:
//
// Signed digits for multiplication were introduced by Booth ("A signed binary
// multiplication technique", Quart. Journ. Mech. and Applied Math., vol. IV,
// pt. 2 (1951), pp. 236-240), in that case for multiplication of integers.
// Booth's original encoding did not generally improve the density of nonzero
// digits over the binary representation, and was merely meant to simplify the
// handling of signed factors given in two's complement; but it has since been
// shown to be the basis of various signed-digit representations that do have
// further advantages, including the wNAF, using the following general
// approach:
//
// (1) Given a binary representation
//
// b_k ... b_2 b_1 b_0,
//
// of a nonnegative integer (b_k in {0, 1}), rewrite it in digits 0, 1, -1
// by using bit-wise subtraction as follows:
//
// b_k b_(k-1) ... b_2 b_1 b_0
// - b_k ... b_3 b_2 b_1 b_0
// -----------------------------------------
// s_(k+1) s_k ... s_3 s_2 s_1 s_0
//
// A left-shift followed by subtraction of the original value yields a new
// representation of the same value, using signed bits s_i = b_(i-1) - b_i.
// This representation from Booth's paper has since appeared in the
// literature under a variety of different names including "reversed binary
// form", "alternating greedy expansion", "mutual opposite form", and
// "sign-alternating {+-1}-representation".
//
// An interesting property is that among the nonzero bits, values 1 and -1
// strictly alternate.
//
// (2) Various window schemes can be applied to the Booth representation of
// integers: for example, right-to-left sliding windows yield the wNAF
// (a signed-digit encoding independently discovered by various researchers
// in the 1990s), and left-to-right sliding windows yield a left-to-right
// equivalent of the wNAF (independently discovered by various researchers
// around 2004).
//
// To prevent leaking information through side channels in point multiplication,
// we need to recode the given integer into a regular pattern: sliding windows
// as in wNAFs won't do, we need their fixed-window equivalent -- which is a few
// decades older: we'll be using the so-called "modified Booth encoding" due to
// MacSorley ("High-speed arithmetic in binary computers", Proc. IRE, vol. 49
// (1961), pp. 67-91), in a radix-2^5 setting. That is, we always combine five
// signed bits into a signed digit:
//
// s_(5j + 4) s_(5j + 3) s_(5j + 2) s_(5j + 1) s_(5j)
//
// The sign-alternating property implies that the resulting digit values are
// integers from -16 to 16.
//
// Of course, we don't actually need to compute the signed digits s_i as an
// intermediate step (that's just a nice way to see how this scheme relates
// to the wNAF): a direct computation obtains the recoded digit from the
// six bits b_(5j + 4) ... b_(5j - 1).
//
// This function takes those six bits as an integer (0 .. 63), writing the
// recoded digit to *sign (0 for positive, 1 for negative) and *digit (absolute
// value, in the range 0 .. 16). Note that this integer essentially provides
// the input bits "shifted to the left" by one position: for example, the input
// to compute the least significant recoded digit, given that there's no bit
// b_-1, has to be b_4 b_3 b_2 b_1 b_0 0.
//
// DOUBLING CASE:
//
// Point addition formulas for short Weierstrass curves are often incomplete.
// Edge cases such as P + P or P + ∞ must be handled separately. This
// complicates constant-time requirements. P + ∞ cannot be avoided (any window
// may be zero) and is handled with constant-time selects. P + P (where P is not
// ∞) usually is not. Instead, windowing strategies are chosen to avoid this
// case. Whether this happens depends on the group order.
//
// Let w be the window width (in this function, w = 5). The non-trivial doubling
// case in single-point scalar multiplication may occur if and only if the
// 2^(w-1) bit of the group order is zero.
//
// Note the above only holds if the scalar is fully reduced and the group order
// is a prime that is much larger than 2^w. It also only holds when windows
// are applied from most significant to least significant, doubling between each
// window. It does not apply to more complex table strategies such as
// |EC_nistz256_method|.
//
// PROOF:
//
// Let n be the group order. Let l be the number of bits needed to represent n.
// Assume there exists some 0 <= k < n such that signed w-bit windowed
// multiplication hits the doubling case.
//
// Windowed multiplication consists of iterating over groups of s_i (defined
// above based on k's binary representation) from most to least significant. At
// iteration i (for i = ..., 3w, 2w, w, 0, starting from the most significant
// window), we:
//
// 1. Double the accumulator A, w times. Let A_i be the value of A at this
// point.
//
// 2. Set A to T_i + A_i, where T_i is a precomputed multiple of P
// corresponding to the window s_(i+w-1) ... s_i.
//
// Let j be the index such that A_j = T_j ≠ ∞. Looking at A_i and T_i as
// multiples of P, define a_i and t_i to be scalar coefficients of A_i and T_i.
// Thus a_j = t_j ≠ 0 (mod n). Note a_i and t_i may not be reduced mod n. t_i is
// the value of the w signed bits s_(i+w-1) ... s_i. a_i is computed as a_i =
// 2^w * (a_(i+w) + t_(i+w)).
//
// t_i is bounded by -2^(w-1) <= t_i <= 2^(w-1). Additionally, we may write it
// in terms of unsigned bits b_i. t_i consists of signed bits s_(i+w-1) ... s_i.
// This is computed as:
//
// b_(i+w-2) b_(i+w-3) ... b_i b_(i-1)
// - b_(i+w-1) b_(i+w-2) ... b_(i+1) b_i
// --------------------------------------------
// t_i = s_(i+w-1) s_(i+w-2) ... s_(i+1) s_i
//
// Observe that b_(i+w-2) through b_i occur in both terms. Let x be the integer
// represented by that bit string, i.e. 2^(w-2)*b_(i+w-2) + ... + b_i.
//
// t_i = (2*x + b_(i-1)) - (2^(w-1)*b_(i+w-1) + x)
// = x - 2^(w-1)*b_(i+w-1) + b_(i-1)
//
// Or, using C notation for bit operations:
//
// t_i = (k>>i) & ((1<<(w-1)) - 1) - (k>>i) & (1<<(w-1)) + (k>>(i-1)) & 1
//
// Note b_(i-1) is added in left-shifted by one (or doubled) from its place.
// This is compensated by t_(i-w)'s subtraction term. Thus, a_i may be computed
// by adding b_l b_(l-1) ... b_(i+1) b_i and an extra copy of b_(i-1). In C
// notation, this is:
//
// a_i = (k>>(i+w)) << w + ((k>>(i+w-1)) & 1) << w
//
// Observe that, while t_i may be positive or negative, a_i is bounded by
// 0 <= a_i < n + 2^w. Additionally, a_i can only be zero if b_(i+w-1) and up
// are all zero. (Note this implies a non-trivial P + (-P) is unreachable for
// all groups. That would imply the subsequent a_i is zero, which means all
// terms thus far were zero.)
//
// Returning to our doubling position, we have a_j = t_j (mod n). We now
// determine the value of a_j - t_j, which must be divisible by n. Our bounds on
// a_j and t_j imply a_j - t_j is 0 or n. If it is 0, a_j = t_j. However, 2^w
// divides a_j and -2^(w-1) <= t_j <= 2^(w-1), so this can only happen if
// a_j = t_j = 0, which is a trivial doubling. Therefore, a_j - t_j = n.
//
// Now we determine j. Suppose j > 0. w divides j, so j >= w. Then,
//
// n = a_j - t_j = (k>>(j+w)) << w + ((k>>(j+w-1)) & 1) << w - t_j
// <= k/2^j + 2^w - t_j
// < n/2^w + 2^w + 2^(w-1)
//
// n is much larger than 2^w, so this is impossible. Thus, j = 0: only the final
// addition may hit the doubling case.
//
// Finally, we consider bit patterns for n and k. Divide k into k_H + k_M + k_L
// such that k_H is the contribution from b_(l-1) .. b_w, k_M is the
// contribution from b_(w-1), and k_L is the contribution from b_(w-2) ... b_0.
// That is:
//
// - 2^w divides k_H
// - k_M is 0 or 2^(w-1)
// - 0 <= k_L < 2^(w-1)
//
// Divide n into n_H + n_M + n_L similarly. We thus have:
//
// t_0 = (k>>0) & ((1<<(w-1)) - 1) - (k>>0) & (1<<(w-1)) + (k>>(0-1)) & 1
// = k & ((1<<(w-1)) - 1) - k & (1<<(w-1))
// = k_L - k_M
//
// a_0 = (k>>(0+w)) << w + ((k>>(0+w-1)) & 1) << w
// = (k>>w) << w + ((k>>(w-1)) & 1) << w
// = k_H + 2*k_M
//
// n = a_0 - t_0
// n_H + n_M + n_L = (k_H + 2*k_M) - (k_L - k_M)
// = k_H + 3*k_M - k_L
//
// k_H - k_L < k and k < n, so k_H - k_L ≠ n. Therefore k_M is not 0 and must be
// 2^(w-1). Now we consider k_H and n_H. We know k_H <= n_H. Suppose k_H = n_H.
// Then,
//
// n_M + n_L = 3*(2^(w-1)) - k_L
// > 3*(2^(w-1)) - 2^(w-1)
// = 2^w
//
// Contradiction (n_M + n_L is the bottom w bits of n). Thus k_H < n_H. Suppose
// k_H < n_H - 2*2^w. Then,
//
// n_H + n_M + n_L = k_H + 3*(2^(w-1)) - k_L
// < n_H - 2*2^w + 3*(2^(w-1)) - k_L
// n_M + n_L < -2^(w-1) - k_L
//
// Contradiction. Thus, k_H = n_H - 2^w. (Note 2^w divides n_H and k_H.) Thus,
//
// n_H + n_M + n_L = k_H + 3*(2^(w-1)) - k_L
// = n_H - 2^w + 3*(2^(w-1)) - k_L
// n_M + n_L = 2^(w-1) - k_L
// <= 2^(w-1)
//
// Equality would mean 2^(w-1) divides n, which is impossible if n is prime.
// Thus n_M + n_L < 2^(w-1), so n_M is zero, proving our condition.
//
// This proof constructs k, so, to show the converse, let k_H = n_H - 2^w,
// k_M = 2^(w-1), k_L = 2^(w-1) - n_L. This will result in a non-trivial point
// doubling in the final addition and is the only such scalar.
//
// COMMON CURVES:
//
// The group orders for common curves end in the following bit patterns:
//
// P-521: ...00001001; w = 4 is okay
// P-384: ...01110011; w = 2, 5, 6, 7 are okay
// P-256: ...01010001; w = 5, 7 are okay
// P-224: ...00111101; w = 3, 4, 5, 6 are okay
// recode_scalar_bits recodes six scalar bits |in| — the five bits of the
// current window with the adjacent lower bit appended, i.e.
// b_(5j+4) ... b_(5j) b_(5j-1) — into a signed digit, as described in the
// long comment above. *|sign| is set to 0 for a positive digit and 1 for a
// negative one; *|digit| is the magnitude, in 0 .. 16. The computation is
// branch-free, so it leaks nothing about |in| through control flow.
static inline void recode_scalar_bits(crypto_word_t *sign, crypto_word_t *digit,
                                      crypto_word_t in) {
  crypto_word_t s, d;
  s = ~((in >> 5) - 1); /* sets all bits to MSB(in), 'in' seen as
                         * 6-bit value */
  d = (1 << 6) - in - 1; /* d = 63 - in, the bitwise complement of the
                          * 6-bit value */
  d = (d & s) | (in & ~s); /* constant-time select: MSB(in) ? 63 - in : in */
  d = (d >> 1) + (d & 1); /* halve and round up: digit magnitude, 0 .. 16 */
  *sign = s & 1;
  *digit = d;
}