chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,109 @@
// Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) All rights reserved.
// SPDX-License-Identifier: Apache-2.0
#include <openssl/cipher.h>
#include <assert.h>
#include <string.h>
#include <openssl/err.h>
#include <openssl/mem.h>
#include <openssl/nid.h>
#include "internal.h"
#include "../internal.h"
// kCiphers maps each supported cipher NID and its OpenSSL-style lowercase
// name to the corresponding |EVP_CIPHER| constructor. Lookups below are
// linear scans, so ordering is not load-bearing; entries are kept roughly
// alphabetized for readability.
static const struct {
  int nid;
  const char *name;
  const EVP_CIPHER *(*func)(void);
} kCiphers[] = {
    {NID_aes_128_cbc, "aes-128-cbc", EVP_aes_128_cbc},
    {NID_aes_128_cfb128, "aes-128-cfb", EVP_aes_128_cfb},
    {NID_aes_128_ctr, "aes-128-ctr", EVP_aes_128_ctr},
    {NID_aes_128_ecb, "aes-128-ecb", EVP_aes_128_ecb},
    {NID_aes_128_gcm, "aes-128-gcm", EVP_aes_128_gcm},
    {NID_aes_128_ofb128, "aes-128-ofb", EVP_aes_128_ofb},
    {NID_aes_192_cbc, "aes-192-cbc", EVP_aes_192_cbc},
    {NID_aes_192_cfb128, "aes-192-cfb", EVP_aes_192_cfb},
    {NID_aes_192_ctr, "aes-192-ctr", EVP_aes_192_ctr},
    {NID_aes_192_ecb, "aes-192-ecb", EVP_aes_192_ecb},
    {NID_aes_192_gcm, "aes-192-gcm", EVP_aes_192_gcm},
    {NID_aes_192_ofb128, "aes-192-ofb", EVP_aes_192_ofb},
    {NID_aes_256_cbc, "aes-256-cbc", EVP_aes_256_cbc},
    {NID_aes_256_cfb128, "aes-256-cfb", EVP_aes_256_cfb},
    {NID_aes_256_ctr, "aes-256-ctr", EVP_aes_256_ctr},
    {NID_aes_256_ecb, "aes-256-ecb", EVP_aes_256_ecb},
    {NID_aes_256_gcm, "aes-256-gcm", EVP_aes_256_gcm},
    {NID_aes_256_ofb128, "aes-256-ofb", EVP_aes_256_ofb},
    {NID_aes_256_xts, "aes-256-xts", EVP_aes_256_xts},
    {NID_chacha20_poly1305, "chacha20-poly1305", EVP_chacha20_poly1305},
    {NID_des_cbc, "des-cbc", EVP_des_cbc},
    {NID_des_ecb, "des-ecb", EVP_des_ecb},
    {NID_des_ede_cbc, "des-ede-cbc", EVP_des_ede_cbc},
    {NID_des_ede_ecb, "des-ede", EVP_des_ede},
    {NID_des_ede3_cbc, "des-ede3-cbc", EVP_des_ede3_cbc},
    {NID_rc2_cbc, "rc2-cbc", EVP_rc2_cbc},
    {NID_rc4, "rc4", EVP_rc4},
    {NID_bf_cbc, "bf-cbc", EVP_bf_cbc},
    {NID_bf_cfb64, "bf-cfb", EVP_bf_cfb},
    {NID_bf_ecb, "bf-ecb", EVP_bf_ecb},
};
// kCipherAliases maps alternate spellings (used by third-party callers such
// as tcpdump) to the canonical names in |kCiphers|. Every |name| here must
// resolve via |get_cipherbyname|.
static const struct {
  const char* alias;
  const char* name;
} kCipherAliases[] = {
    {"3des", "des-ede3-cbc"},
    {"DES", "des-cbc"},
    {"aes256", "aes-256-cbc"},
    {"aes128", "aes-128-cbc"},
    {"id-aes128-gcm", "aes-128-gcm"},
    {"id-aes192-gcm", "aes-192-gcm"},
    {"id-aes256-gcm", "aes-256-gcm"}
};
// EVP_get_cipherbynid returns the |EVP_CIPHER| registered under |nid|, or
// NULL if |nid| is not a supported cipher.
const EVP_CIPHER *EVP_get_cipherbynid(int nid) {
  const size_t num_ciphers = OPENSSL_ARRAY_SIZE(kCiphers);
  for (size_t idx = 0; idx < num_ciphers; idx++) {
    if (nid == kCiphers[idx].nid) {
      return kCiphers[idx].func();
    }
  }
  return NULL;
}
// get_cipherbyname performs a case-insensitive lookup of |name| in
// |kCiphers| and returns the matching cipher, or NULL if none matches.
static const EVP_CIPHER *get_cipherbyname(const char* name) {
  const size_t num_ciphers = OPENSSL_ARRAY_SIZE(kCiphers);
  for (size_t idx = 0; idx < num_ciphers; idx++) {
    if (0 == OPENSSL_strcasecmp(kCiphers[idx].name, name)) {
      return kCiphers[idx].func();
    }
  }
  return NULL;
}
// EVP_get_cipherbyname returns the cipher whose canonical name (or known
// alias) matches |name|, case-insensitively, or NULL if there is no match.
// A NULL |name| yields NULL.
const EVP_CIPHER *EVP_get_cipherbyname(const char *name) {
  if (name == NULL) {
    return NULL;
  }
  const EVP_CIPHER *cipher = get_cipherbyname(name);
  if (cipher != NULL) {
    return cipher;
  }
  // These are not names used by OpenSSL, but tcpdump registers them with
  // |EVP_add_cipher_alias|. Our |EVP_add_cipher_alias| is a no-op, so we
  // support the names here instead.
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kCipherAliases); i++) {
    if (OPENSSL_strcasecmp(name, kCipherAliases[i].alias) != 0) {
      continue;
    }
    const EVP_CIPHER *aliased = get_cipherbyname(kCipherAliases[i].name);
    // Every alias entry must point at a name present in |kCiphers|.
    assert(aliased != NULL);
    return aliased;
  }
  return NULL;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,98 @@
// Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) All rights reserved.
// SPDX-License-Identifier: Apache-2.0
#include <openssl/cipher.h>
#include <assert.h>
#include <openssl/digest.h>
#include <openssl/mem.h>
#include <openssl/evp.h>
// EVP_BytesToKey derives a key and IV for |type| from |data| using the
// legacy OpenSSL KDF: D_1 = md(data || salt), D_i = md(D_{i-1} || data ||
// salt), each hashed |count| times, with the concatenated digests split
// into key bytes then IV bytes. Returns the cipher's key length on success
// and 0 on error. If |data| is NULL it only reports the key length. |salt|,
// when non-NULL, must be |PKCS5_SALT_LEN| bytes. This is a legacy scheme;
// new code should use a modern KDF such as PBKDF2 or HKDF.
int EVP_BytesToKey(const EVP_CIPHER *type, const EVP_MD *md,
                   const uint8_t *salt, const uint8_t *data, size_t data_len,
                   unsigned count, uint8_t *key, uint8_t *iv) {
  EVP_MD_CTX c;
  uint8_t md_buf[EVP_MAX_MD_SIZE];
  unsigned addmd = 0;  // zero only on the first round; later rounds chain D_{i-1}
  unsigned mds = 0, i;
  int rv = 0;
  // Remaining key and IV bytes still to be produced.
  unsigned nkey = EVP_CIPHER_key_length(type);
  unsigned niv = EVP_CIPHER_iv_length(type);
  assert(nkey <= EVP_MAX_KEY_LENGTH);
  assert(niv <= EVP_MAX_IV_LENGTH);
  if (data == NULL) {
    return nkey;
  }
  EVP_MD_CTX_init(&c);
  for (;;) {
    if (!EVP_DigestInit_ex(&c, md, NULL)) {
      goto err;
    }
    // From the second round onward, feed in the previous round's digest.
    if (addmd++) {
      if (!EVP_DigestUpdate(&c, md_buf, mds)) {
        goto err;
      }
    }
    if (!EVP_DigestUpdate(&c, data, data_len)) {
      goto err;
    }
    if (salt != NULL) {
      if (!EVP_DigestUpdate(&c, salt, PKCS5_SALT_LEN)) {
        goto err;
      }
    }
    if (!EVP_DigestFinal_ex(&c, md_buf, &mds)) {
      goto err;
    }
    // Iterate the hash |count| times in total (one application done above).
    for (i = 1; i < count; i++) {
      if (!EVP_DigestInit_ex(&c, md, NULL) ||
          !EVP_DigestUpdate(&c, md_buf, mds) ||
          !EVP_DigestFinal_ex(&c, md_buf, &mds)) {
        goto err;
      }
    }
    // Spread this round's digest over the key first, then the IV. |key| and
    // |iv| may be NULL to skip output while still consuming digest bytes.
    i = 0;
    if (nkey) {
      for (;;) {
        if (nkey == 0 || i == mds) {
          break;
        }
        if (key != NULL) {
          *(key++) = md_buf[i];
        }
        nkey--;
        i++;
      }
    }
    if (niv && i != mds) {
      for (;;) {
        if (niv == 0 || i == mds) {
          break;
        }
        if (iv != NULL) {
          *(iv++) = md_buf[i];
        }
        niv--;
        i++;
      }
    }
    if (nkey == 0 && niv == 0) {
      break;
    }
  }
  rv = EVP_CIPHER_key_length(type);
err:
  EVP_MD_CTX_cleanup(&c);
  // Digest buffer holds key material; wipe it before returning.
  OPENSSL_cleanse(md_buf, EVP_MAX_MD_SIZE);
  return rv;
}

View File

@@ -0,0 +1,397 @@
// Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#include <openssl/opensslconf.h>
#include <stdio.h>
#include <string.h>
#include <openssl/aes.h>
#include <openssl/cipher.h>
#include <openssl/err.h>
#include <openssl/evp.h>
#include <openssl/objects.h>
#include <openssl/rand.h>
#include <openssl/sha.h>
#include "../fipsmodule/aes/internal.h"
#include "../fipsmodule/cipher/internal.h"
#include "internal.h"
#if defined(AES_CBC_HMAC_SHA_STITCH)
// EVP_AES_HMAC_SHA1 is the per-context state for the stitched
// AES-CBC + HMAC-SHA1 TLS cipher implementation.
typedef struct {
  // AES key schedule (encrypt or decrypt, depending on direction).
  AES_KEY ks;
  // Used to compute(init, update and final) HMAC-SHA1.
  // head stores the initialised inner hash state.
  // tail stores the outer hash state.
  // md is the working state for the record currently being processed.
  // These storage are for using in subsequent invocations with the same MAC key.
  SHA_CTX head, tail, md;
  // In encrypt case, it's eiv_len + plaintext_len. eiv is explicit iv(required
  // TLS 1.1+). In decrypt case, it's |EVP_AEAD_TLS1_AAD_LEN(13)|.
  size_t payload_length;
  union {
    uint16_t tls_ver;
    // In encrypt case, it's not set.
    // In decrypt case, it stores |additional_data|.
    // additional_data = seq_num + content_type + protocol_version +
    // payload_eiv_len seq_num: 8 octets long. content_type: 1 octets long.
    // protocol_version: 2 octets long.
    // payload_eiv_len: 2 octets long. eiv is explicit iv required by TLS 1.1+.
    //
    // TLS 1.0: https://www.rfc-editor.org/rfc/rfc2246.html#section-6.2.3.2
    // TLS 1.1: https://www.ietf.org/rfc/rfc5246.html#section-6.2.3.2
    // TLS 1.2: https://www.ietf.org/rfc/rfc5246.html#section-6.2.3.2
    uint8_t tls_aad[EVP_AEAD_TLS1_AAD_LEN];
  } aux;
  // Used to store the key computed in EVP_CTRL_AEAD_SET_MAC_KEY operation.
  uint8_t hmac_key[HMAC_KEY_SIZE];
} EVP_AES_HMAC_SHA1;
// aesni_cbc_sha1_enc is the assembly "stitch" routine (defined elsewhere)
// that AES-CBC-encrypts |blocks| SHA blocks from |inp| into |out| while
// simultaneously folding |in0| into the SHA-1 state |ctx|.
void aesni_cbc_sha1_enc(const void *inp, void *out, size_t blocks,
                        const AES_KEY *key, uint8_t iv[AES_BLOCK_SIZE],
                        SHA_CTX *ctx, const void *in0);
// aesni_cbc_hmac_sha1_init_key installs the AES key schedule for the given
// direction and resets the SHA-1 HMAC state. Returns 1 on success, 0 if the
// hardware key setup fails.
static int aesni_cbc_hmac_sha1_init_key(EVP_CIPHER_CTX *ctx,
                                        const uint8_t *inkey,
                                        const uint8_t *iv, int enc) {
  EVP_AES_HMAC_SHA1 *key = (EVP_AES_HMAC_SHA1 *)(ctx->cipher_data);
  const int key_bits = EVP_CIPHER_CTX_key_length(ctx) * 8;
  const int ret = enc ? aes_hw_set_encrypt_key(inkey, key_bits, &key->ks)
                      : aes_hw_set_decrypt_key(inkey, key_bits, &key->ks);
  if (ret < 0) {
    return 0;
  }
  // Until EVP_CTRL_AEAD_SET_MAC_KEY runs, head/tail/md are plain SHA-1 state.
  SHA1_Init(&key->head);
  key->tail = key->head;
  key->md = key->head;
  key->payload_length = NO_PAYLOAD_LENGTH;
  return 1;
}
// aesni_cbc_hmac_sha1_cipher implements TLS-specific CBC-mode+HMAC-SHA1 cipher suite based encryption and decryption.
//
// For encryption in TLS version 1.0
// |in|: payload/fragment
// |len|: (|payload| + SHA_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE
// |out|: Must point to allocated memory of at least (|payload| + SHA_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE bytes
// If the function returns successfully |out| will contain AES-CBC(aes_key, IV, payload || hmac-sha1(mac_key, aad || payload) || padding || padding_length)
// For encryption in TLS version 1.1 and 1.2
// |in|: payload/fragment
// |len|: (|IV| + |payload| + SHA_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE
// |out|: Must point to allocated memory of at least (|IV| + |payload| + SHA_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE bytes
// If the function returns successfully |out| will contain AES-CBC(aes_key, mask, IV || payload || hmac-sha1(mac_key, aad || payload) || padding || padding_length)
// |len|: should be (eiv_len + plaintext_len + SHA_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE).
// The mask and IV are according to method 2.b from https://datatracker.ietf.org/doc/html/rfc2246#section-6.2.3.2
//
// WARNING: Do not set explicit |IV| = |mask|. It will result in aes(aes_key, 0) being used as the effective IV for all records.
//
// In decryption, this function performs decryption, removes padding, and
// verifies the MAC value in constant time.
static int aesni_cbc_hmac_sha1_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                                      const uint8_t *in, size_t len) {
  EVP_AES_HMAC_SHA1 *key = (EVP_AES_HMAC_SHA1 *)(ctx->cipher_data);
  size_t plen = key->payload_length, iv_len = 0;
  // payload_length is one-shot; it must be re-armed by the AAD ctrl call
  // before the next record.
  key->payload_length = NO_PAYLOAD_LENGTH;
  if (len % AES_BLOCK_SIZE) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH);
    return 0;
  }
  if (EVP_CIPHER_CTX_encrypting(ctx)) {
    // NOTE: Difference between openssl and aws-lc:
    // In encrypt case, |plen| is set in the call |EVP_CIPHER_CTX_ctrl| with
    // |EVP_CTRL_AEAD_TLS1_AAD| operation.
    // When |plen == NO_PAYLOAD_LENGTH|, it means the call did not happen.
    // In this case, aws-lc returns error(0) but openssl supports that with
    // below explanation.
    // https://mta.openssl.org/pipermail/openssl-users/2019-November/011458.html
    // -- These stitched ciphers are specifically targeted at use by libssl
    // and are designed for use in SSL/TLS only.
    if (plen == NO_PAYLOAD_LENGTH) {
      // |EVP_CIPHER_CTX_ctrl| with |EVP_CTRL_AEAD_TLS1_AAD| operation is not
      // performed.
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_CTRL_OPERATION_NOT_PERFORMED);
      return 0;
    }
    if (len !=
        ((plen + SHA_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)) {
      // The input should have space for plen(eiv + plaintext) + SHA_DIGEST_LENGTH + padding.
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_INPUT_SIZE);
      return 0;
    } else if (key->aux.tls_ver >= TLS1_1_VERSION) {
      // TLS 1.1+ records carry an explicit IV which is encrypted but not
      // MACed.
      iv_len = AES_BLOCK_SIZE;
    }
    size_t aes_off = 0;
    size_t sha_off = SHA_CBLOCK - key->md.num;
    size_t blocks;
    // Use stitch code |aesni_cbc_sha1_enc| when there are multiple of SHA_CBLOCK
    // so |aesni_cbc_sha1_enc| can use AES and SHA on the same data block.
    //
    // Assembly stitch handles AVX-capable processors, but its
    // performance is not optimal on AMD Jaguar, ~40% worse, for
    // unknown reasons. Incidentally processor in question supports
    // AVX, but not AMD-specific XOP extension, which can be used
    // to identify it and avoid stitch invocation. So that after we
    // establish that current CPU supports AVX, we even see if it's
    // either even XOP-capable Bulldozer-based or GenuineIntel one.
    // But SHAEXT-capable go ahead...
    if ((CRYPTO_is_SHAEXT_capable() ||
         (CRYPTO_is_AVX_capable() &&
          (CRYPTO_is_AMD_XOP_support() | CRYPTO_is_intel_cpu()))) &&
        plen > (sha_off + iv_len) &&
        (blocks = (plen - (sha_off + iv_len)) / SHA_CBLOCK)) {
      // Before calling |aesni_cbc_sha1_enc|, |key->md| must not hold
      // unhashed (partial) data, so flush |sha_off| bytes first.
      SHA1_Update(&key->md, in + iv_len, sha_off);
      aesni_cbc_sha1_enc(in, out, blocks, &key->ks,
                         ctx->iv, &key->md,
                         in + iv_len + sha_off);
      // Update the offsets to record and skip the part processed
      // (encrypted and hashed) by |aesni_cbc_sha1_enc|, and manually fold
      // the hashed byte count into the SHA-1 bit counters (Nh:Nl).
      blocks *= SHA_CBLOCK;
      aes_off += blocks;
      sha_off += blocks;
      key->md.Nh += blocks >> 29;
      key->md.Nl += blocks <<= 3;
      if (key->md.Nl < (unsigned int)blocks) {
        key->md.Nh++;
      }
    } else {
      sha_off = 0;
    }
    sha_off += iv_len;
    // Hash whatever the stitch did not cover.
    SHA1_Update(&key->md, in + sha_off, plen - sha_off);
    if (in != out) {
      OPENSSL_memcpy(out + aes_off, in + aes_off, plen - aes_off);
    }
    // calculate HMAC and append it to payload.
    SHA1_Final(out + plen, &key->md);
    key->md = key->tail;
    SHA1_Update(&key->md, out + plen, SHA_DIGEST_LENGTH);
    SHA1_Final(out + plen, &key->md);
    // pad the payload|hmac with TLS CBC padding (every pad byte equals the
    // padding length).
    plen += SHA_DIGEST_LENGTH;
    for (unsigned int l = len - plen - 1; plen < len; plen++) {
      out[plen] = l;
    }
    // encrypt HMAC|padding at once.
    aes_hw_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off, &key->ks,
                       ctx->iv, 1);
    return 1;
  } else {
    if (plen != EVP_AEAD_TLS1_AAD_LEN) {
      // |EVP_CIPHER_CTX_ctrl| with |EVP_CTRL_AEAD_TLS1_AAD| operation is not
      // performed.
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_CTRL_OPERATION_NOT_PERFORMED);
      return 0;
    }
    // The stored AAD carries the protocol version at bytes [9,10].
    if ((key->aux.tls_aad[plen - 4] << 8 | key->aux.tls_aad[plen - 3]) >=
        TLS1_1_VERSION) {
      if (len < (AES_BLOCK_SIZE + SHA_DIGEST_LENGTH + 1)) {
        OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_INPUT_SIZE);
        return 0;
      }
      // omit explicit iv: it becomes the CBC IV and is excluded from output.
      OPENSSL_memcpy(ctx->iv, in, AES_BLOCK_SIZE);
      in += AES_BLOCK_SIZE;
      out += AES_BLOCK_SIZE;
      len -= AES_BLOCK_SIZE;
    } else if (len < (SHA_DIGEST_LENGTH + 1)) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_INPUT_SIZE);
      return 0;
    }
    // decrypt HMAC|padding at once.
    aes_hw_cbc_encrypt(in, out, len, &key->ks, ctx->iv, 0);
    CONSTTIME_SECRET(out, len);
    // Remove CBC padding. Code from here on is timing-sensitive with respect to
    // |padding_ok| and |data_plus_mac_len| for CBC ciphers.
    size_t data_plus_mac_len;
    crypto_word_t padding_ok;
    if (!EVP_tls_cbc_remove_padding(&padding_ok, &data_plus_mac_len, out, len,
                                    AES_BLOCK_SIZE, SHA_DIGEST_LENGTH)) {
      // Publicly invalid. This can be rejected in non-constant time.
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    size_t data_len = data_plus_mac_len - SHA_DIGEST_LENGTH;
    // Patch the payload length field of the AAD before recomputing the MAC.
    key->aux.tls_aad[11] = (uint8_t)(data_len >> 8);
    key->aux.tls_aad[12] = (uint8_t)(data_len);
    // Compute the MAC and extract the one in the record.
    uint8_t mac[EVP_MAX_MD_SIZE];
    size_t mac_len;
    uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
    uint8_t *record_mac;
    if (!EVP_tls_cbc_digest_record(EVP_sha1(), mac, &mac_len, key->aux.tls_aad,
                                   out, data_len, len, key->hmac_key, 64)) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == SHA_DIGEST_LENGTH);
    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, len);
    // Perform the MAC check and the padding check in constant-time. It should
    // be safe to simply perform the padding check first, but it would not be
    // under a different choice of MAC location on padding failure. See
    // |EVP_tls_cbc_remove_padding|.
    crypto_word_t good =
        constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), 0);
    good &= padding_ok;
    CONSTTIME_DECLASSIFY(&good, sizeof(good));
    if (!good) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    CONSTTIME_DECLASSIFY(&data_len, sizeof(data_len));
    CONSTTIME_DECLASSIFY(out, data_len);
    // End of timing-sensitive code.
    return 1;
  }
}
// aesni_cbc_hmac_sha1_ctrl handles the ctrl operations for the stitched
// AES-CBC-HMAC-SHA1 cipher.
//
// EVP_CTRL_AEAD_SET_MAC_KEY: installs the HMAC key (equivalent to
// |HMAC_Init_ex| with SHA-1): the key is zero-padded (or hashed, if longer
// than one block) to |HMAC_KEY_SIZE|, and the precomputed ipad/opad states
// are stored in |key->head|/|key->tail|. Returns 1 on success.
//
// EVP_CTRL_AEAD_TLS1_AAD: absorbs the 13-byte TLS record AAD. On the
// encrypt side it returns the number of expansion bytes (MAC + padding);
// on the decrypt side it stores the AAD and returns SHA_DIGEST_LENGTH.
// Returns 0 on any error.
static int aesni_cbc_hmac_sha1_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
                                    void *ptr) {
  EVP_AES_HMAC_SHA1 *key = (EVP_AES_HMAC_SHA1 *)(ctx->cipher_data);
  switch (type) {
    case EVP_CTRL_AEAD_SET_MAC_KEY: {
      if (arg < 0) {
        return 0;
      }
      // This CTRL operation is to perform |HMAC_Init_ex| with SHA1 on |ptr|.
      uint8_t hmac_key[HMAC_KEY_SIZE];
      OPENSSL_memset(hmac_key, 0, sizeof(hmac_key));
      size_t u_arg = (size_t)arg;
      if (u_arg > sizeof(hmac_key)) {
        // Per HMAC, keys longer than one block are first hashed down.
        SHA1_Init(&key->head);
        SHA1_Update(&key->head, ptr, arg);
        SHA1_Final(hmac_key, &key->head);
      } else {
        OPENSSL_memcpy(hmac_key, ptr, arg);
      }
      // Keep a copy for |EVP_tls_cbc_digest_record| in the decrypt path. Use
      // the destination's size rather than a hard-coded 64 so the copy can
      // never outgrow either buffer if |HMAC_KEY_SIZE| changes.
      OPENSSL_memcpy(key->hmac_key, hmac_key, sizeof(key->hmac_key));
      for (size_t i = 0; i < sizeof(hmac_key); i++) {
        hmac_key[i] ^= 0x36; /* ipad */
      }
      SHA1_Init(&key->head);
      SHA1_Update(&key->head, hmac_key, sizeof(hmac_key));
      for (size_t i = 0; i < sizeof(hmac_key); i++) {
        hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
      }
      SHA1_Init(&key->tail);
      SHA1_Update(&key->tail, hmac_key, sizeof(hmac_key));
      // Wipe key material from the stack.
      OPENSSL_cleanse(hmac_key, sizeof(hmac_key));
      return 1;
    }
    case EVP_CTRL_AEAD_TLS1_AAD: {
      // p is
      // additional_data = |seq_num + content_type + protocol_version + payload_eiv_len|.
      // seq_num: 8 octets long.
      // content_type: 1 octets long.
      // protocol_version: 2 octets long.
      // payload_eiv_len: 2 octets long. eiv is explicit iv required by TLS 1.1+.
      uint8_t *p = ptr;
      if (arg != EVP_AEAD_TLS1_AAD_LEN) {
        OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
        return 0;
      }
      if (EVP_CIPHER_CTX_encrypting(ctx)) {
        uint16_t len = p[arg - 2] << 8 | p[arg - 1];
        key->payload_length = len;
        if ((key->aux.tls_ver = p[arg - 4] << 8 | p[arg - 3]) >=
            TLS1_1_VERSION) {
          if (len < AES_BLOCK_SIZE) {
            OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
            return 0;
          }
          // Exclude the explicit IV from the MACed length field in the AAD.
          len -= AES_BLOCK_SIZE;
          p[arg - 2] = len >> 8;
          p[arg - 1] = len;
        }
        // Start the inner hash with the AAD.
        key->md = key->head;
        SHA1_Update(&key->md, p, arg);
        // Return the number of bytes encryption will add (MAC + padding).
        return (int)(((len + SHA_DIGEST_LENGTH + AES_BLOCK_SIZE) &
                      -AES_BLOCK_SIZE) -
                     len);
      } else {
        OPENSSL_memcpy(key->aux.tls_aad, ptr, arg);
        key->payload_length = arg;
        return SHA_DIGEST_LENGTH;
      }
    }
    default:
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_CTRL_NOT_IMPLEMENTED);
      return 0;
  }
}
// AES-128-CBC-HMAC-SHA1 stitched TLS cipher descriptor.
static const EVP_CIPHER aesni_128_cbc_hmac_sha1_cipher = {
    NID_aes_128_cbc_hmac_sha1 /* nid */,
    AES_BLOCK_SIZE /* block size */,
    16 /* key len */,
    AES_BLOCK_SIZE /* iv len */,
    sizeof(EVP_AES_HMAC_SHA1) /* ctx_size */,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER /* flags */,
    aesni_cbc_hmac_sha1_init_key,
    aesni_cbc_hmac_sha1_cipher,
    NULL /* cleanup */,
    aesni_cbc_hmac_sha1_ctrl};
// AES-256-CBC-HMAC-SHA1 stitched TLS cipher descriptor.
static const EVP_CIPHER aesni_256_cbc_hmac_sha1_cipher = {
    NID_aes_256_cbc_hmac_sha1 /* nid */,
    AES_BLOCK_SIZE /* block size */,
    32 /* key len */,
    AES_BLOCK_SIZE /* iv len */,
    sizeof(EVP_AES_HMAC_SHA1) /* ctx_size */,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER /* flags */,
    aesni_cbc_hmac_sha1_init_key,
    aesni_cbc_hmac_sha1_cipher,
    NULL /* cleanup */,
    aesni_cbc_hmac_sha1_ctrl};
// EVP_aes_128_cbc_hmac_sha1 returns the stitched AES-128-CBC-HMAC-SHA1
// cipher, or NULL when hardware AES is unavailable.
const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha1(void) {
  if (!hwaes_capable()) {
    return NULL;
  }
  return &aesni_128_cbc_hmac_sha1_cipher;
}
// EVP_aes_256_cbc_hmac_sha1 returns the stitched AES-256-CBC-HMAC-SHA1
// cipher, or NULL when hardware AES is unavailable.
const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha1(void) {
  if (!hwaes_capable()) {
    return NULL;
  }
  return &aesni_256_cbc_hmac_sha1_cipher;
}
#else
// Without the assembly stitch (|AES_CBC_HMAC_SHA_STITCH| undefined), the
// stitched ciphers are unavailable; signal this by returning NULL.
const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha1(void) { return NULL; }
const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha1(void) { return NULL; }
#endif

View File

@@ -0,0 +1,393 @@
// Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#include <openssl/opensslconf.h>
#include <stdio.h>
#include <string.h>
#include <openssl/aes.h>
#include <openssl/cipher.h>
#include <openssl/err.h>
#include <openssl/evp.h>
#include <openssl/objects.h>
#include <openssl/rand.h>
#include <openssl/sha.h>
#include "../fipsmodule/aes/internal.h"
#include "../fipsmodule/cipher/internal.h"
#include "internal.h"
#if defined(AES_CBC_HMAC_SHA_STITCH)
// EVP_AES_HMAC_SHA256 is the per-context state for the stitched
// AES-CBC + HMAC-SHA256 TLS cipher implementation.
typedef struct {
  // AES key schedule (encrypt or decrypt, depending on direction).
  AES_KEY ks;
  // Used to compute(init, update and final) HMAC-SHA256.
  // head stores the initialised inner hash state.
  // tail stores the outer hash state.
  // md is the working state for the record currently being processed.
  // These storage are for using in subsequent invocations with the same MAC key.
  SHA256_CTX head, tail, md;
  // In encrypt case, it's eiv_len + plaintext_len. eiv is explicit iv(required
  // TLS 1.1+). In decrypt case, it's |EVP_AEAD_TLS1_AAD_LEN(13)|.
  size_t payload_length;
  union {
    uint16_t tls_ver;
    // In encrypt case, it's not set.
    // In decrypt case, it stores |additional_data|.
    // additional_data = seq_num + content_type + protocol_version +
    // payload_eiv_len seq_num: 8 octets long. content_type: 1 octets long.
    // protocol_version: 2 octets long.
    // payload_eiv_len: 2 octets long. eiv is explicit iv required by TLS 1.1+.
    //
    // TLS 1.0: https://www.rfc-editor.org/rfc/rfc2246.html#section-6.2.3.2
    // TLS 1.1: https://www.ietf.org/rfc/rfc5246.html#section-6.2.3.2
    // TLS 1.2: https://www.ietf.org/rfc/rfc5246.html#section-6.2.3.2
    uint8_t tls_aad[EVP_AEAD_TLS1_AAD_LEN];
  } aux;
  // Used to store the key computed in EVP_CTRL_AEAD_SET_MAC_KEY operation.
  uint8_t hmac_key[HMAC_KEY_SIZE];
} EVP_AES_HMAC_SHA256;
// aesni_cbc_sha256_enc is the assembly "stitch" routine (defined elsewhere)
// that AES-CBC-encrypts |blocks| SHA blocks from |inp| into |out| while
// simultaneously folding |in0| into the SHA-256 state |ctx|.
void aesni_cbc_sha256_enc(const void *inp, void *out, size_t blocks,
                          const AES_KEY *key, uint8_t iv[AES_BLOCK_SIZE],
                          SHA256_CTX *ctx, const void *in0);
// aesni_cbc_hmac_sha256_init_key installs the AES key schedule for the given
// direction and resets the SHA-256 HMAC state. Returns 1 on success, 0 if
// the hardware key setup fails.
static int aesni_cbc_hmac_sha256_init_key(EVP_CIPHER_CTX *ctx,
                                          const uint8_t *inkey,
                                          const uint8_t *iv, int enc) {
  EVP_AES_HMAC_SHA256 *key = (EVP_AES_HMAC_SHA256 *)(ctx->cipher_data);
  int ret;
  int key_bits = EVP_CIPHER_CTX_key_length(ctx) * 8;
  if (enc) {
    ret = aes_hw_set_encrypt_key(inkey, key_bits, &key->ks);
  } else {
    ret = aes_hw_set_decrypt_key(inkey, key_bits, &key->ks);
  }
  if (ret < 0) {
    // Fail early without touching the hash state, matching the SHA-1
    // variant's behavior.
    return 0;
  }
  SHA256_Init(&key->head);
  key->tail = key->head;
  key->md = key->head;
  key->payload_length = NO_PAYLOAD_LENGTH;
  return 1;
}
// aesni_cbc_hmac_sha256_cipher implements TLS-specific CBC-mode+HMAC-SHA256 cipher suite based encryption and decryption.
//
// For encryption in TLS version 1.0
// |in|: payload/fragment
// |len|: (|payload| + SHA256_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE
// |out|: Must point to allocated memory of at least (|payload| + SHA256_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE bytes
// If the function returns successfully |out| will contain AES-CBC(aes_key, IV, payload || hmac-sha256(mac_key, aad || payload) || padding || padding_length)
// For encryption in TLS version 1.1 and 1.2
// |in|: payload/fragment
// |len|: (|IV| + |payload| + SHA256_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE
// |out|: Must point to allocated memory of at least (|IV| + |payload| + SHA256_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE bytes
// If the function returns successfully |out| will contain AES-CBC(aes_key, mask, IV || payload || hmac-sha256(mac_key, aad || payload) || padding || padding_length)
// |len|: should be (eiv_len + plaintext_len + SHA256_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE).
// The mask and IV are according to method 2.b from https://datatracker.ietf.org/doc/html/rfc2246#section-6.2.3.2
//
// WARNING: Do not set explicit |IV| = |mask|. It will result in aes(aes_key, 0) being used as the effective IV for all records.
//
// In decryption, this function performs decryption, removes padding, and
// verifies the MAC value in constant time.
static int aesni_cbc_hmac_sha256_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                                        const uint8_t *in, size_t len) {
  EVP_AES_HMAC_SHA256 *key = (EVP_AES_HMAC_SHA256 *)(ctx->cipher_data);
  unsigned int l;
  size_t plen = key->payload_length, iv_len = 0;
  size_t aes_off = 0;
  size_t blocks;
  size_t sha_off = SHA256_CBLOCK - key->md.num;
  // payload_length is one-shot; it must be re-armed by the AAD ctrl call
  // before the next record.
  key->payload_length = NO_PAYLOAD_LENGTH;
  if (len % AES_BLOCK_SIZE) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH);
    return 0;
  }
  if (EVP_CIPHER_CTX_encrypting(ctx)) {
    // NOTE: Difference between openssl and aws-lc:
    // In encrypt case, |plen| is set in the call |EVP_CIPHER_CTX_ctrl| with
    // |EVP_CTRL_AEAD_TLS1_AAD| operation.
    // When |plen == NO_PAYLOAD_LENGTH|, it means the call did not happen.
    // In this case, aws-lc returns error(0) but openssl supports that with
    // below explanation.
    // https://mta.openssl.org/pipermail/openssl-users/2019-November/011458.html
    // -- These stitched ciphers are specifically targeted at use by libssl
    // and are designed for use in SSL/TLS only.
    if (plen == NO_PAYLOAD_LENGTH) {
      // |EVP_CIPHER_CTX_ctrl| with |EVP_CTRL_AEAD_TLS1_AAD| operation is not
      // performed.
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
      return 0;
    }
    if (len !=
        ((plen + SHA256_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)) {
      // The input should have space for plen(eiv + plaintext) + SHA256_DIGEST_LENGTH + padding.
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_INPUT_SIZE);
      return 0;
    } else if (key->aux.tls_ver >= TLS1_1_VERSION) {
      // TLS 1.1+ records carry an explicit IV which is encrypted but not
      // MACed.
      iv_len = AES_BLOCK_SIZE;
    }
    // Use stitch code |aesni_cbc_sha256_enc| when there are multiples of
    // SHA256_CBLOCK so |aesni_cbc_sha256_enc| can use AES and SHA on the
    // same data block.
    //
    // Assembly stitch handles AVX-capable processors, but its
    // performance is not optimal on AMD Jaguar, ~40% worse, for
    // unknown reasons. Incidentally processor in question supports
    // AVX, but not AMD-specific XOP extension, which can be used
    // to identify it and avoid stitch invocation. So that after we
    // establish that current CPU supports AVX, we even see if it's
    // either even XOP-capable Bulldozer-based or GenuineIntel one.
    // But SHAEXT-capable go ahead...
    if ((CRYPTO_is_SHAEXT_capable() ||
         (CRYPTO_is_AVX2_capable() &&
          (CRYPTO_is_AMD_XOP_support() | CRYPTO_is_intel_cpu()))) &&
        plen > (sha_off + iv_len) &&
        (blocks = (plen - (sha_off + iv_len)) / SHA256_CBLOCK)) {
      // Before calling |aesni_cbc_sha256_enc|, |key->md| must not hold
      // unhashed (partial) data, so flush |sha_off| bytes first.
      SHA256_Update(&key->md, in + iv_len, sha_off);
      aesni_cbc_sha256_enc(in, out, blocks, &key->ks,
                           ctx->iv, &key->md,
                           in + iv_len + sha_off);
      // Update the offsets to record and skip the part processed (encrypted
      // and hashed) by the stitch, and manually fold the hashed byte count
      // into the SHA-256 bit counters (Nh:Nl).
      blocks *= SHA256_CBLOCK;
      aes_off += blocks;
      sha_off += blocks;
      key->md.Nh += blocks >> 29;
      key->md.Nl += blocks <<= 3;
      if (key->md.Nl < (unsigned int)blocks) {
        key->md.Nh++;
      }
    } else {
      sha_off = 0;
    }
    sha_off += iv_len;
    // Hash whatever the stitch did not cover.
    SHA256_Update(&key->md, in + sha_off, plen - sha_off);
    if (in != out) {
      OPENSSL_memcpy(out + aes_off, in + aes_off, plen - aes_off);
    }
    // calculate HMAC and append it to payload.
    SHA256_Final(out + plen, &key->md);
    key->md = key->tail;
    SHA256_Update(&key->md, out + plen, SHA256_DIGEST_LENGTH);
    SHA256_Final(out + plen, &key->md);
    // pad the payload|hmac with TLS CBC padding (every pad byte equals the
    // padding length).
    plen += SHA256_DIGEST_LENGTH;
    for (l = len - plen - 1; plen < len; plen++) {
      out[plen] = l;
    }
    // encrypt HMAC|padding at once.
    aes_hw_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off, &key->ks,
                       ctx->iv, 1);
    return 1;
  } else {
    if (plen != EVP_AEAD_TLS1_AAD_LEN) {
      // |EVP_CIPHER_CTX_ctrl| with |EVP_CTRL_AEAD_TLS1_AAD| operation is not
      // performed.
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
      return 0;
    }
    // decrypt HMAC|padding at once.
    // The stitched SHA-1 variant uses the 1st block of |in| as |iv| and
    // decrypts starting from |in| + iv_len. Minor diff: this variant
    // decrypts the whole buffer and skips the IV afterwards.
    aes_hw_cbc_encrypt(in, out, len, &key->ks, ctx->iv, 0);
    // The stored AAD carries the protocol version at bytes [9,10].
    if ((key->aux.tls_aad[plen - 4] << 8 | key->aux.tls_aad[plen - 3]) >=
        TLS1_1_VERSION) {
      iv_len = AES_BLOCK_SIZE;
    }
    if (len < (iv_len + SHA256_DIGEST_LENGTH + 1)) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_INPUT_SIZE);
      return 0;
    }
    // omit explicit iv.
    out += iv_len;
    len -= iv_len;
    CONSTTIME_SECRET(out, len);
    // Remove CBC padding. Code from here on is timing-sensitive with respect to
    // |padding_ok| and |data_plus_mac_len| for CBC ciphers.
    size_t data_plus_mac_len;
    crypto_word_t padding_ok;
    if (!EVP_tls_cbc_remove_padding(&padding_ok, &data_plus_mac_len, out, len,
                                    AES_BLOCK_SIZE, SHA256_DIGEST_LENGTH)) {
      // Publicly invalid. This can be rejected in non-constant time.
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    size_t data_len = data_plus_mac_len - SHA256_DIGEST_LENGTH;
    // Patch the payload length field of the AAD before recomputing the MAC.
    key->aux.tls_aad[11] = (uint8_t)(data_len >> 8);
    key->aux.tls_aad[12] = (uint8_t)(data_len);
    // Compute the MAC and extract the one in the record.
    uint8_t mac[EVP_MAX_MD_SIZE];
    size_t mac_len;
    uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
    uint8_t *record_mac;
    if (!EVP_tls_cbc_digest_record(EVP_sha256(), mac, &mac_len,
                                   key->aux.tls_aad, out, data_len, len,
                                   key->hmac_key, 64)) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == SHA256_DIGEST_LENGTH);
    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, len);
    // Perform the MAC check and the padding check in constant-time. It should
    // be safe to simply perform the padding check first, but it would not be
    // under a different choice of MAC location on padding failure. See
    // |EVP_tls_cbc_remove_padding|.
    crypto_word_t good =
        constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), 0);
    good &= padding_ok;
    CONSTTIME_DECLASSIFY(&good, sizeof(good));
    if (!good) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    CONSTTIME_DECLASSIFY(&data_len, sizeof(data_len));
    CONSTTIME_DECLASSIFY(out, data_len);
    // End of timing-sensitive code.
    return 1;
  }
}
// aesni_cbc_hmac_sha256_ctrl handles the ctrl operations for the stitched
// AES-CBC-HMAC-SHA256 cipher.
//
// EVP_CTRL_AEAD_SET_MAC_KEY: installs the HMAC key (equivalent to
// |HMAC_Init_ex| with SHA-256): the key is zero-padded (or hashed, if longer
// than one block) to |HMAC_KEY_SIZE|, and the precomputed ipad/opad states
// are stored in |key->head|/|key->tail|. Returns 1 on success.
//
// EVP_CTRL_AEAD_TLS1_AAD: absorbs the 13-byte TLS record AAD. On the
// encrypt side it returns the number of expansion bytes (MAC + padding);
// on the decrypt side it stores the AAD and returns SHA256_DIGEST_LENGTH.
// Returns 0 on any error.
static int aesni_cbc_hmac_sha256_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
                                      void *ptr) {
  EVP_AES_HMAC_SHA256 *key = (EVP_AES_HMAC_SHA256 *)(ctx->cipher_data);
  switch (type) {
    case EVP_CTRL_AEAD_SET_MAC_KEY: {
      if (arg < 0) {
        return 0;
      }
      uint8_t hmac_key[HMAC_KEY_SIZE];
      OPENSSL_memset(hmac_key, 0, sizeof(hmac_key));
      size_t u_arg = (size_t)arg;
      if (u_arg > sizeof(hmac_key)) {
        // Per HMAC, keys longer than one block are first hashed down.
        SHA256_Init(&key->head);
        SHA256_Update(&key->head, ptr, arg);
        SHA256_Final(hmac_key, &key->head);
      } else {
        OPENSSL_memcpy(hmac_key, ptr, arg);
      }
      // Keep a copy for |EVP_tls_cbc_digest_record| in the decrypt path. Use
      // the destination's size rather than a hard-coded 64 so the copy can
      // never outgrow either buffer if |HMAC_KEY_SIZE| changes.
      OPENSSL_memcpy(key->hmac_key, hmac_key, sizeof(key->hmac_key));
      for (size_t i = 0; i < sizeof(hmac_key); i++) {
        hmac_key[i] ^= 0x36; /* ipad */
      }
      SHA256_Init(&key->head);
      SHA256_Update(&key->head, hmac_key, sizeof(hmac_key));
      for (size_t i = 0; i < sizeof(hmac_key); i++) {
        hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
      }
      SHA256_Init(&key->tail);
      SHA256_Update(&key->tail, hmac_key, sizeof(hmac_key));
      // Wipe key material from the stack.
      OPENSSL_cleanse(hmac_key, sizeof(hmac_key));
      return 1;
    }
    case EVP_CTRL_AEAD_TLS1_AAD: {
      if (arg != EVP_AEAD_TLS1_AAD_LEN) {
        OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
        return 0;
      }
      // p is
      // additional_data = |seq_num + content_type + protocol_version + payload_eiv_len|.
      // seq_num: 8 octets long.
      // content_type: 1 octets long.
      // protocol_version: 2 octets long.
      // payload_eiv_len: 2 octets long. eiv is explicit iv required by TLS 1.1+.
      uint8_t *p = ptr;
      uint16_t len = p[arg - 2] << 8 | p[arg - 1];
      if (EVP_CIPHER_CTX_encrypting(ctx)) {
        key->payload_length = len;
        if ((key->aux.tls_ver = p[arg - 4] << 8 | p[arg - 3]) >=
            TLS1_1_VERSION) {
          if (len < AES_BLOCK_SIZE) {
            OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
            return 0;
          }
          // Exclude the explicit IV from the MACed length field in the AAD.
          len -= AES_BLOCK_SIZE;
          p[arg - 2] = len >> 8;
          p[arg - 1] = len;
        }
        // Start the inner hash with the AAD.
        key->md = key->head;
        SHA256_Update(&key->md, p, arg);
        // Return the number of bytes encryption will add (MAC + padding).
        return (int)(((len + SHA256_DIGEST_LENGTH + AES_BLOCK_SIZE) &
                      -AES_BLOCK_SIZE) -
                     len);
      } else {
        OPENSSL_memcpy(key->aux.tls_aad, ptr, arg);
        key->payload_length = arg;
        return SHA256_DIGEST_LENGTH;
      }
    }
    default:
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_CTRL_NOT_IMPLEMENTED);
      return 0;
  }
}
// AES-128-CBC-HMAC-SHA256 stitched TLS cipher descriptor.
static const EVP_CIPHER aesni_128_cbc_hmac_sha256_cipher = {
    NID_aes_128_cbc_hmac_sha256 /* nid */,
    AES_BLOCK_SIZE /* block size */,
    16 /* key len */,
    AES_BLOCK_SIZE /* iv len */,
    sizeof(EVP_AES_HMAC_SHA256) /* ctx_size */,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER /* flags */,
    aesni_cbc_hmac_sha256_init_key,
    aesni_cbc_hmac_sha256_cipher,
    NULL /* cleanup */,
    aesni_cbc_hmac_sha256_ctrl};
// AES-256-CBC-HMAC-SHA256 stitched TLS cipher descriptor.
static const EVP_CIPHER aesni_256_cbc_hmac_sha256_cipher = {
    NID_aes_256_cbc_hmac_sha256 /* nid */,
    AES_BLOCK_SIZE /* block size */,
    32 /* key len */,
    AES_BLOCK_SIZE /* iv len */,
    sizeof(EVP_AES_HMAC_SHA256) /* ctx_size */,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER /* flags */,
    aesni_cbc_hmac_sha256_init_key,
    aesni_cbc_hmac_sha256_cipher,
    NULL /* cleanup */,
    aesni_cbc_hmac_sha256_ctrl};
// EVP_aes_128_cbc_hmac_sha256 returns the stitched AES-128-CBC-HMAC-SHA256
// cipher, or NULL when hardware AES is unavailable.
const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha256(void) {
  if (!hwaes_capable()) {
    return NULL;
  }
  return &aesni_128_cbc_hmac_sha256_cipher;
}
// EVP_aes_256_cbc_hmac_sha256 returns the stitched AES-256-CBC-HMAC-SHA256
// cipher, or NULL when hardware AES is unavailable.
const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha256(void) {
  if (!hwaes_capable()) {
    return NULL;
  }
  return &aesni_256_cbc_hmac_sha256_cipher;
}
#else
// Without the assembly stitch (|AES_CBC_HMAC_SHA_STITCH| undefined), the
// stitched ciphers are unavailable; signal this by returning NULL.
const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha256(void) { return NULL; }
const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha256(void) { return NULL; }
#endif /* AES_CBC_HMAC_SHA_STITCH */

View File

@@ -0,0 +1,275 @@
// Copyright (c) 2017, Google Inc.
// SPDX-License-Identifier: ISC
#include <openssl/aead.h>
#include <openssl/cipher.h>
#include <openssl/crypto.h>
#include <openssl/err.h>
#include <openssl/sha.h>
#include "../fipsmodule/cipher/internal.h"
#define EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN SHA256_DIGEST_LENGTH
#define EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN 12
// aead_aes_ctr_hmac_sha256_ctx is the per-context state for the
// AES-CTR-HMAC-SHA256 AEAD, stored inside |EVP_AEAD_CTX::state|.
struct aead_aes_ctr_hmac_sha256_ctx {
  union {
    double align;  // force alignment of the AES key schedule
    AES_KEY ks;
  } ks;
  // CTR and single-block function pointers selected at init time.
  ctr128_f ctr;
  block128_f block;
  // Precomputed HMAC inner (ipad) and outer (opad) SHA-256 states.
  SHA256_CTX inner_init_state;
  SHA256_CTX outer_init_state;
};
OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
sizeof(struct aead_aes_ctr_hmac_sha256_ctx),
AEAD_state_is_too_small)
OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >=
alignof(struct aead_aes_ctr_hmac_sha256_ctx),
AEAD_state_has_insufficient_alignment)
// hmac_init precomputes the two partial HMAC-SHA256 states for a 32-byte
// key, per RFC 2104: |out_inner| has absorbed (key ^ ipad) and |out_outer|
// has absorbed (key ^ opad), each padded with the pad byte to a full
// SHA-256 block.
static void hmac_init(SHA256_CTX *out_inner, SHA256_CTX *out_outer,
                      const uint8_t hmac_key[32]) {
  static const size_t hmac_key_len = 32;
  uint8_t block[SHA256_CBLOCK];

  // Inner padding block: key XOR 0x36, then 0x36 bytes to the block end.
  for (size_t i = 0; i < hmac_key_len; i++) {
    block[i] = hmac_key[i] ^ 0x36;
  }
  OPENSSL_memset(block + hmac_key_len, 0x36, sizeof(block) - hmac_key_len);
  SHA256_Init(out_inner);
  SHA256_Update(out_inner, block, sizeof(block));

  // Convert in place to the outer padding block: key XOR 0x5c, padded with
  // 0x5c.
  OPENSSL_memset(block + hmac_key_len, 0x5c, sizeof(block) - hmac_key_len);
  for (size_t i = 0; i < hmac_key_len; i++) {
    block[i] ^= 0x36 ^ 0x5c;
  }
  SHA256_Init(out_outer);
  SHA256_Update(out_outer, block, sizeof(block));
}
// aead_aes_ctr_hmac_sha256_init parses |key| as a 16- or 32-byte AES key
// followed by a 32-byte HMAC key, expands the AES key schedule and
// precomputes the HMAC inner/outer states. Returns one on success and zero
// on error.
static int aead_aes_ctr_hmac_sha256_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                         size_t key_len, size_t tag_len) {
  static const size_t hmac_key_len = 32;
  struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx =
      (struct aead_aes_ctr_hmac_sha256_ctx *)&ctx->state;

  if (key_len < hmac_key_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;  // EVP_AEAD_CTX_init should catch this.
  }

  const size_t aes_key_len = key_len - hmac_key_len;
  if (aes_key_len != 16 && aes_key_len != 32) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;  // EVP_AEAD_CTX_init should catch this.
  }

  size_t effective_tag_len = tag_len;
  if (effective_tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    effective_tag_len = EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN;
  }
  if (effective_tag_len > EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE);
    return 0;
  }

  aes_ctx->ctr =
      aes_ctr_set_key(&aes_ctx->ks.ks, NULL, &aes_ctx->block, key, aes_key_len);
  ctx->tag_len = effective_tag_len;
  hmac_init(&aes_ctx->inner_init_state, &aes_ctx->outer_init_state,
            key + aes_key_len);
  return 1;
}
// aead_aes_ctr_hmac_sha256_cleanup is a no-op: all state lives inline in
// |ctx->state| and nothing is heap-allocated.
static void aead_aes_ctr_hmac_sha256_cleanup(EVP_AEAD_CTX *ctx) {}
// hmac_update_uint64 hashes |value| into |sha256| as eight little-endian
// bytes.
static void hmac_update_uint64(SHA256_CTX *sha256, uint64_t value) {
  uint8_t encoded[8];
  for (size_t i = 0; i < sizeof(encoded); i++) {
    encoded[i] = (uint8_t)(value >> (8 * i));
  }
  SHA256_Update(sha256, encoded, sizeof(encoded));
}
// hmac_calculate computes HMAC-SHA256, starting from the precomputed
// inner/outer key states, over:
//   ad_len (LE64) || ciphertext_len (LE64) || nonce || ad ||
//   zero padding to a SHA-256 block boundary || ciphertext
// and writes the 32-byte digest to |out|.
static void hmac_calculate(uint8_t out[SHA256_DIGEST_LENGTH],
                           const SHA256_CTX *inner_init_state,
                           const SHA256_CTX *outer_init_state,
                           const uint8_t *ad, size_t ad_len,
                           const uint8_t *nonce, const uint8_t *ciphertext,
                           size_t ciphertext_len) {
  SHA256_CTX sha256;
  OPENSSL_memcpy(&sha256, inner_init_state, sizeof(sha256));
  // Hash the lengths first so the (ad, ciphertext) encoding is unambiguous.
  hmac_update_uint64(&sha256, ad_len);
  hmac_update_uint64(&sha256, ciphertext_len);
  SHA256_Update(&sha256, nonce, EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN);
  SHA256_Update(&sha256, ad, ad_len);
  // Pad with zeros to the end of the SHA-256 block.
  const unsigned num_padding =
      (SHA256_CBLOCK - ((sizeof(uint64_t) * 2 +
                         EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN + ad_len) %
                        SHA256_CBLOCK)) %
      SHA256_CBLOCK;
  uint8_t padding[SHA256_CBLOCK];
  OPENSSL_memset(padding, 0, num_padding);
  SHA256_Update(&sha256, padding, num_padding);
  SHA256_Update(&sha256, ciphertext, ciphertext_len);
  uint8_t inner_digest[SHA256_DIGEST_LENGTH];
  SHA256_Final(inner_digest, &sha256);
  // Outer pass: hash the inner digest under the opad state.
  OPENSSL_memcpy(&sha256, outer_init_state, sizeof(sha256));
  SHA256_Update(&sha256, inner_digest, sizeof(inner_digest));
  SHA256_Final(out, &sha256);
}
// aead_aes_ctr_hmac_sha256_crypt XORs |len| bytes from |in| into |out| with
// AES-CTR keystream whose counter block is the 12-byte |nonce| followed by
// four zero bytes.
static void aead_aes_ctr_hmac_sha256_crypt(
    const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx, uint8_t *out,
    const uint8_t *in, size_t len, const uint8_t *nonce) {
  // Since the AEAD operation is one-shot, keeping a buffer of unused keystream
  // bytes is pointless. However, |CRYPTO_ctr128_encrypt| requires it.
  uint8_t partial_block_buffer[AES_BLOCK_SIZE];
  unsigned partial_block_offset = 0;
  OPENSSL_memset(partial_block_buffer, 0, sizeof(partial_block_buffer));
  uint8_t counter[AES_BLOCK_SIZE];
  OPENSSL_memcpy(counter, nonce, EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN);
  OPENSSL_memset(counter + EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN, 0, 4);
  // Use the optimised 32-bit-counter routine when the key setup provided one.
  if (aes_ctx->ctr) {
    CRYPTO_ctr128_encrypt_ctr32(in, out, len, &aes_ctx->ks.ks, counter,
                                partial_block_buffer, &partial_block_offset,
                                aes_ctx->ctr);
  } else {
    CRYPTO_ctr128_encrypt(in, out, len, &aes_ctx->ks.ks, counter,
                          partial_block_buffer, &partial_block_offset,
                          aes_ctx->block);
  }
}
// aead_aes_ctr_hmac_sha256_seal_scatter encrypts |in| into |out| with
// AES-CTR and writes the HMAC-SHA256 tag (truncated to |ctx->tag_len|) over
// the AD, nonce and ciphertext into |out_tag|. The extra_in arguments are
// unused; this AEAD does not advertise extra_in support.
static int aead_aes_ctr_hmac_sha256_seal_scatter(
    const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
    size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in,
    size_t extra_in_len, const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx =
      (struct aead_aes_ctr_hmac_sha256_ctx *)&ctx->state;

  // The 32-bit block counter bounds the plaintext length.
  if ((uint64_t)in_len >= (UINT64_C(1) << 32) * AES_BLOCK_SIZE) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }
  if (max_out_tag_len < ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }
  if (nonce_len != EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  aead_aes_ctr_hmac_sha256_crypt(aes_ctx, out, in, in_len, nonce);

  // MAC the ciphertext (encrypt-then-MAC) and truncate to the tag length.
  uint8_t computed_tag[SHA256_DIGEST_LENGTH];
  hmac_calculate(computed_tag, &aes_ctx->inner_init_state,
                 &aes_ctx->outer_init_state, ad, ad_len, nonce, out, in_len);
  OPENSSL_memcpy(out_tag, computed_tag, ctx->tag_len);
  *out_tag_len = ctx->tag_len;
  return 1;
}
// aead_aes_ctr_hmac_sha256_open_gather verifies |in_tag| over the AD, nonce
// and ciphertext and, only on success, decrypts |in| into |out|.
static int aead_aes_ctr_hmac_sha256_open_gather(
    const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag,
    size_t in_tag_len, const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx =
      (struct aead_aes_ctr_hmac_sha256_ctx *)&ctx->state;

  if (in_tag_len != ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }
  if (nonce_len != EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  // Check the tag in constant time before producing any plaintext.
  uint8_t expected_tag[SHA256_DIGEST_LENGTH];
  hmac_calculate(expected_tag, &aes_ctx->inner_init_state,
                 &aes_ctx->outer_init_state, ad, ad_len, nonce, in, in_len);
  if (CRYPTO_memcmp(expected_tag, in_tag, ctx->tag_len) != 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  aead_aes_ctr_hmac_sha256_crypt(aes_ctx, out, in, in_len, nonce);
  return 1;
}
// aead_aes_128_ctr_hmac_sha256 is the AEAD vtable for AES-128-CTR with
// HMAC-SHA256; the key is the AES key followed by the 32-byte HMAC key.
static const EVP_AEAD aead_aes_128_ctr_hmac_sha256 = {
    16 /* AES key */ + 32 /* HMAC key */,
    12,                                    // nonce length
    EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN,  // overhead
    EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN,  // max tag length
    AEAD_AES_128_CTR_HMAC_SHA256_ID,       // evp_aead_id
    0,                                     // seal_scatter_supports_extra_in
    aead_aes_ctr_hmac_sha256_init,
    NULL /* init_with_direction */,
    aead_aes_ctr_hmac_sha256_cleanup,
    NULL /* open */,
    aead_aes_ctr_hmac_sha256_seal_scatter,
    aead_aes_ctr_hmac_sha256_open_gather,
    NULL /* get_iv */,
    NULL /* tag_len */,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
// aead_aes_256_ctr_hmac_sha256 is the AES-256 variant; it differs from the
// 128-bit table only in key length.
static const EVP_AEAD aead_aes_256_ctr_hmac_sha256 = {
    32 /* AES key */ + 32 /* HMAC key */,
    12,                                    // nonce length
    EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN,  // overhead
    EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN,  // max tag length
    AEAD_AES_256_CTR_HMAC_SHA256_ID,       // evp_aead_id
    0,                                     // seal_scatter_supports_extra_in
    aead_aes_ctr_hmac_sha256_init,
    NULL /* init_with_direction */,
    aead_aes_ctr_hmac_sha256_cleanup,
    NULL /* open */,
    aead_aes_ctr_hmac_sha256_seal_scatter,
    aead_aes_ctr_hmac_sha256_open_gather,
    NULL /* get_iv */,
    NULL /* tag_len */,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
// EVP_aead_aes_128_ctr_hmac_sha256 returns the AES-128-CTR-HMAC-SHA256 AEAD.
const EVP_AEAD *EVP_aead_aes_128_ctr_hmac_sha256(void) {
  return &aead_aes_128_ctr_hmac_sha256;
}
// EVP_aead_aes_256_ctr_hmac_sha256 returns the AES-256-CTR-HMAC-SHA256 AEAD.
const EVP_AEAD *EVP_aead_aes_256_ctr_hmac_sha256(void) {
  return &aead_aes_256_ctr_hmac_sha256;
}

View File

@@ -0,0 +1,863 @@
// Copyright (c) 2017, Google Inc.
// SPDX-License-Identifier: ISC
#include <openssl/aead.h>
#include <assert.h>
#include <openssl/cipher.h>
#include <openssl/crypto.h>
#include <openssl/err.h>
#include "../fipsmodule/cipher/internal.h"
#include "../internal.h"
#include "./internal.h"
#define EVP_AEAD_AES_GCM_SIV_NONCE_LEN 12
#define EVP_AEAD_AES_GCM_SIV_TAG_LEN 16
// TODO(davidben): AES-GCM-SIV assembly is not correct for Windows. It must save
// and restore xmm6 through xmm15.
#if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM) && \
!defined(OPENSSL_WINDOWS) && !defined(MY_ASSEMBLER_IS_TOO_OLD_FOR_AVX)
#define AES_GCM_SIV_ASM
// Optimised AES-GCM-SIV
// aead_aes_gcm_siv_asm_ctx holds the expanded AES key schedule for the
// assembly implementation (15 round keys of 16 bytes, enough for AES-256)
// plus a flag selecting the AES-128 vs AES-256 entry points.
struct aead_aes_gcm_siv_asm_ctx {
  alignas(16) uint8_t key[16 * 15];
  int is_128_bit;
};
// The assembly code assumes 8-byte alignment of the EVP_AEAD_CTX's state, and
// aligns to 16 bytes itself.
OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) + 8 >=
sizeof(struct aead_aes_gcm_siv_asm_ctx),
AEAD_state_is_too_small)
OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >= 8,
AEAD_state_has_insufficient_alignment)
// asm_ctx_from_ctx returns a 16-byte aligned context pointer derived from
// |ctx|, or NULL if the offset recorded at init time no longer matches the
// actual address of |ctx->state| (i.e. the context was relocated).
static struct aead_aes_gcm_siv_asm_ctx *asm_ctx_from_ctx(
    const EVP_AEAD_CTX *ctx) {
  // |ctx->state| is already 8-byte aligned, so adding at most eight bytes
  // reaches 16-byte alignment.
  const uintptr_t offset = ((uintptr_t)&ctx->state) & 8;
  if (ctx->state_offset != offset) {
    return NULL;
  }
  return (struct aead_aes_gcm_siv_asm_ctx *)&ctx->state.opaque[offset];
}
// aes128gcmsiv_aes_ks writes an AES-128 key schedule for |key| to
// |out_expanded_key|.
extern void aes128gcmsiv_aes_ks(const uint8_t key[16],
uint8_t out_expanded_key[16 * 15]);
// aes256gcmsiv_aes_ks writes an AES-256 key schedule for |key| to
// |out_expanded_key|.
extern void aes256gcmsiv_aes_ks(const uint8_t key[32],
uint8_t out_expanded_key[16 * 15]);
// aead_aes_gcm_siv_asm_init expands the 16- or 32-byte |key| into the
// 16-byte-aligned assembly context and records the alignment offset used to
// recover it later. Returns one on success and zero on error.
static int aead_aes_gcm_siv_asm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                     size_t key_len, size_t tag_len) {
  const size_t key_bits = key_len * 8;
  if (key_bits != 128 && key_bits != 256) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;  // EVP_AEAD_CTX_init should catch this.
  }

  size_t effective_tag_len = tag_len;
  if (effective_tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    effective_tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN;
  }
  if (effective_tag_len != EVP_AEAD_AES_GCM_SIV_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE);
    return 0;
  }

  // Record the padding needed to 16-byte align the state, then obtain the
  // aligned context through the same helper used by seal/open.
  ctx->state_offset = ((uintptr_t)&ctx->state) & 8;
  struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx = asm_ctx_from_ctx(ctx);
  if (gcm_siv_ctx == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INITIALIZATION_ERROR);
    return 0;
  }
  assert((((uintptr_t)gcm_siv_ctx) & 15) == 0);

  if (key_bits == 128) {
    gcm_siv_ctx->is_128_bit = 1;
    aes128gcmsiv_aes_ks(key, &gcm_siv_ctx->key[0]);
  } else {
    gcm_siv_ctx->is_128_bit = 0;
    aes256gcmsiv_aes_ks(key, &gcm_siv_ctx->key[0]);
  }
  ctx->tag_len = effective_tag_len;
  return 1;
}
// aead_aes_gcm_siv_asm_cleanup is a no-op: the key schedule lives inline in
// |ctx->state|.
static void aead_aes_gcm_siv_asm_cleanup(EVP_AEAD_CTX *ctx) {}
// aesgcmsiv_polyval_horner updates the POLYVAL value in |in_out_poly| to
// include a number (|in_blocks|) of 16-byte blocks of data from |in|, given
// the POLYVAL key in |key|.
extern void aesgcmsiv_polyval_horner(const uint8_t in_out_poly[16],
const uint8_t key[16], const uint8_t *in,
size_t in_blocks);
// aesgcmsiv_htable_init writes powers 1..8 of |auth_key| to |out_htable|.
extern void aesgcmsiv_htable_init(uint8_t out_htable[16 * 8],
const uint8_t auth_key[16]);
// aesgcmsiv_htable6_init writes powers 1..6 of |auth_key| to |out_htable|.
extern void aesgcmsiv_htable6_init(uint8_t out_htable[16 * 6],
const uint8_t auth_key[16]);
// aesgcmsiv_htable_polyval updates the POLYVAL value in |in_out_poly| to
// include |in_len| bytes of data from |in|. (Where |in_len| must be a multiple
// of 16.) It uses the precomputed powers of the key given in |htable|.
extern void aesgcmsiv_htable_polyval(const uint8_t htable[16 * 8],
const uint8_t *in, size_t in_len,
uint8_t in_out_poly[16]);
// aes128gcmsiv_dec decrypts |in_len| & ~15 bytes from |out| and writes them to
// |in|. |in| and |out| may be equal, but must not otherwise alias.
//
// |in_out_calculated_tag_and_scratch|, on entry, must contain:
// 1. The current value of the calculated tag, which will be updated during
// decryption and written back to the beginning of this buffer on exit.
// 2. The claimed tag, which is needed to derive counter values.
//
// While decrypting, the whole of |in_out_calculated_tag_and_scratch| may be
// used for other purposes. In order to decrypt and update the POLYVAL value, it
// uses the expanded key from |key| and the table of powers in |htable|.
extern void aes128gcmsiv_dec(const uint8_t *in, uint8_t *out,
uint8_t in_out_calculated_tag_and_scratch[16 * 8],
const uint8_t htable[16 * 6],
const struct aead_aes_gcm_siv_asm_ctx *key,
size_t in_len);
// aes256gcmsiv_dec acts like |aes128gcmsiv_dec|, but for AES-256.
extern void aes256gcmsiv_dec(const uint8_t *in, uint8_t *out,
uint8_t in_out_calculated_tag_and_scratch[16 * 8],
const uint8_t htable[16 * 6],
const struct aead_aes_gcm_siv_asm_ctx *key,
size_t in_len);
// aes128gcmsiv_kdf performs the AES-GCM-SIV KDF given the expanded key from
// |key_schedule| and the nonce in |nonce|. Note that, while only 12 bytes of
// the nonce are used, 16 bytes are read and so the value must be
// right-padded.
extern void aes128gcmsiv_kdf(const uint8_t nonce[16],
uint64_t out_key_material[8],
const uint8_t *key_schedule);
// aes256gcmsiv_kdf acts like |aes128gcmsiv_kdf|, but for AES-256.
extern void aes256gcmsiv_kdf(const uint8_t nonce[16],
uint64_t out_key_material[12],
const uint8_t *key_schedule);
// aes128gcmsiv_aes_ks_enc_x1 performs a key expansion of the AES-128 key in
// |key|, writes the expanded key to |out_expanded_key| and encrypts a single
// block from |in| to |out|.
extern void aes128gcmsiv_aes_ks_enc_x1(const uint8_t in[16], uint8_t out[16],
uint8_t out_expanded_key[16 * 15],
const uint64_t key[2]);
// aes256gcmsiv_aes_ks_enc_x1 acts like |aes128gcmsiv_aes_ks_enc_x1|, but for
// AES-256.
extern void aes256gcmsiv_aes_ks_enc_x1(const uint8_t in[16], uint8_t out[16],
uint8_t out_expanded_key[16 * 15],
const uint64_t key[4]);
// aes128gcmsiv_ecb_enc_block encrypts a single block from |in| to |out| using
// the expanded key in |expanded_key|.
extern void aes128gcmsiv_ecb_enc_block(
const uint8_t in[16], uint8_t out[16],
const struct aead_aes_gcm_siv_asm_ctx *expanded_key);
// aes256gcmsiv_ecb_enc_block acts like |aes128gcmsiv_ecb_enc_block|, but for
// AES-256.
extern void aes256gcmsiv_ecb_enc_block(
const uint8_t in[16], uint8_t out[16],
const struct aead_aes_gcm_siv_asm_ctx *expanded_key);
// aes128gcmsiv_enc_msg_x4 encrypts |in_len| bytes from |in| to |out| using the
// expanded key from |key|. (The value of |in_len| must be a multiple of 16.)
// The |in| and |out| buffers may be equal but must not otherwise overlap. The
// initial counter is constructed from the given |tag| as required by
// AES-GCM-SIV.
extern void aes128gcmsiv_enc_msg_x4(const uint8_t *in, uint8_t *out,
const uint8_t *tag,
const struct aead_aes_gcm_siv_asm_ctx *key,
size_t in_len);
// aes256gcmsiv_enc_msg_x4 acts like |aes128gcmsiv_enc_msg_x4|, but for
// AES-256.
extern void aes256gcmsiv_enc_msg_x4(const uint8_t *in, uint8_t *out,
const uint8_t *tag,
const struct aead_aes_gcm_siv_asm_ctx *key,
size_t in_len);
// aes128gcmsiv_enc_msg_x8 acts like |aes128gcmsiv_enc_msg_x4|, but is
// optimised for longer messages.
extern void aes128gcmsiv_enc_msg_x8(const uint8_t *in, uint8_t *out,
const uint8_t *tag,
const struct aead_aes_gcm_siv_asm_ctx *key,
size_t in_len);
// aes256gcmsiv_enc_msg_x8 acts like |aes256gcmsiv_enc_msg_x4|, but is
// optimised for longer messages.
extern void aes256gcmsiv_enc_msg_x8(const uint8_t *in, uint8_t *out,
const uint8_t *tag,
const struct aead_aes_gcm_siv_asm_ctx *key,
size_t in_len);
// gcm_siv_asm_polyval evaluates POLYVAL at |auth_key| on the given plaintext
// and AD. The result is written to |out_tag|.
static void gcm_siv_asm_polyval(uint8_t out_tag[16], const uint8_t *in,
                                size_t in_len, const uint8_t *ad, size_t ad_len,
                                const uint8_t auth_key[16],
                                const uint8_t nonce[12]) {
  OPENSSL_memset(out_tag, 0, 16);
  const size_t ad_blocks = ad_len / 16;
  const size_t in_blocks = in_len / 16;
  int htable_init = 0;
  alignas(16) uint8_t htable[16 * 8];
  // Precompute powers of the key only when either input is long enough
  // (more than eight blocks) for the table-based routine to be used.
  if (ad_blocks > 8 || in_blocks > 8) {
    htable_init = 1;
    aesgcmsiv_htable_init(htable, auth_key);
  }
  if (htable_init) {
    aesgcmsiv_htable_polyval(htable, ad, ad_len & ~15, out_tag);
  } else {
    aesgcmsiv_polyval_horner(out_tag, auth_key, ad, ad_blocks);
  }
  // A trailing partial AD block is zero-padded to 16 bytes.
  uint8_t scratch[16];
  if (ad_len & 15) {
    OPENSSL_memset(scratch, 0, sizeof(scratch));
    OPENSSL_memcpy(scratch, &ad[ad_len & ~15], ad_len & 15);
    aesgcmsiv_polyval_horner(out_tag, auth_key, scratch, 1);
  }
  if (htable_init) {
    aesgcmsiv_htable_polyval(htable, in, in_len & ~15, out_tag);
  } else {
    aesgcmsiv_polyval_horner(out_tag, auth_key, in, in_blocks);
  }
  // A trailing partial plaintext block is likewise zero-padded.
  if (in_len & 15) {
    OPENSSL_memset(scratch, 0, sizeof(scratch));
    OPENSSL_memcpy(scratch, &in[in_len & ~15], in_len & 15);
    aesgcmsiv_polyval_horner(out_tag, auth_key, scratch, 1);
  }
  // Final block encodes the AD and plaintext bit lengths, little-endian.
  uint8_t length_block[16];
  CRYPTO_store_u64_le(length_block, ad_len * 8);
  CRYPTO_store_u64_le(length_block + 8, in_len * 8);
  aesgcmsiv_polyval_horner(out_tag, auth_key, length_block, 1);
  // XOR in the nonce and clear the top bit of the final byte.
  for (size_t i = 0; i < 12; i++) {
    out_tag[i] ^= nonce[i];
  }
  out_tag[15] &= 0x7f;
}
// aead_aes_gcm_siv_asm_crypt_last_block handles the encryption/decryption
// (same thing in CTR mode) of the final block of a plaintext/ciphertext. It
// writes |in_len| & 15 bytes to |out| + |in_len|, based on an initial counter
// derived from |tag|.
static void aead_aes_gcm_siv_asm_crypt_last_block(
    int is_128_bit, uint8_t *out, const uint8_t *in, size_t in_len,
    const uint8_t tag[16],
    const struct aead_aes_gcm_siv_asm_ctx *enc_key_expanded) {
  alignas(16) uint8_t counter[16];
  OPENSSL_memcpy(&counter, tag, sizeof(counter));
  counter[15] |= 0x80;
  // Advance the 32-bit little-endian counter word past the full blocks
  // already processed by the bulk enc_msg routines.
  CRYPTO_store_u32_le(counter, CRYPTO_load_u32_le(counter) + in_len / 16);
  if (is_128_bit) {
    aes128gcmsiv_ecb_enc_block(counter, counter, enc_key_expanded);
  } else {
    aes256gcmsiv_ecb_enc_block(counter, counter, enc_key_expanded);
  }
  // XOR the final partial block (at offset in_len & ~15) with the keystream.
  const size_t last_bytes_offset = in_len & ~15;
  const size_t last_bytes_len = in_len & 15;
  uint8_t *last_bytes_out = &out[last_bytes_offset];
  const uint8_t *last_bytes_in = &in[last_bytes_offset];
  for (size_t i = 0; i < last_bytes_len; i++) {
    last_bytes_out[i] = last_bytes_in[i] ^ counter[i];
  }
}
// aead_aes_gcm_siv_kdf calculates the record encryption and authentication
// keys given the |nonce|.
static void aead_aes_gcm_siv_kdf(
    int is_128_bit, const struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx,
    uint64_t out_record_auth_key[2], uint64_t out_record_enc_key[4],
    const uint8_t nonce[12]) {
  // NOTE(review): bytes 12..15 of |padded_nonce| are not zeroed here, even
  // though the KDF comment above says 16 bytes are read — presumably the
  // assembly overwrites or ignores them; confirm against the
  // aes[128|256]gcmsiv_kdf implementation.
  alignas(16) uint8_t padded_nonce[16];
  OPENSSL_memcpy(padded_nonce, nonce, 12);
  alignas(16) uint64_t key_material[12];
  // Only the even-indexed 64-bit words of the KDF output are used; the odd
  // words are discarded.
  if (is_128_bit) {
    aes128gcmsiv_kdf(padded_nonce, key_material, &gcm_siv_ctx->key[0]);
    out_record_enc_key[0] = key_material[4];
    out_record_enc_key[1] = key_material[6];
  } else {
    aes256gcmsiv_kdf(padded_nonce, key_material, &gcm_siv_ctx->key[0]);
    out_record_enc_key[0] = key_material[4];
    out_record_enc_key[1] = key_material[6];
    out_record_enc_key[2] = key_material[8];
    out_record_enc_key[3] = key_material[10];
  }
  out_record_auth_key[0] = key_material[0];
  out_record_auth_key[1] = key_material[2];
}
// aead_aes_gcm_siv_asm_seal_scatter derives per-nonce record keys, computes
// the SIV tag over the plaintext and AD, encrypts with a counter derived
// from that tag, and writes the 16-byte tag to |out_tag|.
static int aead_aes_gcm_siv_asm_seal_scatter(
    const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
    size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in,
    size_t extra_in_len, const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx = asm_ctx_from_ctx(ctx);
  if(gcm_siv_ctx == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_ALIGNMENT_CHANGED);
    return 0;
  }
  // Enforce the AES-GCM-SIV limits: 2^36 bytes of plaintext, 2^61 of AD.
  const uint64_t in_len_64 = in_len;
  const uint64_t ad_len_64 = ad_len;
  if (in_len_64 > (UINT64_C(1) << 36) || ad_len_64 >= (UINT64_C(1) << 61)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }
  if (max_out_tag_len < EVP_AEAD_AES_GCM_SIV_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }
  if (nonce_len != EVP_AEAD_AES_GCM_SIV_NONCE_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }
  alignas(16) uint64_t record_auth_key[2];
  alignas(16) uint64_t record_enc_key[4];
  aead_aes_gcm_siv_kdf(gcm_siv_ctx->is_128_bit, gcm_siv_ctx, record_auth_key,
                       record_enc_key, nonce);
  alignas(16) uint8_t tag[16] = {0};
  gcm_siv_asm_polyval(tag, in, in_len, ad, ad_len,
                      (const uint8_t *)record_auth_key, nonce);
  // Expand the record encryption key and encrypt the POLYVAL result into the
  // final tag in one pass; then bulk-encrypt all whole blocks.
  struct aead_aes_gcm_siv_asm_ctx enc_key_expanded;
  if (gcm_siv_ctx->is_128_bit) {
    aes128gcmsiv_aes_ks_enc_x1(tag, tag, &enc_key_expanded.key[0],
                               record_enc_key);
    if (in_len < 128) {
      aes128gcmsiv_enc_msg_x4(in, out, tag, &enc_key_expanded, in_len & ~15);
    } else {
      aes128gcmsiv_enc_msg_x8(in, out, tag, &enc_key_expanded, in_len & ~15);
    }
  } else {
    aes256gcmsiv_aes_ks_enc_x1(tag, tag, &enc_key_expanded.key[0],
                               record_enc_key);
    if (in_len < 128) {
      aes256gcmsiv_enc_msg_x4(in, out, tag, &enc_key_expanded, in_len & ~15);
    } else {
      aes256gcmsiv_enc_msg_x8(in, out, tag, &enc_key_expanded, in_len & ~15);
    }
  }
  // Any trailing partial block is handled separately.
  if (in_len & 15) {
    aead_aes_gcm_siv_asm_crypt_last_block(gcm_siv_ctx->is_128_bit, out, in,
                                          in_len, tag, &enc_key_expanded);
  }
  OPENSSL_memcpy(out_tag, tag, sizeof(tag));
  *out_tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN;
  return 1;
}
// aead_aes_gcm_siv_asm_open_gather decrypts |in| into |out| while
// incrementally recomputing the POLYVAL tag, then compares the re-encrypted
// tag against |in_tag| in constant time.
static int aead_aes_gcm_siv_asm_open_gather(
    const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag,
    size_t in_tag_len, const uint8_t *ad, size_t ad_len) {
  const uint64_t ad_len_64 = ad_len;
  if (ad_len_64 >= (UINT64_C(1) << 61)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }
  const uint64_t in_len_64 = in_len;
  if (in_len_64 > UINT64_C(1) << 36 ||
      in_tag_len != EVP_AEAD_AES_GCM_SIV_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }
  if (nonce_len != EVP_AEAD_AES_GCM_SIV_NONCE_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }
  const struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx = asm_ctx_from_ctx(ctx);
  if(gcm_siv_ctx == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_ALIGNMENT_CHANGED);
    return 0;
  }
  // Derive the per-nonce record keys and expand the encryption key.
  alignas(16) uint64_t record_auth_key[2];
  alignas(16) uint64_t record_enc_key[4];
  aead_aes_gcm_siv_kdf(gcm_siv_ctx->is_128_bit, gcm_siv_ctx, record_auth_key,
                       record_enc_key, nonce);
  struct aead_aes_gcm_siv_asm_ctx expanded_key;
  if (gcm_siv_ctx->is_128_bit) {
    aes128gcmsiv_aes_ks((const uint8_t *)record_enc_key, &expanded_key.key[0]);
  } else {
    aes256gcmsiv_aes_ks((const uint8_t *)record_enc_key, &expanded_key.key[0]);
  }
  // calculated_tag is 16*8 bytes, rather than 16 bytes, because
  // aes[128|256]gcmsiv_dec uses the extra as scratch space.
  alignas(16) uint8_t calculated_tag[16 * 8] = {0};
  OPENSSL_memset(calculated_tag, 0, EVP_AEAD_AES_GCM_SIV_TAG_LEN);
  // Absorb the AD into POLYVAL, zero-padding a trailing partial block.
  const size_t ad_blocks = ad_len / 16;
  aesgcmsiv_polyval_horner(calculated_tag, (const uint8_t *)record_auth_key, ad,
                           ad_blocks);
  uint8_t scratch[16];
  if (ad_len & 15) {
    OPENSSL_memset(scratch, 0, sizeof(scratch));
    OPENSSL_memcpy(scratch, &ad[ad_len & ~15], ad_len & 15);
    aesgcmsiv_polyval_horner(calculated_tag, (const uint8_t *)record_auth_key,
                             scratch, 1);
  }
  alignas(16) uint8_t htable[16 * 6];
  aesgcmsiv_htable6_init(htable, (const uint8_t *)record_auth_key);
  // aes[128|256]gcmsiv_dec needs access to the claimed tag. So it's put into
  // its scratch space.
  memcpy(calculated_tag + 16, in_tag, EVP_AEAD_AES_GCM_SIV_TAG_LEN);
  if (gcm_siv_ctx->is_128_bit) {
    aes128gcmsiv_dec(in, out, calculated_tag, htable, &expanded_key, in_len);
  } else {
    aes256gcmsiv_dec(in, out, calculated_tag, htable, &expanded_key, in_len);
  }
  // Decrypt and absorb any trailing partial block by hand.
  if (in_len & 15) {
    aead_aes_gcm_siv_asm_crypt_last_block(gcm_siv_ctx->is_128_bit, out, in,
                                          in_len, in_tag, &expanded_key);
    OPENSSL_memset(scratch, 0, sizeof(scratch));
    OPENSSL_memcpy(scratch, out + (in_len & ~15), in_len & 15);
    aesgcmsiv_polyval_horner(calculated_tag, (const uint8_t *)record_auth_key,
                             scratch, 1);
  }
  // Finish POLYVAL: length block, nonce XOR, top-bit clear, then encrypt the
  // result to form the expected tag.
  uint8_t length_block[16];
  CRYPTO_store_u64_le(length_block, ad_len * 8);
  CRYPTO_store_u64_le(length_block + 8, in_len * 8);
  aesgcmsiv_polyval_horner(calculated_tag, (const uint8_t *)record_auth_key,
                           length_block, 1);
  for (size_t i = 0; i < 12; i++) {
    calculated_tag[i] ^= nonce[i];
  }
  calculated_tag[15] &= 0x7f;
  if (gcm_siv_ctx->is_128_bit) {
    aes128gcmsiv_ecb_enc_block(calculated_tag, calculated_tag, &expanded_key);
  } else {
    aes256gcmsiv_ecb_enc_block(calculated_tag, calculated_tag, &expanded_key);
  }
  if (CRYPTO_memcmp(calculated_tag, in_tag, EVP_AEAD_AES_GCM_SIV_TAG_LEN) !=
      0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }
  return 1;
}
// aead_aes_128_gcm_siv_asm is the AEAD vtable for the assembly-backed
// AES-128-GCM-SIV implementation.
static const EVP_AEAD aead_aes_128_gcm_siv_asm = {
    16,                              // key length
    EVP_AEAD_AES_GCM_SIV_NONCE_LEN,  // nonce length
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // overhead
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // max tag length
    AEAD_AES_128_GCM_SIV_ID,         // evp_aead_id
    0,                               // seal_scatter_supports_extra_in
    aead_aes_gcm_siv_asm_init,
    NULL /* init_with_direction */,
    aead_aes_gcm_siv_asm_cleanup,
    NULL /* open */,
    aead_aes_gcm_siv_asm_seal_scatter,
    aead_aes_gcm_siv_asm_open_gather,
    NULL /* get_iv */,
    NULL /* tag_len */,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
// aead_aes_256_gcm_siv_asm is the AES-256 variant of the assembly-backed
// GCM-SIV AEAD; it differs from the 128-bit table only in key length.
static const EVP_AEAD aead_aes_256_gcm_siv_asm = {
    32,                              // key length
    EVP_AEAD_AES_GCM_SIV_NONCE_LEN,  // nonce length
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // overhead
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // max tag length
    AEAD_AES_256_GCM_SIV_ID,         // evp_aead_id
    0,                               // seal_scatter_supports_extra_in
    aead_aes_gcm_siv_asm_init,
    NULL /* init_with_direction */,
    aead_aes_gcm_siv_asm_cleanup,
    NULL /* open */,
    aead_aes_gcm_siv_asm_seal_scatter,
    aead_aes_gcm_siv_asm_open_gather,
    NULL /* get_iv */,
    NULL /* tag_len */,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
#endif // X86_64 && !NO_ASM && !WINDOWS
// aead_aes_gcm_siv_ctx holds the key-generating key state for the portable
// (non-assembly) AES-GCM-SIV implementation: the expanded AES key schedule,
// the block function used to run the KDF, and a flag for AES-256.
struct aead_aes_gcm_siv_ctx {
  union {
    double align;
    AES_KEY ks;
  } ks;
  block128_f kgk_block;
  unsigned is_256 : 1;
};
OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
sizeof(struct aead_aes_gcm_siv_ctx),
AEAD_state_is_too_small)
OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >=
alignof(struct aead_aes_gcm_siv_ctx),
AEAD_state_has_insufficient_alignment)
// aead_aes_gcm_siv_init expands the 16- or 32-byte key-generating key for
// the portable AES-GCM-SIV implementation. Returns one on success and zero
// on error.
static int aead_aes_gcm_siv_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                 size_t key_len, size_t tag_len) {
  const size_t key_bits = key_len * 8;
  if (key_bits != 128 && key_bits != 256) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;  // EVP_AEAD_CTX_init should catch this.
  }

  size_t effective_tag_len = tag_len;
  if (effective_tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    effective_tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN;
  }
  if (effective_tag_len != EVP_AEAD_AES_GCM_SIV_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE);
    return 0;
  }

  struct aead_aes_gcm_siv_ctx *gcm_siv_ctx =
      (struct aead_aes_gcm_siv_ctx *)&ctx->state;
  OPENSSL_memset(gcm_siv_ctx, 0, sizeof(struct aead_aes_gcm_siv_ctx));

  // Only the block function is kept for the KDF; the ctr128_f return value
  // of |aes_ctr_set_key| is discarded.
  aes_ctr_set_key(&gcm_siv_ctx->ks.ks, NULL, &gcm_siv_ctx->kgk_block, key,
                  key_len);
  gcm_siv_ctx->is_256 = (key_len == 32);
  ctx->tag_len = effective_tag_len;
  return 1;
}
// aead_aes_gcm_siv_cleanup is a no-op: all state lives inline in
// |ctx->state|.
static void aead_aes_gcm_siv_cleanup(EVP_AEAD_CTX *ctx) {}
// gcm_siv_crypt encrypts (or decrypts — the operations are identical in CTR
// mode) |in_len| bytes from |in| to |out| using |enc_block| with |key|,
// starting from |initial_counter|. Unlike conventional CTR mode, the counter
// word is little-endian, only the first four bytes increment, and the
// GCM-SIV tweak is applied to the final byte. |in| and |out| may be equal
// but must not otherwise alias.
static void gcm_siv_crypt(uint8_t *out, const uint8_t *in, size_t in_len,
                          const uint8_t initial_counter[AES_BLOCK_SIZE],
                          block128_f enc_block, const AES_KEY *key) {
  uint8_t counter[16];
  OPENSSL_memcpy(counter, initial_counter, AES_BLOCK_SIZE);
  counter[15] |= 0x80;  // GCM-SIV counter tweak.

  size_t done = 0;
  while (done < in_len) {
    uint8_t keystream[AES_BLOCK_SIZE];
    enc_block(counter, keystream, key);
    CRYPTO_store_u32_le(counter, CRYPTO_load_u32_le(counter) + 1);

    const size_t remaining = in_len - done;
    const size_t todo =
        remaining < AES_BLOCK_SIZE ? remaining : AES_BLOCK_SIZE;
    for (size_t i = 0; i < todo; i++) {
      out[done + i] = in[done + i] ^ keystream[i];
    }
    done += todo;
  }
}
// gcm_siv_polyval evaluates POLYVAL at |auth_key| over the AD and plaintext
// (each zero-padded to a 16-byte boundary), then a block encoding their bit
// lengths. The result is XORed with the nonce and has its top bit cleared,
// per the AES-GCM-SIV tag construction, and is written to |out_tag|.
static void gcm_siv_polyval(
    uint8_t out_tag[16], const uint8_t *in, size_t in_len, const uint8_t *ad,
    size_t ad_len, const uint8_t auth_key[16],
    const uint8_t nonce[EVP_AEAD_AES_GCM_SIV_NONCE_LEN]) {
  struct polyval_ctx polyval_ctx;
  CRYPTO_POLYVAL_init(&polyval_ctx, auth_key);

  uint8_t padded[16];

  // Whole AD blocks, then a zero-padded partial block if present.
  CRYPTO_POLYVAL_update_blocks(&polyval_ctx, ad, ad_len & ~15);
  const size_t ad_rem = ad_len & 15;
  if (ad_rem != 0) {
    OPENSSL_memset(padded, 0, sizeof(padded));
    OPENSSL_memcpy(padded, ad + (ad_len & ~15), ad_rem);
    CRYPTO_POLYVAL_update_blocks(&polyval_ctx, padded, sizeof(padded));
  }

  // Whole plaintext blocks, then a zero-padded partial block if present.
  CRYPTO_POLYVAL_update_blocks(&polyval_ctx, in, in_len & ~15);
  const size_t in_rem = in_len & 15;
  if (in_rem != 0) {
    OPENSSL_memset(padded, 0, sizeof(padded));
    OPENSSL_memcpy(padded, in + (in_len & ~15), in_rem);
    CRYPTO_POLYVAL_update_blocks(&polyval_ctx, padded, sizeof(padded));
  }

  // Final block: little-endian bit lengths of AD and plaintext.
  uint8_t length_block[16];
  CRYPTO_store_u64_le(length_block, ((uint64_t)ad_len) * 8);
  CRYPTO_store_u64_le(length_block + 8, ((uint64_t)in_len) * 8);
  CRYPTO_POLYVAL_update_blocks(&polyval_ctx, length_block,
                               sizeof(length_block));

  CRYPTO_POLYVAL_finish(&polyval_ctx, out_tag);
  for (size_t i = 0; i < EVP_AEAD_AES_GCM_SIV_NONCE_LEN; i++) {
    out_tag[i] ^= nonce[i];
  }
  out_tag[15] &= 0x7f;
}
// gcm_siv_record_keys contains the keys used for a specific GCM-SIV record:
// the 16-byte POLYVAL authentication key and the expanded per-record AES
// encryption key with its block function.
struct gcm_siv_record_keys {
  uint8_t auth_key[16];
  union {
    double align;
    AES_KEY ks;
  } enc_key;
  block128_f enc_block;
};
// gcm_siv_keys calculates the keys for a specific GCM-SIV record with the
// given nonce and writes them to |*out_keys|.
static void gcm_siv_keys(const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx,
                         struct gcm_siv_record_keys *out_keys,
                         const uint8_t nonce[EVP_AEAD_AES_GCM_SIV_NONCE_LEN]) {
  const AES_KEY *const key = &gcm_siv_ctx->ks.ks;
  uint8_t key_material[(128 /* POLYVAL key */ + 256 /* max AES key */) / 8];
  // AES-128 needs 4 KDF blocks (16+16 bytes of key material); AES-256 needs
  // 6 (16+32 bytes), taking 8 bytes from each encrypted block.
  const size_t blocks_needed = gcm_siv_ctx->is_256 ? 6 : 4;
  uint8_t counter[AES_BLOCK_SIZE];
  OPENSSL_memset(counter, 0, AES_BLOCK_SIZE - EVP_AEAD_AES_GCM_SIV_NONCE_LEN);
  OPENSSL_memcpy(counter + AES_BLOCK_SIZE - EVP_AEAD_AES_GCM_SIV_NONCE_LEN,
                 nonce, EVP_AEAD_AES_GCM_SIV_NONCE_LEN);
  for (size_t i = 0; i < blocks_needed; i++) {
    counter[0] = i;
    uint8_t ciphertext[AES_BLOCK_SIZE];
    gcm_siv_ctx->kgk_block(counter, ciphertext, key);
    OPENSSL_memcpy(&key_material[i * 8], ciphertext, 8);
  }
  OPENSSL_memcpy(out_keys->auth_key, key_material, 16);
  // Note the |ctr128_f| function uses a big-endian couner, while AES-GCM-SIV
  // uses a little-endian counter. We ignore the return value and only use
  // |block128_f|. This has a significant performance cost for the fallback
  // bitsliced AES implementations (bsaes and aes_nohw).
  //
  // We currently do not consider AES-GCM-SIV to be performance-sensitive on
  // client hardware. If this changes, we can write little-endian |ctr128_f|
  // functions.
  aes_ctr_set_key(&out_keys->enc_key.ks, NULL, &out_keys->enc_block,
                  key_material + 16, gcm_siv_ctx->is_256 ? 32 : 16);
}
// aead_aes_gcm_siv_seal_scatter derives per-nonce record keys, computes the
// SIV tag over the plaintext and AD, encrypts the plaintext with a counter
// derived from that tag, and writes the 16-byte tag to |out_tag|. The
// extra_in arguments are unused; this AEAD does not advertise extra_in
// support.
static int aead_aes_gcm_siv_seal_scatter(
    const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
    size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in,
    size_t extra_in_len, const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx =
      (struct aead_aes_gcm_siv_ctx *)&ctx->state;

  // Reject overflow of in_len + tag and the AES-GCM-SIV size limits
  // (2^36 bytes of plaintext, 2^61 bytes of AD).
  if (in_len + EVP_AEAD_AES_GCM_SIV_TAG_LEN < in_len ||
      (uint64_t)in_len > (UINT64_C(1) << 36) ||
      (uint64_t)ad_len >= (UINT64_C(1) << 61)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }
  if (max_out_tag_len < EVP_AEAD_AES_GCM_SIV_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }
  if (nonce_len != EVP_AEAD_AES_GCM_SIV_NONCE_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  struct gcm_siv_record_keys keys;
  gcm_siv_keys(gcm_siv_ctx, &keys, nonce);

  // tag = AES(POLYVAL(ad, plaintext) ^ nonce); the ciphertext counter is
  // derived from the tag inside |gcm_siv_crypt|.
  uint8_t tag[16];
  gcm_siv_polyval(tag, in, in_len, ad, ad_len, keys.auth_key, nonce);
  keys.enc_block(tag, tag, &keys.enc_key.ks);

  gcm_siv_crypt(out, in, in_len, tag, keys.enc_block, &keys.enc_key.ks);

  OPENSSL_memcpy(out_tag, tag, EVP_AEAD_AES_GCM_SIV_TAG_LEN);
  *out_tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN;
  return 1;
}
// aead_aes_gcm_siv_open_gather opens per RFC 8452: decrypt first (the
// received tag serves as the initial counter block), then recompute the tag
// over the candidate plaintext and compare in constant time.
// NOTE: on authentication failure |out| has already been written with the
// unauthenticated decryption; callers must discard it.
static int aead_aes_gcm_siv_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                        const uint8_t *nonce, size_t nonce_len,
                                        const uint8_t *in, size_t in_len,
                                        const uint8_t *in_tag,
                                        size_t in_tag_len, const uint8_t *ad,
                                        size_t ad_len) {
  // 64-bit copy avoids "always false" warnings on 32-bit platforms.
  const uint64_t ad_len_64 = ad_len;
  if (ad_len_64 >= (UINT64_C(1) << 61)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }
  const uint64_t in_len_64 = in_len;
  if (in_tag_len != EVP_AEAD_AES_GCM_SIV_TAG_LEN ||
      in_len_64 > (UINT64_C(1) << 36) + AES_BLOCK_SIZE) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }
  if (nonce_len != EVP_AEAD_AES_GCM_SIV_NONCE_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }
  const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx =
      (struct aead_aes_gcm_siv_ctx *)&ctx->state;
  // Derive the per-nonce record keys, decrypt, then verify.
  struct gcm_siv_record_keys keys;
  gcm_siv_keys(gcm_siv_ctx, &keys, nonce);
  gcm_siv_crypt(out, in, in_len, in_tag, keys.enc_block, &keys.enc_key.ks);
  uint8_t expected_tag[EVP_AEAD_AES_GCM_SIV_TAG_LEN];
  gcm_siv_polyval(expected_tag, out, in_len, ad, ad_len, keys.auth_key, nonce);
  keys.enc_block(expected_tag, expected_tag, &keys.enc_key.ks);
  // Constant-time comparison to avoid leaking tag bytes.
  if (CRYPTO_memcmp(expected_tag, in_tag, sizeof(expected_tag)) != 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }
  return 1;
}
// AES-128-GCM-SIV (RFC 8452) vtable, generic (non-assembly) implementation.
static const EVP_AEAD aead_aes_128_gcm_siv = {
    16,                              // key length
    EVP_AEAD_AES_GCM_SIV_NONCE_LEN,  // nonce length
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // overhead
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // max tag length
    AEAD_AES_128_GCM_SIV_ID,         // evp_aead_id
    0,                               // seal_scatter_supports_extra_in
    aead_aes_gcm_siv_init,
    NULL /* init_with_direction */,
    aead_aes_gcm_siv_cleanup,
    NULL /* open */,
    aead_aes_gcm_siv_seal_scatter,
    aead_aes_gcm_siv_open_gather,
    NULL /* get_iv */,
    NULL /* tag_len */,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
// AES-256-GCM-SIV (RFC 8452) vtable, generic (non-assembly) implementation.
static const EVP_AEAD aead_aes_256_gcm_siv = {
    32,                              // key length
    EVP_AEAD_AES_GCM_SIV_NONCE_LEN,  // nonce length
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // overhead
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // max tag length
    AEAD_AES_256_GCM_SIV_ID,         // evp_aead_id
    0,                               // seal_scatter_supports_extra_in
    aead_aes_gcm_siv_init,
    NULL /* init_with_direction */,
    aead_aes_gcm_siv_cleanup,
    NULL /* open */,
    aead_aes_gcm_siv_seal_scatter,
    aead_aes_gcm_siv_open_gather,
    NULL /* get_iv */,
    NULL /* tag_len */,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
#if defined(AES_GCM_SIV_ASM)

// Returns one if the AVX/AES-NI assembly AES-GCM-SIV implementation can run
// on this CPU.
static int aes_gcm_siv_asm_usable(void) {
  return CRYPTO_is_AVX_capable() && CRYPTO_is_AESNI_capable();
}

const EVP_AEAD *EVP_aead_aes_128_gcm_siv(void) {
  return aes_gcm_siv_asm_usable() ? &aead_aes_128_gcm_siv_asm
                                  : &aead_aes_128_gcm_siv;
}

const EVP_AEAD *EVP_aead_aes_256_gcm_siv(void) {
  return aes_gcm_siv_asm_usable() ? &aead_aes_256_gcm_siv_asm
                                  : &aead_aes_256_gcm_siv;
}

int x86_64_assembly_implementation_FOR_TESTING(void) {
  return aes_gcm_siv_asm_usable();
}

#else

const EVP_AEAD *EVP_aead_aes_128_gcm_siv(void) { return &aead_aes_128_gcm_siv; }

const EVP_AEAD *EVP_aead_aes_256_gcm_siv(void) { return &aead_aes_256_gcm_siv; }

int x86_64_assembly_implementation_FOR_TESTING(void) { return 0; }

#endif  // AES_GCM_SIV_ASM

View File

@@ -0,0 +1,728 @@
// Copyright (c) 2014, Google Inc.
// SPDX-License-Identifier: ISC
#include <openssl/aead.h>
#include <string.h>
#include <openssl/chacha.h>
#include <openssl/cipher.h>
#include <openssl/err.h>
#include <openssl/mem.h>
#include <openssl/nid.h>
#include <openssl/poly1305.h>
#include <openssl/type_check.h>
#include "../chacha/internal.h"
#include "../fipsmodule/cipher/internal.h"
#include "../internal.h"
#include "internal.h"
// RFC 8439 parameters: 256-bit key, 96-bit nonce, 64-byte keystream block.
#define CHACHA_KEY_LEN 32
#define CHACHA_IV_LEN 12
#define CHACHA_BLOCK_LEN 64
// 32-bit block counter prepended to the 96-bit nonce.
#define CHACHA_CTR_IV_LEN 16
// ChaCha-Poly specific context within an EVP_CIPHER_CTX
#define CCP_CTX(ctx) ((CIPHER_CHACHA_POLY_CTX *) ctx->cipher_data)
// Return the CIPHER_CHACHA_KEY from a CIPHER_CHACHA_POLY_CTX
#define CC_KEY(ccp) (&(ccp)->key)
// Return the poly1305_state from a CIPHER_CHACHA_POLY_CTX
#define POLY_CTX(ccp) (&(ccp)->poly_ctx)
// Struct for Poly1305 key within an EVP_AEAD_CTX
typedef struct aead_chacha20_poly1305_ctx {
  uint8_t key[CHACHA_KEY_LEN];
} AEAD_CHACHA_POLY_CTX;
// Struct for ChaCha key within an EVP_CIPHER_CTX
typedef struct {
  // Key as little-endian 32-bit words.
  uint32_t key[CHACHA_KEY_LEN / 4];
  // Buffer containing both the counter and nonce.
  // The index 0 is the counter and the remaining portion is the nonce.
  uint32_t counter_nonce[CHACHA_CTR_IV_LEN / 4];
  // Buffer for any partially used keys
  uint8_t buf[CHACHA_BLOCK_LEN];
  // Number of bytes of |buf| already consumed (0 when block-aligned).
  uint32_t partial_len;
} CIPHER_CHACHA_KEY;
typedef struct cipher_chacha_poly_ctx {
  CIPHER_CHACHA_KEY key;
  // Nonce words cached separately for |CRYPTO_chacha_20| calls.
  uint32_t iv[CHACHA_IV_LEN / 4];
  uint8_t tag_len;
  uint8_t tag[POLY1305_TAG_LEN];
  // Use 64-bit integers so this struct can be passed directly into poly1305
  struct { uint64_t aad, text; } len;
  int32_t poly_initialized;
  // Non-zero while AAD updates are pending their 16-byte padding.
  int32_t pad_aad;
  poly1305_state poly_ctx;
} CIPHER_CHACHA_POLY_CTX;
// The AEAD state union must be able to hold (and align) the key struct.
OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
                          sizeof(AEAD_CHACHA_POLY_CTX),
                      AEAD_state_is_too_small)
OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >=
                          alignof(AEAD_CHACHA_POLY_CTX),
                      AEAD_state_has_insufficient_alignment)
// Configures |ctx| with |key| and the requested |tag_len|. A |tag_len| of
// zero selects the full 16-byte Poly1305 tag; larger tags are rejected.
static int aead_chacha20_poly1305_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                       size_t key_len, size_t tag_len) {
  AEAD_CHACHA_POLY_CTX *aead_state = (AEAD_CHACHA_POLY_CTX *)&ctx->state;

  const size_t effective_tag_len = (tag_len == 0) ? POLY1305_TAG_LEN : tag_len;
  if (effective_tag_len > POLY1305_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }
  if (key_len != sizeof(aead_state->key)) {
    // Internal error: |EVP_AEAD_CTX_init| should have rejected this.
    return 0;
  }

  OPENSSL_memcpy(aead_state->key, key, key_len);
  ctx->tag_len = effective_tag_len;
  return 1;
}
// No-op: the key is stored inline in |ctx->state|; nothing is heap-allocated.
static void aead_chacha20_poly1305_cleanup(EVP_AEAD_CTX *ctx) {}
// Feeds |data_len| into |poly1305| encoded as a 64-bit little-endian
// integer, as RFC 8439 requires for the AD and ciphertext lengths.
static void poly1305_update_length(poly1305_state *poly1305, size_t data_len) {
  uint8_t encoded_len[8];
  uint64_t remaining = data_len;
  for (size_t i = 0; i < sizeof(encoded_len); i++) {
    encoded_len[i] = (uint8_t)(remaining & 0xff);
    remaining >>= 8;
  }
  CRYPTO_poly1305_update(poly1305, encoded_len, sizeof(encoded_len));
}
// calc_tag fills |tag| with the authentication tag for the given inputs.
// Implements the RFC 8439 MAC: the Poly1305 key is the first 32 bytes of the
// ChaCha20 keystream at counter zero, then AD and ciphertext (including the
// optional |ciphertext_extra| suffix) are absorbed, each zero-padded to a
// 16-byte boundary, followed by the two 64-bit lengths.
static void calc_tag(uint8_t tag[POLY1305_TAG_LEN], const uint8_t *key,
                     const uint8_t nonce[CHACHA_IV_LEN], const uint8_t *ad,
                     size_t ad_len, const uint8_t *ciphertext,
                     size_t ciphertext_len, const uint8_t *ciphertext_extra,
                     size_t ciphertext_extra_len) {
  alignas(16) uint8_t poly1305_key[CHACHA_KEY_LEN];
  OPENSSL_memset(poly1305_key, 0, sizeof(poly1305_key));
  // Encrypting 32 zero bytes at counter 0 yields the one-time Poly1305 key.
  CRYPTO_chacha_20(poly1305_key, poly1305_key, sizeof(poly1305_key), key, nonce,
                   0);
  static const uint8_t padding[16] = {0};  // Padding is all zeros.
  poly1305_state ctx;
  CRYPTO_poly1305_init(&ctx, poly1305_key);
  CRYPTO_poly1305_update(&ctx, ad, ad_len);
  if (ad_len % 16 != 0) {
    CRYPTO_poly1305_update(&ctx, padding, sizeof(padding) - (ad_len % 16));
  }
  CRYPTO_poly1305_update(&ctx, ciphertext, ciphertext_len);
  CRYPTO_poly1305_update(&ctx, ciphertext_extra, ciphertext_extra_len);
  // The two ciphertext pieces are padded as one contiguous message.
  const size_t ciphertext_total = ciphertext_len + ciphertext_extra_len;
  if (ciphertext_total % 16 != 0) {
    CRYPTO_poly1305_update(&ctx, padding,
                           sizeof(padding) - (ciphertext_total % 16));
  }
  poly1305_update_length(&ctx, ad_len);
  poly1305_update_length(&ctx, ciphertext_total);
  CRYPTO_poly1305_finish(&ctx, tag);
}
// chacha20_poly1305_seal_scatter encrypts |in_len| bytes from |in| to |out|,
// encrypts |extra_in| into the start of |out_tag|, and appends a |tag_len|
// byte tag. Dispatches to the fused assembly implementation when available,
// otherwise uses |CRYPTO_chacha_20| + |calc_tag|.
static int chacha20_poly1305_seal_scatter(
    const uint8_t *key, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len,
    size_t max_out_tag_len, const uint8_t *nonce, size_t nonce_len,
    const uint8_t *in, size_t in_len, const uint8_t *extra_in,
    size_t extra_in_len, const uint8_t *ad, size_t ad_len, size_t tag_len) {
  // Reject overflow of |extra_in_len + tag_len|.
  if (extra_in_len + tag_len < tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }
  if (max_out_tag_len < tag_len + extra_in_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }
  if (nonce_len != CHACHA_IV_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }
  // |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow
  // individual operations that work on more than 256GB at a time.
  // |in_len_64| is needed because, on 32-bit platforms, size_t is only
  // 32-bits and this produces a warning because it's always false.
  // Casting to uint64_t inside the conditional is not sufficient to stop
  // the warning.
  const uint64_t in_len_64 = in_len;
  if (in_len_64 >= (UINT64_C(1) << 32) * 64 - 64) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }
  if (max_out_tag_len < tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }
  // If the extra input is given, it is expected to be very short and so is
  // encrypted byte-by-byte first, continuing the keystream from the block
  // after the main plaintext.
  if (extra_in_len) {
    static const size_t kChaChaBlockSize = 64;
    uint32_t block_counter = (uint32_t)(1 + (in_len / kChaChaBlockSize));
    size_t offset = in_len % kChaChaBlockSize;
    uint8_t block[64 /* kChaChaBlockSize */];
    for (size_t done = 0; done < extra_in_len; block_counter++) {
      memset(block, 0, sizeof(block));
      CRYPTO_chacha_20(block, block, sizeof(block), key, nonce, block_counter);
      for (size_t i = offset; i < sizeof(block) && done < extra_in_len;
           i++, done++) {
        out_tag[done] = extra_in[done] ^ block[i];
      }
      offset = 0;
    }
  }
  union chacha20_poly1305_seal_data data;
  if (chacha20_poly1305_asm_capable()) {
    OPENSSL_memcpy(data.in.key, key, CHACHA_KEY_LEN);
    data.in.counter = 0;
    OPENSSL_memcpy(data.in.nonce, nonce, CHACHA_IV_LEN);
    data.in.extra_ciphertext = out_tag;
    data.in.extra_ciphertext_len = extra_in_len;
    chacha20_poly1305_seal(out, in, in_len, ad, ad_len, &data);
  } else {
    // Counter 1: counter 0 is reserved for the Poly1305 key (see |calc_tag|).
    CRYPTO_chacha_20(out, in, in_len, key, nonce, 1);
    calc_tag(data.out.tag, key, nonce, ad, ad_len, out, in_len, out_tag,
             extra_in_len);
  }
  OPENSSL_memcpy(out_tag + extra_in_len, data.out.tag, tag_len);
  *out_tag_len = extra_in_len + tag_len;
  return 1;
}
// AEAD vtable adapter: pulls the key out of |ctx->state| and forwards to the
// shared ChaCha20-Poly1305 seal routine with the context's tag length.
static int aead_chacha20_poly1305_seal_scatter(
    const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
    size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in,
    size_t extra_in_len, const uint8_t *ad, size_t ad_len) {
  const AEAD_CHACHA_POLY_CTX *aead_state =
      (const AEAD_CHACHA_POLY_CTX *)&ctx->state;
  return chacha20_poly1305_seal_scatter(aead_state->key, out, out_tag,
                                        out_tag_len, max_out_tag_len, nonce,
                                        nonce_len, in, in_len, extra_in,
                                        extra_in_len, ad, ad_len, ctx->tag_len);
}
// XChaCha20-Poly1305 seal: derives a ChaCha20 subkey and 96-bit subnonce
// from the 192-bit nonce via HChaCha20, then seals as regular
// ChaCha20-Poly1305.
static int aead_xchacha20_poly1305_seal_scatter(
    const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
    size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in,
    size_t extra_in_len, const uint8_t *ad, size_t ad_len) {
  const AEAD_CHACHA_POLY_CTX *aead_state =
      (const AEAD_CHACHA_POLY_CTX *)&ctx->state;

  // XChaCha20-Poly1305 takes a 192-bit nonce.
  if (nonce_len != 24) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  // HChaCha20 maps the key and the first 16 nonce bytes to a subkey. The
  // ChaCha20 nonce is four zero bytes followed by the last 8 nonce bytes.
  alignas(4) uint8_t subkey[CHACHA_KEY_LEN];
  alignas(4) uint8_t subnonce[CHACHA_IV_LEN] = {0};
  CRYPTO_hchacha20(subkey, aead_state->key, nonce);
  OPENSSL_memcpy(subnonce + 4, nonce + 16, 8);

  return chacha20_poly1305_seal_scatter(subkey, out, out_tag, out_tag_len,
                                        max_out_tag_len, subnonce,
                                        sizeof(subnonce), in, in_len, extra_in,
                                        extra_in_len, ad, ad_len, ctx->tag_len);
}
// chacha20_poly1305_open_gather verifies |in_tag| and decrypts |in| into
// |out|. NOTE: in the non-assembly path the decryption is written to |out|
// before the tag comparison; on failure callers must discard |out|.
static int chacha20_poly1305_open_gather(const uint8_t *key, uint8_t *out,
                                         const uint8_t *nonce, size_t nonce_len,
                                         const uint8_t *in, size_t in_len,
                                         const uint8_t *in_tag,
                                         size_t in_tag_len, const uint8_t *ad,
                                         size_t ad_len, size_t tag_len) {
  if (nonce_len != CHACHA_IV_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }
  if (in_tag_len != tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }
  // |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow
  // individual operations that work on more than 256GB at a time.
  // |in_len_64| is needed because, on 32-bit platforms, size_t is only
  // 32-bits and this produces a warning because it's always false.
  // Casting to uint64_t inside the conditional is not sufficient to stop
  // the warning.
  const uint64_t in_len_64 = in_len;
  if (in_len_64 >= (UINT64_C(1) << 32) * 64 - 64) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }
  union chacha20_poly1305_open_data data;
  if (chacha20_poly1305_asm_capable()) {
    OPENSSL_memcpy(data.in.key, key, CHACHA_KEY_LEN);
    data.in.counter = 0;
    OPENSSL_memcpy(data.in.nonce, nonce, CHACHA_IV_LEN);
    chacha20_poly1305_open(out, in, in_len, ad, ad_len, &data);
  } else {
    // Tag is computed over the ciphertext; decrypt starts at counter 1
    // (counter 0 generates the Poly1305 key, see |calc_tag|).
    calc_tag(data.out.tag, key, nonce, ad, ad_len, in, in_len, NULL, 0);
    CRYPTO_chacha_20(out, in, in_len, key, nonce, 1);
  }
  // Constant-time tag comparison.
  if (CRYPTO_memcmp(data.out.tag, in_tag, tag_len) != 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }
  return 1;
}
// AEAD vtable adapter: pulls the key out of |ctx->state| and forwards to the
// shared ChaCha20-Poly1305 open routine with the context's tag length.
static int aead_chacha20_poly1305_open_gather(
    const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag,
    size_t in_tag_len, const uint8_t *ad, size_t ad_len) {
  const AEAD_CHACHA_POLY_CTX *aead_state =
      (const AEAD_CHACHA_POLY_CTX *)&ctx->state;
  return chacha20_poly1305_open_gather(aead_state->key, out, nonce, nonce_len,
                                       in, in_len, in_tag, in_tag_len, ad,
                                       ad_len, ctx->tag_len);
}
// XChaCha20-Poly1305 open: derives a ChaCha20 subkey and 96-bit subnonce
// from the 192-bit nonce via HChaCha20, then opens as regular
// ChaCha20-Poly1305.
static int aead_xchacha20_poly1305_open_gather(
    const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag,
    size_t in_tag_len, const uint8_t *ad, size_t ad_len) {
  const AEAD_CHACHA_POLY_CTX *aead_state =
      (const AEAD_CHACHA_POLY_CTX *)&ctx->state;

  // XChaCha20-Poly1305 takes a 192-bit nonce.
  if (nonce_len != 24) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  // HChaCha20 maps the key and the first 16 nonce bytes to a subkey. The
  // ChaCha20 nonce is four zero bytes followed by the last 8 nonce bytes.
  alignas(4) uint8_t subkey[CHACHA_KEY_LEN];
  alignas(4) uint8_t subnonce[CHACHA_IV_LEN] = {0};
  CRYPTO_hchacha20(subkey, aead_state->key, nonce);
  OPENSSL_memcpy(subnonce + 4, nonce + 16, 8);

  return chacha20_poly1305_open_gather(subkey, out, subnonce,
                                       sizeof(subnonce), in, in_len, in_tag,
                                       in_tag_len, ad, ad_len, ctx->tag_len);
}
// ChaCha20-Poly1305 (RFC 8439) AEAD vtable.
static const EVP_AEAD aead_chacha20_poly1305 = {
    CHACHA_KEY_LEN,             // key len
    CHACHA_IV_LEN,              // nonce len
    POLY1305_TAG_LEN,           // overhead
    POLY1305_TAG_LEN,           // max tag length
    AEAD_CHACHA20_POLY1305_ID,  // evp_aead_id
    1,                          // seal_scatter_supports_extra_in
    aead_chacha20_poly1305_init,
    NULL,  // init_with_direction
    aead_chacha20_poly1305_cleanup,
    NULL /* open */,
    aead_chacha20_poly1305_seal_scatter,
    aead_chacha20_poly1305_open_gather,
    NULL,  // get_iv
    NULL,  // tag_len
    NULL,  // serialize_state
    NULL,  // deserialize_state
};
// XChaCha20-Poly1305 AEAD vtable (192-bit nonce; HChaCha20 key derivation).
static const EVP_AEAD aead_xchacha20_poly1305 = {
    CHACHA_KEY_LEN,             // key len
    24,                         // nonce len
    POLY1305_TAG_LEN,           // overhead
    POLY1305_TAG_LEN,           // max tag length
    AEAD_XCHACHA20_POLY1305_ID, // evp_aead_id
    1,                          // seal_scatter_supports_extra_in
    aead_chacha20_poly1305_init,
    NULL,  // init_with_direction
    aead_chacha20_poly1305_cleanup,
    NULL /* open */,
    aead_xchacha20_poly1305_seal_scatter,
    aead_xchacha20_poly1305_open_gather,
    NULL,  // get_iv
    NULL,  // tag_len
    NULL,  // serialize_state
    NULL,  // deserialize_state
};
// Public accessor for the ChaCha20-Poly1305 AEAD.
const EVP_AEAD *EVP_aead_chacha20_poly1305(void) {
  return &aead_chacha20_poly1305;
}
// Public accessor for the XChaCha20-Poly1305 AEAD.
const EVP_AEAD *EVP_aead_xchacha20_poly1305(void) {
  return &aead_xchacha20_poly1305;
}
// Loads |user_key| and/or |counter_nonce| into |ctx|'s ChaCha key as
// little-endian 32-bit words; either may be NULL to leave it unchanged.
// Always resets the partial-block state.
static int cipher_chacha20_poly1305_init_key(CIPHER_CHACHA_POLY_CTX *ctx,
    const uint8_t user_key[CHACHA_KEY_LEN],
    const uint8_t counter_nonce[CHACHA_CTR_IV_LEN])
{
  CIPHER_CHACHA_KEY *chacha = CC_KEY(ctx);
  if (user_key != NULL) {
    for (size_t i = 0; i < CHACHA_KEY_LEN / 4; i++) {
      chacha->key[i] = CRYPTO_load_u32_le(user_key + 4 * i);
    }
  }
  if (counter_nonce != NULL) {
    for (size_t i = 0; i < CHACHA_CTR_IV_LEN / 4; i++) {
      chacha->counter_nonce[i] = CRYPTO_load_u32_le(counter_nonce + 4 * i);
    }
  }
  chacha->partial_len = 0;
  return 1;
}
// EVP_CIPHER init hook. Resets the AEAD running state and, when given,
// installs the key and/or the 96-bit IV (prefixed with a zero 32-bit
// counter). |enc| is unused; direction is read from the context later.
static int cipher_chacha20_poly1305_init(EVP_CIPHER_CTX *ctx,
                                         const uint8_t *key,
                                         const uint8_t *iv, int32_t enc) {
  CIPHER_CHACHA_POLY_CTX *cipher_ctx = CCP_CTX(ctx);

  // A fresh Init starts a new message.
  cipher_ctx->len.aad = 0;
  cipher_ctx->len.text = 0;
  cipher_ctx->pad_aad = 0;
  cipher_ctx->poly_initialized = 0;

  if (key == NULL && iv == NULL) {
    return 1;
  }

  // Init can be called multiple times before starting the cipher to
  // independently initialize any combination of Key/IV/NULL.
  if (iv == NULL) {
    cipher_chacha20_poly1305_init_key(cipher_ctx, key, NULL);
    return 1;
  }

  // Prepend a zero 32-bit counter to the 96-bit nonce.
  uint8_t counter_nonce[CHACHA_CTR_IV_LEN] = {0};
  OPENSSL_memcpy(counter_nonce + CHACHA_CTR_IV_LEN - CHACHA_IV_LEN, iv,
                 CHACHA_IV_LEN);
  cipher_chacha20_poly1305_init_key(cipher_ctx, key, counter_nonce);

  // Cache the nonce words (the last three counter_nonce words) in |iv|.
  for (int i = 0; i < 3; i++) {
    cipher_ctx->iv[i] = cipher_ctx->key.counter_nonce[i + 1];
  }
  return 1;
}
// cipher_chacha20_do_cipher XORs |in_len| bytes of |in| with the ChaCha20
// keystream into |out|, resuming any partially consumed keystream block from
// |key->buf| and saving the new partial block for the next call. The running
// 32-bit block counter lives in |key->counter_nonce[0]|.
static int cipher_chacha20_do_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                                     const uint8_t *in, size_t in_len)
{
  CIPHER_CHACHA_POLY_CTX *cipher_ctx = CCP_CTX(ctx);
  CIPHER_CHACHA_KEY *key = CC_KEY(cipher_ctx);
  uint32_t n, rem, counter;
  // Complete any partial block
  n = key->partial_len;
  if (n) {
    // Compute the cipher using our partially used key and any new input up
    // to the next block
    while (in_len && n < CHACHA_BLOCK_LEN) {
      // Compute 1-byte of output by xor'ing 1-byte of input with the
      // corresponding key byte and increment it all for the next byte.
      *out++ = *in++ ^ key->buf[n++];
      in_len--;
    }
    key->partial_len = n;
    // If we consumed all the input, we're done
    if (in_len == 0) {
      return 1;
    }
    // If we completed a block, increment the counter
    if (n == CHACHA_BLOCK_LEN) {
      key->partial_len = 0;
      // If this overflows we let the cipher wrap. This would be a bug in the
      // calling code as overflow behavior is not defined in RFC 8439.
      key->counter_nonce[0]++;
    }
  }
#ifdef OPENSSL_BIG_ENDIAN
  // |CRYPTO_chacha_20| expects the input as a little-endian byte array.
  uint8_t chacha_key[CHACHA_KEY_LEN];
  uint8_t nonce[CHACHA_IV_LEN];
  for(size_t i = 0; i < CHACHA_KEY_LEN / 4; i++) {
    CRYPTO_store_u32_le(chacha_key + (i * sizeof(uint32_t)),
                        cipher_ctx->key.key[i]);
  }
  for(size_t i = 0; i < CHACHA_IV_LEN / 4; i++) {
    CRYPTO_store_u32_le(nonce + (i * sizeof(uint32_t)),
                        cipher_ctx->iv[i]);
  }
#else
  // On little-endian platforms the 32-bit words are already the right bytes.
  const uint8_t *chacha_key = (const uint8_t *) cipher_ctx->key.key;
  const uint8_t *nonce = (const uint8_t *) cipher_ctx->iv;
#endif
  // Truncate down to the last complete block prior to the bulk cipher
  rem = (uint32_t)(in_len % CHACHA_BLOCK_LEN);
  in_len -= rem;
  counter = key->counter_nonce[0];
  while (in_len >= CHACHA_BLOCK_LEN) {
    size_t blocks = in_len / CHACHA_BLOCK_LEN;
    // 1<<28 is just a not-so-small yet not-so-large number... Below
    // condition is practically never met, but it has to be checked for code
    // correctness.
    if (sizeof(size_t) > sizeof(uint32_t) && blocks > (1U<<28)) {
      blocks = (1U << 28);
    }
    // As ChaCha20_ctr32 operates on 32-bit counter, caller has to handle
    // overflow. 'if' below detects the overflow, which is then handled by
    // limiting the amount of blocks to the exact overflow point. This while
    // loop then continues the cipher by wrapping around with counter=0.
    counter += (uint32_t) blocks;
    if (counter < blocks) {
      blocks -= counter;
      counter = 0;
    }
    blocks *= CHACHA_BLOCK_LEN;
    // Encrypt starting from the counter value saved before this iteration;
    // |counter| already holds the value for the next iteration.
    CRYPTO_chacha_20(out, in, blocks, chacha_key, nonce,
                     key->counter_nonce[0]);
    in_len -= blocks;
    in += blocks;
    out += blocks;
    key->counter_nonce[0] = counter;
  }
  // Start the next block if we have any leftover input
  if (rem) {
    OPENSSL_memset(key->buf, 0, sizeof(key->buf));
    // Obtain the current key and store it in the context
    CRYPTO_chacha_20(key->buf, key->buf, CHACHA_BLOCK_LEN, chacha_key, nonce,
                     key->counter_nonce[0]);
    for (n = 0; n < rem; n++) {
      out[n] = in[n] ^ key->buf[n];
    }
    key->partial_len = rem;
  }
  return 1;
}
// cipher_chacha20_poly1305_do_cipher is the EVP_CIPH_FLAG_CUSTOM_CIPHER hook
// implementing RFC 8439 AEAD through the EVP_CipherUpdate/Final protocol:
//   in != NULL, out == NULL  -> absorb AAD
//   in != NULL, out != NULL  -> encrypt/decrypt text
//   in == NULL               -> finalize; compute (and on decrypt, verify) tag
// Returns the number of bytes processed, or -1 on tag mismatch.
static int cipher_chacha20_poly1305_do_cipher(
    EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in,
    size_t in_len) {
  CIPHER_CHACHA_POLY_CTX *cipher_ctx = CCP_CTX(ctx);
  poly1305_state *poly_ctx = POLY_CTX(cipher_ctx);
  size_t remainder;
  // Lazily derive the Poly1305 key on the first Update/Final of a message.
  if (!cipher_ctx->poly_initialized) {
#ifdef OPENSSL_BIG_ENDIAN
    // |CRYPTO_chacha_20| expects the input as a little-endian byte array.
    uint8_t chacha_key[CHACHA_KEY_LEN];
    uint8_t nonce[CHACHA_IV_LEN];
    for(int i = 0; i < CHACHA_KEY_LEN / 4; i++) {
      CRYPTO_store_u32_le(chacha_key + (i * sizeof(uint32_t)),
                          cipher_ctx->key.key[i]);
    }
    for(size_t i = 0; i < CHACHA_IV_LEN / 4; i++) {
      CRYPTO_store_u32_le(nonce + (i * sizeof(uint32_t)),
                          cipher_ctx->iv[i]);
    }
#else
    const uint8_t *chacha_key = (const uint8_t *) cipher_ctx->key.key;
    const uint8_t *nonce = (const uint8_t *) cipher_ctx->iv;
#endif
    // Obtain the poly1305 key by computing the 0th chacha20 key
    alignas(16) uint8_t poly1305_key[CHACHA_KEY_LEN];
    OPENSSL_memset(poly1305_key, 0, sizeof(poly1305_key));
    CRYPTO_chacha_20(poly1305_key, poly1305_key, sizeof(poly1305_key),
                     chacha_key, nonce, 0);
    // Initialize the poly1305 context
    CRYPTO_poly1305_init(poly_ctx, poly1305_key);
    // Text encryption starts at block counter 1 (counter 0 was consumed
    // above for the Poly1305 key).
    cipher_ctx->key.counter_nonce[0] = 1;
    cipher_ctx->key.partial_len = 0;
    cipher_ctx->len.aad = 0;
    cipher_ctx->len.text = 0;
    cipher_ctx->poly_initialized = 1;
  }
  // Handle an |EVP_CipherUpdate|
  if (in) {
    if (out == NULL) {
      // NULL |out| signals an AAD update
      CRYPTO_poly1305_update(poly_ctx, in, in_len);
      cipher_ctx->len.aad += in_len;
      cipher_ctx->pad_aad = 1;
      return (int32_t) in_len;
    } else {
      // Finish AAD by applying padding
      if (cipher_ctx->pad_aad) {
        remainder = cipher_ctx->len.aad % POLY1305_TAG_LEN;
        if (remainder != 0) {
          static const uint8_t padding[POLY1305_TAG_LEN] = {0};
          CRYPTO_poly1305_update(poly_ctx, padding, sizeof(padding) - remainder);
        }
        cipher_ctx->pad_aad = 0;
      }
      // cipher/plain text |EVP_CipherUpdate|
      if (EVP_CIPHER_CTX_encrypting(ctx)) {
        // Encryption
        cipher_chacha20_do_cipher(ctx, out, in, in_len);
        // Update poly1305 with computed ciphertext
        CRYPTO_poly1305_update(poly_ctx, out, in_len);
        cipher_ctx->len.text += in_len;
      } else {
        // Decryption
        // Update poly1305 with incoming ciphertext
        CRYPTO_poly1305_update(poly_ctx, in, in_len);
        cipher_chacha20_do_cipher(ctx, out, in, in_len);
        cipher_ctx->len.text += in_len;
      }
    }
  }
  // Process an |EVP_CipherFinal|
  if (in == NULL) {
    uint8_t temp[POLY1305_TAG_LEN];
    static const uint8_t padding[POLY1305_TAG_LEN] = {0};
    // Finish AAD in case there were no intermediate Update() calls
    if (cipher_ctx->pad_aad) {
      remainder = cipher_ctx->len.aad % POLY1305_TAG_LEN;
      if (remainder != 0) {
        CRYPTO_poly1305_update(poly_ctx, padding, sizeof(padding) - remainder);
      }
      cipher_ctx->pad_aad = 0;
    }
    // Apply padding for the text
    remainder = cipher_ctx->len.text % POLY1305_TAG_LEN;
    if (remainder != 0) {
      CRYPTO_poly1305_update(poly_ctx, padding, sizeof(padding) - remainder);
    }
    // ChaCha20-Poly1305 passes the AAD and CT lengths through Poly1305 as two
    // 64-bit little-endian integers.
#ifdef OPENSSL_BIG_ENDIAN
    uint8_t length_bytes[2 * sizeof(uint64_t)];
    CRYPTO_store_u64_le(length_bytes, cipher_ctx->len.aad);
    CRYPTO_store_u64_le(length_bytes + sizeof(uint64_t), cipher_ctx->len.text);
#else
    // For a little-endian platform, the struct's layout in memory works as-is.
    const uint8_t *length_bytes = (const uint8_t *) &cipher_ctx->len;
#endif
    CRYPTO_poly1305_update(poly_ctx, length_bytes, 2 * sizeof(uint64_t));
    // Compute the tag and write it to scratch or the cipher context
    CRYPTO_poly1305_finish(poly_ctx, EVP_CIPHER_CTX_encrypting(ctx) ?
                           cipher_ctx->tag : temp);
    cipher_ctx->poly_initialized = 0;
    // Check the tags if we're decrypting
    // NOTE(review): if the caller never set a tag via
    // EVP_CTRL_AEAD_SET_TAG, |tag_len| is 0 and this comparison trivially
    // passes — presumably the EVP layer guarantees a tag is set; verify.
    if (!EVP_CIPHER_CTX_encrypting(ctx)) {
      if (CRYPTO_memcmp(temp, cipher_ctx->tag, cipher_ctx->tag_len)) {
        return -1;
      }
    }
  }
  return (int32_t) in_len;
}
// Scrubs key material, keystream buffer and tag state before the context's
// cipher data is released.
static void cipher_chacha20_poly1305_cleanup(EVP_CIPHER_CTX *ctx) {
  if (ctx->cipher_data == NULL) {
    return;
  }
  OPENSSL_cleanse(ctx->cipher_data, sizeof(CIPHER_CHACHA_POLY_CTX));
}
// cipher_chacha20_poly1305_ctrl handles the EVP control operations:
// allocation/reset (INIT), context copy (COPY), IV-length query, and
// getting/setting the Poly1305 tag. Returns 1 on success, 0 on failure and
// -1 for unsupported controls.
static int32_t cipher_chacha20_poly1305_ctrl(EVP_CIPHER_CTX *ctx, int32_t type,
                                             int32_t arg, void *ptr) {
  CIPHER_CHACHA_POLY_CTX *cipher_ctx = CCP_CTX(ctx);
  switch (type) {
    case EVP_CTRL_INIT:
      // First call allocates |cipher_data|; subsequent calls reset state.
      if (cipher_ctx == NULL) {
        cipher_ctx = ctx->cipher_data = OPENSSL_zalloc(ctx->cipher->ctx_size);
        if (cipher_ctx == NULL) {
          OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INITIALIZATION_ERROR);
          return 0;
        }
      } else {
        cipher_ctx->len.aad = 0;
        cipher_ctx->len.text = 0;
        cipher_ctx->pad_aad = 0;
        cipher_ctx->poly_initialized = 0;
        cipher_ctx->tag_len = 0;
      }
      return 1;
    case EVP_CTRL_COPY:
      if (cipher_ctx && cipher_ctx->poly_initialized) {
        // The poly1305 context needs to be aligned on a 64-byte boundary.
        // The destination context doesn't necessarily have the same
        // alignment so we have to fix that here.
        EVP_CIPHER_CTX *dst = (EVP_CIPHER_CTX *) ptr;
        void *source_base = align_pointer((void *) POLY_CTX(CCP_CTX(ctx)), 64);
        void *dest_base = align_pointer((void *) POLY_CTX(CCP_CTX(dst)), 64);
        // We have 63 bytes of padding for alignment, so the actual size of
        // the poly1305 context is the difference of that and the total buffer.
        size_t length = sizeof(poly1305_state) - 63;
        OPENSSL_memcpy(dest_base, source_base, length);
      }
      return 1;
    case EVP_CTRL_AEAD_SET_IVLEN:
      // Only the RFC 8439 96-bit IV is supported.
      if (arg != CHACHA_IV_LEN) {
        return 0;
      }
      return 1;
    case EVP_CTRL_AEAD_GET_TAG:
      // Only valid after an encryption Final has produced the tag.
      if (arg <= 0 || arg > POLY1305_TAG_LEN ||
          !EVP_CIPHER_CTX_encrypting(ctx)) {
        return 0;
      }
      OPENSSL_memcpy(ptr, cipher_ctx->tag, arg);
      return 1;
    case EVP_CTRL_AEAD_SET_TAG:
      // Expected tag for decryption; verified in Final.
      if (arg <= 0 || arg > POLY1305_TAG_LEN ||
          EVP_CIPHER_CTX_encrypting(ctx)) {
        return 0;
      }
      if (ptr != NULL) {
        OPENSSL_memcpy(cipher_ctx->tag, ptr, arg);
        cipher_ctx->tag_len = arg;
      }
      return 1;
    default:
      return -1;
  }
}
// EVP_CIPHER table for ChaCha20-Poly1305 (RFC 8439) via the custom-cipher
// EVP interface.
static EVP_CIPHER cipher_chacha20_poly1305 = {
    NID_chacha20_poly1305,
    1,  // stream cipher
    CHACHA_KEY_LEN,
    CHACHA_IV_LEN,
    sizeof(CIPHER_CHACHA_POLY_CTX),
    EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_CUSTOM_IV | EVP_CIPH_ALWAYS_CALL_INIT |
    EVP_CIPH_CTRL_INIT | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_FLAG_CUSTOM_CIPHER,
    cipher_chacha20_poly1305_init,
    cipher_chacha20_poly1305_do_cipher,
    cipher_chacha20_poly1305_cleanup,
    cipher_chacha20_poly1305_ctrl
};
// Public accessor for the ChaCha20-Poly1305 EVP_CIPHER.
const EVP_CIPHER *EVP_chacha20_poly1305(void) {
  return &cipher_chacha20_poly1305;
}

View File

@@ -0,0 +1,175 @@
// Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) All rights reserved.
// SPDX-License-Identifier: Apache-2.0
#include <openssl/cipher.h>
#include <openssl/des.h>
#include <openssl/nid.h>
#include "../des/internal.h"
#include "../fipsmodule/cipher/internal.h"
#include "internal.h"
// EVP_DES_KEY holds a single-DES key schedule; the union forces the
// alignment of a double for the schedule.
typedef struct {
  union {
    double align;
    DES_key_schedule ks;
  } ks;
} EVP_DES_KEY;
// Expands |key| into the context's DES key schedule. |iv| and |enc| are
// handled by the generic EVP layer and ignored here.
static int des_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                        const uint8_t *iv, int enc) {
  EVP_DES_KEY *des = (EVP_DES_KEY *)ctx->cipher_data;
  DES_set_key_ex(key, &des->ks.ks);
  return 1;
}
// DES-CBC bulk cipher hook; direction and chaining IV come from |ctx|.
static int des_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                          size_t in_len) {
  EVP_DES_KEY *des = (EVP_DES_KEY *)ctx->cipher_data;
  DES_ncbc_encrypt_ex(in, out, in_len, &des->ks.ks, ctx->iv, ctx->encrypt);
  return 1;
}
// DES-CBC EVP_CIPHER table.
static const EVP_CIPHER evp_des_cbc = {
    .nid = NID_des_cbc,
    .block_size = 8,
    .key_len = 8,
    .iv_len = 8,
    .ctx_size = sizeof(EVP_DES_KEY),
    .flags = EVP_CIPH_CBC_MODE,
    .init = des_init_key,
    .cipher = des_cbc_cipher,
};
// Public accessor for DES-CBC.
const EVP_CIPHER *EVP_des_cbc(void) { return &evp_des_cbc; }
// DES-ECB bulk cipher hook: processes every complete 8-byte block of |in|;
// any trailing partial block is left unprocessed.
static int des_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                          size_t in_len) {
  EVP_DES_KEY *des = (EVP_DES_KEY *)ctx->cipher_data;
  const size_t block_size = ctx->cipher->block_size;
  for (size_t off = 0; off + block_size <= in_len; off += block_size) {
    DES_ecb_encrypt_ex(in + off, out + off, &des->ks.ks, ctx->encrypt);
  }
  return 1;
}
// DES-ECB EVP_CIPHER table (no IV).
static const EVP_CIPHER evp_des_ecb = {
    .nid = NID_des_ecb,
    .block_size = 8,
    .key_len = 8,
    .iv_len = 0,
    .ctx_size = sizeof(EVP_DES_KEY),
    .flags = EVP_CIPH_ECB_MODE,
    .init = des_init_key,
    .cipher = des_ecb_cipher,
};
// Public accessor for DES-ECB.
const EVP_CIPHER *EVP_des_ecb(void) { return &evp_des_ecb; }
// DES_EDE_KEY holds the three key schedules used by 3DES (EDE); the union
// forces the alignment of a double.
typedef struct {
  union {
    double align;
    DES_key_schedule ks[3];
  } ks;
} DES_EDE_KEY;
// Splits the 24-byte |key| into three independent 8-byte DES keys and
// expands each schedule. |iv| and |enc| are handled by the EVP layer.
static int des_ede3_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                             const uint8_t *iv, int enc) {
  DES_EDE_KEY *ede = (DES_EDE_KEY *)ctx->cipher_data;
  for (int i = 0; i < 3; i++) {
    DES_set_key_ex(key + 8 * i, &ede->ks.ks[i]);
  }
  return 1;
}
// 3DES-CBC bulk cipher hook; direction and chaining IV come from |ctx|.
static int des_ede3_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                               const uint8_t *in, size_t in_len) {
  DES_EDE_KEY *ede = (DES_EDE_KEY *)ctx->cipher_data;
  DES_ede3_cbc_encrypt_ex(in, out, in_len, &ede->ks.ks[0], &ede->ks.ks[1],
                          &ede->ks.ks[2], ctx->iv, ctx->encrypt);
  return 1;
}
// 3-key 3DES-CBC EVP_CIPHER table.
static const EVP_CIPHER evp_des_ede3_cbc = {
    .nid = NID_des_ede3_cbc,
    .block_size = 8,
    .key_len = 24,
    .iv_len = 8,
    .ctx_size = sizeof(DES_EDE_KEY),
    .flags = EVP_CIPH_CBC_MODE,
    .init = des_ede3_init_key,
    .cipher = des_ede3_cbc_cipher,
};
// Public accessor for 3-key 3DES-CBC.
const EVP_CIPHER *EVP_des_ede3_cbc(void) { return &evp_des_ede3_cbc; }
// 2-key 3DES key setup: schedules are (K1, K2, K1), i.e. 3DES with the
// first key reused as the third. |iv| and |enc| are handled by the EVP layer.
static int des_ede_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                            const uint8_t *iv, int enc) {
  DES_EDE_KEY *ede = (DES_EDE_KEY *)ctx->cipher_data;
  const uint8_t *k1 = key;
  const uint8_t *k2 = key + 8;
  DES_set_key_ex(k1, &ede->ks.ks[0]);
  DES_set_key_ex(k2, &ede->ks.ks[1]);
  DES_set_key_ex(k1, &ede->ks.ks[2]);
  return 1;
}
// 2-key 3DES-CBC EVP_CIPHER table; reuses the 3DES CBC cipher hook with the
// (K1, K2, K1) schedule set up by |des_ede_init_key|.
static const EVP_CIPHER evp_des_ede_cbc = {
    .nid = NID_des_ede_cbc,
    .block_size = 8,
    .key_len = 16,
    .iv_len = 8,
    .ctx_size = sizeof(DES_EDE_KEY),
    .flags = EVP_CIPH_CBC_MODE,
    .init = des_ede_init_key,
    .cipher = des_ede3_cbc_cipher,
};
// Public accessor for 2-key 3DES-CBC.
const EVP_CIPHER *EVP_des_ede_cbc(void) { return &evp_des_ede_cbc; }
// 3DES-ECB bulk cipher hook: processes every complete 8-byte block of |in|;
// any trailing partial block is left unprocessed.
static int des_ede_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                              const uint8_t *in, size_t in_len) {
  DES_EDE_KEY *ede = (DES_EDE_KEY *)ctx->cipher_data;
  const size_t block_size = ctx->cipher->block_size;
  for (size_t off = 0; off + block_size <= in_len; off += block_size) {
    DES_ecb3_encrypt_ex(in + off, out + off, &ede->ks.ks[0], &ede->ks.ks[1],
                        &ede->ks.ks[2], ctx->encrypt);
  }
  return 1;
}
// 2-key 3DES-ECB EVP_CIPHER table.
static const EVP_CIPHER evp_des_ede = {
    .nid = NID_des_ede_ecb,
    .block_size = 8,
    .key_len = 16,
    .iv_len = 0,
    .ctx_size = sizeof(DES_EDE_KEY),
    .flags = EVP_CIPH_ECB_MODE,
    .init = des_ede_init_key,
    .cipher = des_ede_ecb_cipher,
};
// Public accessor for 2-key 3DES-ECB.
const EVP_CIPHER *EVP_des_ede(void) { return &evp_des_ede; }
// 3-key 3DES-ECB EVP_CIPHER table.
static const EVP_CIPHER evp_des_ede3 = {
    .nid = NID_des_ede3_ecb,
    .block_size = 8,
    .key_len = 24,
    .iv_len = 0,
    .ctx_size = sizeof(DES_EDE_KEY),
    .flags = EVP_CIPH_ECB_MODE,
    .init = des_ede3_init_key,
    .cipher = des_ede_ecb_cipher,
};
// Public accessor for 3-key 3DES-ECB.
const EVP_CIPHER *EVP_des_ede3(void) { return &evp_des_ede3; }
// Alias kept for OpenSSL naming compatibility.
const EVP_CIPHER *EVP_des_ede3_ecb(void) { return EVP_des_ede3(); }

View File

@@ -0,0 +1,37 @@
// Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) All rights reserved.
// SPDX-License-Identifier: Apache-2.0
#include <openssl/cipher.h>
#include <string.h>
#include <openssl/nid.h>
#include "../fipsmodule/cipher/internal.h"
#include "../internal.h"
// No-op key setup for the null cipher; all parameters are ignored.
static int null_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                         const uint8_t *iv, int enc) {
  return 1;
}
// Identity transform: copies |in| to |out| unchanged; a no-op for in-place
// operation.
static int null_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                       const uint8_t *in, size_t in_len) {
  if (out != in) {
    OPENSSL_memcpy(out, in, in_len);
  }
  return 1;
}
// Null cipher EVP_CIPHER table: no key, no IV, no per-context state.
static const EVP_CIPHER n_cipher = {
    .nid = NID_undef,
    .block_size = 1,
    .key_len = 0,
    .iv_len = 0,
    .ctx_size = 0,
    .init = null_init_key,
    .cipher = null_cipher,
};
// Public accessor for the null cipher.
const EVP_CIPHER *EVP_enc_null(void) { return &n_cipher; }

View File

@@ -0,0 +1,402 @@
// Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) All rights reserved.
// SPDX-License-Identifier: Apache-2.0
#include <openssl/cipher.h>
#include <openssl/nid.h>
#include "../fipsmodule/cipher/internal.h"
#include "../internal.h"
#define c2l(c, l) \
do { \
(l) = ((uint32_t)(*((c)++))); \
(l) |= ((uint32_t)(*((c)++))) << 8L; \
(l) |= ((uint32_t)(*((c)++))) << 16L; \
(l) |= ((uint32_t)(*((c)++))) << 24L; \
} while (0)
// c2ln loads the |n| (1..8) bytes at |c| into the little-endian words |l1|
// (bytes 0-3) and |l2| (bytes 4-7), zeroing the bytes not provided. |c| is
// left advanced by |n|. Used for a trailing partial CBC block.
#define c2ln(c, l1, l2, n)                       \
  do {                                           \
    (c) += (n);                                  \
    (l1) = (l2) = 0;                             \
    switch (n) {                                 \
      case 8:                                    \
        (l2) = ((uint32_t)(*(--(c)))) << 24L;    \
        OPENSSL_FALLTHROUGH;                     \
      case 7:                                    \
        (l2) |= ((uint32_t)(*(--(c)))) << 16L;   \
        OPENSSL_FALLTHROUGH;                     \
      case 6:                                    \
        (l2) |= ((uint32_t)(*(--(c)))) << 8L;    \
        OPENSSL_FALLTHROUGH;                     \
      case 5:                                    \
        (l2) |= ((uint32_t)(*(--(c))));          \
        OPENSSL_FALLTHROUGH;                     \
      case 4:                                    \
        (l1) = ((uint32_t)(*(--(c)))) << 24L;    \
        OPENSSL_FALLTHROUGH;                     \
      case 3:                                    \
        (l1) |= ((uint32_t)(*(--(c)))) << 16L;   \
        OPENSSL_FALLTHROUGH;                     \
      case 2:                                    \
        (l1) |= ((uint32_t)(*(--(c)))) << 8L;    \
        OPENSSL_FALLTHROUGH;                     \
      case 1:                                    \
        (l1) |= ((uint32_t)(*(--(c))));          \
    }                                            \
  } while (0)
// l2c ("long to char") writes the 32-bit value |l| to |c| as four
// little-endian bytes, advancing |c| past them.
#define l2c(l, c)                            \
  do {                                       \
    *((c)++) = (uint8_t)(((l)) & 0xff);      \
    *((c)++) = (uint8_t)(((l) >> 8L) & 0xff);  \
    *((c)++) = (uint8_t)(((l) >> 16L) & 0xff); \
    *((c)++) = (uint8_t)(((l) >> 24L) & 0xff); \
  } while (0)
// l2cn stores the low |n| (1..8) bytes of the little-endian words |l1| and
// |l2| to |c|, which is left advanced by |n|. Used for a trailing partial
// CBC block on decryption.
#define l2cn(l1, l2, c, n)                          \
  do {                                              \
    (c) += (n);                                     \
    switch (n) {                                    \
      case 8:                                       \
        *(--(c)) = (uint8_t)(((l2) >> 24L) & 0xff); \
        OPENSSL_FALLTHROUGH;                        \
      case 7:                                       \
        *(--(c)) = (uint8_t)(((l2) >> 16L) & 0xff); \
        OPENSSL_FALLTHROUGH;                        \
      case 6:                                       \
        *(--(c)) = (uint8_t)(((l2) >> 8L) & 0xff);  \
        OPENSSL_FALLTHROUGH;                        \
      case 5:                                       \
        *(--(c)) = (uint8_t)(((l2)) & 0xff);        \
        OPENSSL_FALLTHROUGH;                        \
      case 4:                                       \
        *(--(c)) = (uint8_t)(((l1) >> 24L) & 0xff); \
        OPENSSL_FALLTHROUGH;                        \
      case 3:                                       \
        *(--(c)) = (uint8_t)(((l1) >> 16L) & 0xff); \
        OPENSSL_FALLTHROUGH;                        \
      case 2:                                       \
        *(--(c)) = (uint8_t)(((l1) >> 8L) & 0xff);  \
        OPENSSL_FALLTHROUGH;                        \
      case 1:                                       \
        *(--(c)) = (uint8_t)(((l1)) & 0xff);        \
    }                                               \
  } while (0)
typedef struct rc2_key_st { uint16_t data[64]; } RC2_KEY;
// RC2_encrypt encrypts the 8-byte block in |d| (two little-endian 32-bit
// halves) in place using the expanded key |key|. The structure — 16 "mix"
// rounds with a "mash" step after rounds 5 and 11 — follows RFC 2268.
static void RC2_encrypt(uint32_t *d, RC2_KEY *key) {
  int i, n;
  uint16_t *p0, *p1;
  uint16_t x0, x1, x2, x3, t;
  uint32_t l;
  // Unpack the block into the four 16-bit registers R0..R3.
  l = d[0];
  x0 = (uint16_t)l & 0xffff;
  x1 = (uint16_t)(l >> 16L);
  l = d[1];
  x2 = (uint16_t)l & 0xffff;
  x3 = (uint16_t)(l >> 16L);
  // |n| counts the three groups of mix rounds (5, 6, 5); |i| counts rounds
  // within the current group.
  n = 3;
  i = 5;
  p0 = p1 = &key->data[0];
  for (;;) {
    // One mix round: add, then rotate each register by its fixed amount.
    t = (x0 + (x1 & ~x3) + (x2 & x3) + *(p0++)) & 0xffff;
    x0 = (t << 1) | (t >> 15);
    t = (x1 + (x2 & ~x0) + (x3 & x0) + *(p0++)) & 0xffff;
    x1 = (t << 2) | (t >> 14);
    t = (x2 + (x3 & ~x1) + (x0 & x1) + *(p0++)) & 0xffff;
    x2 = (t << 3) | (t >> 13);
    t = (x3 + (x0 & ~x2) + (x1 & x2) + *(p0++)) & 0xffff;
    x3 = (t << 5) | (t >> 11);
    if (--i == 0) {
      if (--n == 0) {
        break;
      }
      i = (n == 2) ? 6 : 5;
      // Mash round: add key words selected by the low 6 bits of the
      // neighboring register.
      x0 += p1[x3 & 0x3f];
      x1 += p1[x0 & 0x3f];
      x2 += p1[x1 & 0x3f];
      x3 += p1[x2 & 0x3f];
    }
  }
  // Repack R0..R3 into the two 32-bit output words.
  d[0] = (uint32_t)(x0 & 0xffff) | ((uint32_t)(x1 & 0xffff) << 16L);
  d[1] = (uint32_t)(x2 & 0xffff) | ((uint32_t)(x3 & 0xffff) << 16L);
}
// RC2_decrypt decrypts the 8-byte block in |d| (two little-endian 32-bit
// halves) in place, inverting RC2_encrypt: key words are consumed in reverse
// (|p0| starts at the end of the schedule) and each round rotates right and
// subtracts.
static void RC2_decrypt(uint32_t *d, RC2_KEY *key) {
  int i, n;
  uint16_t *p0, *p1;
  uint16_t x0, x1, x2, x3, t;
  uint32_t l;
  // Unpack the block into the four 16-bit registers R0..R3.
  l = d[0];
  x0 = (uint16_t)l & 0xffff;
  x1 = (uint16_t)(l >> 16L);
  l = d[1];
  x2 = (uint16_t)l & 0xffff;
  x3 = (uint16_t)(l >> 16L);
  n = 3;
  i = 5;
  p0 = &key->data[63];
  p1 = &key->data[0];
  for (;;) {
    // Inverse mix round: rotate right (by rotating left 16-k), then
    // subtract; registers are processed in reverse order R3..R0.
    t = ((x3 << 11) | (x3 >> 5)) & 0xffff;
    x3 = (t - (x0 & ~x2) - (x1 & x2) - *(p0--)) & 0xffff;
    t = ((x2 << 13) | (x2 >> 3)) & 0xffff;
    x2 = (t - (x3 & ~x1) - (x0 & x1) - *(p0--)) & 0xffff;
    t = ((x1 << 14) | (x1 >> 2)) & 0xffff;
    x1 = (t - (x2 & ~x0) - (x3 & x0) - *(p0--)) & 0xffff;
    t = ((x0 << 15) | (x0 >> 1)) & 0xffff;
    x0 = (t - (x1 & ~x3) - (x2 & x3) - *(p0--)) & 0xffff;
    if (--i == 0) {
      if (--n == 0) {
        break;
      }
      i = (n == 2) ? 6 : 5;
      // Inverse mash round: subtract the key words added by RC2_encrypt.
      x3 = (x3 - p1[x2 & 0x3f]) & 0xffff;
      x2 = (x2 - p1[x1 & 0x3f]) & 0xffff;
      x1 = (x1 - p1[x0 & 0x3f]) & 0xffff;
      x0 = (x0 - p1[x3 & 0x3f]) & 0xffff;
    }
  }
  // Repack R0..R3 into the two 32-bit output words.
  d[0] = (uint32_t)(x0 & 0xffff) | ((uint32_t)(x1 & 0xffff) << 16L);
  d[1] = (uint32_t)(x2 & 0xffff) | ((uint32_t)(x3 & 0xffff) << 16L);
}
// RC2_cbc_encrypt en/decrypts |length| bytes from |in| to |out| with RC2 in
// CBC mode, updating the 8-byte |iv| in place so calls may be chained. A
// trailing partial block is handled with the historical SSLeay semantics:
// on encryption it is zero-extended via c2ln before encrypting; on
// decryption only the partial bytes are written back via l2cn.
static void RC2_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                            RC2_KEY *ks, uint8_t *iv, int encrypt) {
  uint32_t tin0, tin1;
  uint32_t tout0, tout1, xor0, xor1;
  long l = length;
  uint32_t tin[2];
  if (encrypt) {
    // Load the IV as the initial chaining value; c2l advances |iv|, so step
    // it back for the final IV write-back below.
    c2l(iv, tout0);
    c2l(iv, tout1);
    iv -= 8;
    for (l -= 8; l >= 0; l -= 8) {
      c2l(in, tin0);
      c2l(in, tin1);
      tin0 ^= tout0;
      tin1 ^= tout1;
      tin[0] = tin0;
      tin[1] = tin1;
      RC2_encrypt(tin, ks);
      tout0 = tin[0];
      l2c(tout0, out);
      tout1 = tin[1];
      l2c(tout1, out);
    }
    // Final partial block, if |length| was not a multiple of 8.
    if (l != -8) {
      c2ln(in, tin0, tin1, l + 8);
      tin0 ^= tout0;
      tin1 ^= tout1;
      tin[0] = tin0;
      tin[1] = tin1;
      RC2_encrypt(tin, ks);
      tout0 = tin[0];
      l2c(tout0, out);
      tout1 = tin[1];
      l2c(tout1, out);
    }
    // Store the last ciphertext block as the next IV.
    l2c(tout0, iv);
    l2c(tout1, iv);
  } else {
    // Decryption: |xor0|/|xor1| hold the previous ciphertext block.
    c2l(iv, xor0);
    c2l(iv, xor1);
    iv -= 8;
    for (l -= 8; l >= 0; l -= 8) {
      c2l(in, tin0);
      tin[0] = tin0;
      c2l(in, tin1);
      tin[1] = tin1;
      RC2_decrypt(tin, ks);
      tout0 = tin[0] ^ xor0;
      tout1 = tin[1] ^ xor1;
      l2c(tout0, out);
      l2c(tout1, out);
      xor0 = tin0;
      xor1 = tin1;
    }
    // Final partial block: decrypt a full block but emit only |l + 8| bytes.
    if (l != -8) {
      c2l(in, tin0);
      tin[0] = tin0;
      c2l(in, tin1);
      tin[1] = tin1;
      RC2_decrypt(tin, ks);
      tout0 = tin[0] ^ xor0;
      tout1 = tin[1] ^ xor1;
      l2cn(tout0, tout1, out, l + 8);
      xor0 = tin0;
      xor1 = tin1;
    }
    // Store the last ciphertext block as the next IV.
    l2c(xor0, iv);
    l2c(xor1, iv);
  }
  // Historical best-effort scrub of the temporary block. NOTE(review): a
  // plain store like this may be optimized away; not a guaranteed zeroize.
  tin[0] = tin[1] = 0;
}
// key_table is RC2's PITABLE: a fixed permutation of 0..255 used during key
// expansion (RFC 2268, section 2).
static const uint8_t key_table[256] = {
    0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79,
    0x4a, 0xa0, 0xd8, 0x9d, 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e,
    0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, 0x17, 0x9a, 0x59, 0xf5,
    0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32,
    0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22,
    0x5c, 0x6b, 0x4e, 0x82, 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c,
    0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, 0x12, 0x75, 0xca, 0x1f,
    0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26,
    0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b,
    0xbc, 0x94, 0x43, 0x03, 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7,
    0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, 0x08, 0xe8, 0xea, 0xde,
    0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a,
    0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e,
    0x04, 0x18, 0xa4, 0xec, 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc,
    0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, 0x99, 0x7c, 0x3a, 0x85,
    0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31,
    0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10,
    0x67, 0x6c, 0xba, 0xc9, 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c,
    0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, 0x0d, 0x38, 0x34, 0x1b,
    0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e,
    0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68,
    0xfe, 0x7f, 0xc1, 0xad,
};
// RC2_set_key expands |len| bytes of |data| (clamped to 128) into |key|'s
// 64-word schedule, limiting the effective key strength to |bits| bits
// (clamped to 1..1024, with non-positive values treated as 1024).
static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) {
  int i, j;
  uint8_t *k;
  uint16_t *ki;
  unsigned int c, d;
  // The 128-byte expansion buffer aliases the 64-word schedule.
  k = (uint8_t *)&key->data[0];
  *k = 0;  // for if there is a zero length key
  if (len > 128) {
    len = 128;
  }
  if (bits <= 0) {
    bits = 1024;
  }
  if (bits > 1024) {
    bits = 1024;
  }
  for (i = 0; i < len; i++) {
    k[i] = data[i];
  }
  // Expand the key bytes to 128 bytes via the PITABLE.
  d = k[len - 1];
  j = 0;
  for (i = len; i < 128; i++, j++) {
    d = key_table[(k[j] + d) & 0xff];
    k[i] = d;
  }
  // Reduce the effective key strength to |bits| bits by rewriting the
  // leading bytes (RFC 2268, section 2 key expansion).
  j = (bits + 7) >> 3;
  i = 128 - j;
  c = (0xff >> (-bits & 0x07));
  d = key_table[k[i] & c];
  k[i] = d;
  while (i--) {
    d = key_table[k[i + j] ^ d];
    k[i] = d;
  }
  // Repack the 128 bytes into 64 little-endian uint16_t words, in place.
  ki = &(key->data[63]);
  for (i = 127; i >= 0; i -= 2) {
    *(ki--) = ((k[i] << 8) | k[i - 1]) & 0xffff;
  }
}
// EVP_RC2_KEY carries the RC2 key schedule together with the configured
// effective key strength in bits.
typedef struct {
  int key_bits;  // effective key bits
  RC2_KEY ks;    // key schedule
} EVP_RC2_KEY;
// rc2_init_key expands |key| into the RC2 key schedule, using the effective
// key strength previously configured on the context (see rc2_ctrl). RC2's
// IV handling is done by the generic CBC layer, so |iv| is unused here.
static int rc2_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                        const uint8_t *iv, int enc) {
  (void)iv;
  (void)enc;
  EVP_RC2_KEY *k = (EVP_RC2_KEY *)ctx->cipher_data;
  RC2_set_key(&k->ks, EVP_CIPHER_CTX_key_length(ctx), key, k->key_bits);
  return 1;
}
// rc2_cbc_cipher en/decrypts |inl| bytes in CBC mode, feeding the legacy
// RC2_cbc_encrypt in bounded chunks so its length argument stays small.
static int rc2_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                          size_t inl) {
  EVP_RC2_KEY *key = (EVP_RC2_KEY *)ctx->cipher_data;
  static const size_t kChunkSize = 0x10000;
  while (inl > 0) {
    const size_t todo = inl < kChunkSize ? inl : kChunkSize;
    RC2_cbc_encrypt(in, out, todo, &key->ks, ctx->iv, ctx->encrypt);
    in += todo;
    out += todo;
    inl -= todo;
  }
  return 1;
}
// rc2_ctrl handles RC2-specific control operations: EVP_CTRL_INIT derives
// the effective key bits from the configured key length, and
// EVP_CTRL_SET_RC2_KEY_BITS sets them explicitly. Returns 1 on success and
// -1 for unrecognized operations.
static int rc2_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr) {
  (void)ptr;
  EVP_RC2_KEY *key = (EVP_RC2_KEY *)ctx->cipher_data;
  if (type == EVP_CTRL_INIT) {
    key->key_bits = EVP_CIPHER_CTX_key_length(ctx) * 8;
    return 1;
  }
  if (type == EVP_CTRL_SET_RC2_KEY_BITS) {
    // Should be overridden by later call to |EVP_CTRL_INIT|, but
    // people call it, so it may as well work.
    key->key_bits = arg;
    return 1;
  }
  return -1;
}
// rc2_40_cbc is RC2-CBC with a 40-bit (export-grade, legacy-only) default
// key. EVP_CIPH_CTRL_INIT requests an EVP_CTRL_INIT call so rc2_ctrl can
// derive the effective key bits from the key length.
static const EVP_CIPHER rc2_40_cbc = {
    .nid = NID_rc2_40_cbc,
    .block_size = 8,
    .key_len = 5 /* 40 bit */,
    .iv_len = 8,
    .ctx_size = sizeof(EVP_RC2_KEY),
    .flags = EVP_CIPH_CBC_MODE | EVP_CIPH_VARIABLE_LENGTH | EVP_CIPH_CTRL_INIT,
    .init = rc2_init_key,
    .cipher = rc2_cbc_cipher,
    .ctrl = rc2_ctrl,
};
const EVP_CIPHER *EVP_rc2_40_cbc(void) { return &rc2_40_cbc; }
// rc2_cbc is RC2-CBC with a 128-bit default key; EVP_CIPH_VARIABLE_LENGTH
// permits other key sizes and EVP_CIPH_CTRL_INIT lets rc2_ctrl derive the
// effective key bits from the key length.
static const EVP_CIPHER rc2_cbc = {
    .nid = NID_rc2_cbc,
    .block_size = 8,
    .key_len = 16 /* 128 bit */,
    .iv_len = 8,
    .ctx_size = sizeof(EVP_RC2_KEY),
    .flags = EVP_CIPH_CBC_MODE | EVP_CIPH_VARIABLE_LENGTH | EVP_CIPH_CTRL_INIT,
    .init = rc2_init_key,
    .cipher = rc2_cbc_cipher,
    .ctrl = rc2_ctrl,
};
const EVP_CIPHER *EVP_rc2_cbc(void) { return &rc2_cbc; }

View File

@@ -0,0 +1,41 @@
// Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) All rights reserved.
// SPDX-License-Identifier: Apache-2.0
#include <assert.h>
#include <string.h>
#include <openssl/cipher.h>
#include <openssl/nid.h>
#include <openssl/rc4.h>
#include "../fipsmodule/cipher/internal.h"
// rc4_init_key schedules the RC4 key using the context's configured key
// length. RC4 is a stream cipher with no IV, so |iv| is ignored.
static int rc4_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                        const uint8_t *iv, int enc) {
  (void)iv;
  (void)enc;
  RC4_KEY *state = (RC4_KEY *)ctx->cipher_data;
  RC4_set_key(state, EVP_CIPHER_CTX_key_length(ctx), key);
  return 1;
}
// rc4_cipher applies |in_len| bytes of RC4 keystream to |in|, writing the
// result to |out|. Encryption and decryption are the same operation.
static int rc4_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                      size_t in_len) {
  RC4_KEY *state = (RC4_KEY *)ctx->cipher_data;
  RC4(state, in_len, in, out);
  return 1;
}
// rc4 describes RC4 as a 1-byte "block" stream cipher with a 128-bit
// default key; EVP_CIPH_VARIABLE_LENGTH permits other key sizes.
// WARNING: RC4 is cryptographically broken and retained for legacy
// interoperability only.
static const EVP_CIPHER rc4 = {
    .nid = NID_rc4,
    .block_size = 1,
    .key_len = 16,
    .iv_len = 0,
    .ctx_size = sizeof(RC4_KEY),
    .flags = EVP_CIPH_VARIABLE_LENGTH,
    .init = rc4_init_key,
    .cipher = rc4_cipher,
};
const EVP_CIPHER *EVP_rc4(void) { return &rc4; }

View File

@@ -0,0 +1,705 @@
// Copyright (c) 2014, Google Inc.
// SPDX-License-Identifier: ISC
#include <assert.h>
#include <limits.h>
#include <string.h>
#include <openssl/aead.h>
#include <openssl/cipher.h>
#include <openssl/err.h>
#include <openssl/hmac.h>
#include <openssl/md5.h>
#include <openssl/mem.h>
#include <openssl/sha.h>
#include <openssl/type_check.h>
#include "../fipsmodule/cipher/internal.h"
#include "../internal.h"
#include "internal.h"
// AEAD_TLS_CTX is the state behind a TLS MAC-then-encrypt "AEAD": a cipher
// context paired with the HMAC that authenticates records.
typedef struct {
  EVP_CIPHER_CTX cipher_ctx;
  HMAC_CTX hmac_ctx;
  // mac_key is the portion of the key used for the MAC. It is retained
  // separately for the constant-time CBC code.
  uint8_t mac_key[EVP_MAX_MD_SIZE];
  uint8_t mac_key_len;
  // implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit
  // IV.
  char implicit_iv;
} AEAD_TLS_CTX;
// mac_key_len above must fit in a uint8_t.
OPENSSL_STATIC_ASSERT(EVP_MAX_MD_SIZE < 256,
                      mac_key_len_does_not_fit_in_uint8_t)
// aead_tls_cleanup releases the cipher and HMAC state owned by |ctx|, frees
// the AEAD_TLS_CTX itself, and clears the context's state pointer.
static void aead_tls_cleanup(EVP_AEAD_CTX *ctx) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->state.ptr;
  // The two cleanups are independent; order does not matter.
  HMAC_CTX_cleanup(&tls_ctx->hmac_ctx);
  EVP_CIPHER_CTX_cleanup(&tls_ctx->cipher_ctx);
  OPENSSL_free(tls_ctx);
  ctx->state.ptr = NULL;
}
// aead_tls_init configures |ctx| for TLS MAC-then-encrypt with the given
// |cipher| and digest |md|. The AEAD key is laid out as
// MAC key || cipher key [|| IV when |implicit_iv| is set]. |tag_len| must be
// EVP_AEAD_DEFAULT_TAG_LENGTH or the digest size. Returns 1 on success, 0 on
// error (with the context cleaned up).
static int aead_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len,
                         size_t tag_len, enum evp_aead_direction_t dir,
                         const EVP_CIPHER *cipher, const EVP_MD *md,
                         char implicit_iv) {
  if (tag_len != EVP_AEAD_DEFAULT_TAG_LENGTH && tag_len != EVP_MD_size(md)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_TAG_SIZE);
    return 0;
  }
  if (key_len != EVP_AEAD_key_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;
  }
  size_t mac_key_len = EVP_MD_size(md);
  size_t enc_key_len = EVP_CIPHER_key_length(cipher);
  assert(mac_key_len + enc_key_len +
             (implicit_iv ? EVP_CIPHER_iv_length(cipher) : 0) ==
         key_len);
  AEAD_TLS_CTX *tls_ctx = OPENSSL_malloc(sizeof(AEAD_TLS_CTX));
  if (tls_ctx == NULL) {
    return 0;
  }
  ctx->state.ptr = (void *)tls_ctx;
  EVP_CIPHER_CTX_init(&tls_ctx->cipher_ctx);
  HMAC_CTX_init(&tls_ctx->hmac_ctx);
  assert(mac_key_len <= EVP_MAX_MD_SIZE);
  // Keep a copy of the MAC key for the constant-time CBC path.
  OPENSSL_memcpy(tls_ctx->mac_key, key, mac_key_len);
  tls_ctx->mac_key_len = (uint8_t)mac_key_len;
  tls_ctx->implicit_iv = implicit_iv;
  if (!EVP_CipherInit_ex(&tls_ctx->cipher_ctx, cipher, NULL, &key[mac_key_len],
                         implicit_iv ? &key[mac_key_len + enc_key_len] : NULL,
                         dir == evp_aead_seal) ||
      !HMAC_Init_ex(&tls_ctx->hmac_ctx, key, mac_key_len, md, NULL)) {
    aead_tls_cleanup(ctx);
    return 0;
  }
  // TLS padding is computed and appended manually in seal_scatter, so
  // disable the cipher's built-in padding.
  EVP_CIPHER_CTX_set_padding(&tls_ctx->cipher_ctx, 0);
  return 1;
}
// aead_tls_tag_len returns the "tag" length for |in_len| bytes of input: the
// HMAC output plus, for CBC ciphers, the TLS padding needed to fill the
// final block.
static size_t aead_tls_tag_len(const EVP_AEAD_CTX *ctx, const size_t in_len,
                               const size_t extra_in_len) {
  assert(extra_in_len == 0);
  const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->state.ptr;
  const size_t hmac_len = HMAC_size(&tls_ctx->hmac_ctx);
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE) {
    // The NULL cipher.
    return hmac_len;
  }
  const size_t block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  // An overflow of |in_len + hmac_len| doesn't affect the result mod
  // |block_size|, provided that |block_size| is a smaller power of two.
  assert(block_size != 0 && (block_size & (block_size - 1)) == 0);
  // TLS always pads: a full extra block when already block-aligned.
  const size_t pad_len = block_size - (in_len + hmac_len) % block_size;
  return hmac_len + pad_len;
}
// aead_tls_seal_scatter seals a TLS record MAC-then-encrypt style: it first
// computes HMAC(ad || length || in), then encrypts the plaintext followed by
// the MAC and (for CBC) the TLS padding, splitting the ciphertext between
// |out| (|in_len| bytes) and |out_tag|. Returns 1 on success, 0 on error.
static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                 uint8_t *out_tag, size_t *out_tag_len,
                                 const size_t max_out_tag_len,
                                 const uint8_t *nonce, const size_t nonce_len,
                                 const uint8_t *in, const size_t in_len,
                                 const uint8_t *extra_in,
                                 const size_t extra_in_len, const uint8_t *ad,
                                 const size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->state.ptr;
  if (!tls_ctx->cipher_ctx.encrypt) {
    // Unlike a normal AEAD, a TLS AEAD may only be used in one direction.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }
  if (in_len > INT_MAX) {
    // EVP_CIPHER takes int as input.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }
  if (max_out_tag_len < aead_tls_tag_len(ctx, in_len, extra_in_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }
  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }
  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }
  // To allow for CBC mode which changes cipher length, |ad| doesn't include the
  // length for legacy ciphers.
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);
  // Compute the MAC. This must be first in case the operation is being done
  // in-place.
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad, ad_len) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad_extra, sizeof(ad_extra)) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, in, in_len) ||
      !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len)) {
    return 0;
  }
  // Configure the explicit IV.
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }
  // Encrypt the input.
  int len;
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  // Feed the MAC into the cipher in two steps. First complete the final partial
  // block from encrypting the input and split the result between |out| and
  // |out_tag|. Then feed the rest.
  const size_t early_mac_len =
      (block_size - (in_len % block_size)) % block_size;
  if (early_mac_len != 0) {
    assert(len + block_size - early_mac_len == in_len);
    uint8_t buf[EVP_MAX_BLOCK_LENGTH];
    int buf_len;
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, buf, &buf_len, mac,
                           (int)early_mac_len)) {
      return 0;
    }
    assert(buf_len == (int)block_size);
    OPENSSL_memcpy(out + len, buf, block_size - early_mac_len);
    OPENSSL_memcpy(out_tag, buf + block_size - early_mac_len, early_mac_len);
  }
  size_t tag_len = early_mac_len;
  // Encrypt the remainder of the MAC directly into |out_tag|.
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out_tag + tag_len, &len,
                         mac + tag_len, mac_len - tag_len)) {
    return 0;
  }
  tag_len += len;
  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);
    // Compute padding and feed that into the cipher.
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    OPENSSL_memset(padding, padding_len - 1, padding_len);
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out_tag + tag_len, &len,
                           padding, (int)padding_len)) {
      return 0;
    }
    tag_len += len;
  }
  if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out_tag + tag_len, &len)) {
    return 0;
  }
  assert(len == 0);  // Padding is explicit.
  assert(tag_len == aead_tls_tag_len(ctx, in_len, extra_in_len));
  *out_tag_len = tag_len;
  return 1;
}
// aead_tls_open authenticates and decrypts a TLS record. For CBC ciphers the
// padding and MAC checks run in constant time to avoid CBC padding-oracle
// style leaks (see the comments below). On success, writes the plaintext
// (without MAC or padding) to |out| and returns 1; returns 0 on any failure.
static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
                         size_t max_out_len, const uint8_t *nonce,
                         size_t nonce_len, const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->state.ptr;
  if (tls_ctx->cipher_ctx.encrypt) {
    // Unlike a normal AEAD, a TLS AEAD may only be used in one direction.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }
  if (in_len < HMAC_size(&tls_ctx->hmac_ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }
  if (max_out_len < in_len) {
    // This requires that the caller provide space for the MAC, even though it
    // will always be removed on return.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }
  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }
  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }
  if (in_len > INT_MAX) {
    // EVP_CIPHER takes int as input.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }
  // Configure the explicit IV.
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }
  // Decrypt to get the plaintext + MAC + padding.
  size_t total = 0;
  int len;
  if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total += len;
  if (!EVP_DecryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;
  assert(total == in_len);
  // Mark the decrypted buffer secret for constant-time analysis tooling.
  CONSTTIME_SECRET(out, total);
  // Remove CBC padding. Code from here on is timing-sensitive with respect to
  // |padding_ok| and |data_plus_mac_len| for CBC ciphers.
  size_t data_plus_mac_len;
  crypto_word_t padding_ok;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
    if (!EVP_tls_cbc_remove_padding(
            &padding_ok, &data_plus_mac_len, out, total,
            EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
            HMAC_size(&tls_ctx->hmac_ctx))) {
      // Publicly invalid. This can be rejected in non-constant time.
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
  } else {
    padding_ok = CONSTTIME_TRUE_W;
    data_plus_mac_len = total;
    // |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
    // already been checked against the MAC size at the top of the function.
    assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
  }
  size_t data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);
  // At this point, if the padding is valid, the first |data_plus_mac_len| bytes
  // after |out| are the plaintext and MAC. Otherwise, |data_plus_mac_len| is
  // still large enough to extract a MAC, but it will be irrelevant.
  // To allow for CBC mode which changes cipher length, |ad| doesn't include the
  // length for legacy ciphers.
  uint8_t ad_fixed[13];
  OPENSSL_memcpy(ad_fixed, ad, 11);
  ad_fixed[11] = (uint8_t)(data_len >> 8);
  ad_fixed[12] = (uint8_t)(data_len & 0xff);
  ad_len += 2;
  // Compute the MAC and extract the one in the record.
  uint8_t mac[EVP_MAX_MD_SIZE];
  size_t mac_len;
  uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
  uint8_t *record_mac;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
    if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
                                   ad_fixed, out, data_len, total,
                                   tls_ctx->mac_key, tls_ctx->mac_key_len)) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
  } else {
    // We should support the constant-time path for all CBC-mode ciphers
    // implemented.
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);
    unsigned mac_len_u;
    if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, ad_fixed, ad_len) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, out, data_len) ||
        !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len_u)) {
      return 0;
    }
    mac_len = mac_len_u;
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = &out[data_len];
  }
  // Perform the MAC check and the padding check in constant-time. It should be
  // safe to simply perform the padding check first, but it would not be under a
  // different choice of MAC location on padding failure. See
  // EVP_tls_cbc_remove_padding.
  crypto_word_t good =
      constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), 0);
  good &= padding_ok;
  CONSTTIME_DECLASSIFY(&good, sizeof(good));
  if (!good) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }
  // The record is authentic, so its length and contents are now public.
  CONSTTIME_DECLASSIFY(&data_len, sizeof(data_len));
  CONSTTIME_DECLASSIFY(out, data_len);
  // End of timing-sensitive code.
  *out_len = data_len;
  return 1;
}
// The functions below are thin wrappers binding |aead_tls_init| to a fixed
// cipher/digest pair. The |_implicit_iv| variants are the pre-TLS-1.1 form,
// where the IV comes from the key material rather than a per-record nonce.
static int aead_aes_128_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 0);
}
static int aead_aes_128_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 1);
}
static int aead_aes_256_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 0);
}
static int aead_aes_256_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 1);
}
static int aead_aes_128_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha256(), 0);
}
static int aead_aes_128_cbc_sha256_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha256(), 1);
}
static int aead_aes_256_cbc_sha384_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha384(), 0);
}
static int aead_des_ede3_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx,
                                           const uint8_t *key, size_t key_len,
                                           size_t tag_len,
                                           enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 0);
}
static int aead_des_ede3_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 1);
}
// aead_tls_get_iv exposes the cipher's current IV, used by the implicit-IV
// TLS AEAD variants. Returns 0 when the underlying cipher has no usable IV
// (length 0 or 1), 1 otherwise.
static int aead_tls_get_iv(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv,
                           size_t *out_iv_len) {
  const AEAD_TLS_CTX *tls = (AEAD_TLS_CTX *)ctx->state.ptr;
  const size_t iv_len = EVP_CIPHER_CTX_iv_length(&tls->cipher_ctx);
  if (iv_len <= 1) {
    return 0;
  }
  *out_iv_len = iv_len;
  *out_iv = tls->cipher_ctx.iv;
  return 1;
}
// aead_null_sha1_tls_init configures the NULL-cipher (MAC-only) TLS AEAD.
static int aead_null_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                   size_t key_len, size_t tag_len,
                                   enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_enc_null(),
                       EVP_sha1(), 1 /* implicit iv */);
}
// The EVP_AEAD vtables below pair a legacy CBC (or NULL) cipher with an HMAC
// for the TLS MAC-then-encrypt cipher suites. The |_implicit_iv| variants
// take the IV as part of the key (nonce length 0) and expose it via
// |aead_tls_get_iv|; the explicit-IV variants take it as the nonce.
static const EVP_AEAD aead_aes_128_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 16,        // key len (SHA1 + AES128)
    16,                            // nonce len (IV)
    16 + SHA_DIGEST_LENGTH,        // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,             // max tag length
    AEAD_AES_128_CBC_SHA1_TLS_ID,  // evp_aead_id
    0,                             // seal_scatter_supports_extra_in
    NULL,                          // init
    aead_aes_128_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 16 + 16,               // key len (SHA1 + AES128 + IV)
    0,                                         // nonce len
    16 + SHA_DIGEST_LENGTH,                    // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,                         // max tag length
    AEAD_AES_128_CBC_SHA1_TLS_IMPLICIT_IV_ID,  // evp_aead_id
    0,                                         // seal_scatter_supports_extra_in
    NULL,                                      // init
    aead_aes_128_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,             // open_gather
    aead_tls_get_iv,  // get_iv
    aead_tls_tag_len,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
static const EVP_AEAD aead_aes_256_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 32,        // key len (SHA1 + AES256)
    16,                            // nonce len (IV)
    16 + SHA_DIGEST_LENGTH,        // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,             // max tag length
    AEAD_AES_256_CBC_SHA1_TLS_ID,  // evp_aead_id
    0,                             // seal_scatter_supports_extra_in
    NULL,                          // init
    aead_aes_256_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 32 + 16,               // key len (SHA1 + AES256 + IV)
    0,                                         // nonce len
    16 + SHA_DIGEST_LENGTH,                    // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,                         // max tag length
    AEAD_AES_256_CBC_SHA1_TLS_IMPLICIT_IV_ID,  // evp_aead_id
    0,                                         // seal_scatter_supports_extra_in
    NULL,                                      // init
    aead_aes_256_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,             // open_gather
    aead_tls_get_iv,  // get_iv
    aead_tls_tag_len,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
static const EVP_AEAD aead_aes_128_cbc_sha256_tls = {
    SHA256_DIGEST_LENGTH + 16,       // key len (SHA256 + AES128)
    16,                              // nonce len (IV)
    16 + SHA256_DIGEST_LENGTH,       // overhead (padding + SHA256)
    SHA256_DIGEST_LENGTH,            // max tag length
    AEAD_AES_128_CBC_SHA256_TLS_ID,  // evp_aead_id
    0,                               // seal_scatter_supports_extra_in
    NULL,                            // init
    aead_aes_128_cbc_sha256_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
static const EVP_AEAD aead_aes_128_cbc_sha256_tls_implicit_iv = {
    SHA256_DIGEST_LENGTH + 16 + 16,  // key len (SHA256 + AES128 + IV)
    0,                               // nonce len
    16 + SHA256_DIGEST_LENGTH,       // overhead (padding + SHA256)
    SHA256_DIGEST_LENGTH,            // max tag length
    AEAD_AES_128_CBC_SHA256_TLS_IMPLICIT_IV_ID,  // evp_aead_id
    0,  // seal_scatter_supports_extra_in
    NULL,  // init
    aead_aes_128_cbc_sha256_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,             // open_gather
    aead_tls_get_iv,  // get_iv
    aead_tls_tag_len,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
static const EVP_AEAD aead_aes_256_cbc_sha384_tls = {
    SHA384_DIGEST_LENGTH + 32,       // key len (SHA384 + AES256)
    16,                              // nonce len (IV)
    16 + SHA384_DIGEST_LENGTH,       // overhead (padding + SHA384)
    SHA384_DIGEST_LENGTH,            // max tag length
    AEAD_AES_256_CBC_SHA384_TLS_ID,  // evp_aead_id
    0,                               // seal_scatter_supports_extra_in
    NULL,                            // init
    aead_aes_256_cbc_sha384_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 24,         // key len (SHA1 + 3DES)
    8,                              // nonce len (IV)
    8 + SHA_DIGEST_LENGTH,          // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,              // max tag length
    AEAD_DES_EDE3_CBC_SHA1_TLS_ID,  // evp_aead_id
    0,                              // seal_scatter_supports_extra_in
    NULL,                           // init
    aead_des_ede3_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
static const EVP_AEAD aead_des_ede3_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 24 + 8,                 // key len (SHA1 + 3DES + IV)
    0,                                          // nonce len
    8 + SHA_DIGEST_LENGTH,                      // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,                          // max tag length
    AEAD_DES_EDE3_CBC_SHA1_TLS_IMPLICIT_IV_ID,  // evp_aead_id
    0,     // seal_scatter_supports_extra_in
    NULL,  // init
    aead_des_ede3_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,             // open_gather
    aead_tls_get_iv,  // get_iv
    aead_tls_tag_len,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
static const EVP_AEAD aead_null_sha1_tls = {
    SHA_DIGEST_LENGTH,      // key len
    0,                      // nonce len
    SHA_DIGEST_LENGTH,      // overhead (SHA1)
    SHA_DIGEST_LENGTH,      // max tag length
    AEAD_NULL_SHA1_TLS_ID,  // evp_aead_id
    0,                      // seal_scatter_supports_extra_in
    NULL,                   // init
    aead_null_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
    NULL /* serialize_state */,
    NULL /* deserialize_state */,
};
// Accessors returning the singleton TLS MAC-then-encrypt AEAD vtables
// defined above.
const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void) { return &aead_aes_128_cbc_sha1_tls; }
const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void) { return &aead_aes_128_cbc_sha1_tls_implicit_iv; }
const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls(void) { return &aead_aes_256_cbc_sha1_tls; }
const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls_implicit_iv(void) { return &aead_aes_256_cbc_sha1_tls_implicit_iv; }
const EVP_AEAD *EVP_aead_aes_128_cbc_sha256_tls(void) { return &aead_aes_128_cbc_sha256_tls; }
const EVP_AEAD *EVP_aead_aes_128_cbc_sha256_tls_implicit_iv(void) { return &aead_aes_128_cbc_sha256_tls_implicit_iv; }
const EVP_AEAD *EVP_aead_aes_256_cbc_sha384_tls(void) { return &aead_aes_256_cbc_sha384_tls; }
const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls(void) { return &aead_des_ede3_cbc_sha1_tls; }
const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv(void) { return &aead_des_ede3_cbc_sha1_tls_implicit_iv; }
const EVP_AEAD *EVP_aead_null_sha1_tls(void) {
  return &aead_null_sha1_tls;
}

View File

@@ -0,0 +1,197 @@
// Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) All rights reserved.
// SPDX-License-Identifier: Apache-2.0
#ifndef OPENSSL_HEADER_CIPHER_EXTRA_INTERNAL_H
#define OPENSSL_HEADER_CIPHER_EXTRA_INTERNAL_H
#include <stdlib.h>
#include <openssl/base.h>
#include <openssl/cipher.h>
#include <openssl/type_check.h>
#include "../internal.h"
#include "../fipsmodule/cpucap/internal.h"
#if defined(__cplusplus)
extern "C" {
#endif
// x86_64_assembly_implementation_FOR_TESTING reports whether the x86_64
// assembly path is compiled in. Exported for tests only.
OPENSSL_EXPORT int x86_64_assembly_implementation_FOR_TESTING(void);
// The stitched AES-CBC + HMAC-SHA implementation requires x86_64 assembly
// and is excluded in FIPS builds and with assemblers lacking AVX support.
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \
    !defined(MY_ASSEMBLER_IS_TOO_OLD_FOR_AVX) && !defined(AWSLC_FIPS)
#define AES_CBC_HMAC_SHA_STITCH
// TLS1_1_VERSION is also defined in ssl.h.
#define TLS1_1_VERSION 0x0302
// NOTE(review): presumably a sentinel for "payload length not yet known" —
// confirm against the stitch implementation.
#define NO_PAYLOAD_LENGTH ((size_t)-1)
#define HMAC_KEY_SIZE 64
#endif
// EVP_tls_cbc_get_padding determines the padding from the decrypted, TLS, CBC
// record in |in|. This decrypted record should not include any "decrypted"
// explicit IV. If the record is publicly invalid, it returns zero. Otherwise,
// it returns one and sets |*out_padding_ok| to all ones (0xfff..f) if the
// padding is valid and zero otherwise. It then sets |*out_len| to the length
// with the padding removed or |in_len| if invalid.
//
// If the function returns one, it runs in time independent of the contents of
// |in|. It is also guaranteed that |*out_len| >= |mac_size|, satisfying
// |EVP_tls_cbc_copy_mac|'s precondition.
int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len,
const uint8_t *in, size_t in_len,
size_t block_size, size_t mac_size);
// EVP_tls_cbc_copy_mac copies |md_size| bytes from the end of the first
// |in_len| bytes of |in| to |out| in constant time (independent of the concrete
// value of |in_len|, which may vary within a 256-byte window). |in| must point
// to a buffer of |orig_len| bytes.
//
// On entry:
// orig_len >= in_len >= md_size
// md_size <= EVP_MAX_MD_SIZE
void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
size_t in_len, size_t orig_len);
// EVP_tls_cbc_record_digest_supported returns 1 iff |md| is a hash function
// which EVP_tls_cbc_digest_record supports.
int EVP_tls_cbc_record_digest_supported(const EVP_MD *md);
// EVP_final_with_secret_suffix_sha1, EVP_final_with_secret_suffix_sha256, and
// EVP_final_with_secret_suffix_sha384 compute the result of hashing |len|
// bytes from |in| to |ctx| and write the resulting hash to |out|.
// |len| is treated as secret and must be at most |max_len|, which is treated
// as public. |in| must point to a buffer of at least |max_len| bytes.
// It returns one on success and zero if inputs are too long.
//
// The functions are exported for unit tests.
OPENSSL_EXPORT int EVP_final_with_secret_suffix_sha1(
SHA_CTX *ctx, uint8_t out[SHA_DIGEST_LENGTH], const uint8_t *in, size_t len,
size_t max_len);
OPENSSL_EXPORT int EVP_final_with_secret_suffix_sha256(
SHA256_CTX *ctx, uint8_t out[SHA256_DIGEST_LENGTH], const uint8_t *in,
size_t len, size_t max_len);
OPENSSL_EXPORT int EVP_final_with_secret_suffix_sha384(
SHA512_CTX *ctx, uint8_t out[SHA384_DIGEST_LENGTH], const uint8_t *in,
size_t len, size_t max_len);
// EVP_tls_cbc_digest_record computes the MAC of a decrypted, padded TLS
// record.
//
// md: the hash function used in the HMAC.
// EVP_tls_cbc_record_digest_supported must return true for this hash.
// md_out: the digest output. At most EVP_MAX_MD_SIZE bytes will be written.
// md_out_size: the number of output bytes is written here.
// header: the 13-byte, TLS record header.
// data: the record data itself
// data_size: the secret, reported length of the data once the padding and MAC
// have been removed.
// data_plus_mac_plus_padding_size: the public length of the whole
// record, including padding.
//
// On entry: by virtue of having been through one of the remove_padding
// functions, above, we know that data_plus_mac_size is large enough to contain
// a padding byte and MAC. (If the padding was invalid, it might contain the
// padding too.)
int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
size_t *md_out_size, const uint8_t header[13],
const uint8_t *data, size_t data_size,
size_t data_plus_mac_plus_padding_size,
const uint8_t *mac_secret,
unsigned mac_secret_length);
#define POLY1305_TAG_LEN 16
// For convenience (the x86_64 calling convention allows only six parameters in
// registers), the final parameter for the assembly functions is both an input
// and output parameter.
// chacha20_poly1305_open_data carries the extra arguments for the assembly
// |chacha20_poly1305_open| routine. |in| is read on entry; on exit the same
// memory holds |out.tag| (per the comment above, the final parameter is both
// an input and an output).
union chacha20_poly1305_open_data {
  struct {
    alignas(16) uint8_t key[32];
    uint32_t counter;
    uint8_t nonce[12];
  } in;
  struct {
    uint8_t tag[POLY1305_TAG_LEN];
  } out;
};
// chacha20_poly1305_seal_data carries the extra arguments for the assembly
// |chacha20_poly1305_seal| routine. |in| is read on entry (including an
// optional |extra_ciphertext| folded into the tag); on exit the same memory
// holds |out.tag|.
union chacha20_poly1305_seal_data {
  struct {
    alignas(16) uint8_t key[32];
    uint32_t counter;
    uint8_t nonce[12];
    const uint8_t *extra_ciphertext;
    size_t extra_ciphertext_len;
  } in;
  struct {
    uint8_t tag[POLY1305_TAG_LEN];
  } out;
};
#if (defined(OPENSSL_X86_64) || defined(OPENSSL_AARCH64)) && \
!defined(OPENSSL_NO_ASM)
// NOTE(review): these exact sizes appear to be relied upon by the assembly
// implementations (the unions are passed to them directly) — confirm against
// the chacha20_poly1305_*.pl sources before changing either union's layout.
OPENSSL_STATIC_ASSERT(sizeof(union chacha20_poly1305_open_data) == 48,
                      _wrong_chacha20_poly1305_open_data_size)
OPENSSL_STATIC_ASSERT(sizeof(union chacha20_poly1305_seal_data) == 48 + 8 + 8,
                      _wrong_chacha20_poly1305_seal_data_size)
// chacha20_poly1305_asm_capable returns one if the assembly
// ChaCha20-Poly1305 implementation may be used on the current CPU. This
// definition is only compiled for x86-64 or AArch64 (see the enclosing #if),
// so exactly one of the branches below is always present.
OPENSSL_INLINE int chacha20_poly1305_asm_capable(void) {
#if defined(OPENSSL_X86_64)
  return CRYPTO_is_SSE4_1_capable();
#elif defined(OPENSSL_AARCH64)
  return CRYPTO_is_NEON_capable();
#endif
}
// chacha20_poly1305_open is defined in chacha20_poly1305_*.pl. It decrypts
// |plaintext_len| bytes from |ciphertext| and writes them to |out_plaintext|.
// Additional input parameters are passed in |aead_data->in|. On exit, it will
// write calculated tag value to |aead_data->out.tag|, which the caller must
// check.
extern void chacha20_poly1305_open(uint8_t *out_plaintext,
const uint8_t *ciphertext,
size_t plaintext_len, const uint8_t *ad,
size_t ad_len,
union chacha20_poly1305_open_data *data);
// chacha20_poly1305_seal is defined in chacha20_poly1305_*.pl. It encrypts
// |plaintext_len| bytes from |plaintext| and writes them to |out_ciphertext|.
// Additional input parameters are passed in |aead_data->in|. The calculated tag
// value is over the computed ciphertext concatenated with |extra_ciphertext|
// and written to |aead_data->out.tag|.
extern void chacha20_poly1305_seal(uint8_t *out_ciphertext,
const uint8_t *plaintext,
size_t plaintext_len, const uint8_t *ad,
size_t ad_len,
union chacha20_poly1305_seal_data *data);
#else
OPENSSL_INLINE int chacha20_poly1305_asm_capable(void) { return 0; }
// Fallback stub. Unreachable because |chacha20_poly1305_asm_capable| returns
// zero in this configuration; aborts if called anyway.
OPENSSL_INLINE void chacha20_poly1305_open(uint8_t *out_plaintext,
                                           const uint8_t *ciphertext,
                                           size_t plaintext_len, const uint8_t *ad,
                                           size_t ad_len,
                                           union chacha20_poly1305_open_data *data) {
  abort();
}
// Fallback stub. Unreachable because |chacha20_poly1305_asm_capable| returns
// zero in this configuration; aborts if called anyway.
OPENSSL_INLINE void chacha20_poly1305_seal(uint8_t *out_ciphertext,
                                           const uint8_t *plaintext,
                                           size_t plaintext_len, const uint8_t *ad,
                                           size_t ad_len,
                                           union chacha20_poly1305_seal_data *data) {
  abort();
}
#endif
#if defined(__cplusplus)
} // extern C
#endif
#endif // OPENSSL_HEADER_CIPHER_EXTRA_INTERNAL_H

View File

@@ -0,0 +1,638 @@
// Copyright (c) 2012 The OpenSSL Project. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
#include <assert.h>
#include <string.h>
#include <openssl/digest.h>
#include <openssl/nid.h>
#include <openssl/sha.h>
#include "../internal.h"
#include "internal.h"
#include "../fipsmodule/cipher/internal.h"
// The length of the additional data field in AES-CBC-HMAC based AEADs.
#define AEAD_TLS_AES_CBC_HMAC_AD_LENGTH (13)
// EVP_tls_cbc_remove_padding checks the CBC padding of a decrypted TLS record
// in |in| (without any explicit IV). Lengths are public; the padding bytes
// are secret, so after the initial public-length check everything runs in
// constant time. Returns zero if the record is publicly invalid. Otherwise
// returns one, sets |*out_padding_ok| to all-ones iff the padding was well
// formed, and sets |*out_len| to |in_len| minus the padding (or to |in_len|
// when the padding was bad).
//
// |block_size| is not read by this implementation; it is kept for the
// interface documented in internal.h.
int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len,
                               const uint8_t *in, size_t in_len,
                               size_t block_size, size_t mac_size) {
  const size_t overhead = 1 /* padding length byte */ + mac_size;
  // These lengths are all public so we can test them in non-constant time.
  if (overhead > in_len) {
    return 0;
  }
  // The last byte of the record is the (secret) padding length.
  size_t padding_length = in[in_len - 1];
  crypto_word_t good = constant_time_ge_w(in_len, overhead + padding_length);
  // The padding consists of a length byte at the end of the record and
  // then that many bytes of padding, all with the same value as the
  // length byte. Thus, with the length byte included, there are i+1
  // bytes of padding.
  //
  // We can't check just |padding_length+1| bytes because that leaks
  // decrypted information. Therefore we always have to check the maximum
  // amount of padding possible. (Again, the length of the record is
  // public information so we can use it.)
  size_t to_check = 256; // maximum amount of padding, inc length byte.
  if (to_check > in_len) {
    to_check = in_len;
  }
  for (size_t i = 0; i < to_check; i++) {
    // |mask| is all-ones for the positions that are part of the padding.
    uint8_t mask = constant_time_ge_8(padding_length, i);
    uint8_t b = in[in_len - 1 - i];
    // The final |padding_length+1| bytes should all have the value
    // |padding_length|. Therefore the XOR should be zero.
    good &= ~(mask & (padding_length ^ b));
  }
  // If any of the final |padding_length+1| bytes had the wrong value,
  // one or more of the lower eight bits of |good| will be cleared.
  good = constant_time_eq_w(0xff, good & 0xff);
  // Always treat |padding_length| as zero on error. If, assuming block size of
  // 16, a padding of [<15 arbitrary bytes> 15] treated |padding_length| as 16
  // and returned -1, distinguishing good MAC and bad padding from bad MAC and
  // bad padding would give POODLE's padding oracle.
  padding_length = good & (padding_length + 1);
  *out_len = in_len - padding_length;
  *out_padding_ok = good;
  return 1;
}
// EVP_tls_cbc_copy_mac copies the |md_size|-byte MAC that ends at (secret)
// offset |in_len| in |in| to |out|, in constant time. |orig_len| is the
// public total length of |in|; |in_len| may vary secretly within a 256-byte
// window (the maximum CBC padding), so the copy must not branch or index on
// it. Preconditions: orig_len >= in_len >= md_size, md_size <= EVP_MAX_MD_SIZE.
void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
                          size_t in_len, size_t orig_len) {
  uint8_t rotated_mac1[EVP_MAX_MD_SIZE], rotated_mac2[EVP_MAX_MD_SIZE];
  uint8_t *rotated_mac = rotated_mac1;
  uint8_t *rotated_mac_tmp = rotated_mac2;
  // mac_end is the index of |in| just after the end of the MAC.
  size_t mac_end = in_len;
  size_t mac_start = mac_end - md_size;
  declassify_assert(orig_len >= in_len);
  declassify_assert(in_len >= md_size);
  assert(md_size <= EVP_MAX_MD_SIZE);
  assert(md_size > 0);
  // scan_start contains the number of bytes that we can ignore because
  // the MAC's position can only vary by 255 bytes.
  size_t scan_start = 0;
  // This information is public so it's safe to branch based on it.
  if (orig_len > md_size + 255 + 1) {
    scan_start = orig_len - (md_size + 255 + 1);
  }
  size_t rotate_offset = 0;
  uint8_t mac_started = 0;
  OPENSSL_memset(rotated_mac, 0, md_size);
  // Scan the tail of the record and OR each in-MAC byte into |rotated_mac| at
  // position i mod md_size, so the MAC lands in |rotated_mac| rotated left by
  // |rotate_offset| bytes.
  for (size_t i = scan_start, j = 0; i < orig_len; i++, j++) {
    if (j >= md_size) {
      j -= md_size;
    }
    crypto_word_t is_mac_start = constant_time_eq_w(i, mac_start);
    mac_started |= is_mac_start;
    uint8_t mac_ended = constant_time_ge_8(i, mac_end);
    rotated_mac[j] |= in[i] & mac_started & ~mac_ended;
    // Save the offset that |mac_start| is mapped to.
    rotate_offset |= j & is_mac_start;
  }
  // Now rotate the MAC. We rotate in log(md_size) steps, one for each bit
  // position.
  for (size_t offset = 1; offset < md_size; offset <<= 1, rotate_offset >>= 1) {
    // Rotate by |offset| iff the corresponding bit is set in
    // |rotate_offset|, placing the result in |rotated_mac_tmp|.
    const uint8_t skip_rotate = (rotate_offset & 1) - 1;
    for (size_t i = 0, j = offset; i < md_size; i++, j++) {
      if (j >= md_size) {
        j -= md_size;
      }
      rotated_mac_tmp[i] =
          constant_time_select_8(skip_rotate, rotated_mac[i], rotated_mac[j]);
    }
    // Swap pointers so |rotated_mac| contains the (possibly) rotated value.
    // Note the number of iterations and thus the identity of these pointers is
    // public information.
    uint8_t *tmp = rotated_mac;
    rotated_mac = rotated_mac_tmp;
    rotated_mac_tmp = tmp;
  }
  OPENSSL_memcpy(out, rotated_mac, md_size);
}
// EVP_final_with_secret_suffix_sha1 hashes |len| bytes of |in| into |ctx|
// and finalizes the SHA-1 digest into |out|, in constant time with respect
// to |len|. |max_len| is the public upper bound on |len|; |in| must be at
// least |max_len| bytes. Every block up to the maximum possible count is
// processed; the state after the true final block is selected with masks.
// Returns one on success, zero if the lengths are too large.
int EVP_final_with_secret_suffix_sha1(SHA_CTX *ctx,
                                      uint8_t out[SHA_DIGEST_LENGTH],
                                      const uint8_t *in, size_t len,
                                      size_t max_len) {
  // Bound the input length so |total_bits| below fits in four bytes. This is
  // redundant with TLS record size limits. This also ensures |input_idx| below
  // does not overflow.
  size_t max_len_bits = max_len << 3;
  if (ctx->Nh != 0 ||
      (max_len_bits >> 3) != max_len || // Overflow
      ctx->Nl + max_len_bits < max_len_bits ||
      ctx->Nl + max_len_bits > UINT32_MAX) {
    return 0;
  }
  // We need to hash the following into |ctx|:
  //
  // - ctx->data[:ctx->num]
  // - in[:len]
  // - A 0x80 byte
  // - However many zero bytes are needed to pad up to a block.
  // - Eight bytes of length.
  //
  // (">> 6" divides by SHA_CBLOCK == 64, the SHA-1 block size.)
  size_t num_blocks = (ctx->num + len + 1 + 8 + SHA_CBLOCK - 1) >> 6;
  size_t last_block = num_blocks - 1;
  size_t max_blocks = (ctx->num + max_len + 1 + 8 + SHA_CBLOCK - 1) >> 6;
  // The bounds above imply |total_bits| fits in four bytes.
  size_t total_bits = ctx->Nl + (len << 3);
  uint8_t length_bytes[4];
  length_bytes[0] = (uint8_t)(total_bits >> 24);
  length_bytes[1] = (uint8_t)(total_bits >> 16);
  length_bytes[2] = (uint8_t)(total_bits >> 8);
  length_bytes[3] = (uint8_t)total_bits;
  // We now construct and process each expected block in constant-time.
  uint8_t block[SHA_CBLOCK] = {0};
  uint32_t result[5] = {0}; // The size of SHA1 state = 160 bits = 5*32 bits.
  // input_idx is the index into |in| corresponding to the current block.
  // However, we allow this index to overflow beyond |max_len|, to simplify the
  // 0x80 byte.
  size_t input_idx = 0;
  for (size_t i = 0; i < max_blocks; i++) {
    // Fill |block| with data from the partial block in |ctx| and |in|. We copy
    // as if we were hashing up to |max_len| and then zero the excess later.
    size_t block_start = 0;
    if (i == 0) {
      OPENSSL_memcpy(block, ctx->data, ctx->num);
      block_start = ctx->num;
    }
    if (input_idx < max_len) {
      size_t to_copy = SHA_CBLOCK - block_start;
      if (to_copy > max_len - input_idx) {
        to_copy = max_len - input_idx;
      }
      OPENSSL_memcpy(block + block_start, in + input_idx, to_copy);
    }
    // Zero any bytes beyond |len| and add the 0x80 byte.
    for (size_t j = block_start; j < SHA_CBLOCK; j++) {
      // input[idx] corresponds to block[j].
      size_t idx = input_idx + j - block_start;
      // The barriers on |len| are not strictly necessary. However, without
      // them, GCC compiles this code by incorporating |len| into the loop
      // counter and subtracting it out later. This is still constant-time, but
      // it frustrates attempts to validate this.
      uint8_t is_in_bounds = constant_time_lt_8(idx, value_barrier_w(len));
      uint8_t is_padding_byte = constant_time_eq_8(idx, value_barrier_w(len));
      block[j] &= is_in_bounds;
      block[j] |= 0x80 & is_padding_byte;
    }
    input_idx += SHA_CBLOCK - block_start;
    // Fill in the length if this is the last block.
    crypto_word_t is_last_block = constant_time_eq_w(i, last_block);
    for (size_t j = 0; j < 4; j++) {
      block[SHA_CBLOCK - 4 + j] |= is_last_block & length_bytes[j];
    }
    // Process the block and save the hash state if it is the final value.
    SHA1_Transform(ctx, block);
    for (size_t j = 0; j < 5; j++) {
      result[j] |= is_last_block & ctx->h[j];
    }
  }
  // Write the output.
  for (size_t i = 0; i < 5; i++) {
    CRYPTO_store_u32_be(out + 4 * i, result[i]);
  }
  return 1;
}
// EVP_tls_cbc_digest_record_sha1 computes HMAC-SHA-1 over |header| and
// |data[:data_size]| without leaking |data_size|, which is secret within the
// window allowed by |data_plus_mac_plus_padding_size| (public). Writes the
// MAC to |md_out|/|*md_out_size| and returns one on success, zero on failure.
static int EVP_tls_cbc_digest_record_sha1(
    uint8_t *md_out, size_t *md_out_size,
    const uint8_t header[AEAD_TLS_AES_CBC_HMAC_AD_LENGTH], const uint8_t *data,
    size_t data_size, size_t data_plus_mac_plus_padding_size,
    const uint8_t *mac_secret, unsigned mac_secret_length) {
  if (mac_secret_length > SHA_CBLOCK) {
    // HMAC pads small keys with zeros and hashes large keys down. This function
    // should never reach the large key case.
    assert(0);
    return 0;
  }
  // Compute the initial HMAC block (the key XOR'd with the 0x36 ipad bytes).
  uint8_t hmac_pad[SHA_CBLOCK];
  OPENSSL_memset(hmac_pad, 0, sizeof(hmac_pad));
  OPENSSL_memcpy(hmac_pad, mac_secret, mac_secret_length);
  for (size_t i = 0; i < SHA_CBLOCK; i++) {
    hmac_pad[i] ^= 0x36;
  }
  SHA_CTX ctx;
  SHA1_Init(&ctx);
  SHA1_Update(&ctx, hmac_pad, SHA_CBLOCK);
  SHA1_Update(&ctx, header, AEAD_TLS_AES_CBC_HMAC_AD_LENGTH);
  // There are at most 256 bytes of padding, so we can compute the public
  // minimum length for |data_size|.
  size_t min_data_size = 0;
  if (data_plus_mac_plus_padding_size > SHA_DIGEST_LENGTH + 256) {
    min_data_size = data_plus_mac_plus_padding_size - SHA_DIGEST_LENGTH - 256;
  }
  // Hash the public minimum length directly. This reduces the number of blocks
  // that must be computed in constant-time.
  SHA1_Update(&ctx, data, min_data_size);
  // Hash the remaining data without leaking |data_size|.
  uint8_t mac_out[SHA_DIGEST_LENGTH];
  if (!EVP_final_with_secret_suffix_sha1(
          &ctx, mac_out, data + min_data_size, data_size - min_data_size,
          data_plus_mac_plus_padding_size - min_data_size)) {
    return 0;
  }
  // Complete the HMAC in the standard manner. XORing the ipad block with 0x6a
  // yields the opad block, since 0x36 ^ 0x6a == 0x5c.
  SHA1_Init(&ctx);
  for (size_t i = 0; i < SHA_CBLOCK; i++) {
    hmac_pad[i] ^= 0x6a;
  }
  SHA1_Update(&ctx, hmac_pad, SHA_CBLOCK);
  SHA1_Update(&ctx, mac_out, SHA_DIGEST_LENGTH);
  SHA1_Final(md_out, &ctx);
  *md_out_size = SHA_DIGEST_LENGTH;
  return 1;
}
// EVP_final_with_secret_suffix_sha256 is the SHA-256 analogue of
// |EVP_final_with_secret_suffix_sha1|: it hashes |len| bytes of |in| into
// |ctx| and finalizes into |out| in constant time with respect to |len|,
// where |max_len| is the public upper bound. Returns one on success, zero if
// the lengths are too large.
int EVP_final_with_secret_suffix_sha256(SHA256_CTX *ctx,
                                        uint8_t out[SHA256_DIGEST_LENGTH],
                                        const uint8_t *in, size_t len,
                                        size_t max_len) {
  // Bound the input length so |total_bits| below fits in four bytes. This is
  // redundant with TLS record size limits. This also ensures |input_idx| below
  // does not overflow.
  size_t max_len_bits = max_len << 3;
  if (ctx->Nh != 0 ||
      (max_len_bits >> 3) != max_len || // Overflow
      ctx->Nl + max_len_bits < max_len_bits ||
      ctx->Nl + max_len_bits > UINT32_MAX) {
    return 0;
  }
  // We need to hash the following into |ctx|:
  //
  // - ctx->data[:ctx->num]
  // - in[:len]
  // - A 0x80 byte
  // - However many zero bytes are needed to pad up to a block.
  // - Eight bytes of length.
  //
  // (">> 6" divides by SHA256_CBLOCK == 64, the SHA-256 block size.)
  size_t num_blocks = (ctx->num + len + 1 + 8 + SHA256_CBLOCK - 1) >> 6;
  size_t last_block = num_blocks - 1;
  size_t max_blocks = (ctx->num + max_len + 1 + 8 + SHA256_CBLOCK - 1) >> 6;
  // The bounds above imply |total_bits| fits in four bytes.
  size_t total_bits = ctx->Nl + (len << 3);
  uint8_t length_bytes[4];
  length_bytes[0] = (uint8_t)(total_bits >> 24);
  length_bytes[1] = (uint8_t)(total_bits >> 16);
  length_bytes[2] = (uint8_t)(total_bits >> 8);
  length_bytes[3] = (uint8_t)total_bits;
  // We now construct and process each expected block in constant-time.
  uint8_t block[SHA256_CBLOCK] = {0};
  uint32_t result[8] = {0}; // The size of SHA256 state = 256 bits = 8*32 bits.
  // input_idx is the index into |in| corresponding to the current block.
  // However, we allow this index to overflow beyond |max_len|, to simplify the
  // 0x80 byte.
  size_t input_idx = 0;
  for (size_t i = 0; i < max_blocks; i++) {
    // Fill |block| with data from the partial block in |ctx| and |in|. We copy
    // as if we were hashing up to |max_len| and then zero the excess later.
    size_t block_start = 0;
    if (i == 0) {
      OPENSSL_memcpy(block, ctx->data, ctx->num);
      block_start = ctx->num;
    }
    if (input_idx < max_len) {
      size_t to_copy = SHA256_CBLOCK - block_start;
      if (to_copy > max_len - input_idx) {
        to_copy = max_len - input_idx;
      }
      OPENSSL_memcpy(block + block_start, in + input_idx, to_copy);
    }
    // Zero any bytes beyond |len| and add the 0x80 byte.
    for (size_t j = block_start; j < SHA256_CBLOCK; j++) {
      // input[idx] corresponds to block[j].
      size_t idx = input_idx + j - block_start;
      // The barriers on |len| are not strictly necessary. However, without
      // them, GCC compiles this code by incorporating |len| into the loop
      // counter and subtracting it out later. This is still constant-time, but
      // it frustrates attempts to validate this.
      uint8_t is_in_bounds = constant_time_lt_8(idx, value_barrier_w(len));
      uint8_t is_padding_byte = constant_time_eq_8(idx, value_barrier_w(len));
      block[j] &= is_in_bounds;
      block[j] |= 0x80 & is_padding_byte;
    }
    input_idx += SHA256_CBLOCK - block_start;
    // Fill in the length if this is the last block.
    crypto_word_t is_last_block = constant_time_eq_w(i, last_block);
    for (size_t j = 0; j < 4; j++) {
      block[SHA256_CBLOCK - 4 + j] |= is_last_block & length_bytes[j];
    }
    // Process the block and save the hash state if it is the final value.
    SHA256_Transform(ctx, block);
    for (size_t j = 0; j < 8; j++) {
      result[j] |= is_last_block & ctx->h[j];
    }
  }
  // Write the output.
  for (size_t i = 0; i < 8; i++) {
    CRYPTO_store_u32_be(out + 4 * i, result[i]);
  }
  return 1;
}
// EVP_tls_cbc_digest_record_sha256 computes HMAC-SHA-256 over |header| and
// |data[:data_size]| without leaking |data_size|. See
// |EVP_tls_cbc_digest_record_sha1| for the structure; only the hash differs.
static int EVP_tls_cbc_digest_record_sha256(
    uint8_t *md_out, size_t *md_out_size,
    const uint8_t header[AEAD_TLS_AES_CBC_HMAC_AD_LENGTH], const uint8_t *data,
    size_t data_size, size_t data_plus_mac_plus_padding_size,
    const uint8_t *mac_secret, unsigned mac_secret_length) {
  if (mac_secret_length > SHA256_CBLOCK) {
    // HMAC pads small keys with zeros and hashes large keys down. This function
    // should never reach the large key case.
    assert(0);
    return 0;
  }
  // Compute the initial HMAC block (the key XOR'd with the 0x36 ipad bytes).
  uint8_t hmac_pad[SHA256_CBLOCK];
  OPENSSL_memset(hmac_pad, 0, sizeof(hmac_pad));
  OPENSSL_memcpy(hmac_pad, mac_secret, mac_secret_length);
  for (size_t i = 0; i < SHA256_CBLOCK; i++) {
    hmac_pad[i] ^= 0x36;
  }
  SHA256_CTX ctx;
  SHA256_Init(&ctx);
  SHA256_Update(&ctx, hmac_pad, SHA256_CBLOCK);
  SHA256_Update(&ctx, header, AEAD_TLS_AES_CBC_HMAC_AD_LENGTH);
  // There are at most 256 bytes of padding, so we can compute the public
  // minimum length for |data_size|.
  size_t min_data_size = 0;
  if (data_plus_mac_plus_padding_size > SHA256_DIGEST_LENGTH + 256) {
    min_data_size = data_plus_mac_plus_padding_size - SHA256_DIGEST_LENGTH - 256;
  }
  // Hash the public minimum length directly. This reduces the number of blocks
  // that must be computed in constant-time.
  SHA256_Update(&ctx, data, min_data_size);
  // Hash the remaining data without leaking |data_size|.
  uint8_t mac_out[SHA256_DIGEST_LENGTH];
  if (!EVP_final_with_secret_suffix_sha256(
          &ctx, mac_out, data + min_data_size, data_size - min_data_size,
          data_plus_mac_plus_padding_size - min_data_size)) {
    return 0;
  }
  // Complete the HMAC in the standard manner. XORing the ipad block with 0x6a
  // yields the opad block, since 0x36 ^ 0x6a == 0x5c.
  SHA256_Init(&ctx);
  for (size_t i = 0; i < SHA256_CBLOCK; i++) {
    hmac_pad[i] ^= 0x6a;
  }
  SHA256_Update(&ctx, hmac_pad, SHA256_CBLOCK);
  SHA256_Update(&ctx, mac_out, SHA256_DIGEST_LENGTH);
  SHA256_Final(md_out, &ctx);
  *md_out_size = SHA256_DIGEST_LENGTH;
  return 1;
}
// The size of SHA384 working state = 512 bits = 8 64-bit words.
#define SHA384_WORKING_VARIABLES 8
// EVP_final_with_secret_suffix_sha384 is the SHA-384 analogue of
// |EVP_final_with_secret_suffix_sha1|: it hashes |len| bytes of |in| into
// |ctx| and finalizes into |out| in constant time with respect to |len|,
// where |max_len| is the public upper bound. SHA-384 uses the SHA-512
// machinery (SHA512_CTX, 128-byte blocks, 64-bit state words) and truncates
// the result to the left-most 384 bits. Returns one on success, zero if the
// lengths are too large.
int EVP_final_with_secret_suffix_sha384(SHA512_CTX *ctx,
                                        uint8_t out[SHA384_DIGEST_LENGTH],
                                        const uint8_t *in, size_t len,
                                        size_t max_len) {
  // Bound the input length so |total_bits| below fits in four bytes. This is
  // redundant with TLS record size limits. This also ensures |input_idx| below
  // does not overflow.
  size_t max_len_bits = max_len << 3;
  if (ctx->Nh != 0 ||
      (max_len_bits >> 3) != max_len || // Overflow
      ctx->Nl + max_len_bits < max_len_bits ||
      ctx->Nl + max_len_bits > UINT32_MAX) {
    return 0;
  }
  // See FIPS 180-4 section 5.1.2 for an explanation on SHA-384 message padding
  // and preprocessing. Here are some constants of interest:
  // * 16 == 128 bits for the message length
  // * 1 byte to cover padding bit.
  // * SHA384_CBLOCK == 1024 bits the padded message length that we should have
  //   a multiple of. The padding added will be less then this value.
  // * 7 is the how much we shift right (divide) by 128 bytes (1024 bits) to
  //   get the total number of blocks.
  // We need to hash the following into |ctx|:
  //
  // - ctx->data[:ctx->num]
  // - in[:len]
  // - A 0x80 byte
  // - However many zero bytes are needed to pad up to a block.
  // - 16 bytes of length.
  size_t num_blocks = (ctx->num + len + 1 + 16 + SHA384_CBLOCK - 1) >> 7;
  size_t last_block = num_blocks - 1;
  size_t max_blocks = (ctx->num + max_len + 1 + 16 + SHA384_CBLOCK - 1) >> 7;
  // The bounds above imply |total_bits| fits in four bytes, so only the last
  // four bytes of the 16-byte length field ever need to be set.
  size_t total_bits = ctx->Nl + (len << 3);
  uint8_t length_bytes[4];
  length_bytes[0] = (uint8_t)(total_bits >> 24);
  length_bytes[1] = (uint8_t)(total_bits >> 16);
  length_bytes[2] = (uint8_t)(total_bits >> 8);
  length_bytes[3] = (uint8_t)total_bits;
  // We now construct and process each expected block in constant-time.
  uint8_t block[SHA384_CBLOCK] = {0};
  uint64_t result[SHA384_WORKING_VARIABLES] = {0};
  // input_idx is the index into |in| corresponding to the current block.
  // However, we allow this index to overflow beyond |max_len|, to simplify the
  // 0x80 byte.
  size_t input_idx = 0;
  for (size_t i = 0; i < max_blocks; i++) {
    // Fill |block| with data from the partial block in |ctx| and |in|. We copy
    // as if we were hashing up to |max_len| and then zero the excess later.
    // (SHA512_CTX stores its partial block in |p|, not |data|.)
    size_t block_start = 0;
    if (i == 0) {
      OPENSSL_memcpy(block, ctx->p, ctx->num);
      block_start = ctx->num;
    }
    if (input_idx < max_len) {
      size_t to_copy = SHA384_CBLOCK- block_start;
      if (to_copy > max_len - input_idx) {
        to_copy = max_len - input_idx;
      }
      OPENSSL_memcpy(block + block_start, in + input_idx, to_copy);
    }
    // Zero any bytes beyond |len| and add the 0x80 byte.
    for (size_t j = block_start; j < SHA384_CBLOCK; j++) {
      // input[idx] corresponds to block[j].
      size_t idx = input_idx + j - block_start;
      // The barriers on |len| are not strictly necessary. However, without
      // them, GCC compiles this code by incorporating |len| into the loop
      // counter and subtracting it out later. This is still constant-time, but
      // it frustrates attempts to validate this.
      uint8_t is_in_bounds = constant_time_lt_8(idx, value_barrier_w(len));
      uint8_t is_padding_byte = constant_time_eq_8(idx, value_barrier_w(len));
      block[j] &= is_in_bounds;
      block[j] |= 0x80 & is_padding_byte;
    }
    input_idx += SHA384_CBLOCK - block_start;
    // Fill in the length if this is the last block.
    crypto_word_t is_last_block = constant_time_eq_w(i, last_block);
    for (size_t j = 0; j < 4; j++) {
      block[SHA384_CBLOCK - 4 + j] |= is_last_block & length_bytes[j];
    }
    // Process the block and save the hash state if it is the final value.
    assert(SHA384_CBLOCK == SHA512_CBLOCK);
    SHA512_Transform(ctx, block);
// Widen the all-ones/all-zeros |is_last_block| mask to 64 bits; on 32-bit
// targets crypto_word_t is only 32 bits wide.
#if defined(OPENSSL_64_BIT)
    uint64_t mask = is_last_block;
#elif defined(OPENSSL_32_BIT)
    uint64_t mask =
        ((uint64_t)is_last_block) | (((uint64_t)is_last_block) << 32);
#else
#error "Must define either OPENSSL_32_BIT or OPENSSL_64_BIT"
#endif
    for (size_t j = 0; j < 8; j++) {
      result[j] |= mask & ctx->h[j];
    }
  }
  // Write the output. For SHA384 the resulting hash is truncated to the left-most
  // 384-bits (6 64-bit words).
  for (size_t i = 0; i < 6; i++) {
    CRYPTO_store_u64_be(out + 8 * i, result[i]);
  }
  return 1;
}
// EVP_tls_cbc_digest_record_sha384 computes HMAC-SHA-384 over |header| and
// |data[:data_size]| without leaking |data_size|. See
// |EVP_tls_cbc_digest_record_sha1| for the structure; note SHA-384 uses a
// 128-byte block and a SHA512_CTX.
static int EVP_tls_cbc_digest_record_sha384(
    uint8_t *md_out, size_t *md_out_size,
    const uint8_t header[AEAD_TLS_AES_CBC_HMAC_AD_LENGTH], const uint8_t *data,
    size_t data_size, size_t data_plus_mac_plus_padding_size,
    const uint8_t *mac_secret, unsigned mac_secret_length) {
  if (mac_secret_length > SHA384_CBLOCK) {
    // HMAC pads small keys with zeros and hashes large keys down. This function
    // should never reach the large key case.
    assert(0);
    return 0;
  }
  // Compute the initial HMAC block (the key XOR'd with the 0x36 ipad bytes).
  uint8_t hmac_pad[SHA384_CBLOCK];
  OPENSSL_memset(hmac_pad, 0, sizeof(hmac_pad));
  OPENSSL_memcpy(hmac_pad, mac_secret, mac_secret_length);
  for (size_t i = 0; i < SHA384_CBLOCK; i++) {
    hmac_pad[i] ^= 0x36;
  }
  SHA512_CTX ctx;
  SHA384_Init(&ctx);
  SHA384_Update(&ctx, hmac_pad, SHA384_CBLOCK);
  SHA384_Update(&ctx, header, AEAD_TLS_AES_CBC_HMAC_AD_LENGTH);
  // There are at most 256 bytes of padding, so we can compute the public
  // minimum length for |data_size|.
  size_t min_data_size = 0;
  if (data_plus_mac_plus_padding_size > SHA384_DIGEST_LENGTH + 256) {
    min_data_size =
        data_plus_mac_plus_padding_size - SHA384_DIGEST_LENGTH - 256;
  }
  // Hash the public minimum length directly. This reduces the number of blocks
  // that must be computed in constant-time.
  SHA384_Update(&ctx, data, min_data_size);
  // Hash the remaining data without leaking |data_size|.
  uint8_t mac_out[SHA384_DIGEST_LENGTH];
  if (!EVP_final_with_secret_suffix_sha384(
          &ctx, mac_out, data + min_data_size, data_size - min_data_size,
          data_plus_mac_plus_padding_size - min_data_size)) {
    return 0;
  }
  // Complete the HMAC in the standard manner. XORing the ipad block with 0x6a
  // yields the opad block, since 0x36 ^ 0x6a == 0x5c.
  SHA384_Init(&ctx);
  for (size_t i = 0; i < SHA384_CBLOCK; i++) {
    hmac_pad[i] ^= 0x6a;
  }
  SHA384_Update(&ctx, hmac_pad, SHA384_CBLOCK);
  SHA384_Update(&ctx, mac_out, SHA384_DIGEST_LENGTH);
  SHA384_Final(md_out, &ctx);
  *md_out_size = SHA384_DIGEST_LENGTH;
  return 1;
}
// EVP_tls_cbc_record_digest_supported returns one iff |md| is a hash for
// which this file provides a constant-time TLS CBC record MAC, and zero
// otherwise.
int EVP_tls_cbc_record_digest_supported(const EVP_MD *md) {
  switch (EVP_MD_type(md)) {
    case NID_sha1:
    case NID_sha256:
    case NID_sha384:
      return 1;
    default:
      return 0;
  }
}
// EVP_tls_cbc_digest_record computes the HMAC of a decrypted, padded TLS
// record without leaking the secret |data_size|. It dispatches to the
// per-hash implementation selected by |md|; callers must first check the
// hash with |EVP_tls_cbc_record_digest_supported|. On success it writes the
// MAC to |md_out|, its length to |*md_out_size|, and returns one.
int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
                              size_t *md_out_size,
                              const uint8_t header[AEAD_TLS_AES_CBC_HMAC_AD_LENGTH],
                              const uint8_t *data, size_t data_size,
                              size_t data_plus_mac_plus_padding_size,
                              const uint8_t *mac_secret,
                              unsigned mac_secret_length) {
  // Which hash algorithm is in use is public knowledge, so branching on it
  // leaks nothing.
  switch (EVP_MD_type(md)) {
    case NID_sha1:
      return EVP_tls_cbc_digest_record_sha1(
          md_out, md_out_size, header, data, data_size,
          data_plus_mac_plus_padding_size, mac_secret, mac_secret_length);
    case NID_sha256:
      return EVP_tls_cbc_digest_record_sha256(
          md_out, md_out_size, header, data, data_size,
          data_plus_mac_plus_padding_size, mac_secret, mac_secret_length);
    case NID_sha384:
      return EVP_tls_cbc_digest_record_sha384(
          md_out, md_out_size, header, data, data_size,
          data_plus_mac_plus_padding_size, mac_secret, mac_secret_length);
    default:
      // EVP_tls_cbc_record_digest_supported should have been called first to
      // check that the hash function is supported.
      assert(0);
      *md_out_size = 0;
      return 0;
  }
}