chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

View File

@@ -0,0 +1,413 @@
// Copyright (c) 2014, Google Inc.
// SPDX-License-Identifier: ISC
#include <openssl/aead.h>
#include <assert.h>
#include <string.h>
#include <openssl/bytestring.h>
#include <openssl/cipher.h>
#include <openssl/err.h>
#include <openssl/mem.h>
#include "../../internal.h"
#include "internal.h"
// Returns the key size, in bytes, that |aead| requires.
size_t EVP_AEAD_key_length(const EVP_AEAD *aead) { return aead->key_len; }
// Returns the nonce size, in bytes, that |aead| uses.
size_t EVP_AEAD_nonce_length(const EVP_AEAD *aead) { return aead->nonce_len; }
// Returns the maximum number of bytes sealing may add beyond the plaintext
// length.
size_t EVP_AEAD_max_overhead(const EVP_AEAD *aead) { return aead->overhead; }
// Returns the largest tag size, in bytes, that |aead| can produce.
size_t EVP_AEAD_max_tag_len(const EVP_AEAD *aead) { return aead->max_tag_len; }
// EVP_AEAD_CTX_zero clears |ctx| so that it is safe to (re-)initialize.
void EVP_AEAD_CTX_zero(EVP_AEAD_CTX *ctx) {
  OPENSSL_memset(ctx, 0, sizeof(EVP_AEAD_CTX));
}
// EVP_AEAD_CTX_new allocates and initializes a fresh context for |aead| with
// |key|. Returns NULL on allocation or initialization failure.
EVP_AEAD_CTX *EVP_AEAD_CTX_new(const EVP_AEAD *aead, const uint8_t *key,
                               size_t key_len, size_t tag_len) {
  // |OPENSSL_zalloc| hands back the struct already zeroed, so no explicit
  // |EVP_AEAD_CTX_zero| call is required.
  EVP_AEAD_CTX *ctx = OPENSSL_zalloc(sizeof(EVP_AEAD_CTX));
  if (ctx == NULL) {
    return NULL;
  }
  if (!EVP_AEAD_CTX_init(ctx, aead, key, key_len, tag_len, NULL)) {
    EVP_AEAD_CTX_free(ctx);
    return NULL;
  }
  return ctx;
}
// EVP_AEAD_CTX_free releases |ctx| and its AEAD state. A NULL |ctx| is a
// no-op, mirroring |OPENSSL_free| semantics.
void EVP_AEAD_CTX_free(EVP_AEAD_CTX *ctx) {
  if (ctx != NULL) {
    EVP_AEAD_CTX_cleanup(ctx);
    OPENSSL_free(ctx);
  }
}
// EVP_AEAD_CTX_init binds |aead| to |ctx| with the given key. AEADs that lack
// a direction-agnostic |init| hook must instead be configured with
// |EVP_AEAD_CTX_init_with_direction|. |impl| is ignored.
int EVP_AEAD_CTX_init(EVP_AEAD_CTX *ctx, const EVP_AEAD *aead,
                      const uint8_t *key, size_t key_len, size_t tag_len,
                      ENGINE *impl) {
  if (aead->init == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_NO_DIRECTION_SET);
    ctx->aead = NULL;
    return 0;
  }
  // The direction argument is irrelevant when |aead->init| exists.
  return EVP_AEAD_CTX_init_with_direction(ctx, aead, key, key_len, tag_len,
                                          evp_aead_open);
}
// EVP_AEAD_CTX_init_with_direction binds |aead| to |ctx| for use in
// direction |dir|. |key_len| must exactly equal |aead->key_len|. On any
// failure, |ctx->aead| is left NULL so the context cannot be used by mistake.
int EVP_AEAD_CTX_init_with_direction(EVP_AEAD_CTX *ctx, const EVP_AEAD *aead,
                                     const uint8_t *key, size_t key_len,
                                     size_t tag_len,
                                     enum evp_aead_direction_t dir) {
  SET_DIT_AUTO_RESET;
  if (key_len != aead->key_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_KEY_SIZE);
    ctx->aead = NULL;
    return 0;
  }
  ctx->aead = aead;
  int ok;
  // Prefer the direction-agnostic |init| hook; AEADs whose setup differs per
  // direction only provide |init_with_direction|.
  if (aead->init) {
    ok = aead->init(ctx, key, key_len, tag_len);
  } else {
    ok = aead->init_with_direction(ctx, key, key_len, tag_len, dir);
  }
  if (!ok) {
    ctx->aead = NULL;
  }
  return ok;
}
// EVP_AEAD_CTX_cleanup releases any AEAD-specific state held by |ctx| and
// detaches the AEAD. Safe to call on an uninitialized (NULL-aead) context.
void EVP_AEAD_CTX_cleanup(EVP_AEAD_CTX *ctx) {
  if (ctx->aead != NULL) {
    ctx->aead->cleanup(ctx);
    ctx->aead = NULL;
  }
}
// check_alias returns 1 if |out| is compatible with |in| and 0 otherwise. If
// |in| and |out| alias, we require that |in| == |out|.
static int check_alias(const uint8_t *in, size_t in_len, const uint8_t *out,
                       size_t out_len) {
  // Overlapping buffers are permitted only when they line up exactly.
  return !buffers_alias(in, in_len, out, out_len) || in == out;
}
// EVP_AEAD_CTX_seal encrypts and authenticates |in_len| bytes from |in|,
// authenticates |ad_len| bytes from |ad|, and writes ciphertext followed by
// the tag to |out|. On failure the entire |out| buffer is zeroed so callers
// that ignore the return value never transmit raw data.
int EVP_AEAD_CTX_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
                      size_t max_out_len, const uint8_t *nonce,
                      size_t nonce_len, const uint8_t *in, size_t in_len,
                      const uint8_t *ad, size_t ad_len) {
  SET_DIT_AUTO_RESET;
  // |in_len + overhead| is the worst-case output size; reject if it wraps.
  if (in_len + ctx->aead->overhead < in_len /* overflow */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    goto error;
  }
  if (max_out_len < in_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    goto error;
  }
  if (!check_alias(in, in_len, out, max_out_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT);
    goto error;
  }
  size_t out_tag_len;
  // Ciphertext goes to |out|; the tag is scattered immediately after it.
  if (ctx->aead->seal_scatter(ctx, out, out + in_len, &out_tag_len,
                              max_out_len - in_len, nonce, nonce_len, in,
                              in_len, NULL, 0, ad, ad_len)) {
    *out_len = in_len + out_tag_len;
    return 1;
  }

error:
  // In the event of an error, clear the output buffer so that a caller
  // that doesn't check the return value doesn't send raw data.
  OPENSSL_memset(out, 0, max_out_len);
  *out_len = 0;
  return 0;
}
// EVP_AEAD_CTX_seal_scatter encrypts |in| into |out| and writes the tag
// (which may also cover |extra_in|) into the separate |out_tag| buffer. On
// failure both output buffers are zeroed.
int EVP_AEAD_CTX_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
                              uint8_t *out_tag, size_t *out_tag_len,
                              size_t max_out_tag_len, const uint8_t *nonce,
                              size_t nonce_len, const uint8_t *in,
                              size_t in_len, const uint8_t *extra_in,
                              size_t extra_in_len, const uint8_t *ad,
                              size_t ad_len) {
  SET_DIT_AUTO_RESET;
  // |in| and |out| may alias exactly; |out_tag| may not alias either.
  if (!check_alias(in, in_len, out, in_len) ||
      buffers_alias(out, in_len, out_tag, max_out_tag_len) ||
      buffers_alias(in, in_len, out_tag, max_out_tag_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT);
    goto error;
  }
  // |extra_in| is only meaningful for AEADs that opt into supporting it.
  if (!ctx->aead->seal_scatter_supports_extra_in && extra_in_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    goto error;
  }
  if (ctx->aead->seal_scatter(ctx, out, out_tag, out_tag_len, max_out_tag_len,
                              nonce, nonce_len, in, in_len, extra_in,
                              extra_in_len, ad, ad_len)) {
    return 1;
  }

error:
  // In the event of an error, clear the output buffer so that a caller
  // that doesn't check the return value doesn't send raw data.
  OPENSSL_memset(out, 0, in_len);
  OPENSSL_memset(out_tag, 0, max_out_tag_len);
  *out_tag_len = 0;
  return 0;
}
// EVP_AEAD_CTX_open authenticates and decrypts |in| into |out|. On failure,
// |out| is zeroed so callers that ignore the return value never process
// unauthenticated data.
int EVP_AEAD_CTX_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
                      size_t max_out_len, const uint8_t *nonce,
                      size_t nonce_len, const uint8_t *in, size_t in_len,
                      const uint8_t *ad, size_t ad_len) {
  SET_DIT_AUTO_RESET;
  if (!check_alias(in, in_len, out, max_out_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT);
    goto error;
  }
  // AEADs with a combined |open| implementation handle everything themselves.
  if (ctx->aead->open) {
    if (!ctx->aead->open(ctx, out, out_len, max_out_len, nonce, nonce_len, in,
                         in_len, ad, ad_len)) {
      goto error;
    }
    return 1;
  }
  // AEADs that use the default implementation of open() must set |tag_len| at
  // initialization time.
  assert(ctx->tag_len);
  // The ciphertext must be at least long enough to hold the tag.
  if (in_len < ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    goto error;
  }
  size_t plaintext_len = in_len - ctx->tag_len;
  if (max_out_len < plaintext_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    goto error;
  }
  // Split the input into ciphertext and trailing tag for the gather API.
  if (EVP_AEAD_CTX_open_gather(ctx, out, nonce, nonce_len, in, plaintext_len,
                               in + plaintext_len, ctx->tag_len, ad, ad_len)) {
    *out_len = plaintext_len;
    return 1;
  }

error:
  // In the event of an error, clear the output buffer so that a caller
  // that doesn't check the return value doesn't try and process bad
  // data.
  OPENSSL_memset(out, 0, max_out_len);
  *out_len = 0;
  return 0;
}
// EVP_AEAD_CTX_open_gather authenticates |in| against the separately-supplied
// tag |in_tag| and decrypts it into |out|. Only AEADs providing an
// |open_gather| hook support this entry point. On failure |out| is zeroed.
int EVP_AEAD_CTX_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out,
                             const uint8_t *nonce, size_t nonce_len,
                             const uint8_t *in, size_t in_len,
                             const uint8_t *in_tag, size_t in_tag_len,
                             const uint8_t *ad, size_t ad_len) {
  SET_DIT_AUTO_RESET;
  if (!check_alias(in, in_len, out, in_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT);
    goto error;
  }
  if (!ctx->aead->open_gather) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_CTRL_NOT_IMPLEMENTED);
    goto error;
  }
  if (ctx->aead->open_gather(ctx, out, nonce, nonce_len, in, in_len, in_tag,
                             in_tag_len, ad, ad_len)) {
    return 1;
  }

error:
  // In the event of an error, clear the output buffer so that a caller
  // that doesn't check the return value doesn't try and process bad
  // data.
  OPENSSL_memset(out, 0, in_len);
  return 0;
}
// Returns the AEAD bound to |ctx|, or NULL if none has been configured.
const EVP_AEAD *EVP_AEAD_CTX_aead(const EVP_AEAD_CTX *ctx) { return ctx->aead; }
// EVP_AEAD_CTX_get_iv exposes the AEAD's current IV when the implementation
// supports it. Returns 0 when the AEAD has no |get_iv| hook.
int EVP_AEAD_CTX_get_iv(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv,
                        size_t *out_len) {
  if (ctx->aead->get_iv == NULL) {
    return 0;
  }
  return ctx->aead->get_iv(ctx, out_iv, out_len);
}
// EVP_AEAD_CTX_tag_len computes the tag size that sealing |in_len| bytes with
// |extra_in_len| extra bytes would produce, storing it in |*out_tag_len|.
// Returns 1 on success and 0 if the size would overflow.
int EVP_AEAD_CTX_tag_len(const EVP_AEAD_CTX *ctx, size_t *out_tag_len,
                         const size_t in_len, const size_t extra_in_len) {
  assert(ctx->aead->seal_scatter_supports_extra_in || !extra_in_len);
  // AEADs with a custom tag-length hook compute the value themselves.
  if (ctx->aead->tag_len != NULL) {
    *out_tag_len = ctx->aead->tag_len(ctx, in_len, extra_in_len);
    return 1;
  }
  // Otherwise the tag grows by one byte per byte of |extra_in|; reject a
  // wrapping sum.
  const size_t total = extra_in_len + ctx->tag_len;
  if (total < extra_in_len) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_OVERFLOW);
    *out_tag_len = 0;
    return 0;
  }
  *out_tag_len = total;
  return 1;
}
// EVP_AEAD_get_iv_from_ipv4_nanosecs assembles a FIPS AES-GCM nonce from a
// little-endian IPv4 address followed by a little-endian 64-bit timestamp.
// Returns 0 only if |out_iv| is NULL.
int EVP_AEAD_get_iv_from_ipv4_nanosecs(
    const uint32_t ipv4_address, const uint64_t nanosecs,
    uint8_t out_iv[FIPS_AES_GCM_NONCE_LENGTH]) {
  if (out_iv == NULL) {
    return 0;
  }
  CRYPTO_store_u32_le(out_iv, ipv4_address);
  CRYPTO_store_u64_le(out_iv + sizeof(ipv4_address), nanosecs);
  return 1;
}
#define EVP_AEAD_CTX_SERDE_VERSION 1
// EVP_AEAD_CTX_serialize_state writes |ctx|'s resumable state into |cbb| as
// SEQUENCE { version INTEGER, aead_id INTEGER, state OCTET STRING }. The
// context must be initialized and its AEAD must have a registered
// identifier; AEADs without a |serialize_state| hook emit an empty state.
int EVP_AEAD_CTX_serialize_state(const EVP_AEAD_CTX *ctx, CBB *cbb) {
  // EVP_AEAD_CTX must be initialized by EVP_AEAD_CTX_init first.
  if (!ctx->aead) {
    return 0;
  }

  size_t aead_id = EVP_AEAD_CTX_get_aead_id(ctx);
  // We shouldn't serialize if we don't have a proper identifier
  if (aead_id == AEAD_UNKNOWN_ID) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_MALLOC_FAILURE);
    return 0;
  }

  CBB seq;
  if (!CBB_add_asn1(cbb, &seq, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1_uint64(&seq, EVP_AEAD_CTX_SERDE_VERSION) ||
      !CBB_add_asn1_uint64(&seq, aead_id)) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_MALLOC_FAILURE);
    return 0;
  }

  // |state| is an independent CBB; it must be cleaned up on every exit path
  // after this point.
  CBB state;
  // 50 here is just an initial capacity based on some estimated calculations
  // of the AES GCM state structure encoding with headroom:
  //
  // -- 2 bytes for sequence tag+length
  // AeadAesGCMTls13State ::= SEQUENCE {
  //   -- 2 bytes for tag+length and 8 bytes if a full uint64
  //   serializationVersion AeadAesGCMTls13StateSerializationVersion,
  //   -- 2 bytes for tag+length and 8 bytes if a full uint64
  //   minNextNonce INTEGER,
  //   -- 2 bytes for tag+length and 8 bytes if a full uint64
  //   mask INTEGER,
  //   -- 2 bytes for tag+length and 1 byte
  //   first BOOLEAN
  // }
  if (!CBB_init(&state, 50)) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_MALLOC_FAILURE);
    return 0;
  }

  if (ctx->aead->serialize_state) {
    if (!ctx->aead->serialize_state(ctx, &state)) {
      CBB_cleanup(&state);
      OPENSSL_PUT_ERROR(CIPHER, ERR_R_MALLOC_FAILURE);
      return 0;
    }
  }

  // Copy the accumulated state bytes into the outer sequence.
  if (!CBB_add_asn1_octet_string(&seq, CBB_data(&state), CBB_len(&state))) {
    CBB_cleanup(&state);
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_MALLOC_FAILURE);
    return 0;
  }

  CBB_cleanup(&state);
  return CBB_flush(cbb);
}
// EVP_AEAD_CTX_deserialize_state restores state previously produced by
// |EVP_AEAD_CTX_serialize_state|. |ctx| must already be initialized with the
// same AEAD; the serialized version and AEAD identifier are validated
// against it.
int EVP_AEAD_CTX_deserialize_state(const EVP_AEAD_CTX *ctx, CBS *cbs) {
  // EVP_AEAD_CTX must be initialized by EVP_AEAD_CTX_init first.
  if (!ctx->aead) {
    return 0;
  }

  CBS seq;
  uint64_t version;
  uint64_t aead_id;
  CBS state;

  if (!CBS_get_asn1(cbs, &seq, CBS_ASN1_SEQUENCE)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_SERIALIZATION_INVALID_EVP_AEAD_CTX);
    return 0;
  }
  if (!CBS_get_asn1_uint64(&seq, &version) ||
      version != EVP_AEAD_CTX_SERDE_VERSION) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_SERIALIZATION_INVALID_SERDE_VERSION);
    return 0;
  }
  // The identifier must fit in 16 bits and match the AEAD bound to |ctx|.
  if (!CBS_get_asn1_uint64(&seq, &aead_id) || aead_id > UINT16_MAX ||
      aead_id != EVP_AEAD_CTX_get_aead_id(ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_SERIALIZATION_INVALID_CIPHER_ID);
    return 0;
  }
  if (!CBS_get_asn1(&seq, &state, CBS_ASN1_OCTETSTRING)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_SERIALIZATION_INVALID_EVP_AEAD_CTX);
    return 0;
  }
  // AEADs without per-context state expect an empty OCTET STRING.
  if (!ctx->aead->deserialize_state) {
    return CBS_len(&state) == 0;
  }
  return ctx->aead->deserialize_state(ctx, &state);
}
uint16_t EVP_AEAD_CTX_get_aead_id(const EVP_AEAD_CTX *ctx) {
if (!ctx->aead) {
return AEAD_UNKNOWN_ID;
}
return ctx->aead->aead_id;
}

View File

@@ -0,0 +1,716 @@
// Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) All rights reserved.
// SPDX-License-Identifier: Apache-2.0
#include <openssl/cipher.h>
#include <assert.h>
#include <limits.h>
#include <string.h>
#include <openssl/err.h>
#include <openssl/mem.h>
#include <openssl/nid.h>
#include <openssl/obj.h>
#include "internal.h"
#include "../../internal.h"
// EVP_CIPHER_CTX_init wipes |ctx| and marks it poisoned until a cipher is
// successfully configured through one of the init functions.
void EVP_CIPHER_CTX_init(EVP_CIPHER_CTX *ctx) {
  OPENSSL_memset(ctx, 0, sizeof(*ctx));
  ctx->poisoned = 1;
}
// EVP_CIPHER_CTX_new allocates a zeroed, poisoned context, or returns NULL on
// allocation failure.
EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(void) {
  // |OPENSSL_zalloc| already zeroes the struct, so only the poisoned flag
  // needs setting explicitly.
  EVP_CIPHER_CTX *ret = OPENSSL_zalloc(sizeof(EVP_CIPHER_CTX));
  if (ret == NULL) {
    return NULL;
  }
  ret->poisoned = 1;
  return ret;
}
// EVP_CIPHER_CTX_cleanup releases all state held by |c| and leaves the
// context scrubbed and poisoned. Always returns 1 for a non-NULL |c|.
int EVP_CIPHER_CTX_cleanup(EVP_CIPHER_CTX *c) {
  GUARD_PTR(c);
  // Let the cipher release any internal resources first.
  const EVP_CIPHER *cipher = c->cipher;
  if (cipher != NULL && cipher->cleanup != NULL) {
    cipher->cleanup(c);
  }
  OPENSSL_free(c->cipher_data);
  // Scrub everything, then keep the context poisoned until re-initialized.
  OPENSSL_memset(c, 0, sizeof(*c));
  c->poisoned = 1;
  return 1;
}
// EVP_CIPHER_CTX_free cleans up and releases |ctx|. A NULL |ctx| is a no-op.
void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx) {
  if (ctx) {
    EVP_CIPHER_CTX_cleanup(ctx);
    OPENSSL_free(ctx);
  }
}
// EVP_CIPHER_CTX_copy deep-copies |in| into |out|. |out| is cleaned up first.
// On failure, |out->cipher| is left NULL so the partially-copied context
// cannot be used.
int EVP_CIPHER_CTX_copy(EVP_CIPHER_CTX *out, const EVP_CIPHER_CTX *in) {
  SET_DIT_AUTO_RESET;
  if (in == NULL || in->cipher == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INPUT_NOT_INITIALIZED);
    return 0;
  }
  // A poisoned context is in an indeterminate state and must not be copied.
  if (in->poisoned) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  GUARD_PTR(out);
  EVP_CIPHER_CTX_cleanup(out);
  // After this struct copy |out->cipher_data| aliases |in->cipher_data|; it
  // is replaced with a private copy below when the cipher has state.
  OPENSSL_memcpy(out, in, sizeof(EVP_CIPHER_CTX));

  if (in->cipher_data && in->cipher->ctx_size) {
    out->cipher_data = OPENSSL_memdup(in->cipher_data, in->cipher->ctx_size);
    if (!out->cipher_data) {
      out->cipher = NULL;
      return 0;
    }
  }

  // Ciphers with EVP_CIPH_CUSTOM_COPY finish the copy via their ctrl hook.
  if (in->cipher->flags & EVP_CIPH_CUSTOM_COPY) {
    if (!in->cipher->ctrl((EVP_CIPHER_CTX *)in, EVP_CTRL_COPY, 0, out)) {
      out->cipher = NULL;
      return 0;
    }
  }
  return 1;
}
// EVP_CIPHER_CTX_reset tears |ctx| down and re-initializes it in place.
// Always returns 1.
int EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *ctx) {
  EVP_CIPHER_CTX_cleanup(ctx);
  EVP_CIPHER_CTX_init(ctx);
  return 1;
}
// EVP_CipherInit_ex configures |ctx| for encryption (|enc| == 1), decryption
// (|enc| == 0), or the previously configured direction (|enc| == -1). A
// non-NULL |cipher| resets the context for that cipher; NULL reuses the one
// already installed. |key| and |iv| may be NULL to defer those steps.
// |engine| is ignored.
int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                      ENGINE *engine, const uint8_t *key, const uint8_t *iv,
                      int enc) {
  SET_DIT_AUTO_RESET;
  GUARD_PTR(ctx);
  if (enc == -1) {
    // Keep the direction from the previous initialization.
    enc = ctx->encrypt;
  } else {
    if (enc) {
      enc = 1;  // Normalize any non-zero value to 1.
    }
    ctx->encrypt = enc;
  }

  if (cipher) {
    // Ensure a context left from last time is cleared (the previous check
    // attempted to avoid this if the same ENGINE and EVP_CIPHER could be
    // used).
    if (ctx->cipher) {
      EVP_CIPHER_CTX_cleanup(ctx);
      // Restore encrypt and flags
      ctx->encrypt = enc;
    }

    ctx->cipher = cipher;
    if (ctx->cipher->ctx_size) {
      ctx->cipher_data = OPENSSL_malloc(ctx->cipher->ctx_size);
      if (!ctx->cipher_data) {
        ctx->cipher = NULL;
        return 0;
      }
    } else {
      ctx->cipher_data = NULL;
    }

    ctx->key_len = cipher->key_len;
    ctx->flags = 0;

    // Some ciphers require a control call before any other configuration.
    if (ctx->cipher->flags & EVP_CIPH_CTRL_INIT) {
      if (!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_INIT, 0, NULL)) {
        ctx->cipher = NULL;
        OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INITIALIZATION_ERROR);
        return 0;
      }
    }
  } else if (!ctx->cipher) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_NO_CIPHER_SET);
    return 0;
  }

  // we assume block size is a power of 2 in *cryptUpdate
  assert(ctx->cipher->block_size == 1 || ctx->cipher->block_size == 8 ||
         ctx->cipher->block_size == 16);

  if (!(EVP_CIPHER_CTX_flags(ctx) & EVP_CIPH_CUSTOM_IV)) {
    switch (EVP_CIPHER_CTX_mode(ctx)) {
      case EVP_CIPH_STREAM_CIPHER:
      case EVP_CIPH_ECB_MODE:
        // No IV is used in these modes.
        break;

      case EVP_CIPH_CFB_MODE:
        ctx->num = 0;
        OPENSSL_FALLTHROUGH;

      case EVP_CIPH_CBC_MODE:
        assert(EVP_CIPHER_CTX_iv_length(ctx) <= sizeof(ctx->iv));
        // |oiv| preserves the original IV so the working copy in |iv| can be
        // restored later.
        if (iv) {
          OPENSSL_memcpy(ctx->oiv, iv, EVP_CIPHER_CTX_iv_length(ctx));
        }
        OPENSSL_memcpy(ctx->iv, ctx->oiv, EVP_CIPHER_CTX_iv_length(ctx));
        break;

      case EVP_CIPH_CTR_MODE:
      case EVP_CIPH_OFB_MODE:
        ctx->num = 0;
        // Don't reuse IV for CTR mode
        if (iv) {
          OPENSSL_memcpy(ctx->iv, iv, EVP_CIPHER_CTX_iv_length(ctx));
        }
        break;

      default:
        return 0;
    }
  }

  if (key || (ctx->cipher->flags & EVP_CIPH_ALWAYS_CALL_INIT)) {
    if (!ctx->cipher->init(ctx, key, iv, enc)) {
      return 0;
    }
  }

  ctx->buf_len = 0;
  ctx->final_used = 0;
  // Clear the poisoned flag to permit re-use of a CTX that previously had a
  // failed operation.
  ctx->poisoned = 0;
  return 1;
}
// EVP_EncryptInit_ex configures |ctx| for encryption; see EVP_CipherInit_ex.
int EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                       ENGINE *impl, const uint8_t *key, const uint8_t *iv) {
  return EVP_CipherInit_ex(ctx, cipher, impl, key, iv, 1);
}
// EVP_DecryptInit_ex configures |ctx| for decryption; see EVP_CipherInit_ex.
int EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                       ENGINE *impl, const uint8_t *key, const uint8_t *iv) {
  return EVP_CipherInit_ex(ctx, cipher, impl, key, iv, 0);
}
// block_remainder returns the number of bytes to remove from |len| to get a
// multiple of |ctx|'s block size.
static int block_remainder(const EVP_CIPHER_CTX *ctx, int len) {
  const int bl = ctx->cipher->block_size;
  // |block_size| must be a nonzero power of two for the mask below to work.
  assert(bl != 0);
  assert((bl & (bl - 1)) == 0);
  return len & (bl - 1);
}
// EVP_EncryptUpdate encrypts |in_len| bytes from |in| to |out|, buffering any
// trailing partial block in |ctx->buf|. |*out_len| receives the number of
// bytes written, which can exceed |in_len| by up to one block when buffered
// bytes are flushed.
int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len,
                      const uint8_t *in, int in_len) {
  SET_DIT_AUTO_RESET;
  GUARD_PTR(ctx);
  if (ctx->poisoned) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }
  // If the first call to |cipher| succeeds and the second fails, |ctx| may be
  // left in an indeterminate state. We set a poison flag on failure to ensure
  // callers do not continue to use the object in that case.
  ctx->poisoned = 1;

  // Ciphers that use blocks may write up to |bl| extra bytes. Ensure the output
  // does not overflow |*out_len|.
  GUARD_PTR(ctx->cipher);
  int bl = ctx->cipher->block_size;
  if (bl > 1 && in_len > INT_MAX - bl) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_OVERFLOW);
    return 0;
  }

  // Custom ciphers do their own buffering; |cipher| returns a byte count, or
  // a negative value on error.
  if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
    int ret = ctx->cipher->cipher(ctx, out, in, in_len);
    if (ret < 0) {
      return 0;
    } else {
      *out_len = ret;
    }
    ctx->poisoned = 0;
    return 1;
  }

  // Zero-length input is a successful no-op; negative lengths are an error.
  if (in_len <= 0) {
    *out_len = 0;
    if (in_len == 0) {
      ctx->poisoned = 0;
      return 1;
    }
    return 0;
  }

  // Fast path: nothing buffered and |in_len| is already block-aligned.
  if (ctx->buf_len == 0 && block_remainder(ctx, in_len) == 0) {
    if (ctx->cipher->cipher(ctx, out, in, in_len)) {
      *out_len = in_len;
      ctx->poisoned = 0;
      return 1;
    } else {
      *out_len = 0;
      return 0;
    }
  }

  int i = ctx->buf_len;
  assert(bl <= (int)sizeof(ctx->buf));
  if (i != 0) {
    if (bl - i > in_len) {
      // Still not enough for a full block: keep buffering.
      OPENSSL_memcpy(&ctx->buf[i], in, in_len);
      ctx->buf_len += in_len;
      *out_len = 0;
      ctx->poisoned = 0;
      return 1;
    } else {
      // Complete the buffered block and emit it first.
      int j = bl - i;
      OPENSSL_memcpy(&ctx->buf[i], in, j);
      if (!ctx->cipher->cipher(ctx, out, ctx->buf, bl)) {
        return 0;
      }
      in_len -= j;
      in += j;
      out += bl;
      *out_len = bl;
    }
  } else {
    *out_len = 0;
  }

  // Encrypt the remaining whole blocks and buffer the tail for next time.
  i = block_remainder(ctx, in_len);
  in_len -= i;
  if (in_len > 0) {
    if (!ctx->cipher->cipher(ctx, out, in, in_len)) {
      return 0;
    }
    *out_len += in_len;
  }

  if (i != 0) {
    OPENSSL_memcpy(ctx->buf, &in[in_len], i);
  }
  ctx->buf_len = i;
  ctx->poisoned = 0;
  return 1;
}
// EVP_EncryptFinal_ex emits the final block. With padding disabled, buffered
// bytes are an error; with padding (the default) a full PKCS#7 padding block
// is always written for block ciphers.
int EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) {
  SET_DIT_AUTO_RESET;
  int n;
  unsigned int i, b, bl;
  GUARD_PTR(ctx);
  if (ctx->poisoned) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }
  GUARD_PTR(ctx->cipher);
  if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
    // When EVP_CIPH_FLAG_CUSTOM_CIPHER is set, the return value of |cipher| is
    // the number of bytes written, or -1 on error. Otherwise the return value
    // is one on success and zero on error.
    const int num_bytes = ctx->cipher->cipher(ctx, out, NULL, 0);
    if (num_bytes < 0) {
      return 0;
    }
    *out_len = num_bytes;
    goto out;
  }

  b = ctx->cipher->block_size;
  assert(b <= sizeof(ctx->buf));
  // Stream ciphers (block size one) never buffer; nothing to flush.
  if (b == 1) {
    *out_len = 0;
    goto out;
  }

  bl = ctx->buf_len;
  if (ctx->flags & EVP_CIPH_NO_PADDING) {
    if (bl) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH);
      return 0;
    }
    *out_len = 0;
    goto out;
  }

  // PKCS#7: fill the remainder of the block with the pad length itself.
  n = b - bl;
  for (i = bl; i < b; i++) {
    ctx->buf[i] = n;
  }
  if (!ctx->cipher->cipher(ctx, out, ctx->buf, b)) {
    return 0;
  }
  *out_len = b;

out:
  EVP_Cipher_verify_service_indicator(ctx);
  return 1;
}
// EVP_DecryptUpdate decrypts |in_len| bytes from |in| to |out|. When padding
// is enabled, the last full block of output is withheld in |ctx->final| so
// that |EVP_DecryptFinal_ex| can strip the padding.
int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len,
                      const uint8_t *in, int in_len) {
  SET_DIT_AUTO_RESET;
  GUARD_PTR(ctx);
  if (ctx->poisoned) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }
  // Ciphers that use blocks may write up to |bl| extra bytes. Ensure the output
  // does not overflow |*out_len|.
  GUARD_PTR(ctx->cipher);
  unsigned int b = ctx->cipher->block_size;
  if (b > 1 && in_len > INT_MAX - (int)b) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_OVERFLOW);
    return 0;
  }

  // Custom ciphers do their own buffering; |cipher| returns a byte count, or
  // a negative value on error.
  if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
    int r = ctx->cipher->cipher(ctx, out, in, in_len);
    if (r < 0) {
      *out_len = 0;
      return 0;
    } else {
      *out_len = r;
    }
    return 1;
  }

  if (in_len <= 0) {
    *out_len = 0;
    return in_len == 0;
  }

  // Without padding, decryption is the same buffered block operation as
  // encryption.
  if (ctx->flags & EVP_CIPH_NO_PADDING) {
    return EVP_EncryptUpdate(ctx, out, out_len, in, in_len);
  }

  assert(b <= sizeof(ctx->final));
  int fix_len = 0;
  // Flush the block withheld by the previous call before writing new output.
  if (ctx->final_used) {
    OPENSSL_memcpy(out, ctx->final, b);
    out += b;
    fix_len = 1;
  }

  if (!EVP_EncryptUpdate(ctx, out, out_len, in, in_len)) {
    return 0;
  }

  // if we have 'decrypted' a multiple of block size, make sure
  // we have a copy of this last block
  if (b > 1 && !ctx->buf_len) {
    *out_len -= b;
    ctx->final_used = 1;
    OPENSSL_memcpy(ctx->final, &out[*out_len], b);
  } else {
    ctx->final_used = 0;
  }

  // Account for the previously-withheld block emitted above.
  if (fix_len) {
    *out_len += b;
  }

  return 1;
}
// EVP_DecryptFinal_ex verifies and strips PKCS#7 padding from the final block
// withheld in |ctx->final| and writes any remaining plaintext bytes to |out|.
// WARNING: the padding check assumes the ciphertext was already
// authenticated; otherwise it provides a padding oracle.
int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len) {
  SET_DIT_AUTO_RESET;
  int i, n;
  unsigned int b;
  *out_len = 0;
  GUARD_PTR(ctx);
  // |ctx->cipher->cipher| calls the static aes encryption function way under
  // the hood instead of |EVP_Cipher|, so the service indicator does not need
  // locking here.
  if (ctx->poisoned) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }
  GUARD_PTR(ctx->cipher);
  if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
    i = ctx->cipher->cipher(ctx, out, NULL, 0);
    if (i < 0) {
      return 0;
    } else {
      *out_len = i;
    }
    goto out;
  }

  b = ctx->cipher->block_size;
  // Without padding there must be no buffered partial block.
  if (ctx->flags & EVP_CIPH_NO_PADDING) {
    if (ctx->buf_len) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH);
      return 0;
    }
    *out_len = 0;
    goto out;
  }

  if (b > 1) {
    // A full final block must have been withheld by |EVP_DecryptUpdate|.
    if (ctx->buf_len || !ctx->final_used) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_WRONG_FINAL_BLOCK_LENGTH);
      return 0;
    }
    assert(b <= sizeof(ctx->final));

    // The following assumes that the ciphertext has been authenticated.
    // Otherwise it provides a padding oracle.
    n = ctx->final[b - 1];
    if (n == 0 || n > (int)b) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }

    // Every padding byte must equal the padding length.
    for (i = 0; i < n; i++) {
      if (ctx->final[--b] != n) {
        OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
        return 0;
      }
    }

    n = ctx->cipher->block_size - n;
    for (i = 0; i < n; i++) {
      out[i] = ctx->final[i];
    }
    *out_len = n;
  } else {
    *out_len = 0;
  }

out:
  EVP_Cipher_verify_service_indicator(ctx);
  return 1;
}
// EVP_Cipher invokes the low-level |cipher| hook directly. For ciphers with
// EVP_CIPH_FLAG_CUSTOM_CIPHER, the return value is a byte count (or -1 on
// error); otherwise it is one on success and zero on failure.
int EVP_Cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
               size_t in_len) {
  SET_DIT_AUTO_RESET;
  GUARD_PTR(ctx);
  GUARD_PTR(ctx->cipher);
  const int ret = ctx->cipher->cipher(ctx, out, in, in_len);

  // |EVP_CIPH_FLAG_CUSTOM_CIPHER| never sets the FIPS indicator via
  // |EVP_Cipher| because it's complicated whether the operation has completed
  // or not. E.g. AES-GCM with a non-NULL |in| argument hasn't completed an
  // operation. Callers should use the |EVP_AEAD| API or, at least,
  // |EVP_CipherUpdate| etc. AES-KeyWrap users should use the |AES_wrap_key|
  // API instead.
  //
  // This call can't be pushed into |EVP_Cipher_verify_service_indicator|
  // because whether |ret| indicates success or not depends on whether
  // |EVP_CIPH_FLAG_CUSTOM_CIPHER| is set. (This is unreasonable, but matches
  // OpenSSL.)
  if (!(ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) && ret) {
    EVP_Cipher_verify_service_indicator(ctx);
  }
  return ret;
}
// EVP_CipherUpdate dispatches to the encrypt or decrypt update function based
// on the direction recorded at init time.
int EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len,
                     const uint8_t *in, int in_len) {
  GUARD_PTR(ctx);
  return ctx->encrypt ? EVP_EncryptUpdate(ctx, out, out_len, in, in_len)
                      : EVP_DecryptUpdate(ctx, out, out_len, in, in_len);
}
// EVP_CipherFinal_ex dispatches to the encrypt or decrypt finalizer based on
// the direction recorded at init time.
int EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) {
  GUARD_PTR(ctx);
  return ctx->encrypt ? EVP_EncryptFinal_ex(ctx, out, out_len)
                      : EVP_DecryptFinal_ex(ctx, out, out_len);
}
// Returns the cipher configured on |ctx|, or NULL if none is set.
const EVP_CIPHER *EVP_CIPHER_CTX_cipher(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher;
}
// Returns the NID of the configured cipher. |ctx| must have a cipher set.
int EVP_CIPHER_CTX_nid(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher->nid;
}
// Returns one if |ctx| is configured for encryption and zero for decryption.
int EVP_CIPHER_CTX_encrypting(const EVP_CIPHER_CTX *ctx) {
  return ctx->encrypt;
}
// Returns the block size, in bytes, of the configured cipher.
unsigned EVP_CIPHER_CTX_block_size(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher->block_size;
}
// Returns the key length currently configured on |ctx| (which may differ from
// the cipher's default for variable-length ciphers).
unsigned EVP_CIPHER_CTX_key_length(const EVP_CIPHER_CTX *ctx) {
  return ctx->key_len;
}
// EVP_CIPHER_CTX_iv_length returns the effective IV/nonce length. GCM and
// CCM allow the length to be changed at runtime, so those modes are queried
// through the ctrl interface rather than the cipher's static |iv_len|.
unsigned EVP_CIPHER_CTX_iv_length(const EVP_CIPHER_CTX *ctx) {
  if (EVP_CIPHER_mode(ctx->cipher) == EVP_CIPH_GCM_MODE ||
      EVP_CIPHER_mode(ctx->cipher) == EVP_CIPH_CCM_MODE) {
    int length;
    int res = EVP_CIPHER_CTX_ctrl((EVP_CIPHER_CTX *)ctx, EVP_CTRL_GET_IVLEN, 0,
                                  &length);
    // EVP_CIPHER_CTX_ctrl returning an error should be impossible under this
    // circumstance. If it somehow did, fallback to the static cipher iv_len.
    if (res == 1) {
      return length;
    }
  }
  return ctx->cipher->iv_len;
}
// Returns the opaque application pointer previously stored on |ctx|.
void *EVP_CIPHER_CTX_get_app_data(const EVP_CIPHER_CTX *ctx) {
  return ctx->app_data;
}
// Stores an opaque application pointer on |ctx|; the library never reads it.
void EVP_CIPHER_CTX_set_app_data(EVP_CIPHER_CTX *ctx, void *data) {
  ctx->app_data = data;
}
// Returns the configured cipher's flags with the mode bits masked out.
uint32_t EVP_CIPHER_CTX_flags(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher->flags & ~EVP_CIPH_MODE_MASK;
}
// Returns the configured cipher's mode (the EVP_CIPH_*_MODE bits).
uint32_t EVP_CIPHER_CTX_mode(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher->flags & EVP_CIPH_MODE_MASK;
}
// EVP_CIPHER_CTX_ctrl forwards |command| to the cipher's ctrl hook. Returns
// zero (with an error queued) when no cipher or hook is configured, or when
// the hook reports the command as unimplemented.
int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int command, int arg, void *ptr) {
  if (ctx->cipher == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_NO_CIPHER_SET);
    return 0;
  }
  if (ctx->cipher->ctrl == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_CTRL_NOT_IMPLEMENTED);
    return 0;
  }

  const int ret = ctx->cipher->ctrl(ctx, command, arg, ptr);
  // A return of -1 means the cipher has a ctrl hook but does not recognize
  // this particular |command|.
  if (ret == -1) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_CTRL_OPERATION_NOT_IMPLEMENTED);
    return 0;
  }
  return ret;
}
// EVP_CIPHER_CTX_set_padding enables (|pad| non-zero) or disables PKCS#7
// padding on |ctx|. Always returns 1.
int EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int pad) {
  if (pad) {
    ctx->flags &= ~EVP_CIPH_NO_PADDING;
  } else {
    ctx->flags |= EVP_CIPH_NO_PADDING;
  }
  return 1;
}
// EVP_CIPHER_CTX_set_key_length changes the key length on |c|. Setting the
// current length always succeeds; other values require a variable-length
// cipher and must be non-zero.
int EVP_CIPHER_CTX_set_key_length(EVP_CIPHER_CTX *c, unsigned key_len) {
  if (key_len == c->key_len) {
    return 1;
  }
  const int variable = (c->cipher->flags & EVP_CIPH_VARIABLE_LENGTH) != 0;
  if (key_len == 0 || !variable) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_KEY_LENGTH);
    return 0;
  }
  c->key_len = key_len;
  return 1;
}
// Returns |cipher|'s NID, or 0 when |cipher| is NULL.
int EVP_CIPHER_nid(const EVP_CIPHER *cipher) {
  return cipher != NULL ? cipher->nid : 0;
}
// Returns |cipher|'s block size in bytes, or 0 when |cipher| is NULL.
unsigned EVP_CIPHER_block_size(const EVP_CIPHER *cipher) {
  return cipher != NULL ? cipher->block_size : 0;
}
// Returns |cipher|'s default key length in bytes, or 0 when |cipher| is NULL.
unsigned EVP_CIPHER_key_length(const EVP_CIPHER *cipher) {
  return cipher != NULL ? cipher->key_len : 0;
}
// Returns |cipher|'s default IV length in bytes, or 0 when |cipher| is NULL.
unsigned EVP_CIPHER_iv_length(const EVP_CIPHER *cipher) {
  return cipher != NULL ? cipher->iv_len : 0;
}
// Returns |cipher|'s flags with the mode bits masked out.
// NOTE(review): unlike the accessors above, this does not tolerate a NULL
// |cipher| — confirm callers never pass NULL before adding a guard.
uint32_t EVP_CIPHER_flags(const EVP_CIPHER *cipher) {
  return cipher->flags & ~EVP_CIPH_MODE_MASK;
}
// Returns |cipher|'s mode (the EVP_CIPH_*_MODE bits). |cipher| must be
// non-NULL.
uint32_t EVP_CIPHER_mode(const EVP_CIPHER *cipher) {
  return cipher->flags & EVP_CIPH_MODE_MASK;
}
// Returns the short name for |cipher|'s NID, or NULL when |cipher| is NULL.
const char *EVP_CIPHER_name(const EVP_CIPHER *cipher) {
  return cipher != NULL ? OBJ_nid2sn(cipher->nid) : NULL;
}
// EVP_CipherInit is the legacy init entry point. Unlike the |_ex| variant, it
// re-initializes |ctx| from scratch when a new cipher is supplied.
int EVP_CipherInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                   const uint8_t *key, const uint8_t *iv, int enc) {
  if (cipher != NULL) {
    EVP_CIPHER_CTX_init(ctx);
  }
  return EVP_CipherInit_ex(ctx, cipher, NULL, key, iv, enc);
}
// EVP_EncryptInit is the legacy encryption init; see EVP_CipherInit.
int EVP_EncryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                    const uint8_t *key, const uint8_t *iv) {
  return EVP_CipherInit(ctx, cipher, key, iv, 1);
}
// EVP_DecryptInit is the legacy decryption init; see EVP_CipherInit.
int EVP_DecryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                    const uint8_t *key, const uint8_t *iv) {
  return EVP_CipherInit(ctx, cipher, key, iv, 0);
}
// Legacy alias for EVP_CipherFinal_ex.
int EVP_CipherFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) {
  return EVP_CipherFinal_ex(ctx, out, out_len);
}
// Legacy alias for EVP_EncryptFinal_ex.
int EVP_EncryptFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) {
  return EVP_EncryptFinal_ex(ctx, out, out_len);
}
// Legacy alias for EVP_DecryptFinal_ex.
int EVP_DecryptFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) {
  return EVP_DecryptFinal_ex(ctx, out, out_len);
}
// EVP_add_cipher_alias is a compatibility no-op: both arguments are ignored
// and success is always reported.
int EVP_add_cipher_alias(const char *a, const char *b) {
  return 1;
}
// EVP_CIPHER_CTX_set_flags is a deliberate no-op kept for API compatibility.
void EVP_CIPHER_CTX_set_flags(const EVP_CIPHER_CTX *ctx, uint32_t flags) {}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,727 @@
// Copyright (c) 2008 The OpenSSL Project. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
#include <openssl/aead.h>
#include <assert.h>
#include <openssl/cipher.h>
#include <openssl/err.h>
#include <openssl/mem.h>
#include "../delocate.h"
#include "../modes/internal.h"
#include "../service_indicator/internal.h"
#include "internal.h"
#define EVP_AEAD_AES_CCM_MIN_TAG_LEN 4
#define EVP_AEAD_AES_CCM_MAX_TAG_LEN 16
#define CCM_MAX_NONCE_LEN 13
// Parameters and block-cipher hooks for one CCM instantiation. |M| is the
// tag length and |L| the width of the message-length field, per RFC 3610.
typedef struct ccm128_context {
  block128_f block;
  ctr128_f ctr;
  uint32_t M, L;
} CCM128_CTX;

// Per-operation scratch state: the counter/nonce block and the running
// CBC-MAC, each 16-byte aligned for the block-cipher routines.
typedef struct ccm128_state {
  alignas(16) uint8_t nonce[16];
  alignas(16) uint8_t cmac[16];
} CCM128_STATE;

typedef struct cipher_aes_ccm_ctx {
  union {
    uint64_t align;
    AES_KEY ks;
  } ks;  // AES key schedule to use
  CCM128_CTX ccm;
  CCM128_STATE ccm_state;
  // Boolean flags tracking which pieces of state have been configured.
  uint8_t key_set;
  uint8_t iv_set;
  uint8_t tag_set;
  uint8_t len_set;
  uint8_t ccm_set;
  // L and M parameters from RFC3610
  uint32_t L;  // Number of octets in length field
  uint32_t M;  // Number of octets in authentication field
  size_t message_len;
  uint8_t tag[EVP_AEAD_AES_CCM_MAX_TAG_LEN];
  uint8_t nonce[CCM_MAX_NONCE_LEN];
} CIPHER_AES_CCM_CTX;

// The "inner" CCM128_CTX struct within a CIPHER_AES_CCM_CTX
#define CCM_INNER_CTX(ccm_ctx) (&ccm_ctx->ccm)
// The CCM128 state struct within a CIPHER_AES_CCM_CTX
#define CCM_INNER_STATE(ccm_ctx) (&ccm_ctx->ccm_state)
// As per RFC3610, the nonce length in bytes is 15 - L.
#define CCM_L_TO_NONCE_LEN(L) (15 - (L))
// CRYPTO_ccm128_init validates the RFC 3610 parameters and records them in
// |ctx|. Returns 0 if |M| (the tag length) is not an even value in
// [EVP_AEAD_AES_CCM_MIN_TAG_LEN, EVP_AEAD_AES_CCM_MAX_TAG_LEN] or |L| (the
// length-field width) is outside [2, 8].
static int CRYPTO_ccm128_init(struct ccm128_context *ctx, block128_f block,
                              ctr128_f ctr, unsigned M, unsigned L) {
  const int m_ok = M >= EVP_AEAD_AES_CCM_MIN_TAG_LEN &&
                   M <= EVP_AEAD_AES_CCM_MAX_TAG_LEN && (M & 1) == 0;
  const int l_ok = L >= 2 && L <= 8;
  if (!m_ok || !l_ok) {
    return 0;
  }
  // Only overwrite the function pointers when new ones are supplied.
  if (block != NULL) {
    ctx->block = block;
  }
  if (ctr != NULL) {
    ctx->ctr = ctr;
  }
  ctx->M = M;
  ctx->L = L;
  return 1;
}
// CRYPTO_ccm128_max_input returns the longest message an L-byte length field
// can describe: 2^(8L) - 1 bytes, capped at SIZE_MAX when the field is at
// least as wide as size_t.
static size_t CRYPTO_ccm128_max_input(const struct ccm128_context *ctx) {
  if (ctx->L >= sizeof(size_t)) {
    return SIZE_MAX;
  }
  return (((size_t)1) << (ctx->L * 8)) - 1;
}
// ccm128_init_state assembles the initial counter block and runs the CBC-MAC
// over the length encoding and |aad|, leaving |state| ready for
// |ccm128_compute_mac| and |ccm128_encrypt|. Returns 0 if the nonce length
// does not match |ctx->L|, the plaintext is too long for an L-byte length
// field, or the RFC 3610 block-operation budget would be exceeded.
static int ccm128_init_state(const struct ccm128_context *ctx,
                             struct ccm128_state *state, const AES_KEY *key,
                             const uint8_t *nonce, size_t nonce_len,
                             const uint8_t *aad, size_t aad_len,
                             size_t plaintext_len) {
  const block128_f block = ctx->block;
  const uint32_t M = ctx->M;
  const uint32_t L = ctx->L;

  // |L| determines the expected |nonce_len| and the limit for |plaintext_len|.
  if (plaintext_len > CRYPTO_ccm128_max_input(ctx) ||
      nonce_len != CCM_L_TO_NONCE_LEN(L)) {
    return 0;
  }

  // Assemble the first block for computing the MAC.
  OPENSSL_memset(state, 0, sizeof(*state));
  state->nonce[0] = (uint8_t)((L - 1) | ((M - 2) / 2) << 3);
  if (aad_len != 0) {
    state->nonce[0] |= 0x40;  // Set AAD Flag
  }
  OPENSSL_memcpy(&state->nonce[1], nonce, nonce_len);

  // Explicitly cast plaintext_len up to 64-bits so that we don't shift out of
  // bounds on 32-bit machines when encoding the message length.
  uint64_t plaintext_len_64 = plaintext_len;
  for (uint32_t i = 0; i < L; i++) {
    state->nonce[15 - i] = (uint8_t)(plaintext_len_64 >> (8 * i));
  }

  (*block)(state->nonce, state->cmac, key);
  size_t blocks = 1;

  if (aad_len != 0) {
    unsigned i;
    // The AAD is prefixed with its length per RFC 3610 section 2.2: two bytes
    // for short AAD, six bytes with an 0xff 0xfe marker up to 2^32 - 1 bytes,
    // and ten bytes with an 0xff 0xff marker beyond that.
    // Cast to u64 to avoid the compiler complaining about invalid shifts.
    uint64_t aad_len_u64 = aad_len;
    if (aad_len_u64 < 0x10000 - 0x100) {
      state->cmac[0] ^= (uint8_t)(aad_len_u64 >> 8);
      state->cmac[1] ^= (uint8_t)aad_len_u64;
      i = 2;
    } else if (aad_len_u64 <= 0xffffffff) {
      state->cmac[0] ^= 0xff;
      state->cmac[1] ^= 0xfe;
      state->cmac[2] ^= (uint8_t)(aad_len_u64 >> 24);
      state->cmac[3] ^= (uint8_t)(aad_len_u64 >> 16);
      state->cmac[4] ^= (uint8_t)(aad_len_u64 >> 8);
      state->cmac[5] ^= (uint8_t)aad_len_u64;
      i = 6;
    } else {
      state->cmac[0] ^= 0xff;
      state->cmac[1] ^= 0xff;
      state->cmac[2] ^= (uint8_t)(aad_len_u64 >> 56);
      state->cmac[3] ^= (uint8_t)(aad_len_u64 >> 48);
      state->cmac[4] ^= (uint8_t)(aad_len_u64 >> 40);
      state->cmac[5] ^= (uint8_t)(aad_len_u64 >> 32);
      state->cmac[6] ^= (uint8_t)(aad_len_u64 >> 24);
      state->cmac[7] ^= (uint8_t)(aad_len_u64 >> 16);
      state->cmac[8] ^= (uint8_t)(aad_len_u64 >> 8);
      state->cmac[9] ^= (uint8_t)aad_len_u64;
      i = 10;
    }

    // CBC-MAC the AAD itself, 16 bytes at a time, zero-padding the tail.
    do {
      for (; i < 16 && aad_len != 0; i++) {
        state->cmac[i] ^= *aad;
        aad++;
        aad_len--;
      }
      (*block)(state->cmac, state->cmac, key);
      blocks++;
      i = 0;
    } while (aad_len != 0);
  }

  // Per RFC 3610, section 2.6, the total number of block cipher operations done
  // must not exceed 2^61. There are two block cipher operations remaining per
  // message block, plus one block at the end to encrypt the MAC.
  size_t remaining_blocks = 2 * ((plaintext_len + 15) / 16) + 1;
  if (plaintext_len + 15 < plaintext_len ||
      remaining_blocks + blocks < blocks ||
      (uint64_t) remaining_blocks + blocks > UINT64_C(1) << 61) {
    return 0;
  }

  // Assemble the first block for encrypting and decrypting. The bottom |L|
  // bytes are replaced with a counter and all but the encoding of |L| is
  // cleared in the first byte.
  state->nonce[0] &= 7;
  return 1;
}
// ccm128_encrypt CTR-encrypts (equivalently, decrypts) |len| bytes from |in|
// to |out| using the counter block prepared by |ccm128_init_state|.
static int ccm128_encrypt(const struct ccm128_context *ctx,
                          struct ccm128_state *state, const AES_KEY *key,
                          uint8_t *out, const uint8_t *in, size_t len) {
  // The counter for encryption begins at one.
  for (unsigned i = 0; i < ctx->L; i++) {
    state->nonce[15 - i] = 0;
  }
  state->nonce[15] = 1;

  uint8_t partial_buf[16];
  unsigned num = 0;
  // Use the 32-bit-counter fast path when the cipher provides one.
  if (ctx->ctr != NULL) {
    CRYPTO_ctr128_encrypt_ctr32(in, out, len, key, state->nonce, partial_buf,
                                &num, ctx->ctr);
  } else {
    CRYPTO_ctr128_encrypt(in, out, len, key, state->nonce, partial_buf, &num,
                          ctx->block);
  }
  return 1;
}
// ccm128_compute_mac folds |len| bytes of |in| into the running CBC-MAC in
// |state->cmac|, encrypts the MAC with counter zero and writes |tag_len|
// bytes of it to |out_tag|. |tag_len| must equal the configured |M|.
// Returns 1 on success, 0 if |tag_len| is wrong.
static int ccm128_compute_mac(const struct ccm128_context *ctx,
                              struct ccm128_state *state, const AES_KEY *key,
                              uint8_t *out_tag, size_t tag_len,
                              const uint8_t *in, size_t len) {
  block128_f block = ctx->block;
  if (tag_len != ctx->M) {
    return 0;
  }

  // Absorb whole 16-byte blocks of |in| into the CBC-MAC.
  for (; len >= 16; in += 16, len -= 16) {
    CRYPTO_xor16(state->cmac, state->cmac, in);
    (*block)(state->cmac, state->cmac, key);
  }

  // A trailing partial block is XORed in, implicitly zero-padded.
  if (len != 0) {
    for (size_t i = 0; i < len; i++) {
      state->cmac[i] ^= in[i];
    }
    (*block)(state->cmac, state->cmac, key);
  }

  // Encrypt the MAC with counter zero (zero the bottom |L| nonce bytes).
  OPENSSL_memset(&state->nonce[16 - ctx->L], 0, ctx->L);
  alignas(16) uint8_t enc_ctr[16];
  (*block)(state->nonce, enc_ctr, key);
  CRYPTO_xor16(state->cmac, state->cmac, enc_ctr);

  OPENSSL_memcpy(out_tag, state->cmac, tag_len);
  return 1;
}
// CRYPTO_ccm128_encrypt seals |len| bytes from |in|: the CBC-MAC is computed
// over the plaintext first, then the plaintext is CTR-encrypted into |out|
// and the encrypted tag written to |out_tag|. Returns 1 on success.
static int CRYPTO_ccm128_encrypt(const struct ccm128_context *ctx,
                                 const AES_KEY *key, uint8_t *out,
                                 uint8_t *out_tag, size_t tag_len,
                                 const uint8_t *nonce, size_t nonce_len,
                                 const uint8_t *in, size_t len,
                                 const uint8_t *aad, size_t aad_len) {
  struct ccm128_state state;
  if (!ccm128_init_state(ctx, &state, key, nonce, nonce_len, aad, aad_len,
                         len)) {
    return 0;
  }
  if (!ccm128_compute_mac(ctx, &state, key, out_tag, tag_len, in, len)) {
    return 0;
  }
  return ccm128_encrypt(ctx, &state, key, out, in, len);
}
// CRYPTO_ccm128_decrypt opens |len| bytes from |in|: the ciphertext is first
// CTR-decrypted into |out|, then the CBC-MAC is computed over the recovered
// plaintext and written to |out_tag|. The caller is responsible for
// comparing the tag. Returns 1 on success.
static int CRYPTO_ccm128_decrypt(const struct ccm128_context *ctx,
                                 const AES_KEY *key, uint8_t *out,
                                 uint8_t *out_tag, size_t tag_len,
                                 const uint8_t *nonce, size_t nonce_len,
                                 const uint8_t *in, size_t len,
                                 const uint8_t *aad, size_t aad_len) {
  struct ccm128_state state;
  if (!ccm128_init_state(ctx, &state, key, nonce, nonce_len, aad, aad_len,
                         len)) {
    return 0;
  }
  // n.b. ccm128_encrypt is its own inverse, so it decrypts here.
  if (!ccm128_encrypt(ctx, &state, key, out, in, len)) {
    return 0;
  }
  return ccm128_compute_mac(ctx, &state, key, out_tag, tag_len, out, len);
}
// aead_aes_ccm_ctx is the per-key state for the AES-CCM AEADs: the expanded
// AES key followed by the CCM parameters. It is stored inline in
// |EVP_AEAD_CTX::state|, hence the size/alignment assertions below.
struct aead_aes_ccm_ctx {
  union {
    double align;  // forces at least 8-byte alignment of the union
    AES_KEY ks;
  } ks;
  struct ccm128_context ccm;
};

// The context must fit within, and be no more aligned than, the generic
// |EVP_AEAD_CTX| state buffer it is placed into.
OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
                          sizeof(struct aead_aes_ccm_ctx),
                      AEAD_state_is_too_small)
OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >=
                          alignof(struct aead_aes_ccm_ctx),
                      AEAD_state_has_insufficient_alignment)
// aead_aes_ccm_init initialises |ctx| as an AES-CCM AEAD with tag length |M|
// bytes and length-field size |L| bytes (so the nonce is 15 - |L| bytes).
// Returns 1 on success and 0 on error.
static int aead_aes_ccm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                             size_t key_len, size_t tag_len, unsigned M,
                             unsigned L) {
  // The per-AEAD method tables must agree with the requested parameters.
  assert(M == EVP_AEAD_max_overhead(ctx->aead));
  assert(M == EVP_AEAD_max_tag_len(ctx->aead));
  assert(CCM_L_TO_NONCE_LEN(L) == EVP_AEAD_nonce_length(ctx->aead));

  if (key_len != EVP_AEAD_key_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;  // EVP_AEAD_CTX_init should catch this.
  }

  if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    tag_len = M;
  }

  // These AEADs do not support truncated tags: the tag length must be
  // exactly |M|. (The TAG_TOO_LARGE error is also reported when the
  // requested tag is too small.)
  if (tag_len != M) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE);
    return 0;
  }

  struct aead_aes_ccm_ctx *ccm_ctx = (struct aead_aes_ccm_ctx *)&ctx->state;

  block128_f block;
  ctr128_f ctr = aes_ctr_set_key(&ccm_ctx->ks.ks, NULL, &block, key, key_len);
  ctx->tag_len = tag_len;
  if (!CRYPTO_ccm128_init(&ccm_ctx->ccm, block, ctr, M, L)) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_INTERNAL_ERROR);
    return 0;
  }

  return 1;
}
// aead_aes_ccm_cleanup is a no-op: all state lives inline in |ctx->state|
// and owns no heap allocations.
static void aead_aes_ccm_cleanup(EVP_AEAD_CTX *ctx) {}
// aead_aes_ccm_seal_scatter encrypts |in_len| bytes from |in| into |out| and
// writes the CCM tag to |out_tag|. |extra_in|/|extra_in_len| are unused: the
// CCM method tables set |seal_scatter_supports_extra_in| to 0. Returns 1 on
// success, 0 on error with a reason pushed to the error queue.
static int aead_aes_ccm_seal_scatter(
    const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
    size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in,
    size_t extra_in_len, const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_ccm_ctx *ccm_ctx =
      (struct aead_aes_ccm_ctx *)&ctx->state;

  // CCM encodes the message length in |L| bytes, capping the input size.
  if (in_len > CRYPTO_ccm128_max_input(&ccm_ctx->ccm)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_tag_len < ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (!CRYPTO_ccm128_encrypt(&ccm_ctx->ccm, &ccm_ctx->ks.ks, out, out_tag,
                             ctx->tag_len, nonce, nonce_len, in, in_len, ad,
                             ad_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  *out_tag_len = ctx->tag_len;
  // FIPS service-indicator hook; a no-op in non-FIPS builds.
  AEAD_CCM_verify_service_indicator(ctx);
  return 1;
}
// aead_aes_ccm_open_gather decrypts |in_len| bytes from |in| into |out| and
// verifies that the CCM tag over the recovered plaintext equals |in_tag|.
// On tag mismatch it fails; note the plaintext has already been written to
// |out| by then, as is usual for one-shot open_gather implementations.
// Returns 1 on success, 0 on error.
static int aead_aes_ccm_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                    const uint8_t *nonce, size_t nonce_len,
                                    const uint8_t *in, size_t in_len,
                                    const uint8_t *in_tag, size_t in_tag_len,
                                    const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_ccm_ctx *ccm_ctx =
      (struct aead_aes_ccm_ctx *)&ctx->state;

  if (in_len > CRYPTO_ccm128_max_input(&ccm_ctx->ccm)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  // Only full-length tags are accepted.
  if (in_tag_len != ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  uint8_t tag[EVP_AEAD_AES_CCM_MAX_TAG_LEN];
  assert(ctx->tag_len <= EVP_AEAD_AES_CCM_MAX_TAG_LEN);
  if (!CRYPTO_ccm128_decrypt(&ccm_ctx->ccm, &ccm_ctx->ks.ks, out, tag,
                             ctx->tag_len, nonce, nonce_len, in, in_len, ad,
                             ad_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  // Constant-time tag comparison to avoid leaking match position.
  if (CRYPTO_memcmp(tag, in_tag, ctx->tag_len) != 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  // FIPS service-indicator hook; a no-op in non-FIPS builds.
  AEAD_CCM_verify_service_indicator(ctx);
  return 1;
}
// aead_aes_ccm_bluetooth_init configures the Bluetooth CCM variant:
// a 4-byte tag (M) and a 2-byte length field (L), i.e. a 13-byte nonce.
static int aead_aes_ccm_bluetooth_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                       size_t key_len, size_t tag_len) {
  return aead_aes_ccm_init(ctx, key, key_len, tag_len, /*M=*/4, /*L=*/2);
}
DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_ccm_bluetooth) {
  // AES-128-CCM as used by Bluetooth: 13-byte nonce, 4-byte tag.
  memset(out, 0, sizeof(EVP_AEAD));

  out->aead_id = AEAD_AES_128_CCM_BLUETOOTH_ID;
  out->key_len = 16;
  out->nonce_len = 13;
  out->overhead = 4;
  out->max_tag_len = 4;
  out->seal_scatter_supports_extra_in = 0;

  out->init = aead_aes_ccm_bluetooth_init;
  out->cleanup = aead_aes_ccm_cleanup;
  out->seal_scatter = aead_aes_ccm_seal_scatter;
  out->open_gather = aead_aes_ccm_open_gather;
}
// aead_aes_ccm_bluetooth_8_init configures the 8-byte-tag Bluetooth CCM
// variant: M = 8, L = 2 (13-byte nonce).
static int aead_aes_ccm_bluetooth_8_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                         size_t key_len, size_t tag_len) {
  return aead_aes_ccm_init(ctx, key, key_len, tag_len, /*M=*/8, /*L=*/2);
}
DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_ccm_bluetooth_8) {
  // AES-128-CCM, Bluetooth variant with an 8-byte tag.
  memset(out, 0, sizeof(EVP_AEAD));

  out->aead_id = AEAD_AES_128_CCM_BLUETOOTH_8_ID;
  out->key_len = 16;
  out->nonce_len = 13;
  out->overhead = 8;
  out->max_tag_len = 8;
  out->seal_scatter_supports_extra_in = 0;

  out->init = aead_aes_ccm_bluetooth_8_init;
  out->cleanup = aead_aes_ccm_cleanup;
  out->seal_scatter = aead_aes_ccm_seal_scatter;
  out->open_gather = aead_aes_ccm_open_gather;
}
// aead_aes_ccm_matter_init configures the Matter CCM variant:
// M = 16 (full-size tag), L = 2 (13-byte nonce).
static int aead_aes_ccm_matter_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                    size_t key_len, size_t tag_len) {
  return aead_aes_ccm_init(ctx, key, key_len, tag_len, /*M=*/16, /*L=*/2);
}
DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_ccm_matter) {
  // AES-128-CCM as used by the Matter protocol: 13-byte nonce, 16-byte tag.
  memset(out, 0, sizeof(EVP_AEAD));

  out->aead_id = AEAD_AES_128_CCM_MATTER_ID;
  out->key_len = 16;
  out->nonce_len = 13;
  out->overhead = 16;
  out->max_tag_len = 16;
  out->seal_scatter_supports_extra_in = 0;

  out->init = aead_aes_ccm_matter_init;
  out->cleanup = aead_aes_ccm_cleanup;
  out->seal_scatter = aead_aes_ccm_seal_scatter;
  out->open_gather = aead_aes_ccm_open_gather;
}
#if defined(OPENSSL_32_BIT)
#define CIPHER_AES_CCM_CTX_PADDING (4 + 8)
#else
#define CIPHER_AES_CCM_CTX_PADDING 8
#endif
// This is the same handling as EVP_AES_GCM_CTX which is also a context
// that is 16-byte aligned.
// TODO: possibly refactor the code instead of repeating it from e_aes.c
// aes_ccm_from_cipher_ctx returns the 16-byte-aligned CIPHER_AES_CCM_CTX
// within |ctx->cipher_data|. |ctx_size| includes CIPHER_AES_CCM_CTX_PADDING
// bytes of slack so the pointer can be bumped up to the next 16-byte
// boundary without overrunning the allocation.
static CIPHER_AES_CCM_CTX *aes_ccm_from_cipher_ctx(EVP_CIPHER_CTX *ctx) {
  OPENSSL_STATIC_ASSERT(
      alignof(CIPHER_AES_CCM_CTX) <= 16,
      EVP_AES_CCM_CTX_needs_more_alignment_than_this_function_provides)

  // |malloc| guarantees up to 4-byte alignment on 32-bit and 8-byte alignment
  // on 64-bit systems, so we need to adjust to reach 16-byte alignment.
  assert(ctx->cipher->ctx_size ==
         sizeof(CIPHER_AES_CCM_CTX) + CIPHER_AES_CCM_CTX_PADDING);

  char *ptr = ctx->cipher_data;
#if defined(OPENSSL_32_BIT)
  assert((uintptr_t)ptr % 4 == 0);
  // If bit 2 is set, adding 4 clears it, producing 8-byte alignment.
  ptr += (uintptr_t)ptr & 4;
#endif
  assert((uintptr_t)ptr % 8 == 0);
  // If bit 3 is set, adding 8 clears it, producing 16-byte alignment.
  ptr += (uintptr_t)ptr & 8;
  return (CIPHER_AES_CCM_CTX *)ptr;
}
// cipher_aes_ccm_init installs the key and/or IV (nonce) on |ctx|. Either
// argument may be NULL, in which case that part of the state is left
// untouched; calling with both NULL is a no-op. |enc| is unused because CCM
// handles direction in |cipher_aes_ccm_cipher|. Returns 1 on success, 0 on
// error.
static int cipher_aes_ccm_init(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                               const uint8_t *iv, int enc) {
  CIPHER_AES_CCM_CTX *cipher_ctx = aes_ccm_from_cipher_ctx(ctx);
  if (!iv && !key) {
    return 1;
  }

  if (key) {
    block128_f block;
    ctr128_f ctr = aes_ctr_set_key(&cipher_ctx->ks.ks, NULL, &block, key,
                                   ctx->key_len);
    if (!CRYPTO_ccm128_init(&cipher_ctx->ccm, block, ctr, cipher_ctx->M,
                            cipher_ctx->L)) {
      return 0;
    }
    cipher_ctx->key_set = 1;
  }

  if (iv) {
    // Do NOT re-run |CRYPTO_ccm128_init| here: doing so with NULL |block| and
    // |ctr| pointers would clobber the function pointers installed above when
    // the key and IV are supplied in the same call (the common
    // EVP_EncryptInit_ex path). Setting the IV only records the nonce bytes.
    OPENSSL_memcpy(cipher_ctx->nonce, iv, CCM_L_TO_NONCE_LEN(cipher_ctx->L));
    cipher_ctx->iv_set = 1;
  }

  return 1;
}
// cipher_aes_ccm_cipher implements the EVP_Cipher contract for CCM. CCM is
// not an online mode, so the caller must follow the OpenSSL CCM calling
// convention: first an update with |out| == |in| == NULL to set the total
// message length, then (optionally) an AAD update with |out| == NULL, then
// the data update, with the tag set beforehand when decrypting. Returns the
// number of bytes processed, or -1 on error.
static int cipher_aes_ccm_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                                 const uint8_t *in, size_t len) {
  CIPHER_AES_CCM_CTX *cipher_ctx = aes_ccm_from_cipher_ctx(ctx);
  CCM128_CTX *ccm_ctx = CCM_INNER_CTX(cipher_ctx);
  CCM128_STATE *ccm_state = CCM_INNER_STATE(cipher_ctx);

  // Implicit EVP_*Final call. CCM does all the work in EVP_*Update
  // n.b. |out| is non-NULL and |in| is NULL despite being a no-op.
  if (in == NULL && out != NULL) {
    return 0;
  }

  if (!cipher_ctx->iv_set || !cipher_ctx->key_set) {
    return -1;
  }

  if (!out) {
    if (!in) {
      // If |out| and |in| are both NULL, |len| is the total length of the
      // message which we need to include that in the 0th block of the CBC-MAC.
      cipher_ctx->message_len = len;
      cipher_ctx->len_set = 1;
      // NOTE(review): |len| is a size_t returned through an int; values above
      // INT_MAX would be misreported — confirm callers bound the length.
      return len;
    } else {
      // If only |out| is NULL then this is the AAD.
      // The message length must be set apriori.
      if (!cipher_ctx->len_set && len) {
        return -1;
      }
      // We now have everything we need to initialize the CBC-MAC state
      if (ccm128_init_state(ccm_ctx, ccm_state,
                            &cipher_ctx->ks.ks, cipher_ctx->nonce,
                            CCM_L_TO_NONCE_LEN(cipher_ctx->L), in, len,
                            cipher_ctx->message_len)) {
        cipher_ctx->ccm_set = 1;
        return len;
      } else {
        return -1;
      }
    }
  }

  // The tag must be set before decrypting any data.
  if (!EVP_CIPHER_CTX_encrypting(ctx) && !cipher_ctx->tag_set) {
    return -1;
  }

  // The total message length must have been provided up front.
  if (!cipher_ctx->len_set) {
    return -1;
  }

  if (!cipher_ctx->ccm_set) {
    // Initialize the ccm_state if this did not happen during the AAD update.
    if (!ccm128_init_state(ccm_ctx, ccm_state, &cipher_ctx->ks.ks,
                           cipher_ctx->nonce, CCM_L_TO_NONCE_LEN(cipher_ctx->L),
                           NULL, 0, cipher_ctx->message_len)) {
      return -1;
    }
    cipher_ctx->ccm_set = 1;
  }

  if (EVP_CIPHER_CTX_encrypting(ctx)) {
    // Encryption path. Compute CBC-MAC on plaintext and then encrypt.
    if (!ccm128_compute_mac(ccm_ctx, ccm_state, &cipher_ctx->ks.ks,
                            cipher_ctx->tag, cipher_ctx->M, in, len)) {
      return -1;
    }
    if (!ccm128_encrypt(ccm_ctx, ccm_state, &cipher_ctx->ks.ks, out, in, len)) {
      return -1;
    }
    // The tag is now available via EVP_CTRL_AEAD_GET_TAG.
    cipher_ctx->tag_set = 1;
  } else {
    // Decryption path. Compute the plaintext then compute its CBC-MAC.
    // n.b. The method says encrypt, but it works both ways.
    if (!ccm128_encrypt(ccm_ctx, ccm_state, &cipher_ctx->ks.ks, out, in, len)) {
      return -1;
    }
    uint8_t computed_tag[EVP_AEAD_AES_CCM_MAX_TAG_LEN] = {0};
    if (!ccm128_compute_mac(ccm_ctx, ccm_state, &cipher_ctx->ks.ks,
                            computed_tag, cipher_ctx->M, out, len)) {
      // Wipe the already-written plaintext on failure.
      OPENSSL_cleanse(out, len);
      return -1;
    }
    // Validate the tag and invalidate the output if it doesn't match.
    if (CRYPTO_memcmp(cipher_ctx->tag, computed_tag, cipher_ctx->M)) {
      OPENSSL_cleanse(out, len);
      return -1;
    }
    // Successful decryption consumes the IV, tag and length state.
    cipher_ctx->iv_set = 0;
    cipher_ctx->tag_set = 0;
    cipher_ctx->len_set = 0;
    cipher_ctx->ccm_set = 0;
  }
  return (int) len;
}
// cipher_aes_ccm_ctrl_set_L sets the CCM length-field size. RFC 3610 only
// permits L in [2, 8]. Returns 1 on success, 0 if |L| is out of range.
static int cipher_aes_ccm_ctrl_set_L(CIPHER_AES_CCM_CTX *ctx, int L) {
  if (2 <= L && L <= 8) {
    ctx->L = L;
    return 1;
  }
  return 0;
}
// cipher_aes_ccm_ctrl handles EVP_CIPHER_CTX_ctrl operations for the CCM
// ciphers. Returns 1 on success, 0 on failure, and -1 for unsupported
// control types.
static int cipher_aes_ccm_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
                               void *ptr) {
  CIPHER_AES_CCM_CTX *cipher_ctx = aes_ccm_from_cipher_ctx(ctx);
  switch (type) {
    case EVP_CTRL_INIT:
      OPENSSL_cleanse(cipher_ctx, sizeof(CIPHER_AES_CCM_CTX));
      cipher_ctx->key_set = 0;
      cipher_ctx->iv_set = 0;
      cipher_ctx->tag_set = 0;
      cipher_ctx->len_set = 0;
      cipher_ctx->ccm_set = 0;
      cipher_ctx->L = 8;
      // The default tag length (M) for CCM is 12 bytes, matching OpenSSL's
      // default; callers may override it via EVP_CTRL_AEAD_SET_TAG.
      cipher_ctx->M = 12;
      cipher_ctx->message_len = 0;
      return 1;
    case EVP_CTRL_GET_IVLEN:
      // NOTE(review): with the default L of 8 this reports 7, while the
      // EVP_CIPHER advertises iv_len 13 (L = 2) — confirm intended.
      *(int *)ptr = CCM_L_TO_NONCE_LEN(cipher_ctx->L);
      return 1;
    case EVP_CTRL_AEAD_SET_IVLEN:
      // The nonce (IV) length is 15-L, compute L here and set it below to
      // "set" the IV length.
      return cipher_aes_ccm_ctrl_set_L(cipher_ctx, 15 - arg);
    case EVP_CTRL_CCM_SET_L:
      return cipher_aes_ccm_ctrl_set_L(cipher_ctx, arg);
    case EVP_CTRL_AEAD_SET_TAG:
      // |arg| is the tag length in bytes; it must be even and within the
      // supported CCM range.
      if ((arg & 1) || arg < EVP_AEAD_AES_CCM_MIN_TAG_LEN
          || arg > EVP_AEAD_AES_CCM_MAX_TAG_LEN) {
        return 0;
      }
      // If encrypting, we don't expect incoming tag data.
      if (ctx->encrypt && ptr) {
        return 0;
      }
      if (ptr) {
        // Set the tag for validation when decrypting.
        OPENSSL_memcpy(cipher_ctx->tag, ptr, arg);
        cipher_ctx->tag_set = 1;
      }
      // Set the value of M (i.e. the tag length) when encrypting.
      cipher_ctx->M = arg;
      return 1;
    case EVP_CTRL_AEAD_GET_TAG:
      // Only valid after a successful encryption produced a tag.
      if (!ctx->encrypt || !cipher_ctx->tag_set) {
        return 0;
      }
      if ((size_t) arg != cipher_ctx->M) {
        return 0;
      }
      OPENSSL_memcpy(ptr, cipher_ctx->tag, cipher_ctx->M);
      // Reading the tag consumes the per-message state.
      cipher_ctx->tag_set = 0;
      cipher_ctx->iv_set = 0;
      cipher_ctx->len_set = 0;
      cipher_ctx->ccm_set = 0;
      return 1;
    case EVP_CTRL_COPY: {
      EVP_CIPHER_CTX *out = ptr;
      CIPHER_AES_CCM_CTX *cipher_ctx_out = aes_ccm_from_cipher_ctx(out);
      // |EVP_CIPHER_CTX_copy| copies this generically, but we must redo it in
      // case |out->cipher_data| and |in->cipher_data| are differently aligned.
      OPENSSL_memcpy(cipher_ctx_out, cipher_ctx, sizeof(CIPHER_AES_CCM_CTX));
      return 1;
    }
    default:
      return -1;
  }
}
DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_128_ccm) {
  memset(out, 0, sizeof(EVP_CIPHER));

  out->nid = NID_aes_128_ccm;
  out->block_size = 1;  // stream cipher
  out->key_len = 16;
  out->iv_len = 13;
  // Reserve padding so the context can be bumped to 16-byte alignment.
  out->ctx_size = sizeof(CIPHER_AES_CCM_CTX) + CIPHER_AES_CCM_CTX_PADDING;
  out->flags = EVP_CIPH_CCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_CUSTOM_COPY |
               EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT |
               EVP_CIPH_CTRL_INIT | EVP_CIPH_FLAG_AEAD_CIPHER;

  out->init = cipher_aes_ccm_init;
  out->cipher = cipher_aes_ccm_cipher;
  out->cleanup = NULL;
  out->ctrl = cipher_aes_ccm_ctrl;
}
DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_192_ccm) {
  memset(out, 0, sizeof(EVP_CIPHER));

  out->nid = NID_aes_192_ccm;
  out->block_size = 1;  // stream cipher
  out->key_len = 24;
  out->iv_len = 13;
  // Reserve padding so the context can be bumped to 16-byte alignment.
  out->ctx_size = sizeof(CIPHER_AES_CCM_CTX) + CIPHER_AES_CCM_CTX_PADDING;
  out->flags = EVP_CIPH_CCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_CUSTOM_COPY |
               EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT |
               EVP_CIPH_CTRL_INIT | EVP_CIPH_FLAG_AEAD_CIPHER;

  out->init = cipher_aes_ccm_init;
  out->cipher = cipher_aes_ccm_cipher;
  out->cleanup = NULL;
  out->ctrl = cipher_aes_ccm_ctrl;
}
DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_256_ccm) {
  memset(out, 0, sizeof(EVP_CIPHER));

  out->nid = NID_aes_256_ccm;
  out->block_size = 1;  // stream cipher
  out->key_len = 32;
  out->iv_len = 13;
  // Reserve padding so the context can be bumped to 16-byte alignment.
  out->ctx_size = sizeof(CIPHER_AES_CCM_CTX) + CIPHER_AES_CCM_CTX_PADDING;
  out->flags = EVP_CIPH_CCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_CUSTOM_COPY |
               EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT |
               EVP_CIPH_CTRL_INIT | EVP_CIPH_FLAG_AEAD_CIPHER;

  out->init = cipher_aes_ccm_init;
  out->cipher = cipher_aes_ccm_cipher;
  out->cleanup = NULL;
  out->ctrl = cipher_aes_ccm_ctrl;
}

// ============================================================================
// Second file in this chunk (extraction artifact removed): cipher internal.h
// ============================================================================
// Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) All rights reserved.
// SPDX-License-Identifier: Apache-2.0
#ifndef OPENSSL_HEADER_CIPHER_INTERNAL_H
#define OPENSSL_HEADER_CIPHER_INTERNAL_H
#include <openssl/base.h>
#include <openssl/aead.h>
#include <openssl/aes.h>
#include <openssl/bytestring.h>
#include "../../internal.h"
#include "../modes/internal.h"
#if defined(__cplusplus)
extern "C" {
#endif
// EVP_CIPH_MODE_MASK contains the bits of |flags| that represent the mode.
#define EVP_CIPH_MODE_MASK 0x3f
// Set of EVP_AEAD->aead_id identifiers, zero is reserved as the "unknown"
// value since it is the default for a structure. Implementations of the same
// algorithms should use the same identifier. For example, machine-optimised
// assembly versions should use the same identifier as their C counterparts.
#define AEAD_UNKNOWN_ID 0
#define AEAD_AES_128_CTR_HMAC_SHA256_ID 1
#define AEAD_AES_256_CTR_HMAC_SHA256_ID 2
#define AEAD_AES_128_GCM_SIV_ID 3
#define AEAD_AES_256_GCM_SIV_ID 4
#define AEAD_CHACHA20_POLY1305_ID 5
#define AEAD_XCHACHA20_POLY1305_ID 6
#define AEAD_AES_128_CBC_SHA1_TLS_ID 7
#define AEAD_AES_128_CBC_SHA1_TLS_IMPLICIT_IV_ID 8
#define AEAD_AES_256_CBC_SHA1_TLS_ID 9
#define AEAD_AES_256_CBC_SHA1_TLS_IMPLICIT_IV_ID 10
#define AEAD_AES_128_CBC_SHA256_TLS_ID 11
#define AEAD_AES_128_CBC_SHA256_TLS_IMPLICIT_IV_ID 12
#define AEAD_DES_EDE3_CBC_SHA1_TLS_ID 13
#define AEAD_DES_EDE3_CBC_SHA1_TLS_IMPLICIT_IV_ID 14
#define AEAD_NULL_SHA1_TLS_ID 15
#define AEAD_AES_128_GCM_ID 16
#define AEAD_AES_192_GCM_ID 17
#define AEAD_AES_256_GCM_ID 18
#define AEAD_AES_128_GCM_RANDNONCE_ID 19
#define AEAD_AES_256_GCM_RANDNONCE_ID 20
#define AEAD_AES_128_GCM_TLS12_ID 21
#define AEAD_AES_256_GCM_TLS12_ID 22
#define AEAD_AES_128_GCM_TLS13_ID 23
#define AEAD_AES_256_GCM_TLS13_ID 24
#define AEAD_AES_128_CCM_BLUETOOTH_ID 25
#define AEAD_AES_128_CCM_BLUETOOTH_8_ID 26
#define AEAD_AES_128_CCM_MATTER_ID 27
#define AEAD_AES_256_CBC_SHA384_TLS_ID 28
#define AEAD_MAX_ID 28
// EVP_AEAD represents a specific AEAD algorithm.
struct evp_aead_st {
  uint8_t key_len;
  uint8_t nonce_len;
  uint8_t overhead;
  uint8_t max_tag_len;
  // aead_id identifies the algorithm; see the AEAD_*_ID constants above.
  uint16_t aead_id;
  // seal_scatter_supports_extra_in is non-zero if |seal_scatter| accepts a
  // non-empty |extra_in|.
  int seal_scatter_supports_extra_in;

  // init initialises an |EVP_AEAD_CTX|. If this call returns zero then
  // |cleanup| will not be called for that context.
  int (*init)(EVP_AEAD_CTX *, const uint8_t *key, size_t key_len,
              size_t tag_len);
  int (*init_with_direction)(EVP_AEAD_CTX *, const uint8_t *key, size_t key_len,
                             size_t tag_len, enum evp_aead_direction_t dir);
  void (*cleanup)(EVP_AEAD_CTX *);

  int (*open)(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
              size_t max_out_len, const uint8_t *nonce, size_t nonce_len,
              const uint8_t *in, size_t in_len, const uint8_t *ad,
              size_t ad_len);

  int (*seal_scatter)(const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
                      size_t *out_tag_len, size_t max_out_tag_len,
                      const uint8_t *nonce, size_t nonce_len, const uint8_t *in,
                      size_t in_len, const uint8_t *extra_in,
                      size_t extra_in_len, const uint8_t *ad, size_t ad_len);

  int (*open_gather)(const EVP_AEAD_CTX *ctx, uint8_t *out,
                     const uint8_t *nonce, size_t nonce_len, const uint8_t *in,
                     size_t in_len, const uint8_t *in_tag, size_t in_tag_len,
                     const uint8_t *ad, size_t ad_len);

  int (*get_iv)(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv,
                size_t *out_len);

  // tag_len returns the tag length for a message of |in_len| bytes with
  // |extra_in_len| bytes of additional input. (Parameter renamed from the
  // non-conventional |in_Len| for consistency with the other prototypes.)
  size_t (*tag_len)(const EVP_AEAD_CTX *ctx, size_t in_len,
                    size_t extra_in_len);

  int (*serialize_state)(const EVP_AEAD_CTX *ctx, CBB *cbb);
  int (*deserialize_state)(const EVP_AEAD_CTX *ctx, CBS *cbs);
};
// evp_cipher_st describes a concrete cipher implementation behind the
// EVP_CIPHER interface.
struct evp_cipher_st {
  // nid contains a NID identifying the cipher. (e.g. NID_aes_128_gcm.)
  int nid;

  // block_size contains the block size, in bytes, of the cipher, or 1 for a
  // stream cipher.
  unsigned block_size;

  // key_len contains the key size, in bytes, for the cipher. If the cipher
  // takes a variable key size then this contains the default size.
  unsigned key_len;

  // iv_len contains the IV size, in bytes, or zero if inapplicable.
  unsigned iv_len;

  // ctx_size contains the size, in bytes, of the per-key context for this
  // cipher.
  unsigned ctx_size;

  // flags contains the OR of a number of flags. See |EVP_CIPH_*|.
  uint32_t flags;

  // init initialises the per-key context from |key| and/or |iv|; |enc| is
  // one for encryption, zero for decryption.
  int (*init)(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv,
              int enc);

  // cipher processes |inl| bytes from |in| into |out|; semantics depend on
  // the cipher's flags (see |EVP_CIPH_FLAG_CUSTOM_CIPHER|).
  int (*cipher)(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                size_t inl);

  // cleanup, if non-NULL, releases memory associated with the context. It is
  // called if |EVP_CTRL_INIT| succeeds. Note that |init| may not have been
  // called at this point.
  void (*cleanup)(EVP_CIPHER_CTX *);

  // ctrl handles |EVP_CIPHER_CTX_ctrl| operations for this cipher.
  int (*ctrl)(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
};
// aes_ctr_set_key initialises |*aes_key| using |key_bytes| bytes from |key|,
// where |key_bytes| must either be 16, 24 or 32. If not NULL, |*out_block| is
// set to a function that encrypts single blocks. If not NULL, |*gcm_key| is
// initialised to do GHASH with the given key. It returns a function for
// optimised CTR-mode, or NULL if CTR-mode should be built using |*out_block|.
ctr128_f aes_ctr_set_key(AES_KEY *aes_key, GCM128_KEY *gcm_key,
block128_f *out_block, const uint8_t *key,
size_t key_bytes);
// EXPERIMENTAL functions for use in the TLS Transfer function. See
// |SSL_to_bytes| for more details.
// EVP_AEAD_CTX_serialize_state serializes the state of |ctx|,
// and writes it to |cbb|. The serialized bytes contains only the subset of data
// necessary to restore the state of an |EVP_AEAD_CTX| after initializing a new
// instance using |EVP_AEAD_CTX_init|. Function returns 1 on success or zero for
// an error.
//
// EvpAeadCtxStateSerializationVersion ::= INTEGER {v1 (1)}
//
// EvpAeadCtxState ::= SEQUENCE {
// serializationVersion EvpAeadCtxStateSerializationVersion,
// evpAeadCipherIdentifier INTEGER,
// state OCTET STRING
// }
OPENSSL_EXPORT int EVP_AEAD_CTX_serialize_state(const EVP_AEAD_CTX *ctx,
CBB *cbb);
// EVP_AEAD_CTX_deserialize_state deserializes the state
// contained in |cbs|, configures the |ctx| to match. The deserialized bytes
// contains only the subset of data necessary to restore the state of an
// |EVP_AEAD_CTX| after initializing a new instance using |EVP_AEAD_CTX_init|.
// The function returns 1 on success or zero for an error.
OPENSSL_EXPORT int EVP_AEAD_CTX_deserialize_state(const EVP_AEAD_CTX *ctx,
CBS *cbs);
OPENSSL_EXPORT uint16_t EVP_AEAD_CTX_get_aead_id(const EVP_AEAD_CTX *ctx);
#if defined(__cplusplus)
} // extern C
#endif
#endif // OPENSSL_HEADER_CIPHER_INTERNAL_H