chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

View File

@@ -0,0 +1,64 @@
#!/usr/bin/env perl
# Copyright (c) 2015, Google Inc.
# SPDX-License-Identifier: ISC
#
# Generates x86-64 assembly for CRYPTO_rdrand_multiple8 by piping the
# perlasm source below through x86_64-xlate.pl.
use strict;
use warnings;
# The first two arguments should always be the flavour and output file path.
if ($#ARGV < 1) { die "Not enough arguments provided.
Two arguments are necessary: the flavour and the output file path."; }
my $flavour = shift;
my $output = shift;
# The Windows ABI passes the first four arguments in rcx/rdx/r8/r9 rather
# than the SysV rdi/rsi/rdx/rcx.
my $win64 = 0;
$win64 = 1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
$0 =~ m/(.*[\/\\])[^\/\\]+$/;
my $dir = $1;
my $xlate;
( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
# Pipe our output through the translator. The original open was unchecked
# and left $flavour unquoted; check for failure and quote both arguments.
open OUT,"| \"$^X\" \"$xlate\" \"$flavour\" \"$output\""
    or die "can't open pipe to $xlate: $!";
*STDOUT=*OUT;
my ($out, $len, $tmp1, $tmp2) = $win64 ? ("%rcx", "%rdx", "%r8", "%r9")
                                       : ("%rdi", "%rsi", "%rdx", "%rcx");
print<<___;
.text
# int CRYPTO_rdrand_multiple8(uint8_t *buf, size_t len);
.globl CRYPTO_rdrand_multiple8
.type CRYPTO_rdrand_multiple8,\@abi-omnipotent
.align 16
CRYPTO_rdrand_multiple8:
.cfi_startproc
_CET_ENDBR
test $len, $len
jz .Lout
movq \$8, $tmp1
.Lloop:
rdrand $tmp2
jnc .Lerr_multiple
test $tmp2, $tmp2 # OLD cpu's: can use all 0s in output as error signal
jz .Lerr_multiple
cmp \$-1, $tmp2 # AMD bug: check if all returned bits by RDRAND is stuck on 1
je .Lerr_multiple
movq $tmp2, 0($out)
addq $tmp1, $out
subq $tmp1, $len
jnz .Lloop
.Lout:
movq \$1, %rax
retq
.Lerr_multiple:
xorq %rax, %rax
retq
.cfi_endproc
.size CRYPTO_rdrand_multiple8,.-CRYPTO_rdrand_multiple8
___
close STDOUT or die "error closing STDOUT: $!"; # flush

View File

@@ -0,0 +1,60 @@
#! /usr/bin/env perl
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0 OR ISC
#
# Generates Aarch64 assembly for CRYPTO_rndr_multiple8.
# RNDR from ARMv8.5-A.
# System register encoding: s3_3_c2_c4_0.
# see https://developer.arm.com/documentation/ddi0601/2024-09/AArch64-Registers/RNDR--Random-Number
use strict;
use warnings;
# The first two arguments should always be the flavour and output file path.
if ($#ARGV < 1) { die "Not enough arguments provided.
Two arguments are necessary: the flavour and the output file path."; }
my $flavour = shift;
my $output = shift;
$0 =~ m/(.*[\/\\])[^\/\\]+$/;
my $dir = $1;
# Previously $dir, $xlate and $code were undeclared package globals and the
# script ran without strictures; declare them lexically instead.
my $xlate;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";
# Check the piped open for failure (it was previously unchecked).
open OUT,qq{| "$^X" "$xlate" "$flavour" "$output"}
    or die "can't open pipe to $xlate: $!";
*STDOUT=*OUT;
my ($out, $len, $rndr64) = ("x0", "x1", "x2");
my $code = <<___;
#include <openssl/arm_arch.h>
.arch armv8-a
.text
# int CRYPTO_rndr_multiple8(uint8_t *out, const size_t len)
.globl CRYPTO_rndr_multiple8
.type CRYPTO_rndr_multiple8,%function
.align 4
CRYPTO_rndr_multiple8:
cbz $len, .Lrndr_multiple8_error // len = 0 is not supported
.Lrndr_multiple8_loop:
mrs $rndr64, s3_3_c2_c4_0 // rndr instruction https://developer.arm.com/documentation/ddi0601/2024-09/Index-by-Encoding
cbz $rndr64, .Lrndr_multiple8_error // Check if rndr failed
str $rndr64, [$out], #8 // Copy 8 bytes to *out and increment pointer by 8
sub $len, $len, #8
cbz $len, .Lrndr_multiple8_done // If multiple of 8 this will be 0 eventually
b .Lrndr_multiple8_loop
.Lrndr_multiple8_done:
mov x0, #1 // Return value success
ret
.Lrndr_multiple8_error:
mov x0, #0 // Return value error
ret
.size CRYPTO_rndr_multiple8,.-CRYPTO_rndr_multiple8
___
print $code;
close STDOUT or die "error closing STDOUT: $!"; # enforce flush

View File

@@ -0,0 +1,69 @@
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC
#if !defined(DISABLE_CPU_JITTER_ENTROPY)
#include <gtest/gtest.h>
#include "../../test/test_util.h"
#include "../../../third_party/jitterentropy/jitterentropy-library/jitterentropy.h"
// Struct for Jitter entropy collector instance with constructor/desctructor.
struct JitterEC {
rand_data *instance;
JitterEC(unsigned int osr, unsigned int flags) {
instance = nullptr;
instance = jent_entropy_collector_alloc(osr, flags);
}
~JitterEC() {
jent_entropy_collector_free(instance);
instance = nullptr;
}
};
TEST(CPUJitterEntropyTest, Basic) {
  // Allocate Jitter instance with default oversampling rate.
  JitterEC jitter_ec(0, JENT_FORCE_FIPS);
  // Check that the instance is properly allocated and initialized. Use
  // ASSERT rather than EXPECT: the test dereferences |instance| below and
  // would crash on nullptr.
  ASSERT_NE(jitter_ec.instance, nullptr);
  // Check that the default oversampling rate is 3 as expected.
  unsigned int default_osr = 3;
  EXPECT_EQ(jitter_ec.instance->osr, default_osr);
  const ssize_t data_len = 48;
  uint8_t data0[data_len], data1[data_len];
  // Draw some entropy to check if it works.
  EXPECT_EQ(jent_read_entropy(jitter_ec.instance,
                              (char*) data0, data_len), data_len);
  EXPECT_EQ(jent_read_entropy(jitter_ec.instance,
                              (char*) data1, data_len), data_len);
  // Basic check that the random data is not equal.
  EXPECT_NE(Bytes(data0), Bytes(data1));
  // Free Jitter instance and initialize a new one with different osr.
  jent_entropy_collector_free(jitter_ec.instance);
  unsigned int osr = 5;
  jitter_ec.instance = jent_entropy_collector_alloc(osr, JENT_FORCE_FIPS);
  ASSERT_NE(jitter_ec.instance, nullptr);
  EXPECT_EQ(jitter_ec.instance->osr, osr);
  // Test drawing entropy from the Jitter object that was reset.
  EXPECT_EQ(jent_read_entropy(jitter_ec.instance,
                              (char*) data0, data_len), data_len);
  EXPECT_EQ(jent_read_entropy(jitter_ec.instance,
                              (char*) data1, data_len), data_len);
  // The second pair of draws must also differ; this was previously
  // unchecked.
  EXPECT_NE(Bytes(data0), Bytes(data1));
  // Verify that the Jitter Entropy library version is as expected.
  unsigned int jitter_version = 3060300;
  EXPECT_EQ(jitter_version, jent_version());
}
#endif // !defined(DISABLE_CPU_JITTER_ENTROPY)

View File

@@ -0,0 +1,226 @@
// Copyright (c) 2017, Google Inc.
// SPDX-License-Identifier: ISC
#include <openssl/ctrdrbg.h>
#include <openssl/type_check.h>
#include <openssl/mem.h>
#include "internal.h"
#include "../cipher/internal.h"
// Section references in this file refer to SP 800-90Ar1:
// http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf
// See table 3.
static const uint64_t kMaxReseedCount = UINT64_C(1) << 48;
// CTR_DRBG_new allocates a fresh DRBG state and seeds it with |entropy| and
// the optional |personalization| string. Returns NULL on allocation or
// initialization failure.
CTR_DRBG_STATE *CTR_DRBG_new(const uint8_t entropy[CTR_DRBG_ENTROPY_LEN],
                             const uint8_t *personalization,
                             size_t personalization_len) {
  SET_DIT_AUTO_RESET;
  CTR_DRBG_STATE *ret = OPENSSL_malloc(sizeof(CTR_DRBG_STATE));
  if (ret == NULL) {
    return NULL;
  }
  if (!CTR_DRBG_init(ret, entropy, personalization, personalization_len)) {
    CTR_DRBG_free(ret);
    return NULL;
  }
  return ret;
}
// CTR_DRBG_free releases |state|, which may be NULL. The memory is not
// cleansed here; callers wanting to scrub key material should call
// |CTR_DRBG_clear| first.
void CTR_DRBG_free(CTR_DRBG_STATE *state) {
  SET_DIT_AUTO_RESET;
  OPENSSL_free(state);
}
// CTR_DRBG_init instantiates |drbg| per SP 800-90Ar1 section 10.2.1.3.1
// using AES-256 in counter mode. Returns 1 on success, 0 on error.
int CTR_DRBG_init(CTR_DRBG_STATE *drbg,
                  const uint8_t entropy[CTR_DRBG_ENTROPY_LEN],
                  const uint8_t *personalization, size_t personalization_len) {
  SET_DIT_AUTO_RESET;
  // The entropy input and personalization string must not overlap.
  if (buffers_alias(entropy, CTR_DRBG_ENTROPY_LEN,
                    personalization, personalization_len)) {
    return 0;
  }
  // Section 10.2.1.3.1
  if (personalization_len > CTR_DRBG_ENTROPY_LEN) {
    return 0;
  }
  // seed_material = entropy XOR (personalization zero-padded on the right).
  uint8_t seed_material[CTR_DRBG_ENTROPY_LEN];
  OPENSSL_memcpy(seed_material, entropy, CTR_DRBG_ENTROPY_LEN);
  for (size_t i = 0; i < personalization_len; i++) {
    seed_material[i] ^= personalization[i];
  }
  // Section 10.2.1.2
  // kInitMask is the result of encrypting blocks with big-endian value 1, 2
  // and 3 with the all-zero AES-256 key. XORing it in replaces running the
  // update step from an all-zero initial key/counter.
  static const uint8_t kInitMask[CTR_DRBG_ENTROPY_LEN] = {
      0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, 0xa9, 0x63, 0xb4, 0xf1,
      0xc4, 0xcb, 0x73, 0x8b, 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e,
      0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18, 0x72, 0x60, 0x03, 0xca,
      0x37, 0xa6, 0x2a, 0x74, 0xd1, 0xa2, 0xf5, 0x8e, 0x75, 0x06, 0x35, 0x8e,
  };
  for (size_t i = 0; i < sizeof(kInitMask); i++) {
    seed_material[i] ^= kInitMask[i];
  }
  // First 32 bytes become the AES-256 key, the last 16 the counter V.
  drbg->ctr = aes_ctr_set_key(&drbg->ks, NULL, &drbg->block, seed_material, 32);
  OPENSSL_memcpy(drbg->counter, seed_material + 32, 16);
  drbg->reseed_counter = 1;
  return 1;
}
OPENSSL_STATIC_ASSERT(CTR_DRBG_ENTROPY_LEN % AES_BLOCK_SIZE == 0,
not_a_multiple_of_AES_block_size)
// ctr32_add adds |n| to the last four bytes of |drbg->counter|, treated as a
// big-endian number. (The comment previously referred to this function by
// the stale name "ctr_inc".)
static void ctr32_add(CTR_DRBG_STATE *drbg, uint32_t n) {
  uint32_t ctr = CRYPTO_load_u32_be(drbg->counter + 12);
  CRYPTO_store_u32_be(drbg->counter + 12, ctr + n);
}
// ctr_drbg_update implements CTR_DRBG_Update (section 10.2.1.2): generate a
// full seed-length of keystream, XOR in |data|, and install the result as
// the new key and counter. Returns 1 on success, 0 if |data_len| is too
// large.
static int ctr_drbg_update(CTR_DRBG_STATE *drbg, const uint8_t *data,
                           size_t data_len) {
  // Per section 10.2.1.2, |data_len| must be |CTR_DRBG_ENTROPY_LEN|. Here, we
  // allow shorter inputs and right-pad them with zeros. This is equivalent to
  // the specified algorithm but saves a copy in |CTR_DRBG_generate|.
  if (data_len > CTR_DRBG_ENTROPY_LEN) {
    return 0;
  }
  uint8_t temp[CTR_DRBG_ENTROPY_LEN];
  // Fill |temp| with keystream, one AES block at a time.
  for (size_t i = 0; i < CTR_DRBG_ENTROPY_LEN; i += AES_BLOCK_SIZE) {
    ctr32_add(drbg, 1);
    drbg->block(drbg->counter, temp + i, &drbg->ks);
  }
  // XOR in the provided data; bytes past |data_len| stay pure keystream,
  // which is the implicit zero-padding mentioned above.
  for (size_t i = 0; i < data_len; i++) {
    temp[i] ^= data[i];
  }
  // First 32 bytes become the new AES-256 key, the last 16 the new counter.
  drbg->ctr = aes_ctr_set_key(&drbg->ks, NULL, &drbg->block, temp, 32);
  OPENSSL_memcpy(drbg->counter, temp + 32, 16);
  return 1;
}
// CTR_DRBG_reseed reseeds |drbg| per section 10.2.1.4, XORing the optional
// |additional_data| (at most CTR_DRBG_ENTROPY_LEN bytes) into the fresh
// |entropy| before updating. Returns 1 on success, 0 on error.
int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg,
                    const uint8_t entropy[CTR_DRBG_ENTROPY_LEN],
                    const uint8_t *additional_data,
                    size_t additional_data_len) {
  SET_DIT_AUTO_RESET;
  // The entropy input and additional data must not overlap.
  if (buffers_alias(entropy, CTR_DRBG_ENTROPY_LEN,
                    additional_data, additional_data_len)) {
    return 0;
  }
  // Section 10.2.1.4
  uint8_t entropy_copy[CTR_DRBG_ENTROPY_LEN];
  if (additional_data_len > 0) {
    if (additional_data_len > CTR_DRBG_ENTROPY_LEN) {
      return 0;
    }
    // Mix into a local copy so the caller's entropy buffer stays untouched.
    OPENSSL_memcpy(entropy_copy, entropy, CTR_DRBG_ENTROPY_LEN);
    for (size_t i = 0; i < additional_data_len; i++) {
      entropy_copy[i] ^= additional_data[i];
    }
    entropy = entropy_copy;
  }
  if (!ctr_drbg_update(drbg, entropy, CTR_DRBG_ENTROPY_LEN)) {
    return 0;
  }
  // A successful reseed resets the generate budget.
  drbg->reseed_counter = 1;
  return 1;
}
// CTR_DRBG_generate writes |out_len| bytes of output per section 10.2.1.5.1.
// |additional_data|, if non-empty, is mixed into the state before
// generation, and the state is always updated afterwards. Returns 1 on
// success, 0 on error (request too large, or a reseed is required).
int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len,
                      const uint8_t *additional_data,
                      size_t additional_data_len) {
  SET_DIT_AUTO_RESET;
  // See 9.3.1
  if (out_len > CTR_DRBG_MAX_GENERATE_LENGTH) {
    return 0;
  }
  // See 10.2.1.5.1
  if (drbg->reseed_counter > kMaxReseedCount) {
    return 0;
  }
  if (additional_data_len != 0 &&
      !ctr_drbg_update(drbg, additional_data, additional_data_len)) {
    return 0;
  }
  // kChunkSize is used to interact better with the cache. Since the AES-CTR
  // code assumes that it's encrypting rather than just writing keystream, the
  // buffer has to be zeroed first. Without chunking, large reads would zero
  // the whole buffer, flushing the L1 cache, and then do another pass (missing
  // the cache every time) to “encrypt” it. The code can avoid this by
  // chunking.
  static const size_t kChunkSize = 8 * 1024;
  while (out_len >= AES_BLOCK_SIZE) {
    size_t todo = kChunkSize;
    if (todo > out_len) {
      todo = out_len;
    }
    // Round down to whole AES blocks; any sub-block tail is produced after
    // the loop.
    todo &= ~(AES_BLOCK_SIZE-1);
    const size_t num_blocks = todo / AES_BLOCK_SIZE;
    if (drbg->ctr) {
      // Fast path: the counter-mode routine encrypts the zeroed buffer in
      // place. Pre-increment the counter by one, then advance it by the
      // remaining blocks so it matches the block-at-a-time path.
      OPENSSL_memset(out, 0, todo);
      ctr32_add(drbg, 1);
      drbg->ctr(out, out, num_blocks, &drbg->ks, drbg->counter);
      ctr32_add(drbg, (uint32_t)(num_blocks - 1));
    } else {
      for (size_t i = 0; i < todo; i += AES_BLOCK_SIZE) {
        ctr32_add(drbg, 1);
        drbg->block(drbg->counter, out + i, &drbg->ks);
      }
    }
    out += todo;
    out_len -= todo;
  }
  // Partial final block: generate one block and copy the needed prefix.
  if (out_len > 0) {
    uint8_t block[AES_BLOCK_SIZE];
    ctr32_add(drbg, 1);
    drbg->block(drbg->counter, block, &drbg->ks);
    OPENSSL_memcpy(out, block, out_len);
  }
  // Right-padding |additional_data| in step 2.2 is handled implicitly by
  // |ctr_drbg_update|, to save a copy.
  if (!ctr_drbg_update(drbg, additional_data, additional_data_len)) {
    return 0;
  }
  drbg->reseed_counter++;
  FIPS_service_indicator_update_state();
  return 1;
}
// CTR_DRBG_clear zeroizes the whole DRBG state, including key material.
// NOTE(review): unlike the other public entry points in this file, this one
// does not invoke SET_DIT_AUTO_RESET — confirm whether that is intentional.
void CTR_DRBG_clear(CTR_DRBG_STATE *drbg) {
  OPENSSL_cleanse(drbg, sizeof(CTR_DRBG_STATE));
}

View File

@@ -0,0 +1,140 @@
// Copyright (c) 2017, Google Inc.
// SPDX-License-Identifier: ISC
#include <gtest/gtest.h>
#include <openssl/ctrdrbg.h>
#include <openssl/sha.h>
#include "internal.h"
#include "../../test/file_test.h"
#include "../../test/test_util.h"
// Known-answer test: instantiate, reseed, then generate twice; the second
// 64-byte output must equal |kExpected|.
TEST(CTRDRBGTest, Basic) {
  const uint8_t kSeed[CTR_DRBG_ENTROPY_LEN] = {
      0xe4, 0xbc, 0x23, 0xc5, 0x08, 0x9a, 0x19, 0xd8, 0x6f, 0x41, 0x19, 0xcb,
      0x3f, 0xa0, 0x8c, 0x0a, 0x49, 0x91, 0xe0, 0xa1, 0xde, 0xf1, 0x7e, 0x10,
      0x1e, 0x4c, 0x14, 0xd9, 0xc3, 0x23, 0x46, 0x0a, 0x7c, 0x2f, 0xb5, 0x8e,
      0x0b, 0x08, 0x6c, 0x6c, 0x57, 0xb5, 0x5f, 0x56, 0xca, 0xe2, 0x5b, 0xad,
  };
  CTR_DRBG_STATE drbg;
  ASSERT_TRUE(CTR_DRBG_init(&drbg, kSeed, nullptr, 0));
  // This is an implicit assumption in random places in the code and is not
  // always documented. Discovery is hard, so explicitly assert it's true.
  ASSERT_EQ(drbg.reseed_counter, (uint64_t) 1);
  const uint8_t kReseed[CTR_DRBG_ENTROPY_LEN] = {
      0xfd, 0x85, 0xa8, 0x36, 0xbb, 0xa8, 0x50, 0x19, 0x88, 0x1e, 0x8c, 0x6b,
      0xad, 0x23, 0xc9, 0x06, 0x1a, 0xdc, 0x75, 0x47, 0x76, 0x59, 0xac, 0xae,
      0xa8, 0xe4, 0xa0, 0x1d, 0xfe, 0x07, 0xa1, 0x83, 0x2d, 0xad, 0x1c, 0x13,
      0x6f, 0x59, 0xd7, 0x0f, 0x86, 0x53, 0xa5, 0xdc, 0x11, 0x86, 0x63, 0xd6,
  };
  ASSERT_TRUE(CTR_DRBG_reseed(&drbg, kReseed, nullptr, 0));
  // Generate twice into the same buffer; only the second output is compared
  // against the expected vector.
  uint8_t out[64];
  ASSERT_TRUE(CTR_DRBG_generate(&drbg, out, sizeof(out), nullptr, 0));
  ASSERT_TRUE(CTR_DRBG_generate(&drbg, out, sizeof(out), nullptr, 0));
  const uint8_t kExpected[64] = {
      0xb2, 0xcb, 0x89, 0x05, 0xc0, 0x5e, 0x59, 0x50, 0xca, 0x31, 0x89,
      0x50, 0x96, 0xbe, 0x29, 0xea, 0x3d, 0x5a, 0x3b, 0x82, 0xb2, 0x69,
      0x49, 0x55, 0x54, 0xeb, 0x80, 0xfe, 0x07, 0xde, 0x43, 0xe1, 0x93,
      0xb9, 0xe7, 0xc3, 0xec, 0xe7, 0x3b, 0x80, 0xe0, 0x62, 0xb1, 0xc1,
      0xf6, 0x82, 0x02, 0xfb, 0xb1, 0xc5, 0x2a, 0x04, 0x0e, 0xa2, 0x47,
      0x88, 0x64, 0x29, 0x52, 0x82, 0x23, 0x4a, 0xaa, 0xda,
  };
  EXPECT_EQ(Bytes(kExpected), Bytes(out));
  CTR_DRBG_clear(&drbg);
}
// Exercises the heap-allocating constructor |CTR_DRBG_new|.
TEST(CTRDRBGTest, Allocated) {
  const uint8_t kSeed[CTR_DRBG_ENTROPY_LEN] = {0};
  // An empty personalization string must yield a valid instance.
  bssl::UniquePtr<CTR_DRBG_STATE> drbg(CTR_DRBG_new(kSeed, nullptr, 0));
  ASSERT_TRUE(drbg);
  // An oversized personalization length must be rejected.
  drbg.reset(CTR_DRBG_new(kSeed, nullptr, 1<<20));
  ASSERT_FALSE(drbg);
}
// Generates the maximum allowed output length in one call and checks its
// SHA-256 digest against a known answer.
TEST(CTRDRBGTest, Large) {
  const uint8_t kSeed[CTR_DRBG_ENTROPY_LEN] = {0};
  CTR_DRBG_STATE drbg;
  ASSERT_TRUE(CTR_DRBG_init(&drbg, kSeed, nullptr, 0));
  std::unique_ptr<uint8_t[]> buf(new uint8_t[CTR_DRBG_MAX_GENERATE_LENGTH]);
  ASSERT_TRUE(CTR_DRBG_generate(&drbg, buf.get(), CTR_DRBG_MAX_GENERATE_LENGTH,
                                nullptr, 0));
  // Hash the large output rather than embedding it in the test.
  uint8_t digest[SHA256_DIGEST_LENGTH];
  SHA256(buf.get(), CTR_DRBG_MAX_GENERATE_LENGTH, digest);
  const uint8_t kExpected[SHA256_DIGEST_LENGTH] = {
      0x69, 0x78, 0x15, 0x96, 0xca, 0xc0, 0x3f, 0x6a, 0x6d, 0xed, 0x22,
      0x1e, 0x26, 0xd0, 0x75, 0x49, 0xa0, 0x4b, 0x91, 0x58, 0x3c, 0xf4,
      0xe3, 0x6d, 0xff, 0x41, 0xbf, 0xb9, 0xf8, 0xa8, 0x1c, 0x2b,
  };
  EXPECT_EQ(Bytes(kExpected), Bytes(digest));
  CTR_DRBG_clear(&drbg);
}
// Runs the file-based CAVP-style test vectors. The DRBG calls' return values
// were previously ignored; a failing init/reseed/generate would have
// produced a confusing output mismatch instead of a direct assertion.
TEST(CTRDRBGTest, TestVectors) {
  FileTestGTest("crypto/fipsmodule/rand/ctrdrbg_vectors.txt", [](FileTest *t) {
    std::vector<uint8_t> seed, personalisation, reseed, ai_reseed, ai1, ai2,
        expected;
    ASSERT_TRUE(t->GetBytes(&seed, "EntropyInput"));
    ASSERT_TRUE(t->GetBytes(&personalisation, "PersonalizationString"));
    ASSERT_TRUE(t->GetBytes(&reseed, "EntropyInputReseed"));
    ASSERT_TRUE(t->GetBytes(&ai_reseed, "AdditionalInputReseed"));
    ASSERT_TRUE(t->GetBytes(&ai1, "AdditionalInput1"));
    ASSERT_TRUE(t->GetBytes(&ai2, "AdditionalInput2"));
    ASSERT_TRUE(t->GetBytes(&expected, "ReturnedBits"));
    ASSERT_EQ(static_cast<size_t>(CTR_DRBG_ENTROPY_LEN), seed.size());
    ASSERT_EQ(static_cast<size_t>(CTR_DRBG_ENTROPY_LEN), reseed.size());
    CTR_DRBG_STATE drbg;
    // Assert each DRBG operation succeeds (previously unchecked).
    ASSERT_TRUE(CTR_DRBG_init(
        &drbg, seed.data(),
        personalisation.empty() ? nullptr : personalisation.data(),
        personalisation.size()));
    ASSERT_TRUE(CTR_DRBG_reseed(
        &drbg, reseed.data(), ai_reseed.empty() ? nullptr : ai_reseed.data(),
        ai_reseed.size()));
    std::vector<uint8_t> out;
    out.resize(expected.size());
    ASSERT_TRUE(CTR_DRBG_generate(&drbg, out.data(), out.size(),
                                  ai1.empty() ? nullptr : ai1.data(),
                                  ai1.size()));
    ASSERT_TRUE(CTR_DRBG_generate(&drbg, out.data(), out.size(),
                                  ai2.empty() ? nullptr : ai2.data(),
                                  ai2.size()));
    EXPECT_EQ(Bytes(expected), Bytes(out));
  });
}
// Verifies that init and reseed reject entropy buffers that alias the
// personalization/additional-data input, fully or partially.
TEST(CTRDRBGTest, NoAlias) {
  const uint8_t kSeed[CTR_DRBG_ENTROPY_LEN] = {0};
  const uint8_t kSeedOversized[CTR_DRBG_ENTROPY_LEN+10] = {0};
  // Two kinds of aliasing: exactly equal, and partially overlapping.
  const uint8_t *alias_equal = kSeed;
  const uint8_t *alias_overlapping = &kSeedOversized[10];
  CTR_DRBG_STATE drbg;
  ASSERT_FALSE(CTR_DRBG_init(&drbg, kSeed, alias_equal, CTR_DRBG_ENTROPY_LEN));
  ASSERT_FALSE(CTR_DRBG_init(&drbg, kSeedOversized, alias_overlapping,
                             CTR_DRBG_ENTROPY_LEN));
  // A non-aliasing init must still succeed so reseed can be exercised.
  ASSERT_TRUE(CTR_DRBG_init(&drbg, kSeed, nullptr, 0));
  ASSERT_FALSE(CTR_DRBG_reseed(&drbg, kSeed, alias_equal,
                               CTR_DRBG_ENTROPY_LEN));
  ASSERT_FALSE(CTR_DRBG_reseed(&drbg, kSeedOversized, alias_overlapping,
                               CTR_DRBG_ENTROPY_LEN));
  CTR_DRBG_clear(&drbg);
}

View File

@@ -0,0 +1,100 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC
#include <gtest/gtest.h>
#include <openssl/crypto.h>
#include "internal.h"
#include "../../../ube/vm_ube_detect.h"
#define MAX_MULTIPLE_FROM_RNG (16)
// In the future this test can be improved by being able to predict whether the
// test is running on hardware that we expect to support RNDR. This will require
// amending the CI with such information.
// For now, simply ensure we exercise all code-paths in the hw rng
// implementations.
TEST(EntropySourceHw, Aarch64) {
  uint8_t buf[MAX_MULTIPLE_FROM_RNG*8] = { 0 } ;
#if !defined(OPENSSL_AARCH64) || defined(OPENSSL_NO_ASM)
  // Without Aarch64 assembly the hw rng must report unavailable and all
  // requests must fail.
  ASSERT_FALSE(have_hw_rng_aarch64_for_testing());
  ASSERT_FALSE(rndr_multiple8(buf, 0));
  ASSERT_FALSE(rndr_multiple8(buf, 8));
#else
  if (have_hw_rng_aarch64_for_testing() != 1) {
    GTEST_SKIP() << "Compiled for Arm64, but Aarch64 hw rng is not available in run-time";
  }
  // Extracting 0 bytes is never supported.
  ASSERT_FALSE(rndr_multiple8(buf, 0));
  // Multiples of 8 allowed, up to the full size of |buf|. The previous bound
  // (i < MAX_MULTIPLE_FROM_RNG, i.e. 16) only ever exercised i == 8 even
  // though |buf| holds MAX_MULTIPLE_FROM_RNG * 8 bytes.
  for (size_t i = 8; i <= sizeof(buf); i += 8) {
    ASSERT_TRUE(rndr_multiple8(buf, i));
  }
  // Must be multiples of 8.
  for (size_t i : {1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15}) {
    ASSERT_FALSE(rndr_multiple8(buf, i));
  }
#endif
}
TEST(EntropySourceHw, x86_64) {
  uint8_t buf[MAX_MULTIPLE_FROM_RNG*8] = { 0 } ;
#if !defined(OPENSSL_X86_64) || defined(OPENSSL_NO_ASM)
  // Without x86_64 assembly the hw rng must report unavailable and all
  // requests must fail.
  ASSERT_FALSE(have_hw_rng_x86_64_for_testing());
  ASSERT_FALSE(rdrand_multiple8(buf, 0));
  ASSERT_FALSE(rdrand_multiple8(buf, 8));
#else
  if (have_hw_rng_x86_64_for_testing() != 1) {
    GTEST_SKIP() << "Compiled for x86_64, but x86_64 hw rng is not available in run-time";
  }
  // Extracting 0 bytes is never supported.
  ASSERT_FALSE(rdrand_multiple8(buf, 0));
  // Multiples of 8 allowed, up to the full size of |buf|. The previous bound
  // (i < MAX_MULTIPLE_FROM_RNG, i.e. 16) only ever exercised i == 8 even
  // though |buf| holds MAX_MULTIPLE_FROM_RNG * 8 bytes.
  for (size_t i = 8; i <= sizeof(buf); i += 8) {
    ASSERT_TRUE(rdrand_multiple8(buf, i));
  }
  // Must be multiples of 8.
  for (size_t i : {1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15}) {
    ASSERT_FALSE(rdrand_multiple8(buf, i));
  }
#endif
}
// Checks that the entropy source selected at run-time matches the build
// configuration and the VM UBE detection capability of the platform.
TEST(EntropySources, Configuration) {
  // Force the RNG stack to initialize so a source has been chosen.
  uint8_t buf[1];
  ASSERT_TRUE(RAND_bytes(buf, sizeof(buf)));
  // VM UBE detection is only defined for Linux. So, only strongly assert on
  // that kernel.
#if defined(AWSLC_VM_UBE_TESTING) && defined(OPENSSL_LINUX)
  EXPECT_EQ(OPT_OUT_CPU_JITTER_ENTROPY_SOURCE, get_entropy_source_method_id_FOR_TESTING());
  // If entropy build configuration choose to explicitly opt-out of CPU Jitter
  // Entropy
#elif defined(DISABLE_CPU_JITTER_ENTROPY)
  EXPECT_EQ(OPT_OUT_CPU_JITTER_ENTROPY_SOURCE, get_entropy_source_method_id_FOR_TESTING());
#else
  // Default expectation is the tree-DRBG/Jitter source, unless the platform
  // supports VM UBE detection, which switches to the opt-out source.
  int expected_entropy_source_id = TREE_DRBG_JITTER_ENTROPY_SOURCE;
  if (CRYPTO_get_vm_ube_supported()) {
    expected_entropy_source_id = OPT_OUT_CPU_JITTER_ENTROPY_SOURCE;
  }
  EXPECT_EQ(expected_entropy_source_id, get_entropy_source_method_id_FOR_TESTING());
  // For FIPS build we can strongly assert.
  if (FIPS_mode() == 1 && CRYPTO_get_vm_ube_supported() != 1) {
    EXPECT_NE(OPT_OUT_CPU_JITTER_ENTROPY_SOURCE, get_entropy_source_method_id_FOR_TESTING());
  }
#endif
}

View File

@@ -0,0 +1,218 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC
#include <openssl/base.h>
#include <openssl/target.h>
#include "internal.h"
#include "../internal.h"
#include "../../delocate.h"
#include "../../../rand_extra/internal.h"
#include "../../../ube/vm_ube_detect.h"
DEFINE_BSS_GET(const struct entropy_source_methods *, entropy_source_methods_override)
DEFINE_BSS_GET(int, allow_entropy_source_methods_override)
DEFINE_STATIC_MUTEX(global_entropy_source_lock)
// entropy_cpu_get_entropy fills |entropy| with |entropy_len| bytes from the
// CPU rng (rdrand on x86_64, rndr on Aarch64). Returns 1 on success and 0
// on failure or when no CPU rng backend is compiled in.
static int entropy_cpu_get_entropy(uint8_t *entropy, size_t entropy_len) {
#if defined(OPENSSL_X86_64)
  return rdrand_multiple8(entropy, entropy_len) == 1;
#elif defined(OPENSSL_AARCH64)
  return rndr_multiple8(entropy, entropy_len) == 1;
#else
  return 0;
#endif
}
// Prediction-resistance callback backed by the CPU rng. Returns 1 on
// success, 0 on failure.
static int entropy_cpu_get_prediction_resistance(
    const struct entropy_source_t *entropy_source,
    uint8_t pred_resistance[RAND_PRED_RESISTANCE_LEN]) {
  const size_t request_len = RAND_PRED_RESISTANCE_LEN;
  return entropy_cpu_get_entropy(pred_resistance, request_len);
}
// Extra-entropy callback backed by the CPU rng. Returns 1 on success, 0 on
// failure.
static int entropy_cpu_get_extra_entropy(
    const struct entropy_source_t *entropy_source,
    uint8_t extra_entropy[CTR_DRBG_ENTROPY_LEN]) {
  const size_t request_len = CTR_DRBG_ENTROPY_LEN;
  return entropy_cpu_get_entropy(extra_entropy, request_len);
}
// Extra-entropy callback backed by the operating system rng. CRYPTO_sysrand
// has no failure signal here, so this always reports success.
static int entropy_os_get_extra_entropy(
    const struct entropy_source_t *entropy_source,
    uint8_t extra_entropy[CTR_DRBG_ENTROPY_LEN]) {
  CRYPTO_sysrand(extra_entropy, CTR_DRBG_ENTROPY_LEN);
  return 1;
}
// Tree-DRBG entropy source configuration.
// - Tree DRBG with Jitter Entropy as root for seeding.
// - OS as personalization string source.
// - If run-time is on an x86_64 or Arm64 CPU and it supports rdrand
//   or rndr respectively, use it as a source for prediction resistance.
//   Otherwise, no source.
DEFINE_LOCAL_DATA(struct entropy_source_methods, tree_jitter_entropy_source_methods) {
  out->initialize = tree_jitter_initialize;
  out->zeroize_thread = tree_jitter_zeroize_thread_drbg;
  out->free_thread = tree_jitter_free_thread_drbg;
  out->get_seed = tree_jitter_get_seed;
  out->get_extra_entropy = entropy_os_get_extra_entropy;
  // Prediction resistance is only wired up when a hardware CPU rng is
  // available; otherwise the callback stays NULL (meaning "no source").
  if (have_hw_rng_x86_64() == 1 ||
      have_hw_rng_aarch64() == 1) {
    out->get_prediction_resistance = entropy_cpu_get_prediction_resistance;
  } else {
    out->get_prediction_resistance = NULL;
  }
  out->id = TREE_DRBG_JITTER_ENTROPY_SOURCE;
}
// The opt-out configuration has no state to set up; initialization always
// succeeds.
static int opt_out_cpu_jitter_initialize(
    struct entropy_source_t *entropy_source) {
  return 1;
}
// No per-thread state exists in the opt-out configuration; nothing to scrub.
static void opt_out_cpu_jitter_zeroize_thread(struct entropy_source_t *entropy_source) {}
// No per-thread state exists in the opt-out configuration; nothing to free.
static void opt_out_cpu_jitter_free_thread(struct entropy_source_t *entropy_source) {}
// Seed callback for the opt-out configuration; delegates to the VM-UBE
// fallback seed source.
static int opt_out_cpu_jitter_get_seed_wrap(
    const struct entropy_source_t *entropy_source, uint8_t seed[CTR_DRBG_ENTROPY_LEN]) {
  return vm_ube_fallback_get_seed(seed);
}
// Define conditions for not using CPU Jitter.
// is_vm_ube_environment reports whether the platform supports VM UBE
// detection, as returned by |CRYPTO_get_vm_ube_supported|.
static int is_vm_ube_environment(void) {
  return CRYPTO_get_vm_ube_supported();
}
// Returns 1 when the build explicitly opted out of CPU Jitter entropy via
// DISABLE_CPU_JITTER_ENTROPY, 0 otherwise.
static int has_explicitly_opted_out_of_cpu_jitter(void) {
#if defined(DISABLE_CPU_JITTER_ENTROPY)
  return 1;
#else
  return 0;
#endif
}
// use_opt_out_cpu_jitter_entropy returns 1 when the opt-out configuration
// should be used: either the build disabled CPU Jitter entropy or the
// platform supports VM UBE detection. Returns 0 otherwise.
static int use_opt_out_cpu_jitter_entropy(void) {
  const int opted_out = has_explicitly_opted_out_of_cpu_jitter();
  const int vm_ube = is_vm_ube_environment();
  return (opted_out == 1 || vm_ube == 1) ? 1 : 0;
}
// Opt-out CPU Jitter configuration. A CPU source is required for the
// rule-of-two.
// - OS as seed source.
// - Uses rdrand or rndr, if supported, for personalization string. Otherwise
//   falls back to OS source.
DEFINE_LOCAL_DATA(struct entropy_source_methods, opt_out_cpu_jitter_entropy_source_methods) {
  out->initialize = opt_out_cpu_jitter_initialize;
  out->zeroize_thread = opt_out_cpu_jitter_zeroize_thread;
  out->free_thread = opt_out_cpu_jitter_free_thread;
  out->get_seed = opt_out_cpu_jitter_get_seed_wrap;
  if (have_hw_rng_x86_64() == 1 ||
      have_hw_rng_aarch64() == 1) {
    out->get_extra_entropy = entropy_cpu_get_extra_entropy;
  } else {
    // Fall back to seed source because a second source must always be present.
    out->get_extra_entropy = opt_out_cpu_jitter_get_seed_wrap;
  }
  out->get_prediction_resistance = NULL;
  out->id = OPT_OUT_CPU_JITTER_ENTROPY_SOURCE;
}
// get_entropy_source_methods selects the method table: a test override, if
// installed, takes precedence; otherwise the opt-out or tree-jitter
// configuration is chosen based on the build and run-time environment.
static const struct entropy_source_methods * get_entropy_source_methods(void) {
  if (*allow_entropy_source_methods_override_bss_get() == 1) {
    return *entropy_source_methods_override_bss_get();
  }
  return use_opt_out_cpu_jitter_entropy()
             ? opt_out_cpu_jitter_entropy_source_methods()
             : tree_jitter_entropy_source_methods();
}
// get_entropy_source allocates and initializes an entropy source for the
// current platform. Returns NULL on allocation failure, an incomplete
// method table, or failed initialization.
struct entropy_source_t * get_entropy_source(void) {
  struct entropy_source_t *entropy_source = OPENSSL_zalloc(sizeof(struct entropy_source_t));
  if (entropy_source == NULL) {
    return NULL;
  }
  entropy_source->methods = get_entropy_source_methods();
  // Make sure that the function table contains the minimal number of callbacks
  // that we expect. Also make sure that the entropy source is initialized such
  // that calling code can assume that. Note: |get_extra_entropy| and
  // |get_prediction_resistance| are intentionally not part of this minimum.
  if (entropy_source->methods == NULL ||
      entropy_source->methods->zeroize_thread == NULL ||
      entropy_source->methods->free_thread == NULL ||
      entropy_source->methods->get_seed == NULL ||
      entropy_source->methods->initialize == NULL ||
      entropy_source->methods->initialize(entropy_source) != 1) {
    OPENSSL_free(entropy_source);
    return NULL;
  }
  return entropy_source;
}
// rndr_multiple8 fills |buf| with |len| bytes from the rndr instruction.
// |len| must be a positive multiple of 8. Returns 1 on success, 0 otherwise.
int rndr_multiple8(uint8_t *buf, const size_t len) {
  // Reject zero-length and non-multiple-of-8 requests up front.
  const int invalid_length = (len == 0) || ((len % 8) != 0);
  if (invalid_length) {
    return 0;
  }
  return CRYPTO_rndr_multiple8(buf, len);
}
// have_hw_rng_aarch64_for_testing exposes |have_hw_rng_aarch64| so tests can
// query whether the Aarch64 hardware rng is usable.
int have_hw_rng_aarch64_for_testing(void) {
  return have_hw_rng_aarch64();
}
// rdrand maximum retries as suggested by:
// Intel® Digital Random Number Generator (DRNG) Software Implementation Guide
// Revision 2.1
// https://software.intel.com/content/www/us/en/develop/articles/intel-digital-random-number-generator-drng-software-implementation-guide.html
#define RDRAND_MAX_RETRIES 10
OPENSSL_STATIC_ASSERT(RDRAND_MAX_RETRIES > 0, rdrand_max_retries_must_be_positive)
// rdrand_multiple8 should only be called if |have_hw_rng_x86_64| returned
// true. Fills |buf| with |len| bytes from rdrand; |len| must be a positive
// multiple of 8. Returns 1 on success, 0 otherwise.
int rdrand_multiple8(uint8_t *buf, size_t len) {
  if (len == 0 || (len % 8) != 0) {
    return 0;
  }
  // Retry the whole request on failure. |CRYPTO_rdrand_multiple8| will
  // typically execute rdrand multiple times internally, but retrying at the
  // C level is simpler and a failed call should be a very rare event.
  size_t attempts_left = RDRAND_MAX_RETRIES;
  while (attempts_left > 0) {
    if (CRYPTO_rdrand_multiple8(buf, len) == 1) {
      return 1;
    }
    attempts_left--;
  }
  return 0;
}
// have_hw_rng_x86_64_for_testing exposes |have_hw_rng_x86_64| so tests can
// query whether the x86_64 hardware rng is usable.
int have_hw_rng_x86_64_for_testing(void) {
  return have_hw_rng_x86_64();
}
// override_entropy_source_method_FOR_TESTING installs
// |override_entropy_source_methods| as the globally selected method table.
// Both the flag and the pointer are written under the global write lock so
// readers in |get_entropy_source_method_id_FOR_TESTING| see them together.
void override_entropy_source_method_FOR_TESTING(
    const struct entropy_source_methods *override_entropy_source_methods) {
  CRYPTO_STATIC_MUTEX_lock_write(global_entropy_source_lock_bss_get());
  *allow_entropy_source_methods_override_bss_get() = 1;
  *entropy_source_methods_override_bss_get() = override_entropy_source_methods;
  CRYPTO_STATIC_MUTEX_unlock_write(global_entropy_source_lock_bss_get());
}
int get_entropy_source_method_id_FOR_TESTING(void) {
int id;
CRYPTO_STATIC_MUTEX_lock_read(global_entropy_source_lock_bss_get());
const struct entropy_source_methods *entropy_source_method = get_entropy_source_methods();
id = entropy_source_method->id;
CRYPTO_STATIC_MUTEX_unlock_read(global_entropy_source_lock_bss_get());
return id;
}

View File

@@ -0,0 +1,170 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC
#ifndef OPENSSL_HEADER_CRYPTO_RAND_ENTROPY_INTERNAL_H
#define OPENSSL_HEADER_CRYPTO_RAND_ENTROPY_INTERNAL_H
#include <openssl/ctrdrbg.h>
#include <openssl/rand.h>
#include "../../cpucap/internal.h"
#if defined(__cplusplus)
extern "C" {
#endif
#define OVERRIDDEN_ENTROPY_SOURCE 0
#define TREE_DRBG_JITTER_ENTROPY_SOURCE 1
#define OPT_OUT_CPU_JITTER_ENTROPY_SOURCE 2
#define ENTROPY_JITTER_MAX_NUM_TRIES (3)
// TREE_JITTER_GLOBAL_DRBG_MAX_GENERATE = 2^24
#define TREE_JITTER_GLOBAL_DRBG_MAX_GENERATE 0x1000000
// TREE_JITTER_THREAD_DRBG_MAX_GENERATE = 2^20
#define TREE_JITTER_THREAD_DRBG_MAX_GENERATE 0x100000
// entropy_source_t pairs an entropy source's opaque per-instance |state|
// with its function table |methods|.
struct entropy_source_t {
  void *state;
  const struct entropy_source_methods *methods;
};
// entropy_source_methods is the function table implemented by each entropy
// source configuration.
struct entropy_source_methods {
  // initialize prepares |entropy_source| for use. Returns 1 on success.
  int (*initialize)(struct entropy_source_t *entropy_source);
  // zeroize_thread scrubs any thread-local state.
  void (*zeroize_thread)(struct entropy_source_t *entropy_source);
  // free_thread releases any thread-local state.
  void (*free_thread)(struct entropy_source_t *entropy_source);
  // get_seed writes CTR_DRBG_ENTROPY_LEN seed bytes. Returns 1 on success.
  int (*get_seed)(const struct entropy_source_t *entropy_source,
                  uint8_t seed[CTR_DRBG_ENTROPY_LEN]);
  // get_extra_entropy writes CTR_DRBG_ENTROPY_LEN bytes from a second
  // source. Returns 1 on success.
  int (*get_extra_entropy)(const struct entropy_source_t *entropy_source,
                           uint8_t extra_entropy[CTR_DRBG_ENTROPY_LEN]);
  // get_prediction_resistance writes RAND_PRED_RESISTANCE_LEN bytes; may be
  // NULL when no prediction-resistance source is configured.
  int (*get_prediction_resistance)(const struct entropy_source_t *entropy_source,
                                   uint8_t pred_resistance[RAND_PRED_RESISTANCE_LEN]);
  // id is one of the *_ENTROPY_SOURCE identifiers above.
  int id;
};
// get_entropy_source will return an entropy source configured for the platform.
struct entropy_source_t * get_entropy_source(void);
// override_entropy_source_method_FOR_TESTING will override the global
// entropy source that is assigned when calling |get_entropy_source|.
// |override_entropy_source_method_FOR_TESTING| can be called multiple times but
// it's designed to allow overriding the entropy source for testing purposes at
// the start of a process.
OPENSSL_EXPORT void override_entropy_source_method_FOR_TESTING(
const struct entropy_source_methods *override_entropy_source_methods);
OPENSSL_EXPORT int get_entropy_source_method_id_FOR_TESTING(void);
#if !defined(DISABLE_CPU_JITTER_ENTROPY)
OPENSSL_EXPORT int tree_jitter_initialize(struct entropy_source_t *entropy_source);
OPENSSL_EXPORT void tree_jitter_zeroize_thread_drbg(struct entropy_source_t *entropy_source);
OPENSSL_EXPORT void tree_jitter_free_thread_drbg(struct entropy_source_t *entropy_source);
OPENSSL_EXPORT int tree_jitter_get_seed(
const struct entropy_source_t *entropy_source, uint8_t seed[CTR_DRBG_ENTROPY_LEN]);
#else // !defined(DISABLE_CPU_JITTER_ENTROPY)
// Define stubs for tree-DRBG functions that implements the entropy source
// interface. These exist only so references compile when CPU Jitter entropy
// is disabled; initialize/get_seed report failure and the thread hooks must
// never be reached.
// NOTE(review): abort() needs <stdlib.h>; presumably supplied transitively
// by the headers above — confirm.
static inline int tree_jitter_initialize(struct entropy_source_t *entropy_source) { return 0; }
static inline void tree_jitter_zeroize_thread_drbg(struct entropy_source_t *entropy_source) { abort(); }
static inline void tree_jitter_free_thread_drbg(struct entropy_source_t *entropy_source) { abort(); }
static inline int tree_jitter_get_seed(const struct entropy_source_t *entropy_source, uint8_t seed[CTR_DRBG_ENTROPY_LEN]) { return 0; }
#endif // !defined(DISABLE_CPU_JITTER_ENTROPY)
// rndr_multiple8 writes |len| number of bytes to |buf| generated using the
// rndr instruction. |len| must be a multiple of 8.
// Outputs 1 on success, 0 otherwise.
OPENSSL_EXPORT int rndr_multiple8(uint8_t *buf, const size_t len);
// have_hw_rng_aarch64_for_testing wraps |have_hw_rng_aarch64| to allow usage
// in testing.
OPENSSL_EXPORT int have_hw_rng_aarch64_for_testing(void);
#if defined(OPENSSL_AARCH64) && !defined(OPENSSL_NO_ASM)
// CRYPTO_rndr_multiple8 writes |len| number of bytes to |buf| generated using
// the rndr instruction. |len| must be a multiple of 8 and positive.
// Outputs 1 on success, 0 otherwise.
int CRYPTO_rndr_multiple8(uint8_t *out, size_t out_len);
// Returns 1 if Armv8-A instruction rndr is available, 0 otherwise, based on
// the CPU capability vector.
OPENSSL_INLINE int have_hw_rng_aarch64(void) {
  return CRYPTO_is_ARMv8_RNDR_capable();
}
#else // defined(OPENSSL_AARCH64) && !defined(OPENSSL_NO_ASM)
// Stub for builds without Aarch64 assembly: always fails.
OPENSSL_INLINE int CRYPTO_rndr_multiple8(uint8_t *out, size_t out_len) {
  return 0;
}
// Stub for builds without Aarch64 assembly: no hardware rng available.
OPENSSL_INLINE int have_hw_rng_aarch64(void) {
  return 0;
}
#endif // defined(OPENSSL_AARCH64) && !defined(OPENSSL_NO_ASM)
// rdrand_multiple8 writes |len| number of bytes to |buf| generated using the
// rdrand instruction. |len| must be a multiple of 8. Retries up to
// RDRAND_MAX_RETRIES times on failure.
// Outputs 1 on success, 0 otherwise.
OPENSSL_EXPORT int rdrand_multiple8(uint8_t *buf, size_t len);
// have_hw_rng_x86_64_for_testing wraps |have_hw_rng_x86_64| to allow usage
// in testing.
OPENSSL_EXPORT int have_hw_rng_x86_64_for_testing(void);
#if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM)
// Certain operating environments will disable RDRAND for both security and
// performance reasons. See initialization of CPU capability vector for details.
// At the moment, we must implement this logic there because the CPU capability
// vector does not carry CPU family/model information which is required to
// determine restrictions.
// Returns 1 if the rdrand instruction is usable, 0 otherwise, based on the
// CPU capability vector (see the restriction note above).
OPENSSL_INLINE int have_hw_rng_x86_64(void) {
  return CRYPTO_is_RDRAND_capable();
}
// CRYPTO_rdrand_multiple8 writes |len| number of bytes to |buf| generated using
// the rdrand instruction. |len| must be a multiple of 8 and positive.
// Outputs 1 on success, 0 otherwise.
int CRYPTO_rdrand_multiple8(uint8_t *buf, size_t len);
#else // defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM)
OPENSSL_INLINE int CRYPTO_rdrand_multiple8(uint8_t *buf, size_t len) {
return 0;
}
OPENSSL_INLINE int have_hw_rng_x86_64(void) {
return 0;
}
#endif // defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM)
// test_tree_drbg_t is a snapshot of tree-DRBG counters, exposed for tests.
struct test_tree_drbg_t {
  // Generate calls made on the thread-local seed DRBG since its last
  // seed/reseed.
  uint64_t thread_generate_calls_since_seed;
  // Seed/reseed operations applied to the thread-local seed DRBG since its
  // initialization.
  uint64_t thread_reseed_calls_since_initialization;
  // Generate calls made on the global seed DRBG since its last seed/reseed.
  uint64_t global_generate_calls_since_seed;
  // Seed/reseed operations applied to the global seed DRBG since its
  // initialization.
  uint64_t global_reseed_calls_since_initialization;
};

// get_thread_and_global_tree_drbg_calls_FOR_TESTING returns the number of
// generate calls since seed/reseed for the thread-local and global tree-DRBG.
// In addition, it returns the number of reseeds applied on the thread-local
// and global tree-DRBG. These values are copied to |test_tree_drbg|.
OPENSSL_EXPORT int get_thread_and_global_tree_drbg_calls_FOR_TESTING(
    const struct entropy_source_t *entropy_source,
    struct test_tree_drbg_t *test_tree_drbg);

// set_thread_and_global_tree_drbg_reseed_counter_FOR_TESTING sets the reseed
// counter for either the thread-local and/or global tree-DRBG. If either of
// |thread_reseed_calls| or |global_reseed_calls| are equal to 0, their
// reseed counter is not set.
OPENSSL_EXPORT int set_thread_and_global_tree_drbg_reseed_counter_FOR_TESTING(
    struct entropy_source_t *entropy_source, uint64_t thread_reseed_calls,
    uint64_t global_reseed_calls);
#if defined(__cplusplus)
} // extern C
#endif
#endif // OPENSSL_HEADER_CRYPTO_RAND_ENTROPY_INTERNAL_H

View File

@@ -0,0 +1,541 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC
#if !defined(DISABLE_CPU_JITTER_ENTROPY)
#include <openssl/ctrdrbg.h>
#include <openssl/mem.h>
#include <openssl/type_check.h>
#include "internal.h"
#include "../internal.h"
#include "../../delocate.h"
#include "../../../internal.h"
#include "../../../rand_extra/internal.h"
#include "../../../ube/internal.h"
#include "../../../../third_party/jitterentropy/jitterentropy-library/jitterentropy.h"
// Randomness generation implements thread-local "frontend" DRBGs that serve
// requests for randomness from consumers through exported functions such as
// RAND_bytes(). This file implements a tree-DRBG from SP800-90C as a seed
// entropy source for the frontend DRBGs. The implemented tree-DRBG has the
// following characteristics:
// - A per-thread seed DRBG that serves seed requests for a thread-local
// frontend DRBG.
// - A global seed DRBG that serves seed requests from the thread-local seed
// DRBGs.
// - A root seed source that serves seed requests from the global seed DRBG.
// The root seed source is a global instance of Jitter Entropy.
//
// The dependency tree looks as follows:
//
// entropy_source
// interface
// |
// rand.c | tree_drbg_jitter_entropy.c
// |
// front-end | tree-DRBG
// per-thread | per-thread
// +-----------+ | +-----------+
// | CTR-DRBG | --> | CTR-DRBG | -|
// +-----------+ | +-----------+ -|
// +-----------+ | +-----------+ --| per-process per-process
// | CTR-DRBG | --> | CTR-DRBG | ---| --> +-----------+ +----------------+
// +-----------+ | +-----------+ -----> | CTR-DRBG | --> | Jitter Entropy |
// ... | ... --> +-----------+ +----------------+
// +-----------+ | +-----------+ -----|
// | CTR-DRBG | --> | CTR-DRBG |-|
// +-----------+ | +-----------+
// |
//
// Memory life-cycle: The thread-local DRBGs have the same storage duration as
// their corresponding thread-local frontend DRBGs. The per-process DRBG and
// Jitter Entropy instance has a storage duration that extends to the duration
// of AWS-LC being loaded into the process. The per-process memory is lazily
// allocated.
// To serve seed requests from the frontend DRBGs the following
// |struct entropy_source_methods| interface functions are implemented:
// - tree_jitter_initialize
// - tree_jitter_zeroize_thread_drbg
// - tree_jitter_free_thread_drbg
// - tree_jitter_get_seed
// tree_jitter_drbg_t is the state of a single node (thread-local or
// per-process) in the seed tree-DRBG.
struct tree_jitter_drbg_t {
  // is_global is 1 if this object is the per-process seed DRBG. Otherwise 0.
  uint8_t is_global;
  // drbg is the DRBG state.
  CTR_DRBG_STATE drbg;
  // max_generate_calls is the maximum number of generate calls that can be
  // invoked on |drbg| without a reseed.
  uint64_t max_generate_calls;
  // reseed_calls_since_initialization is the number of seed/reseed calls made
  // on |drbg| since its initialization.
  // We assume 2^64 - 1 is an upper bound on the number of reseeds. Type must
  // support that.
  uint64_t reseed_calls_since_initialization;
  // generation_number caches the UBE generation number.
  uint64_t generation_number;
  // ube_protection denotes whether this object is protected from UBEs.
  uint8_t ube_protection;
  // Jitter entropy state. NULL if not the per-process seed DRBG.
  struct rand_data *jitter_ec;
};

// Per-process seed DRBG state and its synchronization primitives:
// - global_seed_drbg: lazily-allocated pointer to the per-process seed DRBG.
// - global_seed_drbg_once: guards one-time initialization.
// - global_seed_drbg_zeroize_once: guards one-time zeroization.
// - global_seed_drbg_lock: rw-lock serializing access to the global seed DRBG.
DEFINE_BSS_GET(struct tree_jitter_drbg_t *, global_seed_drbg)
DEFINE_STATIC_ONCE(global_seed_drbg_once)
DEFINE_STATIC_ONCE(global_seed_drbg_zeroize_once)
DEFINE_STATIC_MUTEX(global_seed_drbg_lock)
// tree_jitter_get_root_seed generates |CTR_DRBG_ENTROPY_LEN| bytes of output
// from the Jitter Entropy instance configured in |tree_jitter_drbg|. The output
// is returned in |seed_out|.
// Access to this function must be synchronized.
static void tree_jitter_get_root_seed(
struct tree_jitter_drbg_t *tree_jitter_drbg,
uint8_t seed_out[CTR_DRBG_ENTROPY_LEN]) {
if (tree_jitter_drbg->jitter_ec == NULL) {
abort();
}
// |jent_read_entropy| has a false positive health test failure rate of 2^-22.
// To avoid aborting so frequently, we retry 3 times.
char jitter_generated_output = 0;
for (size_t num_tries = 1; num_tries <= ENTROPY_JITTER_MAX_NUM_TRIES; num_tries++) {
// Try to generate the required number of bytes with Jitter.
// If successful break out from the loop, otherwise try again.
if (jent_read_entropy(tree_jitter_drbg->jitter_ec, (char *) seed_out,
CTR_DRBG_ENTROPY_LEN) == (ssize_t) CTR_DRBG_ENTROPY_LEN) {
jitter_generated_output = 1;
break;
}
// If Jitter entropy failed to produce entropy we need to reset it.
jent_entropy_collector_free(tree_jitter_drbg->jitter_ec);
tree_jitter_drbg->jitter_ec = NULL;
tree_jitter_drbg->jitter_ec = jent_entropy_collector_alloc(0, JENT_FORCE_FIPS);
if (tree_jitter_drbg->jitter_ec == NULL) {
abort();
}
}
if (jitter_generated_output != 1) {
abort();
}
}
// tree_jitter_drbg_maybe_get_pred_resistance adds RAND_PRED_RESISTANCE_LEN
// bytes of OS-sourced prediction resistance to |pred_resistance|, but only
// when |tree_jitter_drbg| is 1) a thread-local DRBG that 2) is not protected
// from UBEs. If bytes are generated, |pred_resistance_len| is set to
// RAND_PRED_RESISTANCE_LEN; otherwise it is left untouched.
static void tree_jitter_drbg_maybe_get_pred_resistance(
    struct tree_jitter_drbg_t *tree_jitter_drbg,
    uint8_t pred_resistance[RAND_PRED_RESISTANCE_LEN],
    size_t *pred_resistance_len) {
  const int is_thread_local = (tree_jitter_drbg->is_global == 0);
  const int lacks_ube_protection = (tree_jitter_drbg->ube_protection != 1);
  if (is_thread_local && lacks_ube_protection) {
    CRYPTO_sysrand(pred_resistance, RAND_PRED_RESISTANCE_LEN);
    *pred_resistance_len = RAND_PRED_RESISTANCE_LEN;
  }
}
// tree_jitter_check_drbg_must_reseed decides whether the DRBG in
// |tree_jitter_drbg| must be reseeded before generating more output. A reseed
// is required when a UBE (e.g. fork/snapshot) has been detected, or when the
// DRBG exceeded its per-seed generate budget. On a detected UBE the cached
// generation number is refreshed as a side effect.
//
// Returns 1 if a reseed is required, 0 otherwise.
static int tree_jitter_check_drbg_must_reseed(
    struct tree_jitter_drbg_t *tree_jitter_drbg) {
  uint64_t observed_generation = 0;
  const int detection_available =
      CRYPTO_get_ube_generation_number(&observed_generation);
  if (detection_available == 1 &&
      tree_jitter_drbg->generation_number != observed_generation) {
    tree_jitter_drbg->generation_number = observed_generation;
    return 1;
  }

  // drbg.reseed_counter is initialized to 1 and incremented after each
  // generate call; |max_generate_calls| caps the generate calls per seed.
  if (tree_jitter_drbg->drbg.reseed_counter >
      tree_jitter_drbg->max_generate_calls) {
    return 1;
  }

  return 0;
}
// tree_jitter_drbg_derive_seed generates a CTR_DRBG_ENTROPY_LEN byte seed from
// the DRBG configured in |tree_jitter_drbg|. The generated bytes are returned
// in |seed_out|.
//
// |tree_jitter_drbg_derive_seed| automatically handles reseeding the
// associated DRBG if required. In addition, if UBE detection is not supported,
// prediction resistance is used to ensure bytes are generated safely.
//
// Note: for a thread-local DRBG this function recurses exactly one level into
// the global seed DRBG, taking |global_seed_drbg_lock| for that call.
static void tree_jitter_drbg_derive_seed(
    struct tree_jitter_drbg_t *tree_jitter_drbg,
    uint8_t seed_out[CTR_DRBG_ENTROPY_LEN]) {
  if (tree_jitter_drbg == NULL) {
    abort();
  }

  if (tree_jitter_check_drbg_must_reseed(tree_jitter_drbg) == 1) {
    uint8_t seed_drbg[CTR_DRBG_ENTROPY_LEN];
    if (tree_jitter_drbg->is_global == 1) {
      // The global seed DRBG reseeds directly from the Jitter Entropy root.
      tree_jitter_get_root_seed(tree_jitter_drbg, seed_drbg);
    } else {
      // A thread-local seed DRBG reseeds from the global seed DRBG, which
      // requires synchronized access.
      CRYPTO_STATIC_MUTEX_lock_write(global_seed_drbg_lock_bss_get());
      tree_jitter_drbg_derive_seed(*global_seed_drbg_bss_get(), seed_drbg);
      CRYPTO_STATIC_MUTEX_unlock_write(global_seed_drbg_lock_bss_get());
    }
    if (CTR_DRBG_reseed(&(tree_jitter_drbg->drbg), seed_drbg, NULL, 0) != 1) {
      abort();
    }
    // Scrub the intermediate seed material as soon as it has been consumed.
    OPENSSL_cleanse(seed_drbg, CTR_DRBG_ENTROPY_LEN);
    tree_jitter_drbg->reseed_calls_since_initialization += 1;
  }

  // Optionally mix in prediction resistance (no-op when this node is the
  // global DRBG or is UBE-protected; |pred_resistance_len| stays 0 then).
  uint8_t pred_resistance[RAND_PRED_RESISTANCE_LEN];
  size_t pred_resistance_len = 0;
  tree_jitter_drbg_maybe_get_pred_resistance(tree_jitter_drbg,
    pred_resistance, &pred_resistance_len);
  OPENSSL_STATIC_ASSERT(CTR_DRBG_ENTROPY_LEN <= CTR_DRBG_MAX_GENERATE_LENGTH,
    CTR_DRBG_ENTROPY_LEN_is_too_large_compared_to_CTR_DRBG_MAX_GENERATE_LENGTH)
  if (!CTR_DRBG_generate(&(tree_jitter_drbg->drbg), seed_out, CTR_DRBG_ENTROPY_LEN,
                         pred_resistance, pred_resistance_len)) {
    abort();
  }
  OPENSSL_cleanse(pred_resistance, RAND_PRED_RESISTANCE_LEN);
}
// tree_jitter_get_seed is the entry point for generating output from the tree
// DRBG. It derives CTR_DRBG_ENTROPY_LEN bytes from the thread-local seed DRBG
// stored in |entropy_source| and writes them to |seed_out|.
//
// Returns 1 on success; pointer-argument violations abort.
int tree_jitter_get_seed(const struct entropy_source_t *entropy_source,
                         uint8_t seed_out[CTR_DRBG_ENTROPY_LEN]) {
  GUARD_PTR_ABORT(entropy_source);
  GUARD_PTR_ABORT(seed_out);

  struct tree_jitter_drbg_t *thread_seed_drbg =
      (struct tree_jitter_drbg_t *) entropy_source->state;
  tree_jitter_drbg_derive_seed(thread_seed_drbg, seed_out);
  return 1;
}
// tree_jitter_initialize_once initializes the global seed DRBG. Runs exactly
// once (via CRYPTO_once in |tree_jitter_initialize|); any allocation or
// initialization failure aborts, since the tree-DRBG cannot operate without
// its root.
static void tree_jitter_initialize_once(void) {
  struct tree_jitter_drbg_t *tree_jitter_drbg_global =
      OPENSSL_zalloc(sizeof(struct tree_jitter_drbg_t));
  if (tree_jitter_drbg_global == NULL) {
    abort();
  }

  tree_jitter_drbg_global->is_global = 1;
  tree_jitter_drbg_global->max_generate_calls = TREE_JITTER_GLOBAL_DRBG_MAX_GENERATE;
  tree_jitter_drbg_global->reseed_calls_since_initialization = 0;

  // Cache the current UBE generation number when detection is available;
  // otherwise fall back to 0.
  uint64_t current_generation_number = 0;
  if (CRYPTO_get_ube_generation_number(&current_generation_number) != 1) {
    tree_jitter_drbg_global->generation_number = 0;
  } else {
    tree_jitter_drbg_global->generation_number = current_generation_number;
  }

  // The first parameter passed to |jent_entropy_collector_alloc| function is
  // the desired oversampling rate. Passing a 0 tells Jitter module to use
  // the default rate (which is 3 in Jitter v3.6.3).
  tree_jitter_drbg_global->jitter_ec = jent_entropy_collector_alloc(0, JENT_FORCE_FIPS);
  if (tree_jitter_drbg_global->jitter_ec == NULL) {
    abort();
  }

  // Seed the global DRBG directly from the Jitter Entropy root and scrub the
  // seed material afterwards.
  uint8_t seed_drbg[CTR_DRBG_ENTROPY_LEN];
  tree_jitter_get_root_seed(tree_jitter_drbg_global, seed_drbg);
  if (!CTR_DRBG_init(&(tree_jitter_drbg_global->drbg), seed_drbg, NULL, 0)) {
    abort();
  }
  tree_jitter_drbg_global->reseed_calls_since_initialization += 1;
  OPENSSL_cleanse(seed_drbg, CTR_DRBG_ENTROPY_LEN);

  // Publish the fully-initialized object last.
  *global_seed_drbg_bss_get() = tree_jitter_drbg_global;
}
// tree_jitter_initialize initializes a thread-local seed DRBG and configures
// it in |entropy_source|. If the global seed DRBG has not been initialized yet
// it's also initialized (lazily, exactly once).
//
// Returns 1 on success; allocation or DRBG failures abort.
int tree_jitter_initialize(struct entropy_source_t *entropy_source) {
  GUARD_PTR_ABORT(entropy_source);

  // Allocate the per-thread seed DRBG (zero-initialized).
  struct tree_jitter_drbg_t *tree_jitter_drbg =
      OPENSSL_zalloc(sizeof(struct tree_jitter_drbg_t));
  if (tree_jitter_drbg == NULL) {
    abort();
  }

  // Initialize the global seed DRBG if haven't already.
  CRYPTO_once(global_seed_drbg_once_bss_get(), tree_jitter_initialize_once);

  // Seed the per-thread DRBG from the global seed DRBG under the global lock,
  // then scrub the seed material.
  uint8_t seed_drbg[CTR_DRBG_ENTROPY_LEN];
  CRYPTO_STATIC_MUTEX_lock_write(global_seed_drbg_lock_bss_get());
  tree_jitter_drbg_derive_seed(*global_seed_drbg_bss_get(), seed_drbg);
  CRYPTO_STATIC_MUTEX_unlock_write(global_seed_drbg_lock_bss_get());
  if (!CTR_DRBG_init(&(tree_jitter_drbg->drbg), seed_drbg, NULL, 0)) {
    abort();
  }
  tree_jitter_drbg->reseed_calls_since_initialization = 1;
  OPENSSL_cleanse(seed_drbg, CTR_DRBG_ENTROPY_LEN);

  tree_jitter_drbg->is_global = 0;
  tree_jitter_drbg->max_generate_calls = TREE_JITTER_THREAD_DRBG_MAX_GENERATE;

  // Record whether UBE detection protects this DRBG; if not, prediction
  // resistance will be mixed in on every generate call instead.
  uint64_t current_generation_number = 0;
  if (CRYPTO_get_ube_generation_number(&current_generation_number) != 1) {
    tree_jitter_drbg->ube_protection = 0;
    tree_jitter_drbg->generation_number = 0;
  } else {
    tree_jitter_drbg->ube_protection = 1;
    tree_jitter_drbg->generation_number = current_generation_number;
  }

  entropy_source->state = tree_jitter_drbg;
  return 1;
}
#if defined(_MSC_VER)
// MSVC has no __attribute__((destructor)). Instead, place a function pointer
// in the CRT initializer section ".CRT$XCU": it runs at startup and registers
// the cleanup routine via atexit(), emulating a destructor at process exit.
#pragma section(".CRT$XCU", read)
static void tree_jitter_free_global_drbg(void);
static void windows_install_tree_jitter_free_global_drbg(void) {
  atexit(&tree_jitter_free_global_drbg);
}
__declspec(allocate(".CRT$XCU")) void(*tree_jitter_drbg_destructor)(void) =
  windows_install_tree_jitter_free_global_drbg;
#else
// ELF/Mach-O toolchains: run the cleanup when the library is unloaded.
static void tree_jitter_free_global_drbg(void) __attribute__ ((destructor));
#endif
// The memory life-time for thread-local seed DRBGs is handled differently
// compared to the global seed DRBG (and Jitter Entropy instance). The frontend
// DRBG thread-local destructors will invoke |tree_jitter_free_thread_drbg| using
// their reference to it. The global seed DRBG and Jitter Entropy instance will
// be released by a destructor. This ensures that the global seed DRBG life-time
// extends to the entire process life-time if the lazy initialization happened.
// Obviously, any dlclose on AWS-LC will release the memory early but that's
// correct behaviour.
// tree_jitter_free_global_drbg releases the global seed DRBG together with
// the process-wide Jitter Entropy instance. Safe to call when the global DRBG
// was never initialized.
static void tree_jitter_free_global_drbg(void) {
  CRYPTO_STATIC_MUTEX_lock_write(global_seed_drbg_lock_bss_get());
  struct tree_jitter_drbg_t *global_drbg = *global_seed_drbg_bss_get();
  if (global_drbg != NULL) {
    if (global_drbg->is_global != 1) {
      // A non-global object in the global slot indicates state corruption.
      abort();
    }
    jent_entropy_collector_free(global_drbg->jitter_ec);
    OPENSSL_free(global_drbg);
    *global_seed_drbg_bss_get() = NULL;
  }
  CRYPTO_STATIC_MUTEX_unlock_write(global_seed_drbg_lock_bss_get());
}
// tree_jitter_free_thread_drbg releases the thread-local seed DRBG attached
// to |entropy_source| and clears the state pointer. A NULL state is a no-op.
void tree_jitter_free_thread_drbg(struct entropy_source_t *entropy_source) {
  GUARD_PTR_ABORT(entropy_source);
  struct tree_jitter_drbg_t *thread_drbg =
      (struct tree_jitter_drbg_t *) entropy_source->state;
  if (thread_drbg != NULL) {
    OPENSSL_free(thread_drbg);
    entropy_source->state = NULL;
  }
}
// Per ISO/IEC 19790-2012 7.9.7 "zeroization" can be random data just not other
// SSP/CSP's. The Jitter Entropy instance doesn't have any practical state; it's
// a live entropy source. The zeroization strategy used for the DRBG's is to
// reseed with random data, that in turn, will override all states in the tree
// with random data. The zeroization of the tree DRBG executes after the
// frontend DRBGs have been locked - they can't release any generated output.
// Therefore, the randomness generation layer ensures that no output from the
// tree DRBG is used to generate any output that is later released. Randomizing
// the tree DRBG states therefore effectively "zeroize" the state.
//
// If there aren't any threads running, the zeroizer for the global seed DRBG
// won't execute. But the destructor responsible for releasing the memory
// allocated for the global seed DRBG and Jitter Entropy instance, will still
// execute, in turn, zeroize it.
//
// One could override the DRBG states with zero's. However, doing the small
// extra work to use random data (from the OS source) ensures that even if some
// output were to escape from the randomness generation, it will still be sound
// practically.
// tree_jitter_zeroize_drbg "zeroizes" the DRBG in |tree_jitter_drbg| by
// reseeding it with fresh OS randomness, overwriting its internal state (see
// the zeroization rationale in the comment block above).
static void tree_jitter_zeroize_drbg(
    struct tree_jitter_drbg_t *tree_jitter_drbg) {
  uint8_t fresh_entropy[CTR_DRBG_ENTROPY_LEN];
  CRYPTO_sysrand_if_available(fresh_entropy, CTR_DRBG_ENTROPY_LEN);
  if (CTR_DRBG_reseed(&(tree_jitter_drbg->drbg), fresh_entropy, NULL, 0) != 1) {
    abort();
  }
  OPENSSL_cleanse(fresh_entropy, CTR_DRBG_ENTROPY_LEN);
  tree_jitter_drbg->reseed_calls_since_initialization += 1;
}
// tree_jitter_zeroize_global_drbg is the global-DRBG variant of
// |tree_jitter_zeroize_drbg|: it additionally synchronizes access through the
// global seed DRBG lock. A never-initialized global DRBG is a no-op.
static void tree_jitter_zeroize_global_drbg(void) {
  CRYPTO_STATIC_MUTEX_lock_write(global_seed_drbg_lock_bss_get());
  struct tree_jitter_drbg_t *global_drbg = *global_seed_drbg_bss_get();
  if (global_drbg != NULL) {
    if (global_drbg->is_global != 1) {
      // A non-global object in the global slot indicates state corruption.
      abort();
    }
    tree_jitter_zeroize_drbg(global_drbg);
  }
  CRYPTO_STATIC_MUTEX_unlock_write(global_seed_drbg_lock_bss_get());
}
// tree_jitter_zeroize_thread_drbg zeroizes the thread-local seed DRBG held by
// |entropy_source|, and ensures the global seed DRBG is zeroized exactly once
// across all threads.
void tree_jitter_zeroize_thread_drbg(struct entropy_source_t *entropy_source) {
  GUARD_PTR_ABORT(entropy_source);

  // The global DRBG is zeroized at most once, on the first thread to get here.
  CRYPTO_once(global_seed_drbg_zeroize_once_bss_get(),
              tree_jitter_zeroize_global_drbg);

  struct tree_jitter_drbg_t *thread_drbg =
      (struct tree_jitter_drbg_t *) entropy_source->state;
  if (thread_drbg == NULL) {
    return;
  }
  if (thread_drbg->is_global == 1) {
    // The global DRBG must never be installed as a thread-local state.
    abort();
  }
  tree_jitter_zeroize_drbg(thread_drbg);
}
// get_thread_and_global_tree_drbg_calls_FOR_TESTING copies the generate/reseed
// counters of both the thread-local DRBG in |entropy_source| and the global
// seed DRBG into |test_tree_drbg|. Test-only accessor.
//
// Returns 1 on success, 0 if any argument or DRBG state is missing.
int get_thread_and_global_tree_drbg_calls_FOR_TESTING(
    const struct entropy_source_t *entropy_source,
    struct test_tree_drbg_t *test_tree_drbg) {
  if (entropy_source == NULL || test_tree_drbg == NULL) {
    return 0;
  }

  int ok = 0;
  CRYPTO_STATIC_MUTEX_lock_read(global_seed_drbg_lock_bss_get());
  struct tree_jitter_drbg_t *global_drbg = *global_seed_drbg_bss_get();
  struct tree_jitter_drbg_t *thread_drbg =
      (struct tree_jitter_drbg_t *) entropy_source->state;
  if (global_drbg != NULL && thread_drbg != NULL) {
    // Note that |drbg.reseed_counter| is initialized to 1.
    test_tree_drbg->thread_generate_calls_since_seed =
        thread_drbg->drbg.reseed_counter;
    test_tree_drbg->thread_reseed_calls_since_initialization =
        thread_drbg->reseed_calls_since_initialization;
    test_tree_drbg->global_generate_calls_since_seed =
        global_drbg->drbg.reseed_counter;
    test_tree_drbg->global_reseed_calls_since_initialization =
        global_drbg->reseed_calls_since_initialization;
    ok = 1;
  }
  CRYPTO_STATIC_MUTEX_unlock_read(global_seed_drbg_lock_bss_get());
  return ok;
}
// set_thread_and_global_tree_drbg_reseed_counter_FOR_TESTING overrides the
// DRBG reseed counters of the thread-local DRBG in |entropy_source| and/or
// the global seed DRBG. A value of 0 leaves the respective counter untouched.
// Test-only mutator.
//
// Returns 1 on success, 0 if any argument or DRBG state is missing.
OPENSSL_EXPORT int set_thread_and_global_tree_drbg_reseed_counter_FOR_TESTING(
    struct entropy_source_t *entropy_source, uint64_t thread_reseed_calls,
    uint64_t global_reseed_calls) {
  if (entropy_source == NULL) {
    return 0;
  }

  int ok = 0;
  CRYPTO_STATIC_MUTEX_lock_write(global_seed_drbg_lock_bss_get());
  struct tree_jitter_drbg_t *global_drbg = *global_seed_drbg_bss_get();
  struct tree_jitter_drbg_t *thread_drbg =
      (struct tree_jitter_drbg_t *) entropy_source->state;
  if (global_drbg != NULL && thread_drbg != NULL) {
    if (thread_reseed_calls != 0) {
      thread_drbg->drbg.reseed_counter = thread_reseed_calls;
    }
    if (global_reseed_calls != 0) {
      global_drbg->drbg.reseed_counter = global_reseed_calls;
    }
    ok = 1;
  }
  CRYPTO_STATIC_MUTEX_unlock_write(global_seed_drbg_lock_bss_get());
  return ok;
}
#endif // !defined(DISABLE_CPU_JITTER_ENTROPY)

View File

@@ -0,0 +1,557 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC
#if !defined(DISABLE_CPU_JITTER_ENTROPY)
#include <gtest/gtest.h>
#include "internal.h"
#include "../internal.h"
#include "../../../ube/internal.h"
#include "../../../test/ube_test.h"
#include "../../../test/test_util.h"
#include <cstdio>
#if defined(GTEST_HAS_DEATH_TEST)
// TEST_IN_FORK_ASSERT_TRUE / TEST_IN_FORK_ASSERT_FALSE are assertion helpers
// for code running in a forked child (gtest death tests), where ASSERT_*
// cannot propagate failure to the parent. On failure they log the failing
// expression with its location to stderr and exit(1), which the parent
// observes via the child's exit code.
//
// Fix: the macro argument is parenthesized before applying |!| so compound
// expressions associate correctly. The previous expansion |if (!condition)|
// turned |TEST_IN_FORK_ASSERT_TRUE(a == b)| into |if (!a == b)| due to
// operator precedence, silently disarming the assertion. (Existing call sites
// double-parenthesize comparisons, so their behavior is unchanged.)
#define TEST_IN_FORK_ASSERT_TRUE(condition)                                   \
  if (!(condition)) {                                                         \
    std::cerr << __FILE__ << ":" << __LINE__                                  \
              << ": Assertion failed: " << #condition << std::endl;           \
    exit(1);                                                                  \
  }
#define TEST_IN_FORK_ASSERT_FALSE(condition)                                  \
  if ((condition)) {                                                          \
    std::cerr << __FILE__ << ":" << __LINE__                                  \
              << ": Assertion failed: " << #condition << std::endl;           \
    exit(1);                                                                  \
  }
// Number of worker threads used by the multi-threaded tests below.
static const size_t number_of_threads = 8;
// Fixture for the tree-DRBG Jitter Entropy tests. Wires UbeBase (see
// test/ube_test.h) into gtest set-up/tear-down and exposes its UBE helpers to
// the test bodies.
class treeDrbgJitterentropyTest : public::testing::Test {
  private:
    UbeBase ube_base_;

  protected:
    void SetUp() override {
      ube_base_.SetUp();
    }

    void TearDown() override {
      ube_base_.TearDown();
    }

    // Returns true when UBE detection is supported in this environment
    // (delegates to UbeBase).
    bool UbeIsSupported() const {
      return ube_base_.UbeIsSupported();
    }

    // Enables mocked UBE support for tests that need to force a UBE
    // (delegates to UbeBase).
    void allowMockedUbe() const {
      ube_base_.allowMockedUbe();
    }
};
// get_tree_drbg_call is a bool-returning wrapper around the C test accessor
// so the result composes naturally in boolean expressions.
static bool get_tree_drbg_call(const struct entropy_source_t *entropy_source,
                               struct test_tree_drbg_t *test_tree_drbg) {
  return get_thread_and_global_tree_drbg_calls_FOR_TESTING(
             entropy_source, test_tree_drbg) != 0;
}
// BasicInitialization: verifies that initializing a thread entropy source
// seeds the thread-local tree-DRBG exactly once and the global tree-DRBG
// exactly once. Runs inside EXPECT_EXIT so the lazily-initialized per-process
// state does not leak into other tests.
TEST_F(treeDrbgJitterentropyTest, BasicInitialization) {
  if (runtimeEmulationIsIntelSde() && addressSanitizerIsEnabled()) {
    GTEST_SKIP() << "Test not supported under Intel SDE + ASAN";
  }

  // Test only one seed occurs on initialization.
  auto testFunc = []() {
    struct entropy_source_t entropy_source = {0, 0};
    struct test_tree_drbg_t new_test_tree_drbg = {0, 0, 0, 0};
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_initialize(&entropy_source))
    TEST_IN_FORK_ASSERT_TRUE(get_tree_drbg_call(&entropy_source, &new_test_tree_drbg))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_reseed_calls_since_initialization == 1))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_reseed_calls_since_initialization == 1))
    // Calling tree_jitter_initialize before thread test would set
    // |thread_generate_calls_since_seed| equal to 1 and
    // |global_generate_calls_since_seed| equal to 2. The latter because the
    // initial value 1 and we perform a generate call on the global tree-DRBG
    // to seed the thread-local tree-DRBG.
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_generate_calls_since_seed == 1))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_generate_calls_since_seed == 2))
    tree_jitter_zeroize_thread_drbg(&entropy_source);
    tree_jitter_free_thread_drbg(&entropy_source);
    exit(0);
  };
  EXPECT_EXIT(testFunc(), ::testing::ExitedWithCode(0), "");
}
// BasicThread: verifies that each spawned thread seeds its own thread-local
// tree-DRBG from the shared global tree-DRBG, and that the global DRBG's
// generate counter advances once per thread.
TEST_F(treeDrbgJitterentropyTest, BasicThread) {
  // Test seeds are observed when spawning new threads.
  auto testFunc = []() {
    struct entropy_source_t entropy_source = {0, 0};
    struct test_tree_drbg_t new_test_tree_drbg = {0, 0, 0, 0};
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_initialize(&entropy_source))

    // Each worker initializes its own entropy source and checks that exactly
    // one thread-local seed happened, with no extra global reseed.
    std::function<void(bool*)> threadFunc = [](bool *result) {
      struct entropy_source_t entropy_source_thread = {0, 0};
      struct test_tree_drbg_t new_test_tree_drbg_thread = {0, 0, 0, 0};
      bool test = tree_jitter_initialize(&entropy_source_thread);
      test = test && get_tree_drbg_call(&entropy_source_thread, &new_test_tree_drbg_thread);
      test = test && new_test_tree_drbg_thread.thread_reseed_calls_since_initialization == 1;
      test = test && new_test_tree_drbg_thread.global_reseed_calls_since_initialization == 1;
      *result = test;
      tree_jitter_free_thread_drbg(&entropy_source_thread);
    };
    bool exit_code = threadTest(number_of_threads, threadFunc);
    TEST_IN_FORK_ASSERT_TRUE(exit_code)
    TEST_IN_FORK_ASSERT_TRUE(get_tree_drbg_call(&entropy_source, &new_test_tree_drbg))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_reseed_calls_since_initialization == 1))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_reseed_calls_since_initialization == 1))
    // Calling tree_jitter_initialize before thread test would set
    // |global_generate_calls_since_seed| equal to 2. We then expect an
    // additional |number_of_threads| thread-local tree-DRBGs to seed using the
    // global tree-DRBG.
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_generate_calls_since_seed == (number_of_threads+2)))
    tree_jitter_zeroize_thread_drbg(&entropy_source);
    tree_jitter_free_thread_drbg(&entropy_source);
    exit(0);
  };
  EXPECT_EXIT(testFunc(), ::testing::ExitedWithCode(0), "");
}
// BasicReseed: drives the thread-local and global tree-DRBG reseed counters
// past their limits (via the test-only setter) and verifies that reseeds
// cascade exactly as designed: thread-local reseeds pull from the global
// DRBG, and the global DRBG reseeds from the Jitter root when its own budget
// is exhausted.
TEST_F(treeDrbgJitterentropyTest, BasicReseed) {
  if (runtimeEmulationIsIntelSde() && addressSanitizerIsEnabled()) {
    GTEST_SKIP() << "Test not supported under Intel SDE + ASAN";
  }

  // Test reseeding happens as expected
  auto testFunc = []() {
    struct entropy_source_t entropy_source = {0, 0};
    struct test_tree_drbg_t new_test_tree_drbg = {0, 0, 0, 0};
    uint8_t seed_out[CTR_DRBG_ENTROPY_LEN];
    const uint64_t tree_drbg_thread_reseed_limit = TREE_JITTER_THREAD_DRBG_MAX_GENERATE;
    const uint64_t tree_drbg_global_reseed_limit = TREE_JITTER_GLOBAL_DRBG_MAX_GENERATE;

    // Similar to initialization test above.
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_initialize(&entropy_source))
    TEST_IN_FORK_ASSERT_TRUE(get_tree_drbg_call(&entropy_source, &new_test_tree_drbg))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_reseed_calls_since_initialization == 1))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_generate_calls_since_seed == 1))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_reseed_calls_since_initialization == 1))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_generate_calls_since_seed == 2))

    // Set reseed counter for thread-local tree-DRBG to max value + 1 (because
    // the reseed interval condition uses strict inequality and
    // drbg.reseed_counter is initialized to 1).
    TEST_IN_FORK_ASSERT_TRUE(set_thread_and_global_tree_drbg_reseed_counter_FOR_TESTING(
      &entropy_source, tree_drbg_thread_reseed_limit + 1, 0))
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_get_seed(&entropy_source, seed_out))
    TEST_IN_FORK_ASSERT_TRUE(get_tree_drbg_call(&entropy_source, &new_test_tree_drbg))
    // Thread-local tree-DRBG should generate a seed from global tree-DRBG
    // causing its generate call counter to increment by 1. Thread-local
    // tree-DRBG reseed counter should also go increment by 1.
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_reseed_calls_since_initialization == 2)) // changed
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_generate_calls_since_seed == 2)) // changed
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_reseed_calls_since_initialization == 1)) // unchanged
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_generate_calls_since_seed == 3)) // changed

    // Set reseed counter for global tree-DRBG to max value + 1. Thread-local
    // tree-DRBG is unchanged
    TEST_IN_FORK_ASSERT_TRUE(set_thread_and_global_tree_drbg_reseed_counter_FOR_TESTING(
      &entropy_source, 0, tree_drbg_global_reseed_limit + 1))
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_get_seed(&entropy_source, seed_out))
    TEST_IN_FORK_ASSERT_TRUE(get_tree_drbg_call(&entropy_source, &new_test_tree_drbg))
    // We generated a seed from the thread-local tree-DRBG which should not
    // reseed. Hence, we do not expect a generate call made to the global
    // tree-DRBG. The value of the latter will change though because the reseed
    // counter is equal to the number of generate calls. Since we are generating
    // a seed from the thread-local tree-DRBG its generate counter should increment by 1
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_reseed_calls_since_initialization == 2)) // unchanged
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_generate_calls_since_seed == 3)) // changed
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_reseed_calls_since_initialization == 1)) // unchanged
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_generate_calls_since_seed == (tree_drbg_global_reseed_limit + 1))) // changed

    // Set reseed counter for both thread-local and global tree-DRBG to
    // max value + 1.
    TEST_IN_FORK_ASSERT_TRUE(set_thread_and_global_tree_drbg_reseed_counter_FOR_TESTING(
      &entropy_source, tree_drbg_thread_reseed_limit + 1, tree_drbg_global_reseed_limit + 1))
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_get_seed(&entropy_source, seed_out))
    TEST_IN_FORK_ASSERT_TRUE(get_tree_drbg_call(&entropy_source, &new_test_tree_drbg))
    // When generating a seed from the thread-local tree-DRBG it should
    // reseed by getting a seed from the global tree-DRBG. The global tree-DRBG
    // should itself reseed. In both cases, their generate calls (since last
    // seed/reseed) should be reset.
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_reseed_calls_since_initialization == 3)) // changed
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_generate_calls_since_seed == 2)) // changed
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_reseed_calls_since_initialization == 2)) // changed
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_generate_calls_since_seed == 2)) // changed

    // Try without calling zeroize thread-local tree-DRBG first.
    tree_jitter_free_thread_drbg(&entropy_source);
    exit(0);
  };
  EXPECT_EXIT(testFunc(), ::testing::ExitedWithCode(0), "");
}
#if !defined(OPENSSL_WINDOWS)
// veryifySeedOrReseed compares the reseed counters captured before
// (|cached_test_tree_drbg|) and after (|test_tree_drbg|) an operation, and
// checks the thread-local/global counters advanced by exactly
// |expect_reseed_thread|/|expect_reseed_global|. On mismatch it logs the
// expected vs. actual values tagged with |error_text| and returns false.
// NOTE(review): the name contains a typo ("veryify" for "verify"); kept
// unchanged because call sites reference it.
static bool veryifySeedOrReseed(const struct test_tree_drbg_t *test_tree_drbg,
    const struct test_tree_drbg_t *cached_test_tree_drbg,
    size_t expect_reseed_thread, size_t expect_reseed_global,
    const char *error_text) {
  if (cached_test_tree_drbg->thread_reseed_calls_since_initialization + expect_reseed_thread != test_tree_drbg->thread_reseed_calls_since_initialization ||
      cached_test_tree_drbg->global_reseed_calls_since_initialization + expect_reseed_global != test_tree_drbg->global_reseed_calls_since_initialization) {
    std::cerr << "Tree-DRBG expected count mismatch " << error_text << '\n'
              << "  Thread DRBG: expected=" << (cached_test_tree_drbg->thread_reseed_calls_since_initialization + expect_reseed_thread)
              << ", actual=" << test_tree_drbg->thread_reseed_calls_since_initialization << '\n'
              << "  Global DRBG: expected=" << (cached_test_tree_drbg->global_reseed_calls_since_initialization + expect_reseed_global)
              << ", actual=" << test_tree_drbg->global_reseed_calls_since_initialization << '\n';
    return false;
  }
  return true;
}
// assertSeedOrReseed snapshots the tree-DRBG counters, runs |func|, snapshots
// again, and verifies the thread-local/global reseed counters advanced by
// exactly |expect_reseed_thread|/|expect_reseed_global|. |error_text| tags any
// diagnostic output. Must run in a forked child: snapshot or |func| failure
// exits the process via TEST_IN_FORK_ASSERT_TRUE.
static bool assertSeedOrReseed(const struct entropy_source_t *entropy_source,
    size_t expect_reseed_thread, size_t expect_reseed_global,
    std::function<bool()> func, const char *error_text = "") {
  struct test_tree_drbg_t cached_test_tree_drbg = {0, 0, 0, 0};
  TEST_IN_FORK_ASSERT_TRUE(get_tree_drbg_call(entropy_source, &cached_test_tree_drbg))
  TEST_IN_FORK_ASSERT_TRUE(func())
  struct test_tree_drbg_t test_tree_drbg = {0, 0, 0, 0};
  TEST_IN_FORK_ASSERT_TRUE(get_tree_drbg_call(entropy_source, &test_tree_drbg))
  return veryifySeedOrReseed(&test_tree_drbg, &cached_test_tree_drbg,
      expect_reseed_thread, expect_reseed_global, error_text);
}
// BasicFork exercises fork-safety of the tree-DRBG in four scenarios: a
// single fork, fork-then-threads, double fork, and fork-generate-fork. Each
// scenario runs inside EXPECT_EXIT so a failed in-fork assertion surfaces as
// a non-zero process exit code.
TEST_F(treeDrbgJitterentropyTest, BasicFork) {
  if (runtimeEmulationIsIntelSde() && addressSanitizerIsEnabled()) {
    GTEST_SKIP() << "Test not supported under Intel SDE + ASAN";
  }
  // Scenario 1: single fork; child must reseed (when UBE detection is
  // available), parent must not.
  auto testFuncSingleFork = [this]() {
    struct entropy_source_t entropy_source = {0, 0};
    uint8_t seed_out[CTR_DRBG_ENTROPY_LEN];
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_initialize(&entropy_source))
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_get_seed(&entropy_source, seed_out))
    bool exit_code = forkAndRunTest(
      [this, entropy_source]() {
        // In child. If UBE detection is supported, we expect a reseed.
        // No UBE detection is handled via prediction resistance.
        size_t expect_reseed = 0;
        if (UbeIsSupported()) {
          expect_reseed = 1;
        }
        TEST_IN_FORK_ASSERT_TRUE(
          assertSeedOrReseed(&entropy_source, expect_reseed, expect_reseed, [entropy_source]() {
            uint8_t child_out[CTR_DRBG_ENTROPY_LEN];
            return tree_jitter_get_seed(&entropy_source, child_out);
          }, "child")
        )
        return true;
      },
      [entropy_source]() {
        // In Parent we expect no reseed, even if UBE detection is not supported.
        TEST_IN_FORK_ASSERT_TRUE(
          assertSeedOrReseed(&entropy_source, 0, 0, [entropy_source]() {
            uint8_t parent_out[CTR_DRBG_ENTROPY_LEN];
            return tree_jitter_get_seed(&entropy_source, parent_out);
          }, "parent")
        )
        return true;
      }
    );
    tree_jitter_free_thread_drbg(&entropy_source);
    exit(exit_code ? 0 : 1);
  };
  EXPECT_EXIT(testFuncSingleFork(), ::testing::ExitedWithCode(0), "");
  // Scenario 2: fork, then spawn threads in the child before generating in
  // the original (child) thread.
  auto testFuncSingleForkThenThread = [this]() {
    struct entropy_source_t entropy_source = {0, 0};
    uint8_t seed_out[CTR_DRBG_ENTROPY_LEN];
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_initialize(&entropy_source))
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_get_seed(&entropy_source, seed_out))
    bool exit_code = forkAndRunTest(
      [this, entropy_source]() {
        // In child. Spawn a number of threads before generating randomness.
        // If fork detection is supported, we expect a seed in each thread.
        // If fork detection is not enabled, we also expect a seed in each
        // thread. However, this seed should occur when calling
        // tree_jitter_initialize.
        std::function<void(bool*)> threadFunc = [](bool *result) {
          struct entropy_source_t thread_entropy_source = {0, 0};
          TEST_IN_FORK_ASSERT_TRUE(tree_jitter_initialize(&thread_entropy_source))
          TEST_IN_FORK_ASSERT_TRUE(
            assertSeedOrReseed(&thread_entropy_source, 0, 0, [thread_entropy_source]() {
              uint8_t child_out[CTR_DRBG_ENTROPY_LEN];
              return tree_jitter_get_seed(&thread_entropy_source, child_out);
            }, "child")
          )
          tree_jitter_free_thread_drbg(&thread_entropy_source);
          *result = true;
        };
        TEST_IN_FORK_ASSERT_TRUE(threadTest(number_of_threads, threadFunc))
        // Now back to original thread.
        size_t expect_reseed = 0;
        if (UbeIsSupported()) {
          expect_reseed = 1;
        }
        // Global would have been reseeded above.
        TEST_IN_FORK_ASSERT_TRUE(
          assertSeedOrReseed(&entropy_source, expect_reseed, 0, [entropy_source]() {
            uint8_t child_out[CTR_DRBG_ENTROPY_LEN];
            return tree_jitter_get_seed(&entropy_source, child_out);
          }, "child")
        )
        return true;
      },
      [entropy_source]() {
        // In Parent we expect no reseed, even if UBE detection is not supported.
        TEST_IN_FORK_ASSERT_TRUE(
          assertSeedOrReseed(&entropy_source, 0, 0, [entropy_source]() {
            uint8_t child_out[CTR_DRBG_ENTROPY_LEN];
            return tree_jitter_get_seed(&entropy_source, child_out);
          }, "parent")
        )
        return true;
      }
    );
    tree_jitter_free_thread_drbg(&entropy_source);
    exit(exit_code ? 0 : 1);
  };
  EXPECT_EXIT(testFuncSingleForkThenThread(), ::testing::ExitedWithCode(0), "");
  // Test reseed is observed when forking and then forking again before
  // generating any randomness.
  auto testFuncDoubleFork = [this]() {
    struct entropy_source_t entropy_source = {0, 0};
    uint8_t seed_out[CTR_DRBG_ENTROPY_LEN];
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_initialize(&entropy_source))
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_get_seed(&entropy_source, seed_out))
    bool exit_code = forkAndRunTest(
      [this, entropy_source]() {
        // Fork again. In both child and parent we expect a reseed if UBE
        // detection is supported.
        bool exit_code_child = forkAndRunTest(
          [this, entropy_source]() {
            size_t expect_reseed = 0;
            if (UbeIsSupported()) {
              expect_reseed = 1;
            }
            TEST_IN_FORK_ASSERT_TRUE(
              assertSeedOrReseed(&entropy_source, expect_reseed, expect_reseed, [entropy_source]() {
                uint8_t child_out[CTR_DRBG_ENTROPY_LEN];
                return tree_jitter_get_seed(&entropy_source, child_out);
              }, "child-child")
            )
            return true;
          },
          [this, entropy_source]() {
            size_t expect_reseed = 0;
            if (UbeIsSupported()) {
              expect_reseed = 1;
            }
            TEST_IN_FORK_ASSERT_TRUE(
              assertSeedOrReseed(&entropy_source, expect_reseed, expect_reseed, [entropy_source]() {
                uint8_t child_out[CTR_DRBG_ENTROPY_LEN];
                return tree_jitter_get_seed(&entropy_source, child_out);
              }, "child-parent")
            )
            return true;
          }
        );
        return exit_code_child;
      },
      [entropy_source]() {
        // In Parent we expect no reseed, even if UBE detection is not supported.
        TEST_IN_FORK_ASSERT_TRUE(
          assertSeedOrReseed(&entropy_source, 0, 0, [entropy_source]() {
            uint8_t parent_out[CTR_DRBG_ENTROPY_LEN];
            return tree_jitter_get_seed(&entropy_source, parent_out);
          }, "parent")
        )
        return true;
      }
    );
    tree_jitter_free_thread_drbg(&entropy_source);
    exit(exit_code ? 0 : 1);
  };
  EXPECT_EXIT(testFuncDoubleFork(), ::testing::ExitedWithCode(0), "");
  // Test reseed is observed when forking, generate randomness, and then fork
  // again.
  auto testFuncForkGenerateFork = [this]() {
    struct entropy_source_t entropy_source = {0, 0};
    uint8_t seed_out[CTR_DRBG_ENTROPY_LEN];
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_initialize(&entropy_source))
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_get_seed(&entropy_source, seed_out))
    bool exit_code = forkAndRunTest(
      [this, entropy_source]() {
        size_t expect_reseed = 0;
        if (UbeIsSupported()) {
          expect_reseed = 1;
        }
        TEST_IN_FORK_ASSERT_TRUE(
          assertSeedOrReseed(&entropy_source, expect_reseed, expect_reseed, [entropy_source]() {
            uint8_t parent_out[CTR_DRBG_ENTROPY_LEN];
            return tree_jitter_get_seed(&entropy_source, parent_out);
          }, "child-parent")
        )
        bool exit_code_child = forkAndRunTest(
          [this, entropy_source]() {
            size_t expect_reseed_child = 0;
            if (UbeIsSupported()) {
              expect_reseed_child = 1;
            }
            TEST_IN_FORK_ASSERT_TRUE(
              assertSeedOrReseed(&entropy_source, expect_reseed_child, expect_reseed_child, [entropy_source]() {
                uint8_t parent_out[CTR_DRBG_ENTROPY_LEN];
                return tree_jitter_get_seed(&entropy_source, parent_out);
              }, "child-child")
            )
            return true;
          },
          [entropy_source]() {
            TEST_IN_FORK_ASSERT_TRUE(
              assertSeedOrReseed(&entropy_source, 0, 0, [entropy_source]() {
                uint8_t parent_out[CTR_DRBG_ENTROPY_LEN];
                return tree_jitter_get_seed(&entropy_source, parent_out);
              }, "child-parent")
            )
            return true;
          }
        );
        return exit_code_child;
      },
      [entropy_source]() {
        TEST_IN_FORK_ASSERT_TRUE(
          assertSeedOrReseed(&entropy_source, 0, 0, [entropy_source]() {
            uint8_t parent_out[CTR_DRBG_ENTROPY_LEN];
            return tree_jitter_get_seed(&entropy_source, parent_out);
          }, "parent")
        )
        return true;
      }
    );
    // NOTE(review): unlike the other scenarios, this one does not call
    // tree_jitter_free_thread_drbg(&entropy_source) before exiting — confirm
    // whether that omission is intentional.
    exit(exit_code ? 0 : 1);
  };
  EXPECT_EXIT(testFuncForkGenerateFork(), ::testing::ExitedWithCode(0), "");
}
#endif // !defined(OPENSSL_WINDOWS)
// TreeDRBGThreadReseedInterval verifies that the thread-local tree-DRBG
// reseeds exactly after TREE_JITTER_THREAD_DRBG_MAX_GENERATE generate calls,
// and that the global DRBG's counters are unaffected by thread-level
// generates.
TEST_F(treeDrbgJitterentropyTest, TreeDRBGThreadReseedInterval) {
  if (runtimeEmulationIsIntelSde() && addressSanitizerIsEnabled()) {
    GTEST_SKIP() << "Test not supported under Intel SDE + ASAN";
  }
  // Test reseeding happens as expected
  auto testFunc = []() {
    struct entropy_source_t entropy_source = {0, 0};
    struct test_tree_drbg_t new_test_tree_drbg = {0, 0, 0, 0};
    uint8_t seed_out[CTR_DRBG_ENTROPY_LEN];
    const uint64_t tree_drbg_thread_reseed_limit = TREE_JITTER_THREAD_DRBG_MAX_GENERATE;
    // Similar to initialization test above.
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_initialize(&entropy_source))
    TEST_IN_FORK_ASSERT_TRUE(get_tree_drbg_call(&entropy_source, &new_test_tree_drbg))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_reseed_calls_since_initialization == 1))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_generate_calls_since_seed == 1))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_reseed_calls_since_initialization == 1))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_generate_calls_since_seed == 2))
    // Must allow |tree_drbg_thread_reseed_limit| generate calls before
    // reseeding. For the tree-DRBG, not having UBE detection does not trigger
    // a pre-invocation reseed. Instead, prediction resistance is used. Hence,
    // we do not need to cater for UBE in the logic below.
    for (size_t i = 1; i <= tree_drbg_thread_reseed_limit; i++) {
      // Each iteration performs one generate; only the thread generate
      // counter should advance.
      TEST_IN_FORK_ASSERT_TRUE(tree_jitter_get_seed(&entropy_source, seed_out))
      TEST_IN_FORK_ASSERT_TRUE(get_tree_drbg_call(&entropy_source, &new_test_tree_drbg))
      TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_reseed_calls_since_initialization == 1))
      TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_generate_calls_since_seed == (1 + i)))
      TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_reseed_calls_since_initialization == 1))
      TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_generate_calls_since_seed == 2))
    }
    // Now reseed should happen.
    TEST_IN_FORK_ASSERT_TRUE(tree_jitter_get_seed(&entropy_source, seed_out))
    TEST_IN_FORK_ASSERT_TRUE(get_tree_drbg_call(&entropy_source, &new_test_tree_drbg))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_reseed_calls_since_initialization == 2))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.thread_generate_calls_since_seed == 2)) // Because drbg.reseed_counter is initialized to 1
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_reseed_calls_since_initialization == 1))
    TEST_IN_FORK_ASSERT_TRUE((new_test_tree_drbg.global_generate_calls_since_seed == 3))
    // Try without calling zeroize thread-local tree-DRBG first.
    tree_jitter_free_thread_drbg(&entropy_source);
    exit(0);
  };
  EXPECT_EXIT(testFunc(), ::testing::ExitedWithCode(0), "");
}
#else // GTEST_HAS_DEATH_TEST
// Placeholder emitted when GTEST_HAS_DEATH_TEST is unavailable, so the test
// binary still reports the suite (as skipped) instead of silently omitting it.
TEST(treeDrbgJitterentropyTest, SkippedALL) {
  GTEST_SKIP() << "All treeDrbgJitterentropyTest tests are not supported due to Death Tests not supported on this platform";
}
#endif
#endif // !defined(DISABLE_CPU_JITTER_ENTROPY)

View File

@@ -0,0 +1,55 @@
// Copyright (c) 2015, Google Inc.
// SPDX-License-Identifier: ISC
#ifndef OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H
#define OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H
#include <openssl/aes.h>
#include <openssl/ctrdrbg.h>
#include <openssl/rand.h>
#include "../../internal.h"
#include "../modes/internal.h"
#if defined(__cplusplus)
extern "C" {
#endif
// kCtrDrbgReseedInterval is the number of generate calls made to CTR-DRBG,
// for a specific state, before reseeding.
static const uint64_t kCtrDrbgReseedInterval = 4096;
// Flags selecting whether caller-supplied prediction resistance is mixed into
// the DRBG generate path (see RAND_bytes_with_user_prediction_resistance).
#define RAND_NO_USER_PRED_RESISTANCE 0
#define RAND_USE_USER_PRED_RESISTANCE 1
// Test/diagnostic accessors for the calling thread's RNG state counters.
// "private" and "public" refer to the two distinct thread-local DRBG
// instances. Each accessor returns 0 when the corresponding thread-local
// state has not been initialized yet.
OPENSSL_EXPORT uint64_t get_private_thread_generate_calls_since_seed(void);
OPENSSL_EXPORT uint64_t get_private_thread_reseed_calls_since_initialization(void);
OPENSSL_EXPORT uint64_t get_public_thread_generate_calls_since_seed(void);
OPENSSL_EXPORT uint64_t get_public_thread_reseed_calls_since_initialization(void);
// CTR_DRBG_STATE contains the state of a CTR_DRBG based on AES-256. See SP
// 800-90Ar1.
struct ctr_drbg_state_st {
  AES_KEY ks;           // AES key schedule backing the DRBG.
  block128_f block;     // single-block encrypt function used with |ks|.
  ctr128_f ctr;         // counter-mode bulk encrypt function.
  uint8_t counter[16];  // presumably the DRBG's V/counter value (SP 800-90Ar1) — confirm.
  // Incremented per generate call; compared against the reseed interval.
  // NOTE(review): appears to start at 1 after (re)seeding, per test comments
  // elsewhere — confirm.
  uint64_t reseed_counter;
};
OPENSSL_STATIC_ASSERT((sizeof((struct ctr_drbg_state_st*)0)->reseed_counter) * 8 >= 48, value_can_overflow)
// CTR_DRBG_init initialises |*drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of
// entropy in |entropy| and, optionally, a personalization string up to
// |CTR_DRBG_ENTROPY_LEN| bytes in length. It returns one on success and zero
// on error. |entropy| and |personalization| must not alias.
OPENSSL_EXPORT int CTR_DRBG_init(CTR_DRBG_STATE *drbg,
const uint8_t entropy[CTR_DRBG_ENTROPY_LEN],
const uint8_t *personalization,
size_t personalization_len);
#if defined(__cplusplus)
} // extern C
#endif
#endif // OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H

View File

@@ -0,0 +1,601 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC
#include <openssl/rand.h>
#include <openssl/mem.h>
#include <openssl/ctrdrbg.h>
#include <openssl/type_check.h>
#include "entropy/internal.h"
#include "internal.h"
#include "../../internal.h"
#include "../../ube/internal.h"
#include "../delocate.h"
#include "../service_indicator/internal.h"
// rand_thread_local_state contains the per-thread state for the RNG.
struct rand_thread_local_state {
  // Thread-local CTR-DRBG state. UBE unique state.
  CTR_DRBG_STATE drbg;
  // generate_calls_since_seed is the number of generate calls made on |drbg|
  // since it was last (re)seeded. Must be bounded by |kCtrDrbgReseedInterval|.
  uint64_t generate_calls_since_seed;
  // reseed_calls_since_initialization is the number of reseed calls made on
  // |drbg| since its initialization.
  // We assume 2^64 - 1 is an upper bound on the number of reseeds. Type must
  // support that.
  uint64_t reseed_calls_since_initialization;
  // generation_number caches the UBE generation number.
  uint64_t generation_number;
  // Entropy source. UBE unique state.
  struct entropy_source_t *entropy_source;
  // Backward and forward references to nodes in a doubly-linked list of all
  // live thread-local states (used for zeroization at process exit).
  struct rand_thread_local_state *next;
  struct rand_thread_local_state *previous;
  // Lock used when globally clearing (zeroising) all thread-local states at
  // process exit.
  CRYPTO_MUTEX state_clear_lock;
};
OPENSSL_STATIC_ASSERT((sizeof((struct rand_thread_local_state*)0)->generate_calls_since_seed) * 8 >= 48, value_can_overflow)
DEFINE_BSS_GET(struct rand_thread_local_state *, thread_states_list_head)
DEFINE_STATIC_MUTEX(thread_local_states_list_lock)
#if defined(_MSC_VER)
#pragma section(".CRT$XCU", read)
static void rand_thread_local_state_clear_all(void);
// MSVC has no __attribute__((destructor)). Instead, place a function pointer
// in the CRT initializer section ".CRT$XCU" so that
// |windows_install_rand_thread_local_state_clear_all| runs at startup and
// registers the clear-all routine via atexit().
static void windows_install_rand_thread_local_state_clear_all(void) {
  atexit(&rand_thread_local_state_clear_all);
}
__declspec(allocate(".CRT$XCU")) void(*rand_fips_library_destructor)(void) =
  windows_install_rand_thread_local_state_clear_all;
#else
// On non-MSVC toolchains the routine is registered as an ELF-style
// destructor and runs automatically at process exit.
static void rand_thread_local_state_clear_all(void) __attribute__ ((destructor));
#endif
// At process exit not all threads will be scheduled and proper exited. To
// ensure no secret state is left, globally clear all thread-local states. This
// is a FIPS-derived requirement, see ISO/IEC 19790-2012 7.9.7.
//
// This is problematic because a thread might be scheduled and return
// randomness from a non-valid state. The linked application should obviously
// arrange that all threads are gracefully exited before exiting the process.
// Yet, in cases where such graceful exit does not happen we ensure that no
// output can be returned by locking all thread-local states and deliberately
// not releasing the lock. A synchronization step in the core randomness
// generation routine |RAND_bytes_core| then ensures that no randomness
// generation can occur after a thread-local state has been locked. It also
// ensures |rand_thread_local_state_free| cannot free any thread state while we
// own the lock.
//
// When a thread-local DRBGs is gated from returning output, we can invoke the
// entropy source zeroization from |state->entropy_source|. The entropy source
// implementation can assume that any returned seed is never used to generate
// any randomness that is later returned to a consumer.
// rand_thread_local_state_clear_all zeroizes every live thread-local state
// at process exit. See the block comment above for why the per-state locks
// are taken and deliberately never released.
static void rand_thread_local_state_clear_all(void) {
  CRYPTO_STATIC_MUTEX_lock_write(thread_local_states_list_lock_bss_get());
  // First pass: permanently lock each state and clear its DRBG, gating any
  // further randomness generation from it.
  for (struct rand_thread_local_state *state = *thread_states_list_head_bss_get();
       state != NULL; state = state->next) {
    CRYPTO_MUTEX_lock_write(&state->state_clear_lock);
    CTR_DRBG_clear(&state->drbg);
  }
  // Second pass: with every DRBG gated, zeroize the entropy sources.
  for (struct rand_thread_local_state *state = *thread_states_list_head_bss_get();
       state != NULL; state = state->next) {
    state->entropy_source->methods->zeroize_thread(state->entropy_source);
  }
}
// thread_local_list_delete_node unlinks |node_delete| from the global doubly
// linked list of thread-local states. Safe to call from any thread; the list
// mutation is serialized by the global list lock.
static void thread_local_list_delete_node(
    struct rand_thread_local_state *node_delete) {
  CRYPTO_STATIC_MUTEX_lock_write(thread_local_states_list_lock_bss_get());
  struct rand_thread_local_state **head_ptr = thread_states_list_head_bss_get();
  if (*head_ptr == node_delete) {
    // Deleting the head: there is no predecessor to patch, only the head
    // pointer itself.
    *head_ptr = node_delete->next;
  } else if (node_delete->previous != NULL) {
    // Splice the predecessor directly to the successor.
    node_delete->previous->next = node_delete->next;
  }
  // Patch the successor's back-pointer, if a successor exists. When
  // |node_delete| was the head, |node_delete->previous| is NULL, which is
  // exactly the back-pointer the new head must carry.
  if (node_delete->next != NULL) {
    node_delete->next->previous = node_delete->previous;
  }
  CRYPTO_STATIC_MUTEX_unlock_write(thread_local_states_list_lock_bss_get());
}
// thread_local_list_add_node pushes |node_add| onto the front of the global
// linked list of thread-local states. Inserting at the head (rather than the
// tail) keeps the operation O(1).
static void thread_local_list_add_node(
    struct rand_thread_local_state *node_add) {
  // The head of the list never has a predecessor.
  node_add->previous = NULL;
  // Mutating the global list; serialize against all threads.
  CRYPTO_STATIC_MUTEX_lock_write(thread_local_states_list_lock_bss_get());
  struct rand_thread_local_state **head_ptr = thread_states_list_head_bss_get();
  // Link the old head (possibly NULL on the very first add) behind |node_add|.
  node_add->next = *head_ptr;
  if (node_add->next != NULL) {
    node_add->next->previous = node_add;
  }
  // Publish |node_add| as the new head.
  *head_ptr = node_add;
  CRYPTO_STATIC_MUTEX_unlock_write(thread_local_states_list_lock_bss_get());
}
// rand_thread_local_state_free destroys a |rand_thread_local_state| when its
// owning thread exits. |state_in| may be NULL, in which case this is a no-op.
static void rand_thread_local_state_free(void *state_in) {
  if (state_in == NULL) {
    return;
  }
  struct rand_thread_local_state *state = state_in;
  thread_local_list_delete_node(state);
  // The thread may have died before an entropy source was ever attached to
  // this state, so only tear the source down when it is present.
  if (state->entropy_source != NULL) {
    state->entropy_source->methods->free_thread(state->entropy_source);
  }
  OPENSSL_free(state->entropy_source);
  OPENSSL_free(state);
}
// rand_ensure_valid_state determines whether |state| is in a valid state. The
// reasons are documented with inline comments in the function.
//
// Returns 1 if |state| is in a valid state and 0 otherwise.
static int rand_ensure_valid_state(const struct rand_thread_local_state *state) {
  // Currently, the Go based test runner cannot execute a unit test stanza with
  // guaranteed sequential execution. VM UBE testing is using a global file
  // that all unit tests will read if ever taking a path to |RAND_bytes|. The
  // validation below will have a high likelihood of triggering. Disable the
  // validation for VM UBE testing, until Go test runner can guarantee
  // sequential execution.
#if !defined(AWSLC_VM_UBE_TESTING)
  // We do not allow the UBE generation number to change while executing AWS-LC
  // randomness generation code e.g. while |RAND_bytes| executes. One way to hit
  // this error is if snapshotting the address space while executing
  // |RAND_bytes| and while VM UBE is active.
  uint64_t current_generation_number = 0;
  if (CRYPTO_get_ube_generation_number(&current_generation_number) == 1 &&
      current_generation_number != state->generation_number) {
    return 0;
  }
#endif
  // When UBE detection is unavailable (or under VM UBE testing) the state is
  // considered valid by default.
  return 1;
}
// rand_check_ctr_drbg_uniqueness decides whether the CTR-DRBG state in
// |state| is still guaranteed unique for this address space.
//
// Note: a return value of 0 does not necessarily mean an UBE occurred. It is
// also returned when UBE detection is unsupported or failed; in all of these
// cases |state| must be randomized to ensure uniqueness. Any special future
// cases can be handled in this function.
//
// Returns 0 if |state| must be randomized, 1 otherwise.
static int rand_check_ctr_drbg_uniqueness(struct rand_thread_local_state *state) {
  uint64_t observed_generation = 0;
  if (CRYPTO_get_ube_generation_number(&observed_generation) != 1) {
    // No trustworthy UBE signal; caller must randomize.
    return 0;
  }
  if (observed_generation == state->generation_number) {
    return 1;
  }
  // Generation moved: record the new value and request randomization.
  state->generation_number = observed_generation;
  return 0;
}
// rand_maybe_get_ctr_drbg_pred_resistance fills |pred_resistance| with
// |RAND_PRED_RESISTANCE_LEN| bytes from the prediction resistance source of
// |entropy_source|, when one is configured.
//
// |*pred_resistance_len| is set to |RAND_PRED_RESISTANCE_LEN| when bytes were
// produced and to 0 when no prediction resistance source is available.
static void rand_maybe_get_ctr_drbg_pred_resistance(
    const struct entropy_source_t *entropy_source,
    uint8_t pred_resistance[RAND_PRED_RESISTANCE_LEN],
    size_t *pred_resistance_len) {
  GUARD_PTR_ABORT(entropy_source);
  GUARD_PTR_ABORT(pred_resistance_len);
  *pred_resistance_len = 0;
  // The prediction resistance source is optional; nothing to do when absent.
  if (entropy_source->methods->get_prediction_resistance == NULL) {
    return;
  }
  // A configured source that fails is unrecoverable.
  if (entropy_source->methods->get_prediction_resistance(
          entropy_source, pred_resistance) != 1) {
    abort();
  }
  *pred_resistance_len = RAND_PRED_RESISTANCE_LEN;
}
// rand_get_ctr_drbg_seed_entropy sources entropy for (re)seeding the CTR-DRBG.
// |seed| is always filled with |CTR_DRBG_ENTROPY_LEN| bytes from the seed
// source configured in |entropy_source|. If an extra entropy source is also
// configured, |extra_entropy| is filled with |CTR_DRBG_ENTROPY_LEN| bytes and
// |*extra_entropy_len| is set to |CTR_DRBG_ENTROPY_LEN|; otherwise
// |*extra_entropy_len| is set to 0.
static void rand_get_ctr_drbg_seed_entropy(
    const struct entropy_source_t *entropy_source,
    uint8_t seed[CTR_DRBG_ENTROPY_LEN],
    uint8_t extra_entropy[CTR_DRBG_ENTROPY_LEN],
    size_t *extra_entropy_len) {
  GUARD_PTR_ABORT(entropy_source);
  GUARD_PTR_ABORT(extra_entropy_len);
  *extra_entropy_len = 0;
  // The seed source is mandatory; without it no entropy can be sourced.
  if (entropy_source->methods->get_seed(entropy_source, seed) != 1) {
    abort();
  }
  // The extra entropy source is optional and only consulted when configured.
  if (entropy_source->methods->get_extra_entropy == NULL) {
    return;
  }
  if (entropy_source->methods->get_extra_entropy(entropy_source,
                                                 extra_entropy) != 1) {
    abort();
  }
  *extra_entropy_len = CTR_DRBG_ENTROPY_LEN;
}
// rand_ctr_drbg_reseed reseeds the CTR-DRBG in |state| with |seed| and the
// optional |additional_data|, then updates the book-keeping counters: the
// reseed count is bumped and the per-seed generate count restarts at zero.
static void rand_ctr_drbg_reseed(struct rand_thread_local_state *state,
                                 const uint8_t seed[CTR_DRBG_ENTROPY_LEN],
                                 const uint8_t additional_data[CTR_DRBG_ENTROPY_LEN],
                                 size_t additional_data_len) {
  GUARD_PTR_ABORT(state);
  // A failed reseed would leave the DRBG in an unusable state; abort.
  if (CTR_DRBG_reseed(&state->drbg, seed, additional_data,
                      additional_data_len) != 1) {
    abort();
  }
  state->reseed_calls_since_initialization++;
  state->generate_calls_since_seed = 0;
}
// rand_state_initialize initializes the thread-local state |state|. In
// particular initializes the CTR-DRBG state with the initial seed material.
static void rand_state_initialize(struct rand_thread_local_state *state) {
  GUARD_PTR_ABORT(state);
  state->entropy_source = get_entropy_source();
  if (state->entropy_source == NULL) {
    // Without an entropy source no randomness can ever be produced.
    abort();
  }
  uint8_t seed[CTR_DRBG_ENTROPY_LEN];
  uint8_t personalization_string[CTR_DRBG_ENTROPY_LEN];
  size_t personalization_string_len = 0;
  rand_get_ctr_drbg_seed_entropy(state->entropy_source, seed,
    personalization_string, &personalization_string_len);
  assert(personalization_string_len == 0 ||
         personalization_string_len == CTR_DRBG_ENTROPY_LEN);
  if (!CTR_DRBG_init(&(state->drbg), seed, personalization_string,
        personalization_string_len)) {
    abort();
  }
  state->reseed_calls_since_initialization = 0;
  state->generate_calls_since_seed = 0;
  // Cache the current UBE generation number; 0 is used as the sentinel when
  // UBE detection is unavailable.
  uint64_t current_generation_number = 0;
  if (CRYPTO_get_ube_generation_number(&current_generation_number) != 1) {
    state->generation_number = 0;
  } else {
    state->generation_number = current_generation_number;
  }
  CRYPTO_MUTEX_init(&state->state_clear_lock);
  // Scrub the stack copies of the seed material.
  OPENSSL_cleanse(seed, CTR_DRBG_ENTROPY_LEN);
  OPENSSL_cleanse(personalization_string, CTR_DRBG_ENTROPY_LEN);
}
// RAND_bytes_core generates |out_len| bytes of randomness and puts them in
// |out|. The CTR-DRBG state in |state| is managed to ensure uniqueness and
// usage requirements are met.
//
// The argument |use_user_pred_resistance| must be either
// |RAND_USE_USER_PRED_RESISTANCE| or |RAND_NO_USER_PRED_RESISTANCE|. The former
// cause the content of |user_pred_resistance| to be mixed in as prediction
// resistance. The latter ensures that |user_pred_resistance| is not used.
//
// If the state has just been initialized, then |ctr_drbg_state_is_fresh| is 1.
// Otherwise, 0.
static void rand_bytes_core(
  struct rand_thread_local_state *state,
  uint8_t *out, size_t out_len,
  const uint8_t user_pred_resistance[RAND_PRED_RESISTANCE_LEN],
  int use_user_pred_resistance, int ctr_drbg_state_is_fresh) {
  GUARD_PTR_ABORT(state);
  GUARD_PTR_ABORT(out);
  // must_reseed_before_generate is 1 if we must reseed before invoking the
  // CTR-DRBG generate function CTR_DRBG_generate().
  int must_reseed_before_generate = 0;
  // Ensure that the CTR-DRBG state is unique. If the state is fresh then
  // uniqueness is guaranteed.
  if (rand_check_ctr_drbg_uniqueness(state) != 1 &&
      ctr_drbg_state_is_fresh != 1) {
    must_reseed_before_generate = 1;
  }
  // If a prediction resistance source is available, use it.
  // Prediction resistance is only used on first invocation of the CTR-DRBG,
  // ensuring that its state is randomized before generating output.
  size_t first_pred_resistance_len = 0;
  uint8_t pred_resistance[RAND_PRED_RESISTANCE_LEN] = {0};
  rand_maybe_get_ctr_drbg_pred_resistance(state->entropy_source,
    pred_resistance, &first_pred_resistance_len);
  // If caller input user-controlled prediction resistance, use it.
  // XOR-combining means the result is at least as unpredictable as the
  // strongest of the two inputs.
  if (use_user_pred_resistance == RAND_USE_USER_PRED_RESISTANCE) {
    for (size_t i = 0; i < RAND_PRED_RESISTANCE_LEN; i++) {
      pred_resistance[i] ^= user_pred_resistance[i];
    }
    first_pred_resistance_len = RAND_PRED_RESISTANCE_LEN;
  }
  assert(first_pred_resistance_len == 0 ||
         first_pred_resistance_len == RAND_PRED_RESISTANCE_LEN);
  // Synchronize with |rand_thread_local_state_clear_all|. In case a
  // thread-local state has been zeroized, thread execution will block here
  // because there is no secure way to generate randomness from that state.
  // Note that this lock is thread-local and therefore not contended except at
  // process exit.
  CRYPTO_MUTEX_lock_read(&state->state_clear_lock);
  // Iterate CTR-DRBG generate until |out_len| bytes of randomness have been
  // generated. CTR_DRBG_generate can maximally generate
  // |CTR_DRBG_MAX_GENERATE_LENGTH| bytes per usage of its state see
  // SP800-90A Rev 1 Table 3. If user requests more, we must generate output in
  // chunks and concatenate.
  while (out_len > 0) {
    size_t todo = out_len;
    if (todo > CTR_DRBG_MAX_GENERATE_LENGTH) {
      todo = CTR_DRBG_MAX_GENERATE_LENGTH;
    }
    if (must_reseed_before_generate == 1 ||
        (state->generate_calls_since_seed + 1) > kCtrDrbgReseedInterval) {
      // An unlock-lock cycle is located here to not acquire any locks while we
      // might perform system calls (e.g. when sourcing OS entropy). This
      // shields against known bugs. For example, glibc can implement locks
      // using memory transactions on powerpc that has been observed to break
      // when reaching |getrandom| through |syscall|. For this, see
      // https://github.com/google/boringssl/commit/17ce286e0792fc2855fb7e34a968bed17ae914af
      // https://www.kernel.org/doc/Documentation/powerpc/transactional_memory.txt
      //
      // Even though the unlock-lock cycle is under the loop iteration,
      // practically a request size (i.e. the value of |out_len|), will
      // almost-always be strictly less than |CTR_DRBG_MAX_GENERATE_LENGTH|.
      // Hence, practically, only one lock-unlock rotation will be required.
      CRYPTO_MUTEX_unlock_read(&state->state_clear_lock);
      uint8_t seed[CTR_DRBG_ENTROPY_LEN];
      uint8_t additional_data[CTR_DRBG_ENTROPY_LEN];
      size_t additional_data_len = 0;
      rand_get_ctr_drbg_seed_entropy(state->entropy_source, seed,
        additional_data, &additional_data_len);
      CRYPTO_MUTEX_lock_read(&state->state_clear_lock);
      rand_ctr_drbg_reseed(state, seed, additional_data,
        additional_data_len);
      must_reseed_before_generate = 0;
      // Scrub stack copies of the seed material immediately after use.
      OPENSSL_cleanse(seed, CTR_DRBG_ENTROPY_LEN);
      OPENSSL_cleanse(additional_data, CTR_DRBG_ENTROPY_LEN);
    }
    if (!CTR_DRBG_generate(&(state->drbg), out, todo, pred_resistance,
          first_pred_resistance_len)) {
      abort();
    }
    out += todo;
    out_len -= todo;
    state->generate_calls_since_seed++;
    // Prediction resistance is only mixed into the first generate call.
    first_pred_resistance_len = 0;
  }
  OPENSSL_cleanse(pred_resistance, RAND_PRED_RESISTANCE_LEN);
  // Reject output produced under an inconsistent UBE generation.
  if (rand_ensure_valid_state(state) != 1) {
    abort();
  }
  CRYPTO_MUTEX_unlock_read(&state->state_clear_lock);
}
// rand_bytes_impl is the shared entry point behind the public RAND_* wrappers.
// It lazily creates, registers, and initializes the thread-local state keyed
// by |tls_key| on first use, then delegates generation to |rand_bytes_core|.
static void rand_bytes_impl(thread_local_data_t tls_key, uint8_t *out,
  size_t out_len, const uint8_t user_pred_resistance[RAND_PRED_RESISTANCE_LEN],
  int use_user_pred_resistance) {
  if (out_len == 0) {
    return;
  }
  // Lock state here because CTR-DRBG-generate can be invoked multiple times
  // and every successful invocation increments the service indicator.
  FIPS_service_indicator_lock_state();
  struct rand_thread_local_state *state =
    CRYPTO_get_thread_local(tls_key);
  int ctr_drbg_state_is_fresh = 0;
  if (state == NULL) {
    // First use on this thread: allocate and register the state. Failure to
    // allocate or register leaves no secure fallback, so abort.
    state = OPENSSL_zalloc(sizeof(struct rand_thread_local_state));
    if (state == NULL ||
        CRYPTO_set_thread_local(tls_key, state,
          rand_thread_local_state_free) != 1) {
      abort();
    }
    rand_state_initialize(state);
    thread_local_list_add_node(state);
    ctr_drbg_state_is_fresh = 1;
  }
  rand_bytes_core(state, out, out_len, user_pred_resistance,
    use_user_pred_resistance, ctr_drbg_state_is_fresh);
  FIPS_service_indicator_unlock_state();
  // Exactly one approved-service tick per external request.
  FIPS_service_indicator_update_state();
}
// RAND_bytes_with_user_prediction_resistance fills |out| with |out_len|
// random bytes from the private per-thread DRBG, mixing the caller-provided
// |user_pred_resistance| into the first generate call. Always returns 1;
// internal failures abort.
int RAND_bytes_with_user_prediction_resistance(uint8_t *out, size_t out_len,
  const uint8_t user_pred_resistance[RAND_PRED_RESISTANCE_LEN]) {
  GUARD_PTR_ABORT(user_pred_resistance);
  rand_bytes_impl(OPENSSL_THREAD_LOCAL_PRIVATE_RAND, out, out_len,
    user_pred_resistance, RAND_USE_USER_PRED_RESISTANCE);
  return 1;
}
// RAND_bytes fills |out| with |out_len| random bytes from the private
// per-thread DRBG without user prediction resistance (the all-zero buffer is
// passed but ignored under RAND_NO_USER_PRED_RESISTANCE). Always returns 1.
int RAND_bytes(uint8_t *out, size_t out_len) {
  static const uint8_t kZeroPredResistance[RAND_PRED_RESISTANCE_LEN] = {0};
  rand_bytes_impl(OPENSSL_THREAD_LOCAL_PRIVATE_RAND, out, out_len,
    kZeroPredResistance, RAND_NO_USER_PRED_RESISTANCE);
  return 1;
}
// RAND_priv_bytes is an alias for |RAND_bytes|; both draw from the private
// per-thread DRBG.
int RAND_priv_bytes(uint8_t *out, size_t out_len) {
  return RAND_bytes(out, out_len);
}
// RAND_public_bytes fills |out| with |out_len| random bytes from the public
// per-thread DRBG (a separate thread-local instance from the one used by
// |RAND_bytes|). Always returns 1.
int RAND_public_bytes(uint8_t *out, size_t out_len) {
  static const uint8_t kZeroPredResistance[RAND_PRED_RESISTANCE_LEN] = {0};
  rand_bytes_impl(OPENSSL_THREAD_LOCAL_PUBLIC_RAND, out, out_len,
    kZeroPredResistance, RAND_NO_USER_PRED_RESISTANCE);
  return 1;
}
// RAND_pseudo_bytes is a legacy alias for |RAND_bytes|; the output is fully
// cryptographically secure despite the name.
int RAND_pseudo_bytes(uint8_t *out, size_t out_len) {
  return RAND_bytes(out, out_len);
}
// Returns the number of generate calls made on the private thread-local state
// since last seed/reseed, or 0 when the state has not been initialized on
// this thread yet.
uint64_t get_private_thread_generate_calls_since_seed(void) {
  struct rand_thread_local_state *state =
    CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_PRIVATE_RAND);
  return state == NULL ? 0 : state->generate_calls_since_seed;
}
// Returns the number of reseed calls made on the private thread-local state
// since initialization, or 0 when the state has not been initialized on this
// thread yet.
uint64_t get_private_thread_reseed_calls_since_initialization(void) {
  struct rand_thread_local_state *state =
    CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_PRIVATE_RAND);
  return state == NULL ? 0 : state->reseed_calls_since_initialization;
}
// Returns the number of generate calls made on the public thread-local state
// since last seed/reseed, or 0 when the state has not been initialized on
// this thread yet.
uint64_t get_public_thread_generate_calls_since_seed(void) {
  struct rand_thread_local_state *state =
    CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_PUBLIC_RAND);
  return state == NULL ? 0 : state->generate_calls_since_seed;
}
// Returns how many reseed calls the public thread-local DRBG state has made
// since it was initialized. Yields 0 when the thread-local state has not yet
// been initialized.
uint64_t get_public_thread_reseed_calls_since_initialization(void) {
  struct rand_thread_local_state *state =
      CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_PUBLIC_RAND);
  return (state == NULL) ? 0 : state->reseed_calls_since_initialization;
}

View File

@@ -0,0 +1,840 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC
#include <gtest/gtest.h>
#include <openssl/ctrdrbg.h>
#include <openssl/mem.h>
#include <openssl/rand.h>
#include "entropy/internal.h"
#include "internal.h"
#include "../../ube/internal.h"
#include "../../test/ube_test.h"
#include "../../test/test_util.h"
#include <array>
#if defined(GTEST_HAS_DEATH_TEST)
// Common request size and thread fan-out used by every test in this file.
static const size_t request_len = 64;
static const size_t number_of_threads = 8;
// Per-thread call counters bumped by the override* entropy-source callbacks
// below. thread_local so counts from concurrently running test threads do not
// interfere with each other.
static thread_local size_t initialize_count = 0;
static thread_local size_t zeroize_thread_count = 0;
static thread_local size_t free_thread_count = 0;
static thread_local size_t get_seed_count = 0;
static thread_local size_t get_extra_entropy_count = 0;
static thread_local size_t get_prediction_resistance_count = 0;
// Snapshots taken by assertSeedOrReseed() before running a workload, so it
// can compute the delta afterwards.
static thread_local size_t cached_get_seed_count = 0;
static thread_local size_t cached_get_extra_entropy_count = 0;
// Entropy-source method table installed into the library under test. Starts
// with all-null callbacks; overrideEntropySourceMethodsCount() repopulates it
// with the counting callbacks before each test.
static entropy_source_methods entropy_methods{
  nullptr,  // initialize
  nullptr,  // zeroize_thread
  nullptr,  // free_thread
  nullptr,  // get_seed
  nullptr,  // get_extra_entropy
  nullptr,  // get_prediction_resistance
  OVERRIDDEN_ENTROPY_SOURCE
};
// Counting stub for the entropy source's initialize callback; always succeeds.
static int overrideInitialize(struct entropy_source_t *entropy_source) {
  initialize_count++;
  return 1;
}
// Counting stub for the entropy source's zeroize_thread callback.
static void overrideZeroizeThread(struct entropy_source_t *entropy_source) {
  zeroize_thread_count++;
}
// Counting stub for the entropy source's free_thread callback.
static void overrideFreeThread(struct entropy_source_t *entropy_source) {
  free_thread_count++;
}
// Counting stub for get_seed. Does not write into |seed|; the tests here only
// observe call counts, not output values.
static int overrideGetSeed(const struct entropy_source_t *entropy_source,
  uint8_t seed[CTR_DRBG_ENTROPY_LEN]) {
  get_seed_count++;
  return 1;
}
// Counting stub for get_extra_entropy; mirrors overrideGetSeed.
static int overrideGetExtraEntropy(const struct entropy_source_t *entropy_source,
  uint8_t seed[CTR_DRBG_ENTROPY_LEN]) {
  get_extra_entropy_count++;
  return 1;
}
// Counting stub for get_prediction_resistance; always succeeds.
static int overrideGetPredictionResistance(
  const struct entropy_source_t *entropy_source,
  uint8_t seed[RAND_PRED_RESISTANCE_LEN]) {
  get_prediction_resistance_count++;
  return 1;
}
// Runs |func| and verifies that it triggered exactly |expected_count| seed
// (and extra-entropy) fetches from the overridden entropy source. Snapshots
// the per-thread counters first, then compares the deltas. |error_text| tags
// any diagnostic output. Returns false if |func| fails or the counts differ.
static bool assertSeedOrReseed(size_t expected_count, std::function<bool()> func,
  const char *error_text = "") {
  // Snapshot the counters before running the workload.
  cached_get_seed_count = get_seed_count;
  cached_get_extra_entropy_count = get_extra_entropy_count;
  if (!func()) {
    return false;
  }
  const size_t expected_seed = cached_get_seed_count + expected_count;
  const size_t expected_extra = cached_get_extra_entropy_count + expected_count;
  const bool seed_ok = (expected_seed == get_seed_count);
  const bool extra_ok = (expected_extra == get_extra_entropy_count);
  if (seed_ok && extra_ok) {
    return true;
  }
  std::cerr << "Entropy source method expected count mismatch " << error_text << '\n'
    << " Get seed count: expected=" << expected_seed
    << ", actual=" << get_seed_count << '\n'
    << " Get extra entropy count: expected=" << expected_extra
    << ", actual=" << get_extra_entropy_count << '\n';
  return false;
}
// Installs the counting callbacks above as the library's entropy source via
// the test-only override hook. Must run before any randomness is generated in
// the (sub)process under test.
static void overrideEntropySourceMethodsCount() {
  entropy_methods = {
    &overrideInitialize,
    &overrideZeroizeThread,
    &overrideFreeThread,
    &overrideGetSeed,
    &overrideGetExtraEntropy,
    &overrideGetPredictionResistance,
    OVERRIDDEN_ENTROPY_SOURCE
  };
  override_entropy_source_method_FOR_TESTING(&entropy_methods);
}
// Fixture for the isolated (death-test) randomness tests. Delegates UBE
// (Unique Breakage Event, e.g. fork/VM-clone) environment setup and queries
// to the shared UbeBase helper.
class randIsolatedTest : public::testing::Test {
  private:
    UbeBase ube_base_;
  protected:
    void SetUp() override {
      ube_base_.SetUp();
    }
    void TearDown() override {
      ube_base_.TearDown();
    }
    // True when the platform can detect UBE events (so no defensive reseed on
    // every generate call is needed).
    bool UbeIsSupported() const {
      return ube_base_.UbeIsSupported();
    }
    void allowMockedUbe() const {
      ube_base_.allowMockedUbe();
    }
};
// Requests |req_Len| bytes from RAND_bytes into a throwaway buffer. On
// failure, logs a diagnostic tagged with |error_text| and returns false.
static bool generateRandomness(size_t req_Len, const char *error_text = "") {
  std::vector<uint8_t> buffer(req_Len);
  const bool ok = RAND_bytes(buffer.data(), buffer.size()) != 0;
  if (ok) {
    return true;
  }
  std::cerr << "Generating randomness failed " << error_text << '\n';
  return false;
}
// Verifies that every freshly spawned thread seeds its own thread-local DRBG
// on first use. Runs inside EXPECT_EXIT so the entropy-source override stays
// isolated in a child process and cannot leak into other tests.
TEST_F(randIsolatedTest, BasicThread) {
  // Test reseeds are observed when spawning new threads.
  auto testFunc = [this]() {
    // Setup entropy source method override.
    overrideEntropySourceMethodsCount();
    generateRandomness(request_len);
    std::function<void(bool*)> threadFunc = [this](bool *result) {
      // In a fresh thread, we expect a seed.
      bool test1 = assertSeedOrReseed(1, []() {
        return generateRandomness(request_len);
      });
      // If UBE detection is not supported, we expect a reseed again. Otherwise,
      // no reseed is expected.
      size_t expect_reseed = 1;
      if (UbeIsSupported()) {
        expect_reseed = 0;
      }
      bool test2 = assertSeedOrReseed(expect_reseed, []() {
        return generateRandomness(request_len);
      });
      *result = test1 && test2;
    };
    bool exit_code = threadTest(number_of_threads, threadFunc);
    // Death-test protocol: exit code 0 signals success to EXPECT_EXIT.
    exit(exit_code ? 0 : 1);
  };
  EXPECT_EXIT(testFunc(), ::testing::ExitedWithCode(0), "");
}
#if !defined(OPENSSL_WINDOWS)
// Verifies that forked processes reseed their thread-local DRBG state, across
// four scenarios: single fork; fork then new threads; fork then fork again
// before generating; and fork, generate, then fork again. Each scenario runs
// in its own EXPECT_EXIT child so the entropy-source override is isolated.
TEST_F(randIsolatedTest, BasicFork) {
  if (runtimeEmulationIsIntelSde() && addressSanitizerIsEnabled()) {
    GTEST_SKIP() << "Test not supported under Intel SDE + ASAN";
  }
  // Test reseed is observed when forking.
  auto testFuncSingleFork = [this]() {
    // Setup entropy source method override
    overrideEntropySourceMethodsCount();
    generateRandomness(request_len);
    bool exit_code = forkAndRunTest(
      []() {
        // In child. If fork detection is supported, we expect a reseed.
        // If fork detection is not enabled, we also expect a reseed.
        return assertSeedOrReseed(1, []() {
          return generateRandomness(request_len, "child");
        }, "child");
      },
      [this]() {
        // In parent. If UBE detection is not supported, we expect a reseed
        // again. Otherwise, no reseed is expected.
        size_t expect_reseed = 1;
        if (UbeIsSupported()) {
          expect_reseed = 0;
        }
        return assertSeedOrReseed(expect_reseed, []() {
          return generateRandomness(request_len, "parent");
        }, "parent");
      }
    );
    exit(exit_code ? 0 : 1);
  };
  EXPECT_EXIT(testFuncSingleFork(), ::testing::ExitedWithCode(0), "");
  // Test reseed is observed when forking and spawning new threads before
  // generating randomness.
  auto testFuncSingleForkThenThread = [this]() {
    // Setup entropy source method override
    overrideEntropySourceMethodsCount();
    generateRandomness(request_len);
    bool exit_code = forkAndRunTest(
      []() {
        // In child. Spawn a number of threads before generating randomness.
        // If fork detection is supported, we expect a seed in each thread.
        // If fork detection is not enabled, we also expect a seed in each
        // thread.
        std::function<void(bool*)> threadFunc = [](bool *result) {
          *result = assertSeedOrReseed(1, []() {
            return generateRandomness(request_len, "child");
          }, "child");
        };
        return threadTest(number_of_threads, threadFunc);
      },
      [this]() {
        // In parent. If UBE detection is not supported, we expect a reseed
        // again. Otherwise, no reseed is expected.
        size_t expect_reseed = 1;
        if (UbeIsSupported()) {
          expect_reseed = 0;
        }
        return assertSeedOrReseed(expect_reseed, []() {
          return generateRandomness(request_len, "parent");
        }, "parent");
      }
    );
    exit(exit_code ? 0 : 1);
  };
  EXPECT_EXIT(testFuncSingleForkThenThread(), ::testing::ExitedWithCode(0), "");
  // Test reseed is observed when forking and then forking again before
  // generating any randomness.
  auto testFuncDoubleFork = [this]() {
    // Setup entropy source method override
    overrideEntropySourceMethodsCount();
    generateRandomness(request_len);
    bool exit_code = forkAndRunTest(
      []() {
        // In child. Fork again before generating randomness.
        bool exit_code_child = forkAndRunTest(
          []() {
            // In child-child. If fork detection is supported, we expect a
            // reseed. If fork detection is not enabled, we also expect a reseed.
            return assertSeedOrReseed(1, []() {
              return generateRandomness(request_len, "child-child");
            }, "child-child");
          },
          []() {
            // In a forked process, should expect a reseed no matter what.
            return assertSeedOrReseed(1, []() {
              return generateRandomness(request_len, "child-parent");
            }, "child-parent");
          }
        );
        return exit_code_child;
      },
      [this]() {
        // In parent. If UBE detection is not supported, we expect a reseed
        // again. Otherwise, no reseed is expected.
        size_t expect_reseed = 1;
        if (UbeIsSupported()) {
          expect_reseed = 0;
        }
        return assertSeedOrReseed(expect_reseed, []() {
          return generateRandomness(request_len, "parent");
        }, "parent");
      }
    );
    exit(exit_code ? 0 : 1);
  };
  EXPECT_EXIT(testFuncDoubleFork(), ::testing::ExitedWithCode(0), "");
  // Test reseed is observed when forking, generate randomness, and then fork
  // again.
  // BUG FIX: previously the child lambda returned from its first
  // assertSeedOrReseed() immediately, leaving the second forkAndRunTest()
  // unreachable dead code, so the "generate then fork again" scenario was
  // never actually exercised. The early return is now a guarded bail-out.
  auto testFuncForkGenerateFork = [this]() {
    // Setup entropy source method override
    overrideEntropySourceMethodsCount();
    generateRandomness(request_len);
    bool exit_code = forkAndRunTest(
      [this]() {
        // In a forked process, should expect a reseed no matter what. Bail
        // out early if this first check fails.
        if (!assertSeedOrReseed(1, []() {
              return generateRandomness(request_len, "child-parent");
            }, "child-parent")) {
          return false;
        }
        // Now that this process has generated randomness, fork again.
        bool exit_code_child = forkAndRunTest(
          []() {
            // In a forked process, should expect a reseed no matter what.
            return assertSeedOrReseed(1, []() {
              return generateRandomness(request_len, "child-parent");
            }, "child-parent");
          },
          [this]() {
            // In parent. If UBE detection is not supported, we expect a reseed
            // again. Otherwise, no reseed is expected.
            size_t expect_reseed = 1;
            if (UbeIsSupported()) {
              expect_reseed = 0;
            }
            return assertSeedOrReseed(expect_reseed, []() {
              return generateRandomness(request_len, "parent");
            }, "parent");
          }
        );
        return exit_code_child;
      },
      [this]() {
        // In parent. If UBE detection is not supported, we expect a reseed
        // again. Otherwise, no reseed is expected.
        size_t expect_reseed = 1;
        if (UbeIsSupported()) {
          expect_reseed = 0;
        }
        return assertSeedOrReseed(expect_reseed, []() {
          return generateRandomness(request_len, "parent");
        }, "parent");
      }
    );
    exit(exit_code ? 0 : 1);
  };
  EXPECT_EXIT(testFuncForkGenerateFork(), ::testing::ExitedWithCode(0), "");
}
#endif
// Verifies exactly one seed is pulled from the entropy source on first use,
// and that the second generate call reseeds only when UBE detection is
// unavailable.
TEST_F(randIsolatedTest, BasicInitialization) {
  if (runtimeEmulationIsIntelSde() && addressSanitizerIsEnabled()) {
    GTEST_SKIP() << "Test not supported under Intel SDE + ASAN";
  }
  // Test only one seed occurs on initialization.
  auto testFunc = [this]() {
    // Setup entropy source method override.
    overrideEntropySourceMethodsCount();
    bool test1 = assertSeedOrReseed(1, []() {
      return generateRandomness(request_len);
    });
    // In parent. If UBE detection is not supported, we expect a reseed on
    // second invocation. Otherwise, no reseed is expected.
    size_t expect_reseed = 1;
    if (UbeIsSupported()) {
      expect_reseed = 0;
    }
    bool test2 = assertSeedOrReseed(expect_reseed, []() {
      return generateRandomness(request_len);
    });
    // Death-test protocol: exit code 0 signals success to EXPECT_EXIT.
    exit((test1 && test2) ? 0 : 1);
  };
  EXPECT_EXIT(testFunc(), ::testing::ExitedWithCode(0), "");
}
// Test vectors for randomness generation KATs.
// The tests below proves that we can predict the input to the underlying
// CTR-DRBG implementation and that the result is what we expect. Test vectors
// and expected results are computed from an independent implementation. See
// original PR for source code.
namespace RngKatTestUtils {
// KAT scenario selector. "Reseed" = whether a reseed happens between the two
// generate calls; "Pr" = entropy-source prediction resistance; "UserPr" =
// caller-supplied prediction resistance via
// RAND_bytes_with_user_prediction_resistance.
enum class TestType {
  NoReseedNoPr,
  NoReseedAndPr,
  NoReseedAndPrAndUserPr,
  NoReseedNoPrAndUserPr,
  WithReseedNoPr,
  WithReseedAndPr,
  WithReseedAndPrAndUserPr,
  WithReseedNoPrAndUserPr
};
// Which of the two KAT generate calls is currently executing; the entropy
// callbacks use this to pick between first- and second-call test vectors.
enum class GenerationState {
  First,
  Second
};
// Global state for test generation
thread_local GenerationState current_generation_state = GenerationState::First;
// Static holder for the KAT inputs (seeds, personalization, prediction
// resistance) and per-scenario expected outputs. Vectors and outputs were
// computed from an independent implementation (see original PR).
class RngKatTestData {
  public:
    // Length of each of the two generate requests in every KAT.
    static const size_t KAT_GENERATE_REQ_LEN = 64;
    // All entropy-source inputs injected during one KAT run.
    struct TestVectors {
      std::array<uint8_t, CTR_DRBG_ENTROPY_LEN> seed;
      std::array<uint8_t, CTR_DRBG_ENTROPY_LEN> seedPersonalization;
      std::array<uint8_t, CTR_DRBG_ENTROPY_LEN> reseed;
      std::array<uint8_t, CTR_DRBG_ENTROPY_LEN> reseedPersonalization;
      std::array<uint8_t, RAND_PRED_RESISTANCE_LEN> predictionResistance1;
      std::array<uint8_t, RAND_PRED_RESISTANCE_LEN> predictionResistance2;
      std::array<uint8_t, RAND_PRED_RESISTANCE_LEN> userPredictionResistance1;
      std::array<uint8_t, RAND_PRED_RESISTANCE_LEN> userPredictionResistance2;
    };
    // Expected outputs of the first and second generate call for a scenario.
    struct ExpectedOutput {
      std::vector<uint8_t> output1;
      std::vector<uint8_t> output2;
    };
    // Returns the single, shared input vector set (same inputs for all
    // scenarios; only the expected outputs differ per TestType).
    static const TestVectors& getTestVectors() {
      static const TestVectors vectors = {
        // seed
        {{0x22, 0xa8, 0x9e, 0xe0, 0xe3, 0x7b, 0x54, 0xea, 0x63, 0x68, 0x63, 0xd9,
          0xfe, 0xd1, 0x08, 0x21, 0xf1, 0x95, 0x2a, 0x42, 0x84, 0x88, 0xd5, 0x28,
          0xec, 0xeb, 0x9d, 0x2e, 0xc6, 0x9d, 0x57, 0x3e, 0xc6, 0x21, 0x62, 0x16,
          0xfb, 0x3e, 0x8f, 0x72, 0xa1, 0x48, 0xa5, 0xad, 0xa9, 0xd6, 0x20, 0xb1}},
        // seedPersonalization
        {{0x95, 0x3c, 0x10, 0xba, 0xdc, 0xbc, 0xd4, 0x5f, 0xb4, 0xe5, 0x47, 0x58,
          0x26, 0x47, 0x7f, 0xc1, 0x37, 0xac, 0x96, 0xa4, 0x9a, 0xd5, 0x00, 0x5f,
          0xb1, 0x4b, 0xda, 0xf6, 0x46, 0x8a, 0xe7, 0xf4, 0x6c, 0x5d, 0x0d, 0xe2,
          0x2d, 0x30, 0x4a, 0xfc, 0x67, 0x98, 0x96, 0x15, 0xad, 0xc2, 0xe9, 0x83}},
        // reseed
        {{0x8f, 0x84, 0x7a, 0x6f, 0x65, 0x5a, 0x50, 0x45, 0x3b, 0x30, 0x26, 0x1b,
          0x11, 0x06, 0xfc, 0xf1, 0xe7, 0xdc, 0xd2, 0xc7, 0xbd, 0xb2, 0xa8, 0x9d,
          0x93, 0x88, 0x7e, 0x73, 0x69, 0x5e, 0x54, 0x49, 0x3f, 0x34, 0x2a, 0x1f,
          0x15, 0x0a, 0x00, 0xf5, 0xeb, 0xe0, 0xd6, 0xcb, 0xc1, 0xb6, 0xac, 0xa1}},
        // reseedPersonalization
        {{0xc7, 0xc0, 0xb9, 0xb2, 0xab, 0xa4, 0x9d, 0x96, 0x8f, 0x88, 0x81, 0x7a,
          0x73, 0x6c, 0x65, 0x5e, 0x57, 0x50, 0x49, 0x42, 0x3b, 0x34, 0x2d, 0x26,
          0x1f, 0x18, 0x11, 0x0a, 0x03, 0xfc, 0xf5, 0xee, 0xe7, 0xe0, 0xd9, 0xd2,
          0xcb, 0xc4, 0xbd, 0xb6, 0xaf, 0xa8, 0xa1, 0x9a, 0x93, 0x8c, 0x85, 0x7e}},
        // predictionResistance1
        {{0x9d, 0x95, 0x8d, 0x85, 0x7d, 0x75, 0x6d, 0x65, 0x5d, 0x55, 0x4d, 0x45,
          0x3d, 0x35, 0x2d, 0x25, 0x1d, 0x15, 0x0d, 0x05, 0xfd, 0xf5, 0xed, 0xe5,
          0xdd, 0xd5, 0xcd, 0xc5, 0xbd, 0xb5, 0xad, 0xa5}},
        // predictionResistance2
        {{0xb5, 0xad, 0xa5, 0x9d, 0x95, 0x8d, 0x85, 0x7d, 0x75, 0x6d, 0x65, 0x5d,
          0x55, 0x4d, 0x45, 0x3d, 0x35, 0x2d, 0x25, 0x1d, 0x15, 0x0d, 0x05, 0xfd,
          0xf5, 0xed, 0xe5, 0xdd, 0xd5, 0xcd, 0xc5, 0xbd}},
        // userPredictionResistance1
        {{0x7b, 0x93, 0x45, 0xf2, 0x8d, 0x1c, 0xa4, 0xe6, 0x2f, 0xb8, 0x5d, 0x91,
          0x3c, 0x6a, 0xd4, 0x87, 0x15, 0xc9, 0x4e, 0xb2, 0x7f, 0x38, 0x96, 0x5a,
          0xd1, 0x4c, 0x83, 0x2b, 0xe5, 0x9f, 0x67, 0xa0}},
        // userPredictionResistance2
        {{0xe4, 0x2d, 0x9b, 0x56, 0xf8, 0x3a, 0xc1, 0x7d, 0x95, 0x42, 0xb6, 0x8f,
          0x1e, 0xd3, 0x69, 0xa4, 0x5b, 0xf1, 0x87, 0x2c, 0xd5, 0x9e, 0x43, 0xb8,
          0x6f, 0x12, 0xa7, 0x5d, 0xc4, 0x8b, 0x31, 0xe9}},
      };
      return vectors;
    }
    // Returns the expected generate outputs for |type|. Throws
    // std::out_of_range (via map::at) if |type| has no entry.
    static const ExpectedOutput& getExpectedOutput(TestType type) {
      static const std::map<TestType, ExpectedOutput> outputs = {
        {TestType::NoReseedNoPr, {
          // output1
          {0x8e, 0xe6, 0x11, 0xf4, 0x76, 0x67, 0xa6, 0xab, 0xb5, 0x52, 0x55, 0xda,
           0x07, 0x77, 0x66, 0xd5, 0x8f, 0xb9, 0x5d, 0x9c, 0x83, 0xdb, 0x46, 0x90,
           0x74, 0x65, 0xce, 0x99, 0x8f, 0x54, 0xfb, 0x3b, 0x41, 0x8c, 0x21, 0xd0,
           0x2a, 0x74, 0x32, 0xbb, 0x05, 0x6e, 0x99, 0xcf, 0x00, 0xa1, 0x78, 0x22,
           0xc6, 0x72, 0x1f, 0x48, 0xeb, 0x9a, 0x1d, 0x9f, 0xf2, 0xa1, 0x1c, 0xa0,
           0x4c, 0x2c, 0x37, 0x0d},
          // output2
          {0xf7, 0xfa, 0xb6, 0xa6, 0xfc, 0xf4, 0x45, 0xf0, 0xa0, 0x43, 0x4b, 0x2a,
           0xa0, 0xc6, 0x10, 0xbd, 0xef, 0x54, 0x89, 0xec, 0xd9, 0x54, 0x14, 0x63,
           0x46, 0x23, 0xad, 0xd1, 0x8a, 0x9f, 0x88, 0x8b, 0xca, 0x6b, 0xe1, 0x51,
           0x31, 0x2d, 0x1b, 0x9e, 0x8f, 0x83, 0xbd, 0x0a, 0xca, 0xd6, 0x23, 0x4d,
           0x3b, 0xcc, 0xc1, 0x1b, 0x63, 0xa4, 0x0d, 0x6f, 0xbf, 0xf4, 0x48, 0xf6,
           0x7d, 0xb0, 0xb9, 0x1f},
        }},
        {TestType::NoReseedAndPr, {
          // output1
          {0x08, 0xe0, 0xa7, 0x5e, 0xb3, 0x83, 0x2e, 0xfe, 0xbf, 0xa9, 0x79, 0x98,
           0x27, 0x12, 0xad, 0x1f, 0x31, 0x39, 0x86, 0x10, 0xf6, 0xaf, 0x6e, 0xfc,
           0x0f, 0x9c, 0x18, 0x21, 0x50, 0x16, 0x4f, 0xf1, 0x96, 0x6e, 0x8b, 0xd3,
           0x6f, 0x15, 0x39, 0x6b, 0x38, 0x29, 0x9c, 0x75, 0xf7, 0x34, 0x43, 0xc1,
           0xa7, 0x69, 0x5e, 0x61, 0xce, 0xa2, 0x92, 0x05, 0x86, 0x7b, 0x95, 0x42,
           0x24, 0x6e, 0x24, 0x5a},
          // output2
          {0x89, 0x80, 0xf5, 0x04, 0xbc, 0x05, 0xed, 0x77, 0xa7, 0x45, 0x7a, 0x5e,
           0x41, 0x91, 0xd7, 0x70, 0xad, 0xcf, 0x61, 0xd8, 0x49, 0x74, 0xde, 0x76,
           0x27, 0xd7, 0x21, 0x06, 0x4a, 0x0c, 0x63, 0xb5, 0xc4, 0x4e, 0xfb, 0x1d,
           0x2b, 0xd7, 0x0c, 0x4e, 0x9b, 0x08, 0xbc, 0x02, 0xcb, 0x7e, 0xee, 0x3c,
           0x03, 0xab, 0x59, 0x77, 0xc7, 0xbb, 0x8e, 0x4c, 0x90, 0xee, 0x83, 0x1e,
           0x63, 0xd5, 0xf2, 0x4c},
        }},
        {TestType::NoReseedAndPrAndUserPr, {
          // output1
          {0x03, 0xa1, 0xe5, 0x62, 0x23, 0xd9, 0x9d, 0xef, 0x40, 0x41, 0x16, 0xab,
           0x86, 0xbe, 0x6d, 0x5e, 0x10, 0x29, 0xc8, 0xb2, 0xc8, 0xfa, 0x37, 0xd0,
           0x8d, 0xa5, 0xd5, 0xf4, 0x7d, 0x9c, 0x87, 0x15, 0x6f, 0x7c, 0x82, 0xe5,
           0xab, 0xd8, 0x27, 0xb9, 0x82, 0xce, 0x9c, 0x83, 0xf2, 0x4d, 0x08, 0x02,
           0x92, 0xfa, 0x7d, 0x4f, 0x0f, 0x93, 0x0d, 0x1b, 0xd1, 0x41, 0x28, 0xac,
           0x2d, 0x06, 0x81, 0xf0},
          // output2
          {0x68, 0x30, 0x61, 0xfb, 0xde, 0x35, 0x3b, 0x5b, 0x6f, 0xc4, 0x75, 0x3d,
           0x08, 0x07, 0xa5, 0xb9, 0x73, 0xce, 0x08, 0x0b, 0x27, 0x78, 0x96, 0xe5,
           0x9d, 0xb2, 0x25, 0xcc, 0x38, 0x3d, 0x03, 0xf4, 0xc1, 0xae, 0x70, 0x1f,
           0xbc, 0xbf, 0xbd, 0x28, 0x2d, 0x3c, 0x2b, 0x95, 0xd0, 0x96, 0xcb, 0xd5,
           0xc0, 0xba, 0xb8, 0x7c, 0x71, 0x39, 0x44, 0x96, 0xb5, 0x3b, 0xe1, 0xf9,
           0x59, 0xcd, 0xb2, 0xb8},
        }},
        {TestType::NoReseedNoPrAndUserPr, {
          // output1
          {0xcd, 0x7c, 0xba, 0x56, 0x74, 0x37, 0x7d, 0x9f, 0xb1, 0x43, 0x6f, 0xdf,
           0xaa, 0x63, 0xa5, 0x12, 0x24, 0xec, 0x8e, 0xe8, 0x3a, 0xf0, 0x4a, 0xc7,
           0xab, 0x3f, 0x57, 0x8d, 0xe8, 0xb4, 0x50, 0x41, 0x60, 0xfd, 0xd2, 0x5f,
           0x0c, 0x04, 0x45, 0xca, 0x75, 0xf7, 0x71, 0x06, 0x2b, 0x78, 0xd3, 0xef,
           0xcd, 0x4b, 0x4b, 0x24, 0xc1, 0xda, 0x9e, 0x24, 0x46, 0x5a, 0x4f, 0x2b,
           0x08, 0x77, 0x35, 0x5f},
          // output2
          {0xac, 0x96, 0x12, 0x4f, 0x2c, 0x07, 0x93, 0x9c, 0x45, 0x67, 0x4b, 0x54,
           0x69, 0x5a, 0x8a, 0x2c, 0x79, 0x3c, 0x7e, 0xef, 0xe5, 0x5c, 0xcb, 0x98,
           0xbd, 0x0d, 0xef, 0xce, 0x53, 0x66, 0x6f, 0x26, 0xb6, 0xf7, 0x26, 0x23,
           0xfc, 0x8b, 0x71, 0x1d, 0xd7, 0xcf, 0xb1, 0xce, 0xce, 0xb0, 0x87, 0x95,
           0xd6, 0x8b, 0xa3, 0xf8, 0x52, 0xc7, 0xb7, 0xd1, 0x91, 0x1d, 0x4a, 0xc2,
           0x4c, 0x33, 0x79, 0x33},
        }},
        {TestType::WithReseedNoPr, {
          // output1
          {0x8e, 0xe6, 0x11, 0xf4, 0x76, 0x67, 0xa6, 0xab, 0xb5, 0x52, 0x55, 0xda,
           0x07, 0x77, 0x66, 0xd5, 0x8f, 0xb9, 0x5d, 0x9c, 0x83, 0xdb, 0x46, 0x90,
           0x74, 0x65, 0xce, 0x99, 0x8f, 0x54, 0xfb, 0x3b, 0x41, 0x8c, 0x21, 0xd0,
           0x2a, 0x74, 0x32, 0xbb, 0x05, 0x6e, 0x99, 0xcf, 0x00, 0xa1, 0x78, 0x22,
           0xc6, 0x72, 0x1f, 0x48, 0xeb, 0x9a, 0x1d, 0x9f, 0xf2, 0xa1, 0x1c, 0xa0,
           0x4c, 0x2c, 0x37, 0x0d},
          // output2
          {0x62, 0xb9, 0xd5, 0x94, 0x68, 0x6a, 0xce, 0x23, 0x33, 0xf1, 0xcb, 0xe9,
           0x73, 0x2e, 0x15, 0xfd, 0x9f, 0x6d, 0xe5, 0xf5, 0x58, 0x9d, 0x2f, 0xc1,
           0x41, 0xf6, 0x13, 0x2e, 0x2d, 0x64, 0x5c, 0x09, 0x3d, 0x9f, 0xa9, 0xf2,
           0x2b, 0x91, 0xe1, 0x55, 0x07, 0x29, 0x1d, 0x97, 0xac, 0xb8, 0x6b, 0x4e,
           0x85, 0xb4, 0x72, 0xdc, 0x32, 0x1b, 0x82, 0x11, 0xbb, 0x30, 0x20, 0xa7,
           0xe7, 0xde, 0x71, 0x67},
        }},
        {TestType::WithReseedAndPr, {
          // output1
          {0x08, 0xe0, 0xa7, 0x5e, 0xb3, 0x83, 0x2e, 0xfe, 0xbf, 0xa9, 0x79, 0x98,
           0x27, 0x12, 0xad, 0x1f, 0x31, 0x39, 0x86, 0x10, 0xf6, 0xaf, 0x6e, 0xfc,
           0x0f, 0x9c, 0x18, 0x21, 0x50, 0x16, 0x4f, 0xf1, 0x96, 0x6e, 0x8b, 0xd3,
           0x6f, 0x15, 0x39, 0x6b, 0x38, 0x29, 0x9c, 0x75, 0xf7, 0x34, 0x43, 0xc1,
           0xa7, 0x69, 0x5e, 0x61, 0xce, 0xa2, 0x92, 0x05, 0x86, 0x7b, 0x95, 0x42,
           0x24, 0x6e, 0x24, 0x5a},
          // output2
          {0x26, 0xaf, 0x50, 0x62, 0xec, 0xf2, 0xf9, 0x7b, 0x21, 0x31, 0xbf, 0x74,
           0x68, 0xa1, 0x1a, 0xd9, 0x9a, 0x0d, 0xf6, 0x4f, 0x51, 0xa4, 0xaa, 0x61,
           0x14, 0x7f, 0x5f, 0x68, 0x27, 0x80, 0xe4, 0x61, 0x34, 0xad, 0xa3, 0x8e,
           0x7d, 0x03, 0x59, 0xab, 0x24, 0xbe, 0x96, 0x65, 0xd3, 0x64, 0x5d, 0x34,
           0xdf, 0x77, 0xb2, 0x65, 0x57, 0xd2, 0xc3, 0xff, 0x40, 0xf8, 0xde, 0x8d,
           0x0a, 0xb5, 0x98, 0x1f},
        }},
        {TestType::WithReseedAndPrAndUserPr, {
          // output1
          {0x03, 0xa1, 0xe5, 0x62, 0x23, 0xd9, 0x9d, 0xef, 0x40, 0x41, 0x16, 0xab,
           0x86, 0xbe, 0x6d, 0x5e, 0x10, 0x29, 0xc8, 0xb2, 0xc8, 0xfa, 0x37, 0xd0,
           0x8d, 0xa5, 0xd5, 0xf4, 0x7d, 0x9c, 0x87, 0x15, 0x6f, 0x7c, 0x82, 0xe5,
           0xab, 0xd8, 0x27, 0xb9, 0x82, 0xce, 0x9c, 0x83, 0xf2, 0x4d, 0x08, 0x02,
           0x92, 0xfa, 0x7d, 0x4f, 0x0f, 0x93, 0x0d, 0x1b, 0xd1, 0x41, 0x28, 0xac,
           0x2d, 0x06, 0x81, 0xf0},
          // output2
          {0x9d, 0x72, 0x6a, 0xa2, 0xde, 0xbb, 0xfd, 0xab, 0x01, 0x05, 0xdc, 0xb3,
           0xf6, 0x7d, 0x50, 0x18, 0xa3, 0x3a, 0xd5, 0xfb, 0x7c, 0xf9, 0xd4, 0x50,
           0xad, 0x20, 0xee, 0x09, 0xcb, 0xc2, 0x71, 0x88, 0x1d, 0x5e, 0xbc, 0x56,
           0xc0, 0x64, 0x5f, 0xe8, 0xe6, 0xe7, 0x76, 0xca, 0x7f, 0x8b, 0xfd, 0xd2,
           0xf8, 0x16, 0x83, 0xfa, 0xe0, 0xf2, 0xa8, 0xed, 0x58, 0xdf, 0x73, 0xca,
           0x38, 0x6a, 0xfa, 0xb6},
        }},
        {TestType::WithReseedNoPrAndUserPr, {
          // output1
          {0xcd, 0x7c, 0xba, 0x56, 0x74, 0x37, 0x7d, 0x9f, 0xb1, 0x43, 0x6f, 0xdf,
           0xaa, 0x63, 0xa5, 0x12, 0x24, 0xec, 0x8e, 0xe8, 0x3a, 0xf0, 0x4a, 0xc7,
           0xab, 0x3f, 0x57, 0x8d, 0xe8, 0xb4, 0x50, 0x41, 0x60, 0xfd, 0xd2, 0x5f,
           0x0c, 0x04, 0x45, 0xca, 0x75, 0xf7, 0x71, 0x06, 0x2b, 0x78, 0xd3, 0xef,
           0xcd, 0x4b, 0x4b, 0x24, 0xc1, 0xda, 0x9e, 0x24, 0x46, 0x5a, 0x4f, 0x2b,
           0x08, 0x77, 0x35, 0x5f},
          // output2
          {0x58, 0x7f, 0xbe, 0x6a, 0x63, 0x4a, 0x2f, 0x2f, 0x14, 0xd1, 0x40, 0xb1,
           0x9d, 0xa1, 0x22, 0xd4, 0x41, 0x42, 0x62, 0xc8, 0x8f, 0xb6, 0xd5, 0x91,
           0xa6, 0x7c, 0x89, 0x00, 0x87, 0xa1, 0xd5, 0xeb, 0x8f, 0x4f, 0x2d, 0xc8,
           0x0a, 0x95, 0x62, 0x2b, 0xfa, 0x1e, 0x51, 0x00, 0xb5, 0x26, 0x05, 0xdd,
           0xbe, 0xff, 0x40, 0xe8, 0x73, 0x1d, 0xde, 0xa4, 0x86, 0xfd, 0x58, 0x31,
           0x5c, 0x78, 0xcd, 0x16},
        }},
      };
      return outputs.at(type);
    }
    // Debug helper: hex-dumps |v| to stdout prefixed with |label|.
    static void printVector(const std::vector<uint8_t>& v, const char *label) {
      printf("%s: ", label);
      for (uint8_t byte : v) {
        printf("%02x ", byte);
      }
      printf("\n");
    }
};
// Drives one KAT scenario: installs deterministic entropy-source callbacks
// that replay the fixed test vectors, then performs two generate calls and
// compares them against the scenario's expected outputs. Relies on the
// thread-local current_generation_state to route first- vs second-call
// vectors through the static callbacks.
class RngKatTestEnv {
  public:
    explicit RngKatTestEnv(TestType type) : kat_test_type_(type) {
      setupEntropyMethods();
    }
    // Runs both generate calls; returns true only if both match expectations.
    bool runTest() {
      const auto& expected = RngKatTestData::getExpectedOutput(kat_test_type_);
      current_generation_state = GenerationState::First;
      bool test1 = generateRandomness(expected.output1, "gen 1");
      current_generation_state = GenerationState::Second;
      bool test2 = generateRandomness(expected.output2, "gen 2");
      return test1 && test2;
    }
  private:
    TestType kat_test_type_;
    // Must outlive the override registration below.
    entropy_source_methods entropy_methods_;
    using PredictionResistanceCallback = int (*)(const struct entropy_source_t*,
      uint8_t[RAND_PRED_RESISTANCE_LEN]);
    // Deterministic callbacks: no-ops for lifecycle, fixed vectors for seeds.
    static int overrideInitializeKat(struct entropy_source_t *entropy_source) {
      return 1;
    }
    static void overrideZeroizeThreadKat(struct entropy_source_t *entropy_source) {}
    static void overrideFreeThreadKat(struct entropy_source_t *entropy_source) {}
    // Supplies the seed vector on the first generation and the reseed vector
    // on the second.
    static int overrideGetSeedKat(const struct entropy_source_t *entropy_source,
      uint8_t seed[CTR_DRBG_ENTROPY_LEN]) {
      const auto& vectors = RngKatTestData::getTestVectors();
      switch (current_generation_state) {
        case GenerationState::First:
          std::copy(vectors.seed.begin(), vectors.seed.end(), seed);
          return 1;
        case GenerationState::Second:
          std::copy(vectors.reseed.begin(), vectors.reseed.end(), seed);
          return 1;
        default:
          return 0;
      }
    }
    // Supplies the matching personalization vector per generation phase.
    static int overrideGetExtraEntropyKat(const struct entropy_source_t *entropy_source,
      uint8_t seed[CTR_DRBG_ENTROPY_LEN]) {
      const auto& vectors = RngKatTestData::getTestVectors();
      switch (current_generation_state) {
        case GenerationState::First:
          std::copy(vectors.seedPersonalization.begin(),
            vectors.seedPersonalization.end(), seed);
          return 1;
        case GenerationState::Second:
          std::copy(vectors.reseedPersonalization.begin(),
            vectors.reseedPersonalization.end(), seed);
          return 1;
        default:
          return 0;
      }
    }
    // Supplies the matching prediction-resistance vector per generation phase.
    static int overrideGetPredictionResistanceKat(
      const struct entropy_source_t *entropy_source,
      uint8_t seed[RAND_PRED_RESISTANCE_LEN]) {
      const auto& vectors = RngKatTestData::getTestVectors();
      switch (current_generation_state) {
        case GenerationState::First:
          std::copy(vectors.predictionResistance1.begin(),
            vectors.predictionResistance1.end(), seed);
          return 1;
        case GenerationState::Second:
          std::copy(vectors.predictionResistance2.begin(),
            vectors.predictionResistance2.end(), seed);
          return 1;
        default:
          return 0;
      }
    }
    // Only "AndPr" scenarios install a prediction-resistance callback; the
    // others leave it null so the library skips that input entirely.
    static PredictionResistanceCallback getPredictionResistanceCallback(TestType type) {
      if (type == TestType::NoReseedAndPrAndUserPr ||
          type == TestType::WithReseedAndPrAndUserPr ||
          type == TestType::NoReseedAndPr ||
          type == TestType::WithReseedAndPr) {
        return &overrideGetPredictionResistanceKat;
      }
      return nullptr;
    }
    void setupEntropyMethods() {
      entropy_methods_ = {
        &overrideInitializeKat,
        &overrideZeroizeThreadKat,
        &overrideFreeThreadKat,
        &overrideGetSeedKat,
        &overrideGetExtraEntropyKat,
        getPredictionResistanceCallback(kat_test_type_),
        OVERRIDDEN_ENTROPY_SOURCE
      };
      override_entropy_source_method_FOR_TESTING(&entropy_methods_);
    }
    // Generates KAT_GENERATE_REQ_LEN bytes — via the user-PR entry point for
    // "UserPr" scenarios, plain RAND_bytes otherwise — and compares against
    // |expected_output|, dumping both buffers on mismatch.
    bool generateRandomness(const std::vector<uint8_t>& expected_output,
      const char* error_text) {
      std::vector<uint8_t> output(RngKatTestData::KAT_GENERATE_REQ_LEN);
      bool success;
      if (kat_test_type_ == TestType::NoReseedAndPrAndUserPr ||
          kat_test_type_ == TestType::NoReseedNoPrAndUserPr ||
          kat_test_type_ == TestType::WithReseedAndPrAndUserPr ||
          kat_test_type_ == TestType::WithReseedNoPrAndUserPr) {
        const auto& vectors = RngKatTestData::getTestVectors();
        const std::array<uint8_t, RAND_PRED_RESISTANCE_LEN>* pr = nullptr;
        switch (current_generation_state) {
          case GenerationState::First:
            pr = &vectors.userPredictionResistance1;
            break;
          case GenerationState::Second:
            pr = &vectors.userPredictionResistance2;
            break;
          default:
            return false;
        }
        success = RAND_bytes_with_user_prediction_resistance(output.data(),
          output.size(), pr->data());
      } else {
        success = RAND_bytes(output.data(), output.size());
      }
      if (!success) {
        std::cerr << "Generating randomness failed " << error_text << '\n';
        return false;
      }
      if (expected_output != output) {
        std::cerr << "Generated randomness not equal to expected value "
          << error_text << '\n';
        RngKatTestData::printVector(output, "output");
        RngKatTestData::printVector(expected_output, "expected_output");
        return false;
      }
      return true;
    }
};
} // namespace RngKatTestUtils
// Runs the no-reseed KAT scenarios: with UBE detection available, no reseed
// happens between the two generate calls. Each scenario runs in its own
// EXPECT_EXIT child so the deterministic entropy override stays isolated.
TEST_F(randIsolatedTest, RngKatWithUbe) {
  if (!UbeIsSupported()) {
    GTEST_SKIP() << "Test not supported when UBE is not supported";
  }
  if (runtimeEmulationIsIntelSde() && addressSanitizerIsEnabled()) {
    GTEST_SKIP() << "Test not supported under Intel SDE + ASAN";
  }
  auto runTest = [](RngKatTestUtils::TestType type) {
    RngKatTestUtils::RngKatTestEnv env(type);
    // Death-test protocol: exit code 0 signals success to EXPECT_EXIT.
    exit(env.runTest() ? 0 : 1);
  };
  EXPECT_EXIT(runTest(RngKatTestUtils::TestType::NoReseedNoPr),
    ::testing::ExitedWithCode(0), "");
  EXPECT_EXIT(runTest(RngKatTestUtils::TestType::NoReseedAndPr),
    ::testing::ExitedWithCode(0), "");
  EXPECT_EXIT(runTest(RngKatTestUtils::TestType::NoReseedAndPrAndUserPr),
    ::testing::ExitedWithCode(0), "");
  EXPECT_EXIT(runTest(RngKatTestUtils::TestType::NoReseedNoPrAndUserPr),
    ::testing::ExitedWithCode(0), "");
}
// Runs the with-reseed KAT scenarios: without UBE detection, a reseed occurs
// before the second generate call, so the "WithReseed" expected outputs apply.
TEST_F(randIsolatedTest, RngKatNoUbe) {
  if (UbeIsSupported()) {
    GTEST_SKIP() << "Test not supported when UBE is supported";
  }
  if (runtimeEmulationIsIntelSde() && addressSanitizerIsEnabled()) {
    GTEST_SKIP() << "Test not supported under Intel SDE + ASAN";
  }
  auto runTest = [](RngKatTestUtils::TestType type) {
    RngKatTestUtils::RngKatTestEnv env(type);
    // Death-test protocol: exit code 0 signals success to EXPECT_EXIT.
    exit(env.runTest() ? 0 : 1);
  };
  EXPECT_EXIT(runTest(RngKatTestUtils::TestType::WithReseedNoPr),
    ::testing::ExitedWithCode(0), "");
  EXPECT_EXIT(runTest(RngKatTestUtils::TestType::WithReseedAndPr),
    ::testing::ExitedWithCode(0), "");
  EXPECT_EXIT(runTest(RngKatTestUtils::TestType::WithReseedAndPrAndUserPr),
    ::testing::ExitedWithCode(0), "");
  EXPECT_EXIT(runTest(RngKatTestUtils::TestType::WithReseedNoPrAndUserPr),
    ::testing::ExitedWithCode(0), "");
}
#else // GTEST_HAS_DEATH_TEST
// Placeholder compiled when GTEST_HAS_DEATH_TEST is unavailable; every test
// above requires death-test support, so the whole suite is skipped.
TEST(randIsolatedTest, SkippedALL) {
  GTEST_SKIP() << "All randIsolatedTest tests are not supported due to Death Tests not supported on this platform";
}
#endif

View File

@@ -0,0 +1,541 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC
#include <gtest/gtest.h>
#include <stdio.h>
#include <openssl/ctrdrbg.h>
#include <openssl/mem.h>
#include <openssl/rand.h>
#include <openssl/span.h>
#include "internal.h"
#include "entropy/internal.h"
#include "../../ube/internal.h"
#include "../../test/abi_test.h"
#include "../../test/ube_test.h"
#include "../../test/test_util.h"
#if defined(OPENSSL_THREADS)
#include <array>
#include <thread>
#include <vector>
#endif
#if !defined(OPENSSL_WINDOWS)
#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#endif
// Largest request exercised below: just past two maximum-length DRBG
// generate calls, forcing the chunking path.
#define MAX_REQUEST_SIZE (CTR_DRBG_MAX_GENERATE_LENGTH * 2 + 1)
// Default request size and thread fan-out shared by the tests in this file.
static const size_t global_request_len = 64;
static const size_t number_of_threads = 8;
// Exercises every exported randomness entry point with one |request_len|-byte
// request into |out_buf|; |out_buf| must hold at least |request_len| bytes.
// Uses ASSERT_TRUE, so it must run on the main gtest thread of its process.
static void test_all_exported_functions(size_t request_len, uint8_t *out_buf,
  uint8_t user_pred_res[RAND_PRED_RESISTANCE_LEN]) {
  ASSERT_TRUE(RAND_bytes(out_buf, request_len));
  ASSERT_TRUE(RAND_priv_bytes(out_buf, request_len));
  ASSERT_TRUE(RAND_public_bytes(out_buf, request_len));
  ASSERT_TRUE(RAND_pseudo_bytes(out_buf, request_len));
  ASSERT_TRUE(RAND_bytes_with_user_prediction_resistance(out_buf, request_len, user_pred_res));
}
// Fixture for the in-process randomness tests. SetUp warms up every exported
// entry point so the thread-local randomness state is initialized before the
// test body runs; UBE environment handling is delegated to UbeBase.
class randTest : public::testing::Test {
  private:
    UbeBase ube_base_;
  protected:
    void SetUp() override {
      ube_base_.SetUp();
      // Ensure randomness generation state is initialized.
      uint8_t *randomness = (uint8_t *) OPENSSL_zalloc(MAX_REQUEST_SIZE);
      bssl::UniquePtr<uint8_t> deleter(randomness);
      uint8_t user_prediction_resistance[RAND_PRED_RESISTANCE_LEN] = {0};
      test_all_exported_functions(global_request_len, randomness, user_prediction_resistance);
    }
    void TearDown() override {
      ube_base_.TearDown();
    }
    // True when the platform can detect UBE events (fork/VM clone).
    bool UbeIsSupported() const {
      return ube_base_.UbeIsSupported();
    }
    void allowMockedUbe() const {
      return ube_base_.allowMockedUbe();
    }
};
// Thread body: smoke-tests all exported functions over request sizes 0..64
// and around the CTR_DRBG_MAX_GENERATE_LENGTH chunking boundary. Sets
// |*returnFlag| to true only if every assertion passes.
static void randBasicTests(bool *returnFlag) {
  // Do not use stack arrays for these. For example, Alpine OS has too low
  // default thread stack size limit to accommodate.
  uint8_t *randomness = (uint8_t *) OPENSSL_zalloc(MAX_REQUEST_SIZE);
  bssl::UniquePtr<uint8_t> deleter(randomness);
  uint8_t user_prediction_resistance[RAND_PRED_RESISTANCE_LEN] = {0};
  // Sizes 0..64 cover the empty request and small-request paths.
  for (size_t i = 0; i < 65; i++) {
    test_all_exported_functions(i, randomness, user_prediction_resistance);
  }
  // Sizes straddling the per-call generate limit exercise request chunking.
  for (size_t i : {CTR_DRBG_MAX_GENERATE_LENGTH-1, CTR_DRBG_MAX_GENERATE_LENGTH, CTR_DRBG_MAX_GENERATE_LENGTH + 1, CTR_DRBG_MAX_GENERATE_LENGTH * 2}) {
    test_all_exported_functions(i, randomness, user_prediction_resistance);
  }
  *returnFlag = true;
}
// Runs the basic smoke test concurrently on several threads.
TEST_F(randTest, Basic) {
  ASSERT_TRUE(threadTest(number_of_threads, randBasicTests));
}
#if !defined(AWSLC_VM_UBE_TESTING)
// VM UBE testing is globally configured via a file. Predicting reseeding is
// sensitive to testing VM UBE in parallel because a UBE-triggered reseed can
// happen during execution.
// Thread body: with UBE detection available, the only reseeds are the
// deterministic ones at kCtrDrbgReseedInterval generate calls, so the reseed
// point can be predicted exactly from the generate-call counter.
static void randReseedIntervalUbeIsSupportedTests(bool *returnFlag) {
  uint8_t *randomness = (uint8_t *) OPENSSL_zalloc(CTR_DRBG_MAX_GENERATE_LENGTH * 5 + 1);
  bssl::UniquePtr<uint8_t> deleter(randomness);
  // If in a new thread, this will initialize the state.
  ASSERT_TRUE(RAND_bytes(randomness, global_request_len));
  uint64_t reseed_calls_since_initialization = get_private_thread_reseed_calls_since_initialization();
  uint64_t generate_calls_since_seed = get_private_thread_generate_calls_since_seed();
  // First check that we can predict when a reseed happens based on the current
  // number of invoked generate calls. After the loop, we expect to be one
  // invoke generate call from a reseed.
  for(size_t i = 0; i < (kCtrDrbgReseedInterval - generate_calls_since_seed); i++) {
    ASSERT_TRUE(RAND_bytes(randomness, 1));
    ASSERT_EQ(get_private_thread_reseed_calls_since_initialization(), reseed_calls_since_initialization);
  }
  // This generate call crosses the interval: reseed fires, counter restarts.
  ASSERT_TRUE(RAND_bytes(randomness, 1));
  ASSERT_EQ(get_private_thread_reseed_calls_since_initialization(), reseed_calls_since_initialization + 1);
  ASSERT_EQ(get_private_thread_generate_calls_since_seed(), 1ULL);
  ASSERT_TRUE(RAND_bytes(randomness, 1));
  ASSERT_EQ(get_private_thread_reseed_calls_since_initialization(), reseed_calls_since_initialization + 1);
  ASSERT_EQ(get_private_thread_generate_calls_since_seed(), 2ULL);
  // Should be able to perform kCtrDrbgReseedInterval-2 generate calls before a
  // reseed is emitted. Requesting
  // CTR_DRBG_MAX_GENERATE_LENGTH * (kCtrDrbgReseedInterval-2) + 1 would require
  // quite a large buffer. Instead iterate until we need
  // 5 iterations and request 5 * CTR_DRBG_MAX_GENERATE_LENGTH+1, which is a
  // much smaller buffer.
  for(size_t i = 0; i < (kCtrDrbgReseedInterval - 7); i++) {
    ASSERT_TRUE(RAND_bytes(randomness, 1));
    ASSERT_EQ(get_private_thread_reseed_calls_since_initialization(), reseed_calls_since_initialization + 1);
    ASSERT_EQ(get_private_thread_generate_calls_since_seed(), 2 + (i + 1));
  }
  // 2 + (kCtrDrbgReseedInterval - 7) generate calls have happened since the
  // last reseed.
  ASSERT_EQ(get_private_thread_generate_calls_since_seed(), kCtrDrbgReseedInterval - 5);
  // A multi-chunk request needing 6 generate calls must trip the interval.
  size_t request_len_new_reseed = CTR_DRBG_MAX_GENERATE_LENGTH * 5 + 1;
  ASSERT_TRUE(RAND_bytes(randomness, request_len_new_reseed));
  ASSERT_EQ(get_private_thread_reseed_calls_since_initialization(), reseed_calls_since_initialization + 2);
  ASSERT_EQ(get_private_thread_generate_calls_since_seed(), 1ULL);
  *returnFlag = true;
}
// Only meaningful when UBE detection is available; otherwise the test is
// skipped.
TEST_F(randTest, ReseedIntervalWhenUbeIsSupported) {
  if (UbeIsSupported()) {
    ASSERT_TRUE(threadTest(number_of_threads, randReseedIntervalUbeIsSupportedTests));
  } else {
    GTEST_SKIP() << "UBE detection is not supported";
  }
}
// Thread body: when UBE detection is NOT supported, the assertions below pin
// that each randomness request increments the reseed counter and leaves the
// generate-call counter at 1 (i.e. every request is served from a fresh
// seed). Sets |*returnFlag| to true so threadTest() can observe completion.
static void randReseedIntervalUbeNotSupportedTests(bool *returnFlag) {
  uint8_t *randomness = (uint8_t *) OPENSSL_zalloc(CTR_DRBG_MAX_GENERATE_LENGTH);
  bssl::UniquePtr<uint8_t> deleter(randomness);

  // If in a new thread, this will initialize the state.
  ASSERT_TRUE(RAND_bytes(randomness, global_request_len));
  uint64_t generate_calls_since_seed = get_private_thread_generate_calls_since_seed();
  uint64_t reseed_calls_since_initialization = get_private_thread_reseed_calls_since_initialization();
  if (kCtrDrbgReseedInterval - generate_calls_since_seed < 2) {
    // Ensure the reseed interval doesn't conflict with logic below.
    ASSERT_TRUE(RAND_bytes(randomness, 1));
    ASSERT_TRUE(RAND_bytes(randomness, 1));
    // Fix: the two guard calls above may themselves reseed (in this
    // configuration every request reseeds), which would make the cached
    // counter stale and the +1/+2 assertions below fail. Re-cache it here.
    reseed_calls_since_initialization = get_private_thread_reseed_calls_since_initialization();
  }

  // Each invocation of the randomness generation induce a reseed due to UBE
  // detection not being supported.
  ASSERT_TRUE(RAND_bytes(randomness, 1));
  ASSERT_EQ(get_private_thread_generate_calls_since_seed(), 1ULL);
  ASSERT_EQ(get_private_thread_reseed_calls_since_initialization(), reseed_calls_since_initialization + 1);

  ASSERT_TRUE(RAND_bytes(randomness, 1));
  ASSERT_EQ(get_private_thread_generate_calls_since_seed(), 1ULL);
  ASSERT_EQ(get_private_thread_reseed_calls_since_initialization(), reseed_calls_since_initialization + 2);

  *returnFlag = true;
}
// Only meaningful when UBE detection is unavailable; otherwise the test is
// skipped.
TEST_F(randTest, ReseedIntervalWhenUbeNotSupported) {
  if (!UbeIsSupported()) {
    ASSERT_TRUE(threadTest(number_of_threads, randReseedIntervalUbeNotSupportedTests));
  } else {
    GTEST_SKIP() << "UBE detection is supported";
  }
}
// Drives mocked UBE detection through |set_detection_method_gn|, a callback
// that sets the generation number of one detection method (fork or VM —
// chosen by the caller). Each generation-number bump must cause exactly one
// reseed on the next randomness request, and the generate-call count must
// track the request size: one generate call for requests up to
// CTR_DRBG_MAX_GENERATE_LENGTH, two for requests just above it.
static void MockedUbeDetection(std::function<void(uint64_t)> set_detection_method_gn) {
  // Request sizes chosen to require exactly one and exactly two generate
  // calls respectively.
  const size_t request_size_one_generate = 10;
  const size_t request_size_two_generate = CTR_DRBG_MAX_GENERATE_LENGTH + 1;
  uint64_t current_reseed_calls = 0;
  uint8_t *randomness = (uint8_t *) OPENSSL_zalloc(CTR_DRBG_MAX_GENERATE_LENGTH * 5 + 1);
  bssl::UniquePtr<uint8_t> deleter(randomness);

  // Make sure things are initialized and at default values. Cache
  // current_reseed_calls last in case RAND_bytes() invokes a reseed.
  set_detection_method_gn(1);
  ASSERT_TRUE(RAND_bytes(randomness, request_size_one_generate));
  current_reseed_calls = get_private_thread_reseed_calls_since_initialization();

  // Bump the generation number and expect one reseed. In addition, expect one
  // generate call since request size is less than CTR_DRBG_MAX_GENERATE_LENGTH.
  set_detection_method_gn(2);
  ASSERT_TRUE(RAND_bytes(randomness, request_size_one_generate));
  ASSERT_EQ(get_private_thread_reseed_calls_since_initialization(), current_reseed_calls + 1ULL);
  ASSERT_EQ(get_private_thread_generate_calls_since_seed(), 1ULL);

  // Bump the generation number again and expect one reseed. In addition,
  // expect two generate calls since request size is higher than
  // CTR_DRBG_MAX_GENERATE_LENGTH.
  set_detection_method_gn(3);
  ASSERT_TRUE(RAND_bytes(randomness, request_size_two_generate));
  ASSERT_EQ(get_private_thread_reseed_calls_since_initialization(), current_reseed_calls + 2ULL);
  ASSERT_EQ(get_private_thread_generate_calls_since_seed(), 2ULL);
}
// Exercises mocked UBE detection through both generation-number setters: the
// fork-detection path and the VM (snapshot) detection path.
TEST_F(randTest, UbeDetectionMocked) {
  allowMockedUbe();

  MockedUbeDetection([](uint64_t generation) {
    set_fork_ube_generation_number_FOR_TESTING(generation);
  });

  MockedUbeDetection([](uint64_t generation) {
    set_vm_ube_generation_number_FOR_TESTING(static_cast<uint32_t>(generation));
  });
}
#endif
// Attempts to verify that |RAND_bytes| (equivalent to |RAND_priv_bytes|) and
// |RAND_public_bytes| are independent: drawing from one API must not advance
// the other's per-thread reseed counter.
TEST_F(randTest, PublicPrivateStateIsolation) {
  uint8_t priv_out[64];
  uint8_t pub_out[64];

  // Initialize both thread-local states.
  ASSERT_TRUE(RAND_bytes(priv_out, sizeof(priv_out)));
  ASSERT_TRUE(RAND_public_bytes(pub_out, sizeof(pub_out)));

  // Drawing from the private state must leave the public reseed counter
  // untouched.
  const uint64_t pub_reseeds_before = get_public_thread_reseed_calls_since_initialization();
  ASSERT_TRUE(RAND_bytes(priv_out, sizeof(priv_out)));
  ASSERT_TRUE(RAND_bytes(priv_out, sizeof(priv_out)));
  EXPECT_EQ(get_public_thread_reseed_calls_since_initialization(), pub_reseeds_before);

  // And vice versa: the public API must not advance the private reseed
  // counter.
  const uint64_t priv_reseeds_before = get_private_thread_reseed_calls_since_initialization();
  ASSERT_TRUE(RAND_public_bytes(pub_out, sizeof(pub_out)));
  ASSERT_TRUE(RAND_public_bytes(pub_out, sizeof(pub_out)));
  EXPECT_EQ(get_private_thread_reseed_calls_since_initialization(), priv_reseeds_before);
}
// These tests are, strictly speaking, flaky, but we use large enough buffers
// that the probability of failing when we should pass is negligible.
TEST_F(randTest, NotObviouslyBroken) {
  static const uint8_t kZeros[256] = {0};
  maybeDisableSomeForkUbeDetectMechanisms();

  // Two draws from the private stream, then two from the public stream.
  uint8_t draws[4][256];
  RAND_bytes(draws[0], sizeof(draws[0]));
  RAND_bytes(draws[1], sizeof(draws[1]));
  EXPECT_NE(Bytes(draws[0]), Bytes(draws[1]));
  EXPECT_NE(Bytes(draws[0]), Bytes(kZeros));
  EXPECT_NE(Bytes(draws[1]), Bytes(kZeros));

  RAND_public_bytes(draws[2], sizeof(draws[2]));
  RAND_public_bytes(draws[3], sizeof(draws[3]));
  EXPECT_NE(Bytes(draws[2]), Bytes(draws[3]));
  EXPECT_NE(Bytes(draws[2]), Bytes(kZeros));
  EXPECT_NE(Bytes(draws[3]), Bytes(kZeros));

  // Private and public outputs must also be pairwise distinct.
  for (size_t priv_idx = 0; priv_idx < 2; priv_idx++) {
    for (size_t pub_idx = 2; pub_idx < 4; pub_idx++) {
      EXPECT_NE(Bytes(draws[priv_idx]), Bytes(draws[pub_idx]));
    }
  }

  // Now try with explicit private/public draws, reusing the buffers.
  ASSERT_TRUE(RAND_priv_bytes(draws[0], sizeof(draws[0])));
  ASSERT_TRUE(RAND_public_bytes(draws[1], sizeof(draws[1])));
  EXPECT_NE(Bytes(draws[0]), Bytes(draws[1]));
  ASSERT_TRUE(RAND_priv_bytes(draws[2], sizeof(draws[2])));
  ASSERT_TRUE(RAND_public_bytes(draws[3], sizeof(draws[3])));
  EXPECT_NE(Bytes(draws[2]), Bytes(draws[3]));
  EXPECT_NE(Bytes(draws[0]), Bytes(draws[2]));
  EXPECT_NE(Bytes(draws[1]), Bytes(draws[3]));
}
#if !defined(OPENSSL_WINDOWS) && !defined(OPENSSL_IOS) && \
!defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE)
// Forks a child process, draws |out.size()| bytes from RAND_bytes in the
// child, and streams them back to the parent over a pipe, filling |out| in
// the parent with the child's output. Returns true on success; prints a
// diagnostic and returns false on any syscall failure, unexpected EOF, or
// unclean child exit.
static bool ForkAndRand(bssl::Span<uint8_t> out) {
  int pipefds[2];
  if (pipe(pipefds) < 0) {
    perror("pipe");
    return false;
  }

  // This is a multi-threaded process, but GTest does not run tests concurrently
  // and there currently are no threads, so this should be safe.
  pid_t child = fork();
  if (child < 0) {
    perror("fork");
    close(pipefds[0]);
    close(pipefds[1]);
    return false;
  }

  if (child == 0) {
    // This is the child. Generate entropy and write it to the parent.
    close(pipefds[0]);
    RAND_bytes(out.data(), out.size());
    while (!out.empty()) {
      ssize_t ret = write(pipefds[1], out.data(), out.size());
      if (ret < 0) {
        if (errno == EINTR) {
          continue;
        }
        perror("write");
        _exit(1);
      }
      out = out.subspan(static_cast<size_t>(ret));
    }
    _exit(0);
  }

  // This is the parent. Read the entropy from the child.
  close(pipefds[1]);
  while (!out.empty()) {
    ssize_t ret = read(pipefds[0], out.data(), out.size());
    if (ret <= 0) {
      if (ret == 0) {
        fprintf(stderr, "Unexpected EOF from child.\n");
      } else {
        if (errno == EINTR) {
          continue;
        }
        perror("read");
      }
      close(pipefds[0]);
      return false;
    }
    out = out.subspan(static_cast<size_t>(ret));
  }
  close(pipefds[0]);

  // Wait for the child to exit. Fix: like the read/write loops above, retry
  // on EINTR — waitpid may be interrupted by a signal before the child's
  // status has been collected.
  int status;
  while (waitpid(child, &status, 0) < 0) {
    if (errno != EINTR) {
      perror("waitpid");
      return false;
    }
  }
  if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
    fprintf(stderr, "Child did not exit cleanly.\n");
    return false;
  }
  return true;
}
TEST_F(randTest, Fork) {
  static const uint8_t kZeros[16] = {0};
  maybeDisableSomeForkUbeDetectMechanisms();

  // Draw a little entropy to initialize any internal PRNG buffering.
  uint8_t prime;
  RAND_bytes(&prime, 1);

  // Draw entropy in four child processes and then in the parent process. This
  // test intentionally uses smaller buffers than the others, to minimize the
  // chance of sneaking by with a large enough buffer that we've since reseeded
  // from the OS.
  //
  // All child processes should have different PRNGs, including the ones that
  // disavow fork-safety. Although they are produced by fork, they themselves do
  // not fork after that call.
  uint8_t bufs[5][16];
  for (size_t child = 0; child < 4; child++) {
    ASSERT_TRUE(ForkAndRand(bufs[child]));
  }
  RAND_bytes(bufs[4], sizeof(bufs[4]));

  // Every draw must be non-zero and pairwise distinct.
  for (const auto &buf : bufs) {
    EXPECT_NE(Bytes(buf), Bytes(kZeros));
  }
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(bufs); i++) {
    for (size_t j = 0; j < i; j++) {
      EXPECT_NE(Bytes(bufs[i]), Bytes(bufs[j]))
          << "buffers " << i << " and " << j << " matched";
    }
  }
}
#endif // !OPENSSL_WINDOWS && !OPENSSL_IOS &&
// !BORINGSSL_UNSAFE_DETERMINISTIC_MODE
#if defined(OPENSSL_THREADS)
using RandFunc = int (*)(uint8_t *, size_t);

// Spawns |num_threads| threads that each fill a 256-byte buffer via
// |rand_func|, then checks that every buffer is non-zero and that no two
// threads produced identical output.
static void RunConcurrentRands(size_t num_threads, RandFunc rand_func) {
  static const uint8_t kZeros[256] = {0};

  std::vector<std::array<uint8_t, 256>> outputs(num_threads);
  std::vector<std::thread> workers(num_threads);
  for (size_t i = 0; i < num_threads; i++) {
    workers[i] = std::thread(
        [i, &outputs, rand_func] { rand_func(outputs[i].data(), outputs[i].size()); });
  }
  for (auto &worker : workers) {
    worker.join();
  }

  for (size_t i = 0; i < num_threads; i++) {
    EXPECT_NE(Bytes(outputs[i]), Bytes(kZeros));
    for (size_t j = i + 1; j < num_threads; j++) {
      EXPECT_NE(Bytes(outputs[i]), Bytes(outputs[j]));
    }
  }
}
// Test that threads may concurrently draw entropy without tripping TSan.
TEST_F(randTest, Threads) {
  constexpr size_t kFewerThreads = 10;
  constexpr size_t kMoreThreads = 20;
  maybeDisableSomeForkUbeDetectMechanisms();

  // Exercise concurrency levels that first rise above and then fall below the
  // previously-seen maximum: a parallel draw, a draw with higher concurrency,
  // then one with lower concurrency again.
  for (size_t num_threads : {kFewerThreads, kMoreThreads, kFewerThreads}) {
    RunConcurrentRands(num_threads, RAND_bytes);
    RunConcurrentRands(num_threads, RAND_public_bytes);
  }
}
// This test attempts to verify that when both |RAND_bytes| and
// |RAND_public_bytes| are called across multiple threads, each thread's
// private and public states produce unique output.
TEST_F(randTest, MixedUsageMultiThreaded) {
  static const uint8_t kZeros[256] = {0};
  static constexpr size_t kNumThreads = 10;
  static constexpr size_t kIterationsPerThread = 5;
  maybeDisableSomeForkUbeDetectMechanisms();

  // One output slot per (thread, iteration) pair for each API.
  const size_t total_slots = kNumThreads * kIterationsPerThread;
  std::vector<std::array<uint8_t, 256>> priv_outputs(total_slots);
  std::vector<std::array<uint8_t, 256>> pub_outputs(total_slots);

  std::vector<std::thread> workers(kNumThreads);
  for (size_t t = 0; t < kNumThreads; t++) {
    workers[t] = std::thread([t, &priv_outputs, &pub_outputs] {
      // Each thread alternates between |RAND_bytes| and |RAND_public_bytes|.
      for (size_t iter = 0; iter < kIterationsPerThread; iter++) {
        const size_t slot = t * kIterationsPerThread + iter;
        RAND_bytes(priv_outputs[slot].data(), priv_outputs[slot].size());
        RAND_public_bytes(pub_outputs[slot].data(), pub_outputs[slot].size());
      }
    });
  }
  for (auto &worker : workers) {
    worker.join();
  }

  // The checks below are theoretically probabilistic (could draw all zeros),
  // but in practice that essentially never happens. They are crude checks
  // catching obvious errors in the isolation of the two thread-local states.
  auto expect_nonzero_and_unique =
      [&](const std::vector<std::array<uint8_t, 256>> &bufs) {
        for (size_t i = 0; i < bufs.size(); i++) {
          EXPECT_NE(Bytes(bufs[i]), Bytes(kZeros));
          for (size_t j = i + 1; j < bufs.size(); j++) {
            EXPECT_NE(Bytes(bufs[i]), Bytes(bufs[j]));
          }
        }
      };
  expect_nonzero_and_unique(priv_outputs);
  expect_nonzero_and_unique(pub_outputs);

  // Outputs from the private and public APIs must also never collide with
  // each other.
  for (const auto &priv : priv_outputs) {
    for (const auto &pub : pub_outputs) {
      EXPECT_NE(Bytes(priv), Bytes(pub));
    }
  }
}
#endif // OPENSSL_THREADS
#if defined(OPENSSL_X86_64) && defined(SUPPORTS_ABI_TEST)
// ABI check for the assembly rdrand entry point: a zero-length request with a
// null pointer, plus every multiple-of-eight length up to the buffer size.
TEST_F(randTest, RdrandABI) {
  if (!have_hw_rng_x86_64_for_testing()) {
    fprintf(stderr, "rdrand not supported. Skipping.\n");
    return;
  }

  uint8_t buf[32];
  CHECK_ABI_SEH(CRYPTO_rdrand_multiple8, nullptr, 0);
  for (size_t len = 8; len <= sizeof(buf); len += 8) {
    CHECK_ABI_SEH(CRYPTO_rdrand_multiple8, buf, len);
  }
}
#endif // OPENSSL_X86_64 && SUPPORTS_ABI_TEST