// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <config/bitcoin-config.h> // IWYU pragma: keep

#include <random.h>

#include <compat/compat.h>
#include <compat/cpuid.h>
#include <crypto/chacha20.h>
#include <crypto/sha256.h>
#include <crypto/sha512.h>
#include <logging.h>
#include <randomenv.h>
#include <span.h>
#include <support/allocators/secure.h>
#include <support/cleanse.h>
#include <sync.h>
#include <util/time.h>

#include <array>
#include <cmath>
#include <cstdlib>
#include <optional>
#include <thread>

#ifdef WIN32
#include <windows.h>
#include <wincrypt.h>
#else
#include <fcntl.h>
#include <sys/time.h>
#endif

#if defined(HAVE_GETRANDOM) || (defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX))
#include <sys/random.h>
#endif

#ifdef HAVE_SYSCTL_ARND
#include <sys/sysctl.h>
#endif
#if defined(HAVE_STRONG_GETAUXVAL) && defined(__aarch64__)
#include <sys/auxv.h>
#endif

namespace {

/* Number of random bytes returned by GetOSRand.
 * When changing this constant make sure to change all call sites, and make
 * sure that the underlying OS APIs for all platforms support the number.
 * (many cap out at 256 bytes).
 */
static const int NUM_OS_RANDOM_BYTES = 32;

[[noreturn]] void RandFailure()
{
    LogError("Failed to read randomness, aborting\n");
    std::abort();
}

inline int64_t GetPerformanceCounter() noexcept
{
    // Read the hardware time stamp counter when available.
    // See https://en.wikipedia.org/wiki/Time_Stamp_Counter for more information.
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
    return __rdtsc();
#elif !defined(_MSC_VER) && defined(__i386__)
    uint64_t r = 0;
    __asm__ volatile ("rdtsc" : "=A"(r)); // Constrain the r variable to the eax:edx pair.
    return r;
#elif !defined(_MSC_VER) && (defined(__x86_64__) || defined(__amd64__))
    uint64_t r1 = 0, r2 = 0;
    __asm__ volatile ("rdtsc" : "=a"(r1), "=d"(r2)); // Constrain r1 to rax and r2 to rdx.
    return (r2 << 32) | r1;
#else
    // Fall back to using standard library clock (usually microsecond or nanosecond precision)
    return std::chrono::high_resolution_clock::now().time_since_epoch().count();
#endif
}

#ifdef HAVE_GETCPUID
bool g_rdrand_supported = false;
bool g_rdseed_supported = false;
constexpr uint32_t CPUID_F1_ECX_RDRAND = 0x40000000;
constexpr uint32_t CPUID_F7_EBX_RDSEED = 0x00040000;
#ifdef bit_RDRND
static_assert(CPUID_F1_ECX_RDRAND == bit_RDRND, "Unexpected value for bit_RDRND");
#endif
#ifdef bit_RDSEED
static_assert(CPUID_F7_EBX_RDSEED == bit_RDSEED, "Unexpected value for bit_RDSEED");
#endif

void InitHardwareRand()
{
    uint32_t eax, ebx, ecx, edx;
    GetCPUID(1, 0, eax, ebx, ecx, edx);
    if (ecx & CPUID_F1_ECX_RDRAND) {
        g_rdrand_supported = true;
    }
    GetCPUID(7, 0, eax, ebx, ecx, edx);
    if (ebx & CPUID_F7_EBX_RDSEED) {
        g_rdseed_supported = true;
    }
}

void ReportHardwareRand()
{
    // This must be done in a separate function, as InitHardwareRand() may be indirectly called
    // from global constructors, before logging is initialized.
    if (g_rdseed_supported) {
        LogPrintf("Using RdSeed as an additional entropy source\n");
    }
    if (g_rdrand_supported) {
        LogPrintf("Using RdRand as an additional entropy source\n");
    }
}

/** Read 64 bits of entropy using rdrand.
 *
 * Must only be called when RdRand is supported.
 */
uint64_t GetRdRand() noexcept
{
    // RdRand may very rarely fail. Invoke it up to 10 times in a loop to reduce this risk.
#ifdef __i386__
    uint8_t ok;
    // Initialize to 0 to silence a compiler warning that r1 or r2 may be used
    // uninitialized. Even if rdrand fails (!ok) it will set the output to 0,
    // but there is no way that the compiler could know that.
    uint32_t r1 = 0, r2 = 0;
    for (int i = 0; i < 10; ++i) {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %eax
        if (ok) break;
    }
    for (int i = 0; i < 10; ++i) {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdrand %eax
        if (ok) break;
    }
    return (((uint64_t)r2) << 32) | r1;
#elif defined(__x86_64__) || defined(__amd64__)
    uint8_t ok;
    uint64_t r1 = 0; // See above why we initialize to 0.
    for (int i = 0; i < 10; ++i) {
        __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %rax
        if (ok) break;
    }
    return r1;
#else
#error "RdRand is only supported on x86 and x86_64"
#endif
}

/** Read 64 bits of entropy using rdseed.
 *
 * Must only be called when RdSeed is supported.
 */
uint64_t GetRdSeed() noexcept
{
    // RdSeed may fail when the HW RNG is overloaded. Loop indefinitely until enough entropy is gathered,
    // but pause after every failure.
#ifdef __i386__
    uint8_t ok;
    uint32_t r1, r2;
    do {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %eax
        if (ok) break;
        __asm__ volatile ("pause");
    } while(true);
    do {
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdseed %eax
        if (ok) break;
        __asm__ volatile ("pause");
    } while(true);
    return (((uint64_t)r2) << 32) | r1;
#elif defined(__x86_64__) || defined(__amd64__)
    uint8_t ok;
    uint64_t r1;
    do {
        __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %rax
        if (ok) break;
        __asm__ volatile ("pause");
    } while(true);
    return r1;
#else
#error "RdSeed is only supported on x86 and x86_64"
#endif
}

#elif defined(__aarch64__) && defined(HWCAP2_RNG)

bool g_rndr_supported = false;

void InitHardwareRand()
{
    if (getauxval(AT_HWCAP2) & HWCAP2_RNG) {
        g_rndr_supported = true;
    }
}

void ReportHardwareRand()
{
    // This must be done in a separate function, as InitHardwareRand() may be indirectly called
    // from global constructors, before logging is initialized.
    if (g_rndr_supported) {
        LogPrintf("Using RNDR and RNDRRS as additional entropy sources\n");
    }
}

/** Read 64 bits of entropy using rndr.
 *
 * Must only be called when RNDR is supported.
 */
uint64_t GetRNDR() noexcept
{
    uint8_t ok;
    uint64_t r1;
    do {
        // https://developer.arm.com/documentation/ddi0601/2022-12/AArch64-Registers/RNDR--Random-Number
        __asm__ volatile("mrs %0, s3_3_c2_c4_0; cset %w1, ne;"
                         : "=r"(r1), "=r"(ok)::"cc");
        if (ok) break;
        __asm__ volatile("yield");
    } while (true);
    return r1;
}

/** Read 64 bits of entropy using rndrrs.
 *
 * Must only be called when RNDRRS is supported.
 */
uint64_t GetRNDRRS() noexcept
{
    uint8_t ok;
    uint64_t r1;
    do {
        // https://developer.arm.com/documentation/ddi0601/2022-12/AArch64-Registers/RNDRRS--Reseeded-Random-Number
        __asm__ volatile("mrs %0, s3_3_c2_c4_1; cset %w1, ne;"
                         : "=r"(r1), "=r"(ok)::"cc");
        if (ok) break;
        __asm__ volatile("yield");
    } while (true);
    return r1;
}

#else
/* Access to other hardware random number generators could be added here later,
 * assuming it is sufficiently fast (in the order of a few hundred CPU cycles).
 * Slower sources should probably be invoked separately, and/or only from
 * RandAddPeriodic (which is called once a minute).
 */
void InitHardwareRand() {}
void ReportHardwareRand() {}
#endif

/** Add 64 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */
void SeedHardwareFast(CSHA512& hasher) noexcept {
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
    if (g_rdrand_supported) {
        uint64_t out = GetRdRand();
        hasher.Write((const unsigned char*)&out, sizeof(out));
        return;
    }
#elif defined(__aarch64__) && defined(HWCAP2_RNG)
    if (g_rndr_supported) {
        uint64_t out = GetRNDR();
        hasher.Write((const unsigned char*)&out, sizeof(out));
        return;
    }
#endif
}

/** Add 256 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */
void SeedHardwareSlow(CSHA512& hasher) noexcept {
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
    // When we want 256 bits of entropy, prefer RdSeed over RdRand, as it's
    // guaranteed to produce independent randomness on every call.
    if (g_rdseed_supported) {
        for (int i = 0; i < 4; ++i) {
            uint64_t out = GetRdSeed();
            hasher.Write((const unsigned char*)&out, sizeof(out));
        }
        return;
    }
    // When falling back to RdRand, XOR together the results of 1024 invocations.
    // This guarantees a reseeding occurs between each.
    if (g_rdrand_supported) {
        for (int i = 0; i < 4; ++i) {
            uint64_t out = 0;
            for (int j = 0; j < 1024; ++j) out ^= GetRdRand();
            hasher.Write((const unsigned char*)&out, sizeof(out));
        }
        return;
    }
#elif defined(__aarch64__) && defined(HWCAP2_RNG)
    if (g_rndr_supported) {
        for (int i = 0; i < 4; ++i) {
            uint64_t out = GetRNDRRS();
            hasher.Write((const unsigned char*)&out, sizeof(out));
        }
        return;
    }
#endif
}

/** Use repeated SHA512 to strengthen the randomness in seed, and feed it into hasher. */
void Strengthen(const unsigned char (&seed)[32], SteadyClock::duration dur, CSHA512& hasher) noexcept
{
    CSHA512 inner_hasher;
    inner_hasher.Write(seed, sizeof(seed));

    // Hash loop
    unsigned char buffer[64];
    const auto stop{SteadyClock::now() + dur};
    do {
        for (int i = 0; i < 1000; ++i) {
            inner_hasher.Finalize(buffer);
            inner_hasher.Reset();
            inner_hasher.Write(buffer, sizeof(buffer));
        }
        // Benchmark operation and feed it into outer hasher.
        int64_t perf = GetPerformanceCounter();
        hasher.Write((const unsigned char*)&perf, sizeof(perf));
    } while (SteadyClock::now() < stop);

    // Produce output from inner state and feed it to outer hasher.
    inner_hasher.Finalize(buffer);
    hasher.Write(buffer, sizeof(buffer));
    // Try to clean up.
    inner_hasher.Reset();
    memory_cleanse(buffer, sizeof(buffer));
}
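
// Note: Strengthen() is driven by SeedStrengthen() further below, which gives it a time budget
// of roughly 10 ms on the periodic reseed path and 100 ms at startup (see SeedPeriodic() and
// SeedStartup()).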

#ifndef WIN32
/** Fallback: get 32 bytes of system entropy from /dev/urandom. The most
 * compatible way to get cryptographic randomness on UNIX-ish platforms.
 */
[[maybe_unused]] void GetDevURandom(unsigned char *ent32)
{
    int f = open("/dev/urandom", O_RDONLY);
    if (f == -1) {
        RandFailure();
    }
    int have = 0;
    do {
        ssize_t n = read(f, ent32 + have, NUM_OS_RANDOM_BYTES - have);
        if (n <= 0 || n + have > NUM_OS_RANDOM_BYTES) {
            close(f);
            RandFailure();
        }
        have += n;
    } while (have < NUM_OS_RANDOM_BYTES);
    close(f);
}
#endif

/** Get 32 bytes of system entropy. */
void GetOSRand(unsigned char *ent32)
{
#if defined(WIN32)
    HCRYPTPROV hProvider;
    int ret = CryptAcquireContextW(&hProvider, nullptr, nullptr, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT);
    if (!ret) {
        RandFailure();
    }
    ret = CryptGenRandom(hProvider, NUM_OS_RANDOM_BYTES, ent32);
    if (!ret) {
        RandFailure();
    }
    CryptReleaseContext(hProvider, 0);
#elif defined(HAVE_GETRANDOM)
    /* Linux. From the getrandom(2) man page:
     * "If the urandom source has been initialized, reads of up to 256 bytes
     * will always return as many bytes as requested and will not be
     * interrupted by signals."
     */
    if (getrandom(ent32, NUM_OS_RANDOM_BYTES, 0) != NUM_OS_RANDOM_BYTES) {
        RandFailure();
    }
#elif defined(__OpenBSD__)
    /* OpenBSD. From the arc4random(3) man page:
       "Use of these functions is encouraged for almost all random number
        consumption because the other interfaces are deficient in either
        quality, portability, standardization, or availability."
       The function call is always successful.
     */
    arc4random_buf(ent32, NUM_OS_RANDOM_BYTES);
#elif defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX)
    if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
        RandFailure();
    }
#elif defined(HAVE_SYSCTL_ARND)
    /* FreeBSD, NetBSD and similar. It is possible for the call to return fewer
     * bytes than requested, so we need to read in a loop.
     */
    static int name[2] = {CTL_KERN, KERN_ARND};
    int have = 0;
    do {
        size_t len = NUM_OS_RANDOM_BYTES - have;
        if (sysctl(name, std::size(name), ent32 + have, &len, nullptr, 0) != 0) {
            RandFailure();
        }
        have += len;
    } while (have < NUM_OS_RANDOM_BYTES);
#else
    /* Fall back to /dev/urandom if there is no specific method implemented to
     * get system entropy for this OS.
     */
    GetDevURandom(ent32);
#endif
}

class RNGState {
    Mutex m_mutex;
    /* The RNG state consists of 256 bits of entropy, taken from one operation's
     * SHA512 output, and fed as input to the next one.
     * Carrying 256 bits of entropy should be sufficient to guarantee
     * unpredictability as long as any entropy source was ever unpredictable
     * to an attacker. To protect against situations where an attacker might
     * observe the RNG's state, fresh entropy is always mixed when
     * GetStrongRandBytes is called.
     */
    unsigned char m_state[32] GUARDED_BY(m_mutex) = {0};
    uint64_t m_counter GUARDED_BY(m_mutex) = 0;
    bool m_strongly_seeded GUARDED_BY(m_mutex) = false;

    /** If not nullopt, the output of this RNGState is redirected and drawn from here
     * (unless always_use_real_rng is passed to MixExtract). */
    std::optional<ChaCha20> m_deterministic_prng GUARDED_BY(m_mutex);

    Mutex m_events_mutex;
    CSHA256 m_events_hasher GUARDED_BY(m_events_mutex);

public:
    RNGState() noexcept
    {
        InitHardwareRand();
    }

    ~RNGState() = default;

    void AddEvent(uint32_t event_info) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex)
    {
        LOCK(m_events_mutex);

        m_events_hasher.Write((const unsigned char *)&event_info, sizeof(event_info));
        // Get the low four bytes of the performance counter. This translates to roughly the
        // subsecond part.
        uint32_t perfcounter = (GetPerformanceCounter() & 0xffffffff);
        m_events_hasher.Write((const unsigned char*)&perfcounter, sizeof(perfcounter));
    }

    /**
     * Feed (the hash of) all events added through AddEvent() to hasher.
     */
    void SeedEvents(CSHA512& hasher) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex)
    {
        // We use only SHA256 for the events hashing to get the ASM speedups we have for SHA256,
        // since we want it to be fast as network peers may be able to trigger it repeatedly.
        LOCK(m_events_mutex);

        unsigned char events_hash[32];
        m_events_hasher.Finalize(events_hash);
        hasher.Write(events_hash, 32);

        // Re-initialize the hasher with the finalized state to use later.
        m_events_hasher.Reset();
        m_events_hasher.Write(events_hash, 32);
    }

    /** Make the output of MixExtract (unless always_use_real_rng) deterministic, with the specified seed. */
    void MakeDeterministic(const uint256& seed) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
    {
        LOCK(m_mutex);
        m_deterministic_prng.emplace(MakeByteSpan(seed));
    }

    /** Extract up to 32 bytes of entropy from the RNG state, mixing in new entropy from hasher.
     *
     * If this function has never been called with strong_seed = true, false is returned.
     *
     * If always_use_real_rng is false, and MakeDeterministic has been called before, output
     * is drawn from the deterministic PRNG instead.
     */
    bool MixExtract(unsigned char* out, size_t num, CSHA512&& hasher, bool strong_seed, bool always_use_real_rng) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
    {
        assert(num <= 32);
        unsigned char buf[64];
        static_assert(sizeof(buf) == CSHA512::OUTPUT_SIZE, "Buffer needs to have hasher's output size");
        bool ret;
        {
            LOCK(m_mutex);
            ret = (m_strongly_seeded |= strong_seed);
            // Write the current state of the RNG into the hasher
            hasher.Write(m_state, 32);
            // Write a new counter number into the state
            hasher.Write((const unsigned char*)&m_counter, sizeof(m_counter));
            ++m_counter;
            // Finalize the hasher
            hasher.Finalize(buf);
            // Store the last 32 bytes of the hash output as new RNG state.
            memcpy(m_state, buf + 32, 32);
            // Handle requests for deterministic randomness.
            if (!always_use_real_rng && m_deterministic_prng.has_value()) [[unlikely]] {
                // Overwrite the beginning of buf, which will be used for output.
                m_deterministic_prng->Keystream(AsWritableBytes(Span{buf, num}));
                // Do not require strong seeding for deterministic output.
                ret = true;
            }
        }
        // If desired, copy (up to) the first 32 bytes of the hash output as output.
        if (num) {
            assert(out != nullptr);
            memcpy(out, buf, num);
        }
        // Best effort cleanup of internal state
        hasher.Reset();
        memory_cleanse(buf, 64);
        return ret;
    }
};

RNGState& GetRNGState() noexcept
{
    // This idiom relies on the guarantee that static variables are initialized
    // on first call, even when multiple parallel calls are permitted.
    static std::vector<RNGState, secure_allocator<RNGState>> g_rng(1);
    return g_rng[0];
}

/* A note on the use of noexcept in the seeding functions below:
 *
 * None of the RNG code should ever throw any exception.
 */

void SeedTimestamp(CSHA512& hasher) noexcept
{
    int64_t perfcounter = GetPerformanceCounter();
    hasher.Write((const unsigned char*)&perfcounter, sizeof(perfcounter));
}

void SeedFast(CSHA512& hasher) noexcept
{
    unsigned char buffer[32];

    // Stack pointer to indirectly commit to thread/callstack
    const unsigned char* ptr = buffer;
    hasher.Write((const unsigned char*)&ptr, sizeof(ptr));

    // Hardware randomness is very fast when available; use it always.
    SeedHardwareFast(hasher);

    // High-precision timestamp
    SeedTimestamp(hasher);
}

void SeedSlow(CSHA512& hasher, RNGState& rng) noexcept
{
    unsigned char buffer[32];

    // Everything that the 'fast' seeder includes
    SeedFast(hasher);

    // OS randomness
    GetOSRand(buffer);
    hasher.Write(buffer, sizeof(buffer));

    // Add the events hasher into the mix
    rng.SeedEvents(hasher);

    // High-precision timestamp.
    //
    // Note that we also commit to a timestamp in the Fast seeder, so we indirectly commit to a
    // benchmark of all the entropy gathering sources in this function.
    SeedTimestamp(hasher);
}

/** Extract entropy from rng, strengthen it, and feed it into hasher. */
void SeedStrengthen(CSHA512& hasher, RNGState& rng, SteadyClock::duration dur) noexcept
{
    // Generate 32 bytes of entropy from the RNG, and a copy of the entropy already in hasher.
    // Never use the deterministic PRNG for this, as the result is only used internally.
    unsigned char strengthen_seed[32];
    rng.MixExtract(strengthen_seed, sizeof(strengthen_seed), CSHA512(hasher), false, /*always_use_real_rng=*/true);
    // Strengthen the seed, and feed it into hasher.
    Strengthen(strengthen_seed, dur, hasher);
}

void SeedPeriodic(CSHA512& hasher, RNGState& rng) noexcept
{
    // Everything that the 'fast' seeder includes
    SeedFast(hasher);

    // High-precision timestamp
    SeedTimestamp(hasher);

    // Add the events hasher into the mix
    rng.SeedEvents(hasher);

    // Dynamic environment data (performance monitoring, ...)
    auto old_size = hasher.Size();
    RandAddDynamicEnv(hasher);
    LogPrint(BCLog::RAND, "Feeding %i bytes of dynamic environment data into RNG\n", hasher.Size() - old_size);

    // Strengthen for 10 ms
    SeedStrengthen(hasher, rng, 10ms);
}

void SeedStartup(CSHA512& hasher, RNGState& rng) noexcept
{
    // Gather 256 bits of hardware randomness, if available
    SeedHardwareSlow(hasher);

    // Everything that the 'slow' seeder includes.
    SeedSlow(hasher, rng);

    // Dynamic environment data (performance monitoring, ...)
    auto old_size = hasher.Size();
    RandAddDynamicEnv(hasher);

    // Static environment data
    RandAddStaticEnv(hasher);
    LogPrint(BCLog::RAND, "Feeding %i bytes of environment data into RNG\n", hasher.Size() - old_size);

    // Strengthen for 100 ms
    SeedStrengthen(hasher, rng, 100ms);
}

enum class RNGLevel {
    FAST, //!< Automatically called by GetRandBytes
    SLOW, //!< Automatically called by GetStrongRandBytes
    PERIODIC, //!< Called by RandAddPeriodic()
};

void ProcRand(unsigned char* out, int num, RNGLevel level, bool always_use_real_rng) noexcept
{
    // Make sure the RNG is initialized first (as all Seed* functions possibly need hwrand to be available).
    RNGState& rng = GetRNGState();

    assert(num <= 32);

    CSHA512 hasher;
    switch (level) {
    case RNGLevel::FAST:
        SeedFast(hasher);
        break;
    case RNGLevel::SLOW:
        SeedSlow(hasher, rng);
        break;
    case RNGLevel::PERIODIC:
        SeedPeriodic(hasher, rng);
        break;
    }

    // Combine with and update state
    if (!rng.MixExtract(out, num, std::move(hasher), false, always_use_real_rng)) {
        // On the first invocation, also seed with SeedStartup().
        CSHA512 startup_hasher;
        SeedStartup(startup_hasher, rng);
        rng.MixExtract(out, num, std::move(startup_hasher), true, always_use_real_rng);
    }
}

} // namespace

/** Internal function to make the RNG output deterministic. Only accessed from tests. */
void MakeRandDeterministicDANGEROUS(const uint256& seed) noexcept
{
    GetRNGState().MakeDeterministic(seed);
}

void GetRandBytes(Span<unsigned char> bytes) noexcept
{
    ProcRand(bytes.data(), bytes.size(), RNGLevel::FAST, /*always_use_real_rng=*/false);
}

void GetStrongRandBytes(Span<unsigned char> bytes) noexcept
{
    ProcRand(bytes.data(), bytes.size(), RNGLevel::SLOW, /*always_use_real_rng=*/true);
}

void RandAddPeriodic() noexcept
{
    ProcRand(nullptr, 0, RNGLevel::PERIODIC, /*always_use_real_rng=*/false);
}

void RandAddEvent(const uint32_t event_info) noexcept { GetRNGState().AddEvent(event_info); }
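
// Illustrative usage sketch for the public entry points above (hypothetical call sites; the
// real callers live elsewhere in the codebase):
//
//   unsigned char buf[32];
//   GetRandBytes(buf);        // fast path; may be redirected by the deterministic test mode
//   GetStrongRandBytes(buf);  // always mixes fresh OS/hardware entropy, never deterministic
//   RandAddEvent(event_id);   // cheap timing entropy fed by callers (event_id is hypothetical)
//   RandAddPeriodic();        // called from a scheduler roughly once a minute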

void FastRandomContext::RandomSeed() noexcept
{
    uint256 seed = GetRandHash();
    rng.SetKey(MakeByteSpan(seed));
    requires_seed = false;
}

void FastRandomContext::fillrand(Span<std::byte> output) noexcept
{
    if (requires_seed) RandomSeed();
    rng.Keystream(output);
}

FastRandomContext::FastRandomContext(const uint256& seed) noexcept : requires_seed(false), rng(MakeByteSpan(seed)) {}

void FastRandomContext::Reseed(const uint256& seed) noexcept
{
    FlushCache();
    requires_seed = false;
    rng = {MakeByteSpan(seed)};
}

bool Random_SanityCheck()
{
    uint64_t start = GetPerformanceCounter();

    /* This does not measure the quality of randomness, but it does test that
     * GetOSRand() overwrites all 32 bytes of the output given a maximum
     * number of tries.
     */
    static constexpr int MAX_TRIES{1024};
    uint8_t data[NUM_OS_RANDOM_BYTES];
    bool overwritten[NUM_OS_RANDOM_BYTES] = {}; /* Tracks which bytes have been overwritten at least once */
    int num_overwritten;
    int tries = 0;
    /* Loop until all bytes have been overwritten at least once, or the max number of tries is reached */
    do {
        memset(data, 0, NUM_OS_RANDOM_BYTES);
        GetOSRand(data);
        for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) {
            overwritten[x] |= (data[x] != 0);
        }

        num_overwritten = 0;
        for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) {
            if (overwritten[x]) {
                num_overwritten += 1;
            }
        }

        tries += 1;
    } while (num_overwritten < NUM_OS_RANDOM_BYTES && tries < MAX_TRIES);
    if (num_overwritten != NUM_OS_RANDOM_BYTES) return false; /* If this failed, we bailed out after too many tries */

    // Check that GetPerformanceCounter increases at least during a GetOSRand() call + 1ms sleep.
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
    uint64_t stop = GetPerformanceCounter();
    if (stop == start) return false;

    // We called GetPerformanceCounter. Use it as entropy.
    CSHA512 to_add;
    to_add.Write((const unsigned char*)&start, sizeof(start));
    to_add.Write((const unsigned char*)&stop, sizeof(stop));
    GetRNGState().MixExtract(nullptr, 0, std::move(to_add), false, /*always_use_real_rng=*/true);

    return true;
}

static constexpr std::array<std::byte, ChaCha20::KEYLEN> ZERO_KEY{};

FastRandomContext::FastRandomContext(bool fDeterministic) noexcept : requires_seed(!fDeterministic), rng(ZERO_KEY)
{
    // Note that despite always initializing with ZERO_KEY, requires_seed is set to true if not
    // fDeterministic. That means the rng will be reinitialized with a secure random key upon first
    // use.
}

void RandomInit()
{
    // Invoke RNG code to trigger initialization (if not already performed)
    ProcRand(nullptr, 0, RNGLevel::FAST, /*always_use_real_rng=*/true);

    ReportHardwareRand();
}

double MakeExponentiallyDistributed(uint64_t uniform) noexcept
{
    // To convert uniform into an exponentially-distributed double, we use two steps:
    // - Convert uniform into a uniformly-distributed double in the range [0, 1), using the
    //   expression ((uniform >> 11) * 0x1.0p-53), as described in https://prng.di.unimi.it/ under
    //   "Generating uniform doubles in the unit interval". Call this value x.
    // - Given an x uniformly distributed in [0, 1), we find an exponentially distributed value
    //   by applying the quantile function to it. For the exponential distribution with mean 1 this
    //   is F(x) = -log(1 - x).
    //
    // Combining the two, and using log1p(x) = log(1 + x), we obtain the following:
    return -std::log1p((uniform >> 11) * -0x1.0p-53);
}
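
// Worked example (illustrative): for uniform = 2^63, (uniform >> 11) * 2^-53 = 0.5, so the result
// is -log1p(-0.5) = ln(2) ≈ 0.693. The output is exponentially distributed with mean 1; a caller
// can scale it by the desired average to obtain other means.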