random: group entropy extraction functions
commit a5ed7cb1a7732ef11959332d507889fbc39ebbb4 upstream.

This pulls all of the entropy extraction-focused functions into the
third labeled section. No functional changes.

Cc: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
committed by Greg Kroah-Hartman
parent d7e5b1925a
commit e9ff357860
@@ -895,23 +895,36 @@ size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
 }
 EXPORT_SYMBOL(get_random_bytes_arch);
 
+
+/**********************************************************************
+ *
+ * Entropy accumulation and extraction routines.
+ *
+ * Callers may add entropy via:
+ *
+ *     static void mix_pool_bytes(const void *in, size_t nbytes)
+ *
+ * After which, if added entropy should be credited:
+ *
+ *     static void credit_entropy_bits(size_t nbits)
+ *
+ * Finally, extract entropy via these two, with the latter one
+ * setting the entropy count to zero and extracting only if there
+ * is POOL_MIN_BITS entropy credited prior:
+ *
+ *     static void extract_entropy(void *buf, size_t nbytes)
+ *     static bool drain_entropy(void *buf, size_t nbytes)
+ *
+ **********************************************************************/
+
 enum {
 	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
 	POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
 };
 
-/*
- * Static global variables
- */
+/* For notifying userspace should write into /dev/random. */
 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 
-/**********************************************************************
- *
- * OS independent entropy store. Here are the functions which handle
- * storing entropy in an entropy pool.
- *
- **********************************************************************/
-
 static struct {
 	struct blake2s_state hash;
 	spinlock_t lock;
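For scale: BLAKE2S_HASH_SIZE is 32 bytes, so both constants kept as context in the hunk above work out to 256 bits — the pool in drivers/char/random.c never credits more than one full hash worth of entropy and never extracts on less. A standalone compile-time restatement of that arithmetic (not kernel code; the macro value is copied here only for illustration):

#include <assert.h>

#define BLAKE2S_HASH_SIZE 32	/* bytes; value as defined for BLAKE2s */

enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,	/* 256 */
	POOL_MIN_BITS = POOL_BITS		/* No point in settling for less. */
};

static_assert(POOL_BITS == 256, "pool credit is capped at the BLAKE2s output width");
static_assert(POOL_MIN_BITS == 256, "extraction waits for a fully credited pool");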
@@ -924,21 +937,16 @@ static struct {
 	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
 };
 
-static void extract_entropy(void *buf, size_t nbytes);
-static bool drain_entropy(void *buf, size_t nbytes);
-
-static void crng_reseed(void);
+static void _mix_pool_bytes(const void *in, size_t nbytes)
+{
+	blake2s_update(&input_pool.hash, in, nbytes);
+}
 
 /*
  * This function adds bytes into the entropy "pool". It does not
  * update the entropy estimate. The caller should call
  * credit_entropy_bits if this is appropriate.
  */
-static void _mix_pool_bytes(const void *in, size_t nbytes)
-{
-	blake2s_update(&input_pool.hash, in, nbytes);
-}
-
 static void mix_pool_bytes(const void *in, size_t nbytes)
 {
 	unsigned long flags;
@@ -948,6 +956,89 @@ static void mix_pool_bytes(const void *in, size_t nbytes)
 	spin_unlock_irqrestore(&input_pool.lock, flags);
 }
 
+static void credit_entropy_bits(size_t nbits)
+{
+	unsigned int entropy_count, orig, add;
+
+	if (!nbits)
+		return;
+
+	add = min_t(size_t, nbits, POOL_BITS);
+
+	do {
+		orig = READ_ONCE(input_pool.entropy_count);
+		entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
+	} while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);
+
+	if (crng_init < 2 && entropy_count >= POOL_MIN_BITS)
+		crng_reseed();
+}
+
+/*
+ * This is an HKDF-like construction for using the hashed collected entropy
+ * as a PRF key, that's then expanded block-by-block.
+ */
+static void extract_entropy(void *buf, size_t nbytes)
+{
+	unsigned long flags;
+	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
+	struct {
+		unsigned long rdseed[32 / sizeof(long)];
+		size_t counter;
+	} block;
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
+		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
+		    !arch_get_random_long(&block.rdseed[i]))
+			block.rdseed[i] = random_get_entropy();
+	}
+
+	spin_lock_irqsave(&input_pool.lock, flags);
+
+	/* seed = HASHPRF(last_key, entropy_input) */
+	blake2s_final(&input_pool.hash, seed);
+
+	/* next_key = HASHPRF(seed, RDSEED || 0) */
+	block.counter = 0;
+	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
+	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
+
+	spin_unlock_irqrestore(&input_pool.lock, flags);
+	memzero_explicit(next_key, sizeof(next_key));
+
+	while (nbytes) {
+		i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
+		/* output = HASHPRF(seed, RDSEED || ++counter) */
+		++block.counter;
+		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
+		nbytes -= i;
+		buf += i;
+	}
+
+	memzero_explicit(seed, sizeof(seed));
+	memzero_explicit(&block, sizeof(block));
+}
+
+/*
+ * First we make sure we have POOL_MIN_BITS of entropy in the pool, and then we
+ * set the entropy count to zero (but don't actually touch any data). Only then
+ * can we extract a new key with extract_entropy().
+ */
+static bool drain_entropy(void *buf, size_t nbytes)
+{
+	unsigned int entropy_count;
+	do {
+		entropy_count = READ_ONCE(input_pool.entropy_count);
+		if (entropy_count < POOL_MIN_BITS)
+			return false;
+	} while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
+	extract_entropy(buf, nbytes);
+	wake_up_interruptible(&random_write_wait);
+	kill_fasync(&fasync, SIGIO, POLL_OUT);
+	return true;
+}
+
 struct fast_pool {
 	union {
 		u32 pool32[4];
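The construction moved into place above is worth restating outside the diff: the running BLAKE2s pool is finalized into a per-call seed, a next_key derived from that seed immediately re-keys the pool, and the requested bytes are then expanded block-by-block by hashing a counter plus fresh RDSEED material under the seed. A minimal standalone sketch of that shape, assuming a toy hashprf() stand-in for keyed BLAKE2s (hashprf, extract_sketch, and HASH_SIZE are illustrative names, not kernel identifiers, and the stand-in hash is not cryptographic):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define HASH_SIZE 32	/* stands in for BLAKE2S_HASH_SIZE */

/* Toy keyed PRF standing in for blake2s(); illustration only, do not reuse. */
static void hashprf(uint8_t out[HASH_SIZE], const void *in, size_t inlen,
		    const uint8_t key[HASH_SIZE])
{
	const uint8_t *p = in;
	uint64_t h = 1469598103934665603ULL;
	size_t i;

	for (i = 0; i < HASH_SIZE; ++i)
		h = (h ^ key[i]) * 1099511628211ULL;
	for (i = 0; i < inlen; ++i)
		h = (h ^ p[i]) * 1099511628211ULL;
	for (i = 0; i < HASH_SIZE; ++i) {
		h = h * 6364136223846793005ULL + 1442695040888963407ULL;
		out[i] = (uint8_t)(h >> 56);
	}
}

/* Mirrors the shape of extract_entropy(): derive next_key from the seed
 * first, then expand output one hash-sized block at a time under a counter. */
static void extract_sketch(const uint8_t seed[HASH_SIZE],
			   uint8_t next_key[HASH_SIZE],
			   uint8_t *buf, size_t nbytes)
{
	struct {
		uint8_t rdseed[32];	/* fresh arch randomness in the kernel; zeroed here */
		size_t counter;
	} block = { { 0 }, 0 };
	uint8_t out[HASH_SIZE];
	size_t i;

	/* next_key = HASHPRF(seed, RDSEED || 0): re-keys the pool for next time. */
	hashprf(next_key, &block, sizeof(block), seed);

	/* output = HASHPRF(seed, RDSEED || ++counter), block by block. */
	while (nbytes) {
		i = nbytes < HASH_SIZE ? nbytes : HASH_SIZE;
		++block.counter;
		hashprf(out, &block, sizeof(block), seed);
		memcpy(buf, out, i);
		buf += i;
		nbytes -= i;
	}
}

Because the real code re-keys input_pool before dropping the lock and wipes seed and next_key with memzero_explicit(), a later compromise of the pool state does not reveal previously extracted output.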
@@ -988,24 +1079,6 @@ static void fast_mix(u32 pool[4])
 	pool[2] = c; pool[3] = d;
 }
 
-static void credit_entropy_bits(size_t nbits)
-{
-	unsigned int entropy_count, orig, add;
-
-	if (!nbits)
-		return;
-
-	add = min_t(size_t, nbits, POOL_BITS);
-
-	do {
-		orig = READ_ONCE(input_pool.entropy_count);
-		entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
-	} while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);
-
-	if (crng_init < 2 && entropy_count >= POOL_MIN_BITS)
-		crng_reseed();
-}
-
 /*********************************************************************
  *
  * Entropy input management
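The credit/drain pair relocated by this patch updates input_pool.entropy_count without taking input_pool.lock, using a READ_ONCE()/cmpxchg() retry loop: crediting saturates at POOL_BITS, and draining claims the whole count or nothing, so two concurrent drainers cannot both consume the same credited bits. A rough userspace analogue of that pattern with C11 atomics (standalone model, not kernel code; try_drain() is an illustrative name):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define POOL_BITS     256
#define POOL_MIN_BITS POOL_BITS

static _Atomic unsigned int entropy_count;

/* Saturating add, as in credit_entropy_bits(): retry until the CAS wins. */
static void credit_entropy_bits(size_t nbits)
{
	unsigned int orig, updated;
	unsigned int add = nbits < POOL_BITS ? (unsigned int)nbits : POOL_BITS;

	if (!nbits)
		return;

	do {
		orig = atomic_load(&entropy_count);
		updated = orig + add > POOL_BITS ? POOL_BITS : orig + add;
	} while (!atomic_compare_exchange_weak(&entropy_count, &orig, updated));
}

/* All-or-nothing claim, as in drain_entropy(): zero the count only if at
 * least POOL_MIN_BITS were credited, otherwise report failure. */
static bool try_drain(void)
{
	unsigned int orig;

	do {
		orig = atomic_load(&entropy_count);
		if (orig < POOL_MIN_BITS)
			return false;
	} while (!atomic_compare_exchange_weak(&entropy_count, &orig, 0));
	return true;
}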
@@ -1202,77 +1275,6 @@ void add_disk_randomness(struct gendisk *disk)
 EXPORT_SYMBOL_GPL(add_disk_randomness);
 #endif
 
-/*********************************************************************
- *
- * Entropy extraction routines
- *
- *********************************************************************/
-
-/*
- * This is an HKDF-like construction for using the hashed collected entropy
- * as a PRF key, that's then expanded block-by-block.
- */
-static void extract_entropy(void *buf, size_t nbytes)
-{
-	unsigned long flags;
-	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
-	struct {
-		unsigned long rdseed[32 / sizeof(long)];
-		size_t counter;
-	} block;
-	size_t i;
-
-	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
-		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
-		    !arch_get_random_long(&block.rdseed[i]))
-			block.rdseed[i] = random_get_entropy();
-	}
-
-	spin_lock_irqsave(&input_pool.lock, flags);
-
-	/* seed = HASHPRF(last_key, entropy_input) */
-	blake2s_final(&input_pool.hash, seed);
-
-	/* next_key = HASHPRF(seed, RDSEED || 0) */
-	block.counter = 0;
-	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
-	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
-
-	spin_unlock_irqrestore(&input_pool.lock, flags);
-	memzero_explicit(next_key, sizeof(next_key));
-
-	while (nbytes) {
-		i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
-		/* output = HASHPRF(seed, RDSEED || ++counter) */
-		++block.counter;
-		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
-		nbytes -= i;
-		buf += i;
-	}
-
-	memzero_explicit(seed, sizeof(seed));
-	memzero_explicit(&block, sizeof(block));
-}
-
-/*
- * First we make sure we have POOL_MIN_BITS of entropy in the pool, and then we
- * set the entropy count to zero (but don't actually touch any data). Only then
- * can we extract a new key with extract_entropy().
- */
-static bool drain_entropy(void *buf, size_t nbytes)
-{
-	unsigned int entropy_count;
-	do {
-		entropy_count = READ_ONCE(input_pool.entropy_count);
-		if (entropy_count < POOL_MIN_BITS)
-			return false;
-	} while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
-	extract_entropy(buf, nbytes);
-	wake_up_interruptible(&random_write_wait);
-	kill_fasync(&fasync, SIGIO, POLL_OUT);
-	return true;
-}
-
 /*
  * Each time the timer fires, we expect that we got an unpredictable
  * jump in the cycle counter. Even if the timer is running on another