random: do not throw away excess input to crng_fast_load

[ Upstream commit 73c7733f12 ]

When crng_fast_load() is called by add_hwgenerator_randomness(), we
currently will advance to crng_init==1 once we've acquired 64 bytes, and
then throw away the rest of the buffer. Usually, that is not a problem:
When add_hwgenerator_randomness() gets called via EFI or DT during
setup_arch(), there won't be any IRQ randomness. Therefore, the 64 bytes
passed by EFI exactly match what is needed to advance to crng_init==1.
DT likewise seems to pass 64 bytes -- with one notable exception:
kexec, which hands over 128 bytes of entropy to the kexec'd kernel.
In that case, we'll advance to crng_init==1 once 64 of those bytes are
consumed by crng_fast_load(), but won't continue feeding the remaining
bytes onward to progress to crng_init==2. This commit fixes the issue
by feeding any leftover bytes into the next phase in
add_hwgenerator_randomness().

[linux@dominikbrodowski.net: rewrite commit message]
Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
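
As a rough illustration of the changed contract, here is a minimal
userspace sketch (not kernel code): the loader now reports how many
bytes it actually consumed instead of returning a boolean. The names
fast_load(), SEED_LEN, seed_cnt and seeded are illustrative stand-ins
for crng_fast_load(), CRNG_INIT_CNT_THRESH, crng_init_cnt and
crng_init, not the kernel's identifiers.

#include <stddef.h>

#define SEED_LEN 64                /* stand-in for CRNG_INIT_CNT_THRESH */

static unsigned char seed[SEED_LEN];
static size_t seed_cnt;            /* stand-in for crng_init_cnt */
static int seeded;                 /* stand-in for crng_init reaching 1 */

/* Mix in at most (SEED_LEN - seed_cnt) bytes and return the number of
 * bytes consumed, so the caller knows how much of the buffer is left. */
static size_t fast_load(const unsigned char *cp, size_t len)
{
	size_t ret = 0;

	while (len > 0 && seed_cnt < SEED_LEN) {
		seed[seed_cnt % SEED_LEN] ^= *cp;
		cp++; seed_cnt++; len--; ret++;
	}
	if (seed_cnt >= SEED_LEN)
		seeded = 1;
	return ret;
}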
@@ -975,12 +975,14 @@ static struct crng_state *select_crng(void)
 
 /*
  * crng_fast_load() can be called by code in the interrupt service
- * path. So we can't afford to dilly-dally.
+ * path. So we can't afford to dilly-dally. Returns the number of
+ * bytes processed from cp.
  */
-static int crng_fast_load(const char *cp, size_t len)
+static size_t crng_fast_load(const char *cp, size_t len)
 {
 	unsigned long flags;
 	char *p;
+	size_t ret = 0;
 
 	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
 		return 0;
@@ -991,7 +993,7 @@ static int crng_fast_load(const char *cp, size_t len)
 	p = (unsigned char *) &primary_crng.state[4];
 	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
 		p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
-		cp++; crng_init_cnt++; len--;
+		cp++; crng_init_cnt++; len--; ret++;
 	}
 	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
@@ -1000,7 +1002,7 @@ static int crng_fast_load(const char *cp, size_t len)
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
 	}
-	return 1;
+	return ret;
 }
 
 /*
@@ -1353,7 +1355,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	if (unlikely(crng_init == 0)) {
 		if ((fast_pool->count >= 64) &&
 		    crng_fast_load((char *) fast_pool->pool,
-				   sizeof(fast_pool->pool))) {
+				   sizeof(fast_pool->pool)) > 0) {
 			fast_pool->count = 0;
 			fast_pool->last = now;
 		}
@@ -2501,8 +2503,11 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
 	struct entropy_store *poolp = &input_pool;
 
 	if (unlikely(crng_init == 0)) {
-		crng_fast_load(buffer, count);
-		return;
+		size_t ret = crng_fast_load(buffer, count);
+		count -= ret;
+		buffer += ret;
+		if (!count || crng_init == 0)
+			return;
 	}
 
 	/* Suspend writing if we're above the trickle threshold.
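
Continuing the sketch from above, the caller-side flow of the last hunk
looks roughly like this; add_hw_randomness() and feed_input_pool() are
hypothetical stand-ins for add_hwgenerator_randomness() and the
input_pool credit path:

#include <stdio.h>

/* Stand-in for crediting entropy to the input pool after fast init. */
static void feed_input_pool(const unsigned char *buf, size_t len)
{
	printf("forwarding %zu leftover bytes to the input pool\n", len);
}

/* Mirrors the fixed flow: skip what fast_load() consumed, then forward
 * the remainder instead of discarding it. */
static void add_hw_randomness(const unsigned char *buffer, size_t count)
{
	if (!seeded) {
		size_t ret = fast_load(buffer, count);

		buffer += ret;
		count -= ret;
		if (!count || !seeded)
			return;    /* nothing left, or seeding incomplete */
	}
	feed_input_pool(buffer, count);
}

int main(void)
{
	unsigned char kexec_seed[128] = { 0 };

	/* With 128 bytes, as in the kexec case: the first 64 finish fast
	 * init, and the remaining 64 now reach the input pool instead of
	 * being thrown away. */
	add_hw_randomness(kexec_seed, sizeof(kexec_seed));
	return 0;
}

Returning the consumed count rather than a success flag is what enables
this caller-side fix; add_interrupt_randomness() keeps its old behaviour
by simply checking for a return value greater than zero.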