arm64: Add support for SMCCC TRNG entropy source

The ARM architected TRNG firmware interface, described in ARM spec
DEN0098, defines an ARM SMCCC-based interface to a true random number
generator provided by firmware.
This can be discovered via the SMCCC >=v1.1 interface, and provides
up to 192 bits of entropy per call.
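
As a rough illustration of that call convention (not part of this
patch; the ARM_SMCCC_TRNG_RND64 function ID comes from the companion
SMCCC TRNG framework patch via <linux/arm-smccc.h>), a caller asking
for the full 192 bits would find them spread across the returned
registers:

	struct arm_smccc_res res;
	u64 ent[3];

	/* first argument after the function ID: number of bits requested */
	arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 192, &res);
	if ((int)res.a0 >= 0) {		/* negative a0 signals an error */
		ent[0] = res.a3;	/* entropy bits [63:0]    */
		ent[1] = res.a2;	/* entropy bits [127:64]  */
		ent[2] = res.a1;	/* entropy bits [191:128] */
	}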

Hook this SMC call into arm64's arch_get_random_*() implementation,
coming to the rescue when the CPU does not implement the ARM v8.5 RNG
system registers.

For the detection, we piggyback on the PSCI/SMCCC discovery (which
gives us the conduit to use: hvc or smc), then try to call the
ARM_SMCCC_TRNG_VERSION function, which returns -1 if this interface is
not implemented.
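
Spelled out, the probe boils down to the check below (a sketch, not
part of this diff): arm_smccc_1_1_invoke() routes the call through
whichever conduit the earlier PSCI/SMCCC discovery selected, and fills
in SMCCC_RET_NOT_SUPPORTED (-1) itself when no SMCCC v1.1 conduit is
available, so a single negative-value test covers both "no SMCCC" and
"no TRNG":

	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_VERSION, &res);
	if ((s32)res.a0 < 0)		/* not implemented (or no conduit) */
		return false;

	return res.a0 >= 0x10000UL;	/* need at least TRNG ABI v1.0 */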

Reviewed-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
commit 38db987316 (parent a37e31fc97)
Author:    Andre Przywara <andre.przywara@arm.com>
Date:      2021-01-06 10:34:52 +0000
Committer: Will Deacon <will@kernel.org>

1 file changed, 61 insertions(+), 11 deletions(-)

@@ -4,13 +4,24 @@
 
 #ifdef CONFIG_ARCH_RANDOM
 
+#include <linux/arm-smccc.h>
 #include <linux/bug.h>
 #include <linux/kernel.h>
 #include <asm/cpufeature.h>
 
+#define ARM_SMCCC_TRNG_MIN_VERSION	0x10000UL
+
+extern bool smccc_trng_available;
+
 static inline bool __init smccc_probe_trng(void)
 {
-	return false;
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_VERSION, &res);
+	if ((s32)res.a0 < 0)
+		return false;
+
+	return res.a0 >= ARM_SMCCC_TRNG_MIN_VERSION;
 }
 
 static inline bool __arm64_rndr(unsigned long *v)
@@ -43,26 +54,55 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
 
 static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
+	struct arm_smccc_res res;
+
+	/*
+	 * We prefer the SMCCC call, since its semantics (return actual
+	 * hardware backed entropy) is closer to the idea behind this
+	 * function here than what even the RNDRRS register provides
+	 * (the output of a pseudo RNG freshly seeded by a TRNG).
+	 */
+	if (smccc_trng_available) {
+		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+		if ((int)res.a0 >= 0) {
+			*v = res.a3;
+			return true;
+		}
+	}
+
 	/*
 	 * Only support the generic interface after we have detected
 	 * the system wide capability, avoiding complexity with the
 	 * cpufeature code and with potential scheduling between CPUs
 	 * with and without the feature.
 	 */
-	if (!cpus_have_const_cap(ARM64_HAS_RNG))
-		return false;
+	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
+		return true;
 
-	return __arm64_rndr(v);
+	return false;
 }
 
 static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
+	struct arm_smccc_res res;
 	unsigned long val;
-	bool ok = arch_get_random_seed_long(&val);
 
-	*v = val;
-	return ok;
+	if (smccc_trng_available) {
+		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 32, &res);
+		if ((int)res.a0 >= 0) {
+			*v = res.a3 & GENMASK(31, 0);
+			return true;
+		}
+	}
+
+	if (cpus_have_const_cap(ARM64_HAS_RNG)) {
+		if (__arm64_rndr(&val)) {
+			*v = val;
+			return true;
+		}
+	}
+
+	return false;
 }
 
 static inline bool __init __early_cpu_has_rndr(void)
@@ -77,10 +117,20 @@ arch_get_random_seed_long_early(unsigned long *v)
 {
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (!__early_cpu_has_rndr())
-		return false;
+	if (smccc_trng_available) {
+		struct arm_smccc_res res;
 
-	return __arm64_rndr(v);
+		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+		if ((int)res.a0 >= 0) {
+			*v = res.a3;
+			return true;
+		}
+	}
+
+	if (__early_cpu_has_rndr() && __arm64_rndr(v))
+		return true;
+
+	return false;
 }
 
 #define arch_get_random_seed_long_early arch_get_random_seed_long_early
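
For context, the smccc_trng_available flag consumed above is populated
once at boot by the companion SMCCC TRNG framework patch, roughly along
these lines (a sketch assuming that patch's arm_smccc_version_init()
hook in drivers/firmware/smccc/smccc.c; not part of this diff):

	bool __ro_after_init smccc_trng_available = false;

	void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
	{
		smccc_version = version;
		smccc_conduit = conduit;

		/* The conduit is known by now, so the TRNG probe can run. */
		smccc_trng_available = smccc_probe_trng();
	}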