x86/microcode: Prepare for minimal revision check

Applying microcode late can be fatal for the running kernel when the
update changes functionality which is already in use in an incompatible
way, e.g. by removing a CPUID bit.

There is no way for admins who do not have access to the vendor's deep
technical support to decide whether late loading of such a microcode is
safe or not.

Intel has added a new field to the microcode header which specifies the
minimal microcode revision that is required to be active in the CPU in
order for the update to be safe.

Provide infrastructure for handling this in the core code and a command
line switch which allows enforcing it.

If the update is considered safe, the kernel is not tainted and the annoying
warning message is not emitted. If enforcement is enabled and the currently
loaded microcode revision is not safe for late loading, then the load is
aborted.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20231017211724.079611170@linutronix.de
Author: Thomas Gleixner <tglx@linutronix.de>, 2023-10-17 23:24:16 +02:00
Committed by: Borislav Petkov (AMD)
parent 8f849ff63b
commit 9407bda845
6 changed files with 49 additions and 6 deletions
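
Before the diff itself, a compact sketch of where this infrastructure is heading may help. It is an illustration only, not part of the patch: the field name min_req_rev and the helper classify_update() are assumptions (the actual Intel header field and its consumer arrive in a follow-up change), and the enum is a trimmed local copy so the example compiles on its own. It shows how a vendor parser could translate "the currently active revision already meets the blob's stated minimum" into the new UCODE_NEW_SAFE state, which the core loader then uses to skip the taint and the warning.

/*
 * Illustrative sketch, not part of this patch: min_req_rev and
 * classify_update() are assumed names, and the enum below is a trimmed
 * local copy so the example builds standalone.
 */
#include <stdbool.h>
#include <stdint.h>

enum ucode_state { UCODE_NEW, UCODE_NEW_SAFE, UCODE_NFOUND };

/* Decide how a parsed microcode blob is reported to the core late loader. */
enum ucode_state classify_update(uint32_t cur_rev, uint32_t min_req_rev,
                                 bool force_minrev)
{
        /*
         * The blob states a minimum revision and the CPU already runs a
         * revision at or above it: late loading is considered safe, so
         * no taint and no warning.
         */
        if (min_req_rev && cur_rev >= min_req_rev)
                return UCODE_NEW_SAFE;

        /*
         * Otherwise the update is either refused outright (enforcement
         * on) or loaded the old way, which taints the kernel.
         */
        return force_minrev ? UCODE_NFOUND : UCODE_NEW;
}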

Documentation/admin-guide/kernel-parameters.txt

@@ -3275,6 +3275,11 @@
 	mga=		[HW,DRM]
 
+	microcode.force_minrev=	[X86]
+			Format: <bool>
+			Enable or disable the microcode minimal revision
+			enforcement for the runtime microcode loader.
+
 	min_addr=nn[KMG]	[KNL,BOOT,IA-64] All physical memory below this
 			physical address is ignored.
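
For example, booting with microcode.force_minrev=1 (or =Y) should enable the enforcement and microcode.force_minrev=0 disable it, overriding the Kconfig default below; the accepted spellings are those of the usual <bool> parameter parsing.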

arch/x86/Kconfig

@@ -1326,7 +1326,28 @@ config MICROCODE_LATE_LOADING
 	  is a tricky business and should be avoided if possible. Just the sequence
 	  of synchronizing all cores and SMT threads is one fragile dance which does
 	  not guarantee that cores might not softlock after the loading. Therefore,
-	  use this at your own risk. Late loading taints the kernel too.
+	  use this at your own risk. Late loading taints the kernel unless the
+	  microcode header indicates that it is safe for late loading via the
+	  minimal revision check. This minimal revision check can be enforced on
+	  the kernel command line with "microcode.minrev=Y".
+
+config MICROCODE_LATE_FORCE_MINREV
+	bool "Enforce late microcode loading minimal revision check"
+	default n
+	depends on MICROCODE_LATE_LOADING
+	help
+	  To prevent that users load microcode late which modifies already
+	  in use features, newer microcode patches have a minimum revision field
+	  in the microcode header, which tells the kernel which minimum
+	  revision must be active in the CPU to safely load that new microcode
+	  late into the running system. If disabled the check will not
+	  be enforced but the kernel will be tainted when the minimal
+	  revision check fails.
+
+	  This minimal revision check can also be controlled via the
+	  "microcode.minrev" parameter on the kernel command line.
+
+	  If unsure say Y.
 
 config X86_MSR
 	tristate "/dev/cpu/*/msr - Model-specific register support"
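
Note that the core registers force_minrev with module_param(..., S_IRUSR | S_IWUSR) (see the core.c hunk below), so the setting is presumably also visible and writable at runtime via the standard module-parameter sysfs path, /sys/module/microcode/parameters/force_minrev, in addition to the boot-time switch documented above.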

arch/x86/kernel/cpu/microcode/amd.c

@@ -888,6 +888,9 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
 	enum ucode_state ret = UCODE_NFOUND;
 	const struct firmware *fw;
 
+	if (force_minrev)
+		return UCODE_NFOUND;
+
 	if (c->x86 >= 0x15)
 		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
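
With enforcement enabled, the AMD path refuses the late load outright by reporting UCODE_NFOUND, presumably because AMD microcode carries no comparable minimum-revision metadata at this point, so the update cannot be proven safe.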

arch/x86/kernel/cpu/microcode/core.c

@@ -46,6 +46,9 @@
 static struct microcode_ops	*microcode_ops;
 bool dis_ucode_ldr = true;
 
+bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
+module_param(force_minrev, bool, S_IRUSR | S_IWUSR);
+
 /*
  * Synchronization.
  *
@@ -531,15 +534,17 @@ static int load_cpus_stopped(void *unused)
 	return 0;
 }
 
-static int load_late_stop_cpus(void)
+static int load_late_stop_cpus(bool is_safe)
 {
 	unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0;
 	unsigned int nr_offl, offline = 0;
 	int old_rev = boot_cpu_data.microcode;
 	struct cpuinfo_x86 prev_info;
 
-	pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
-	pr_err("You should switch to early loading, if possible.\n");
+	if (!is_safe) {
+		pr_err("Late microcode loading without minimal revision check.\n");
+		pr_err("You should switch to early loading, if possible.\n");
+	}
 
 	atomic_set(&late_cpus_in, num_online_cpus());
 	atomic_set(&offline_in_nmi, 0);
@@ -589,7 +594,9 @@ static int load_late_stop_cpus(void)
 		return -EIO;
 	}
 
-	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+	if (!is_safe || failed || timedout)
+		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+
 	pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings);
 	if (failed || timedout) {
 		pr_err("load incomplete. %u CPUs timed out or failed\n",
@@ -679,7 +686,9 @@ static int load_late_locked(void)
 	switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) {
 	case UCODE_NEW:
-		return load_late_stop_cpus();
+		return load_late_stop_cpus(false);
+	case UCODE_NEW_SAFE:
+		return load_late_stop_cpus(true);
 	case UCODE_NFOUND:
 		return -ENOENT;
 	default:
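
The net effect in the core loader: the "dangerous and taints the kernel" message is only emitted for loads without a minimal revision check, and TAINT_CPU_OUT_OF_SPEC is now applied only when the update was not marked safe or when CPUs failed or timed out, so a successful UCODE_NEW_SAFE load leaves the kernel untainted.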

arch/x86/kernel/cpu/microcode/intel.c

@@ -480,6 +480,9 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter)
 	unsigned int curr_mc_size = 0;
 	u8 *new_mc = NULL, *mc = NULL;
 
+	if (force_minrev)
+		return UCODE_NFOUND;
+
 	while (iov_iter_count(iter)) {
 		struct microcode_header_intel mc_header;
 		unsigned int mc_size, data_size;
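
Like the AMD side, the Intel parser for now simply refuses to load when enforcement is active; the per-blob comparison against the header's minimum-revision field (as sketched above) is expected to arrive in a follow-up change.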

arch/x86/kernel/cpu/microcode/internal.h

@@ -13,6 +13,7 @@ struct device;
 enum ucode_state {
 	UCODE_OK	= 0,
 	UCODE_NEW,
+	UCODE_NEW_SAFE,
 	UCODE_UPDATED,
 	UCODE_NFOUND,
 	UCODE_ERROR,
@@ -88,6 +89,7 @@ static inline unsigned int x86_cpuid_family(void)
 }
 
 extern bool dis_ucode_ldr;
+extern bool force_minrev;
 
 #ifdef CONFIG_CPU_SUP_AMD
 void load_ucode_amd_bsp(unsigned int family);