x86/bugs: Rename various 'ia32_cap' variables to 'x86_arch_cap_msr'

commit d0485730d2 upstream.

So we are using the 'ia32_cap' value in a number of places,
which got its name from the MSR_IA32_ARCH_CAPABILITIES register.

But there's very little 'IA32' about it - this isn't 32-bit-only
code, nor does it originate from there; it's just a historic
quirk that many Intel MSR names are prefixed with IA32_.

This is already clear from the helper function that wraps the MSR,
x86_read_arch_cap_msr(), which doesn't have the IA32 prefix.

So rename 'ia32_cap' to 'x86_arch_cap_msr' to be consistent with
its role and with the naming of the helper function.
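
For reference, the post-rename shape of the helper, as it appears in
the arch/x86/kernel/cpu/common.c hunk below, is:

	u64 x86_read_arch_cap_msr(void)
	{
		u64 x86_arch_cap_msr = 0;

		if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
			rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);

		return x86_arch_cap_msr;
	}

Callers keep testing individual ARCH_CAP_* bits against the returned
value, e.g. 'x86_arch_cap_msr & ARCH_CAP_RDCL_NO'.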

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Nikolay Borisov <nik.borisov@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/9592a18a814368e75f8f4b9d74d3883aa4fd1eaf.1712813475.git.jpoimboe@kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Ingo Molnar <mingo@kernel.org>, 2024-04-11 09:25:36 +02:00, committed by Greg Kroah-Hartman
parent bdbbe95b53, commit c83e35f475
3 changed files with 42 additions and 42 deletions

diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1724,11 +1724,11 @@ static int x2apic_state;
 
 static bool x2apic_hw_locked(void)
 {
-	u64 ia32_cap;
+	u64 x86_arch_cap_msr;
 	u64 msr;
 
-	ia32_cap = x86_read_arch_cap_msr();
-	if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) {
+	x86_arch_cap_msr = x86_read_arch_cap_msr();
+	if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
 		rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
 		return (msr & LEGACY_XAPIC_DISABLED);
 	}

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
 EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
-static u64 __ro_after_init ia32_cap;
+static u64 __ro_after_init x86_arch_cap_msr;
 
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
@@ -146,7 +146,7 @@ void __init cpu_select_mitigations(void)
 		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
 	}
 
-	ia32_cap = x86_read_arch_cap_msr();
+	x86_arch_cap_msr = x86_read_arch_cap_msr();
 
 	/* Select the proper CPU mitigations before patching alternatives: */
 	spectre_v1_select_mitigation();
@@ -343,8 +343,8 @@ static void __init taa_select_mitigation(void)
 	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
 	 * update is required.
 	 */
-	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
-	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+	if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
+	    !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
 		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
 
 	/*
@@ -434,7 +434,7 @@ static void __init mmio_select_mitigation(void)
 	 * be propagated to uncore buffers, clearing the Fill buffers on idle
 	 * is required irrespective of SMT state.
 	 */
-	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
+	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
 		static_branch_enable(&mds_idle_clear);
 
 	/*
@@ -444,10 +444,10 @@ static void __init mmio_select_mitigation(void)
 	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
 	 * affected systems.
 	 */
-	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
+	if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
 	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
 	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
-	     !(ia32_cap & ARCH_CAP_MDS_NO)))
+	     !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
 		mmio_mitigation = MMIO_MITIGATION_VERW;
 	else
 		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
@@ -505,7 +505,7 @@ static void __init rfds_select_mitigation(void)
 	if (rfds_mitigation == RFDS_MITIGATION_OFF)
 		return;
 
-	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 	else
 		rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
@@ -664,7 +664,7 @@ static void __init srbds_select_mitigation(void)
 	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
 	 * by Processor MMIO Stale Data vulnerability.
 	 */
-	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+	if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
 	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
 		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
 	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
@@ -807,7 +807,7 @@ static void __init gds_select_mitigation(void)
 	/* Will verify below that mitigation _can_ be disabled */
 
 	/* No microcode */
-	if (!(ia32_cap & ARCH_CAP_GDS_CTRL)) {
+	if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
 		if (gds_mitigation == GDS_MITIGATION_FORCE) {
 			/*
 			 * This only needs to be done on the boot CPU so do it
@@ -1540,14 +1540,14 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
 /* Disable in-kernel use of non-RSB RET predictors */
 static void __init spec_ctrl_disable_kernel_rrsba(void)
 {
-	u64 ia32_cap;
+	u64 x86_arch_cap_msr;
 
 	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
 		return;
 
-	ia32_cap = x86_read_arch_cap_msr();
+	x86_arch_cap_msr = x86_read_arch_cap_msr();
 
-	if (ia32_cap & ARCH_CAP_RRSBA) {
+	if (x86_arch_cap_msr & ARCH_CAP_RRSBA) {
 		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
 		update_spec_ctrl(x86_spec_ctrl_base);
 	}
@@ -1915,7 +1915,7 @@ static void update_mds_branch_idle(void)
 	if (sched_smt_active()) {
 		static_branch_enable(&mds_idle_clear);
 	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
-		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
+		   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
 		static_branch_disable(&mds_idle_clear);
 	}
 }
@@ -2809,7 +2809,7 @@ static const char *spectre_bhi_state(void)
 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
 		return "; BHI: SW loop, KVM: SW loop";
 	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
-		 !(ia32_cap & ARCH_CAP_RRSBA))
+		 !(x86_arch_cap_msr & ARCH_CAP_RRSBA))
 		return "; BHI: Retpoline";
 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
 		return "; BHI: Syscall hardening, KVM: SW loop";

diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1329,25 +1329,25 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
 
 u64 x86_read_arch_cap_msr(void)
 {
-	u64 ia32_cap = 0;
+	u64 x86_arch_cap_msr = 0;
 
 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
 
-	return ia32_cap;
+	return x86_arch_cap_msr;
 }
 
-static bool arch_cap_mmio_immune(u64 ia32_cap)
+static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
 {
-	return (ia32_cap & ARCH_CAP_FBSDP_NO &&
-		ia32_cap & ARCH_CAP_PSDP_NO &&
-		ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+	return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
+		x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
+		x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
 }
 
-static bool __init vulnerable_to_rfds(u64 ia32_cap)
+static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
 {
 	/* The "immunity" bit trumps everything else: */
-	if (ia32_cap & ARCH_CAP_RFDS_NO)
+	if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
 		return false;
 
 	/*
@@ -1355,7 +1355,7 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
 	 * indicate that mitigation is needed because guest is running on a
 	 * vulnerable hardware or may migrate to such hardware:
 	 */
-	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
 		return true;
 
 	/* Only consult the blacklist when there is no enumeration: */
@@ -1364,11 +1364,11 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
-	u64 ia32_cap = x86_read_arch_cap_msr();
+	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
 
 	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
 	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
-	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+	    !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
 		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
 
 	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
@@ -1380,7 +1380,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
 	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
-	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
+	    !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
 	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
@@ -1388,15 +1388,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
 	 * flag and protect from vendor-specific bugs via the whitelist.
 	 */
-	if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
+	if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
-		    !(ia32_cap & ARCH_CAP_PBRSB_NO))
+		    !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
 			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
 	}
 
 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
-	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
+	    !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
 		setup_force_cpu_bug(X86_BUG_MDS);
 		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
 			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
@@ -1415,9 +1415,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * TSX_CTRL check alone is not sufficient for cases when the microcode
 	 * update is not present or running as guest that don't get TSX_CTRL.
 	 */
-	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+	if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
 	    (cpu_has(c, X86_FEATURE_RTM) ||
-	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+	     (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
 		setup_force_cpu_bug(X86_BUG_TAA);
 
 	/*
@@ -1443,7 +1443,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
 	 * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
 	 */
-	if (!arch_cap_mmio_immune(ia32_cap)) {
+	if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
 		if (cpu_matches(cpu_vuln_blacklist, MMIO))
 			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
 		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
@@ -1451,7 +1451,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	}
 
 	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
-		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
 			setup_force_cpu_bug(X86_BUG_RETBLEED);
 	}
 
@@ -1469,15 +1469,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
 	 * which means that AVX will be disabled.
 	 */
-	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
 	    boot_cpu_has(X86_FEATURE_AVX))
 		setup_force_cpu_bug(X86_BUG_GDS);
 
-	if (vulnerable_to_rfds(ia32_cap))
+	if (vulnerable_to_rfds(x86_arch_cap_msr))
 		setup_force_cpu_bug(X86_BUG_RFDS);
 
 	/* When virtualized, eIBRS could be hidden, assume vulnerable */
-	if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
+	if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
 	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
 	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
@@ -1487,7 +1487,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		return;
 
 	/* Rogue Data Cache Load? No! */
-	if (ia32_cap & ARCH_CAP_RDCL_NO)
+	if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
 		return;
 
 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);