x86/mtrr: Let cache_aps_delayed_init replace mtrr_aps_delayed_init

In order to prepare for decoupling MTRR and PAT, replace the MTRR-specific
mtrr_aps_delayed_init flag with a more generic cache_aps_delayed_init
one.
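
For illustration, the new interface is just a bool hidden behind a pair of
accessors. Below is a minimal user-space sketch mirroring the cacheinfo.c
hunk in this patch; the main() driver, the printf and the call-site comments
are illustrative additions only, not part of the patch:

    #include <stdbool.h>
    #include <stdio.h>

    /* Flag stays private; callers go through the accessors. */
    static bool cache_aps_delayed_init;

    void set_cache_aps_delayed_init(bool val)
    {
            cache_aps_delayed_init = val;
    }

    bool get_cache_aps_delayed_init(void)
    {
            return cache_aps_delayed_init;
    }

    int main(void)
    {
            set_cache_aps_delayed_init(true);   /* cf. native_smp_prepare_cpus() */
            if (get_cache_aps_delayed_init())   /* cf. mtrr_rendezvous_handler() */
                    printf("AP cache init is delayed\n");
            set_cache_aps_delayed_init(false);  /* cf. mtrr_aps_init() */
            return 0;
    }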

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20221102074713.21493-12-jgross@suse.com
commit 955d0e0805
parent 2c15679e86
Author:    Juergen Gross <jgross@suse.com>  2022-11-02 08:47:08 +01:00
Committer: Borislav Petkov <bp@suse.de>

5 changed files with 22 additions and 17 deletions

arch/x86/include/asm/cacheinfo.h

@@ -13,5 +13,7 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
 void cache_disable(void);
 void cache_enable(void);
 void cache_cpu_init(void);
+void set_cache_aps_delayed_init(bool val);
+bool get_cache_aps_delayed_init(void);
 
 #endif /* _ASM_X86_CACHEINFO_H */

arch/x86/include/asm/mtrr.h

@@ -43,7 +43,6 @@ extern int mtrr_del(int reg, unsigned long base, unsigned long size);
 extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
 extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
 extern void mtrr_ap_init(void);
-extern void set_mtrr_aps_delayed_init(void);
 extern void mtrr_aps_init(void);
 extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
@@ -87,7 +86,6 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 {
 }
 #define mtrr_ap_init() do {} while (0)
-#define set_mtrr_aps_delayed_init() do {} while (0)
 #define mtrr_aps_init() do {} while (0)
 #define mtrr_bp_restore() do {} while (0)
 #define mtrr_disable() do {} while (0)

arch/x86/kernel/cpu/cacheinfo.c

@@ -1137,3 +1137,15 @@ void cache_cpu_init(void)
 	cache_enable();
 	local_irq_restore(flags);
 }
+
+static bool cache_aps_delayed_init;
+
+void set_cache_aps_delayed_init(bool val)
+{
+	cache_aps_delayed_init = val;
+}
+
+bool get_cache_aps_delayed_init(void)
+{
+	return cache_aps_delayed_init;
+}
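
(Note the design choice visible in this hunk: the flag's storage stays static
to cacheinfo.c, so the MTRR code and smpboot.c in the hunks below reach it
only through the set/get pair rather than by sharing a global variable.)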

arch/x86/kernel/cpu/mtrr/mtrr.c

@@ -68,7 +68,6 @@ unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 static DEFINE_MUTEX(mtrr_mutex);
 
 u64 size_or_mask, size_and_mask;
-static bool mtrr_aps_delayed_init;
 
 static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
 
@@ -175,7 +174,8 @@ static int mtrr_rendezvous_handler(void *info)
 	if (data->smp_reg != ~0U) {
 		mtrr_if->set(data->smp_reg, data->smp_base,
 			     data->smp_size, data->smp_type);
-	} else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
+	} else if (get_cache_aps_delayed_init() ||
+		   !cpu_online(smp_processor_id())) {
 		cache_cpu_init();
 	}
 	return 0;
@@ -782,7 +782,7 @@ void __init mtrr_bp_init(void)
 
 void mtrr_ap_init(void)
 {
-	if (!memory_caching_control || mtrr_aps_delayed_init)
+	if (!memory_caching_control || get_cache_aps_delayed_init())
 		return;
 
 	/*
@@ -816,14 +816,6 @@ void mtrr_save_state(void)
 	smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
 }
 
-void set_mtrr_aps_delayed_init(void)
-{
-	if (!memory_caching_control)
-		return;
-
-	mtrr_aps_delayed_init = true;
-}
-
 /*
  * Delayed MTRR initialization for all AP's
  */
@@ -837,11 +829,11 @@ void mtrr_aps_init(void)
 	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
	 * then we are done.
 	 */
-	if (!mtrr_aps_delayed_init)
+	if (!get_cache_aps_delayed_init())
 		return;
 
 	set_mtrr(~0U, 0, 0, 0);
-	mtrr_aps_delayed_init = false;
+	set_cache_aps_delayed_init(false);
 }
 
 void mtrr_bp_restore(void)

arch/x86/kernel/smpboot.c

@@ -58,6 +58,7 @@
 #include <linux/overflow.h>
 
 #include <asm/acpi.h>
+#include <asm/cacheinfo.h>
 #include <asm/desc.h>
 #include <asm/nmi.h>
 #include <asm/irq.h>
@@ -1428,7 +1429,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
 	uv_system_init();
 
-	set_mtrr_aps_delayed_init();
+	set_cache_aps_delayed_init(true);
 
 	smp_quirk_init_udelay();
@@ -1439,7 +1440,7 @@
 
 void arch_thaw_secondary_cpus_begin(void)
 {
-	set_mtrr_aps_delayed_init();
+	set_cache_aps_delayed_init(true);
 }
 
 void arch_thaw_secondary_cpus_end(void)