mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-03 15:47:36 +00:00
x86/mtrr: Simplify mtrr_ops initialization
The way mtrr_if is initialized with the correct mtrr_ops structure is quite weird. Simplify that by dropping the vendor specific init functions and the mtrr_ops[] array. Replace those with direct assignments of the related vendor specific ops array to mtrr_if. Note that a direct assignment is okay even for 64-bit builds, where the symbol isn't present, as the related code will be subject to "dead code elimination" due to how cpu_feature_enabled() is implemented. Signed-off-by: Juergen Gross <jgross@suse.com> Signed-off-by: Borislav Petkov <bp@suse.de> Link: https://lore.kernel.org/r/20221102074713.21493-17-jgross@suse.com Signed-off-by: Borislav Petkov <bp@suse.de>
This commit is contained in:
parent
30f89e524b
commit
f8bd9f25c9
5 changed files with 10 additions and 54 deletions
|
@ -109,7 +109,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static const struct mtrr_ops amd_mtrr_ops = {
|
||||
const struct mtrr_ops amd_mtrr_ops = {
|
||||
.vendor = X86_VENDOR_AMD,
|
||||
.set = amd_set_mtrr,
|
||||
.get = amd_get_mtrr,
|
||||
|
@ -117,9 +117,3 @@ static const struct mtrr_ops amd_mtrr_ops = {
|
|||
.validate_add_page = amd_validate_add_page,
|
||||
.have_wrcomb = positive_have_wrcomb,
|
||||
};
|
||||
|
||||
int __init amd_init_mtrr(void)
|
||||
{
|
||||
set_mtrr_ops(&amd_mtrr_ops);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -111,7 +111,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
|
|||
return 0;
|
||||
}
|
||||
|
||||
static const struct mtrr_ops centaur_mtrr_ops = {
|
||||
const struct mtrr_ops centaur_mtrr_ops = {
|
||||
.vendor = X86_VENDOR_CENTAUR,
|
||||
.set = centaur_set_mcr,
|
||||
.get = centaur_get_mcr,
|
||||
|
@ -119,9 +119,3 @@ static const struct mtrr_ops centaur_mtrr_ops = {
|
|||
.validate_add_page = centaur_validate_add_page,
|
||||
.have_wrcomb = positive_have_wrcomb,
|
||||
};
|
||||
|
||||
int __init centaur_init_mtrr(void)
|
||||
{
|
||||
set_mtrr_ops(&centaur_mtrr_ops);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -234,7 +234,7 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
|
|||
post_set();
|
||||
}
|
||||
|
||||
static const struct mtrr_ops cyrix_mtrr_ops = {
|
||||
const struct mtrr_ops cyrix_mtrr_ops = {
|
||||
.vendor = X86_VENDOR_CYRIX,
|
||||
.set = cyrix_set_arr,
|
||||
.get = cyrix_get_arr,
|
||||
|
@ -242,9 +242,3 @@ static const struct mtrr_ops cyrix_mtrr_ops = {
|
|||
.validate_add_page = generic_validate_add_page,
|
||||
.have_wrcomb = positive_have_wrcomb,
|
||||
};
|
||||
|
||||
int __init cyrix_init_mtrr(void)
|
||||
{
|
||||
set_mtrr_ops(&cyrix_mtrr_ops);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -69,16 +69,8 @@ static DEFINE_MUTEX(mtrr_mutex);
|
|||
|
||||
u64 size_or_mask, size_and_mask;
|
||||
|
||||
static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
|
||||
|
||||
const struct mtrr_ops *mtrr_if;
|
||||
|
||||
void __init set_mtrr_ops(const struct mtrr_ops *ops)
|
||||
{
|
||||
if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
|
||||
mtrr_ops[ops->vendor] = ops;
|
||||
}
|
||||
|
||||
/* Returns non-zero if we have the write-combining memory type */
|
||||
static int have_wrcomb(void)
|
||||
{
|
||||
|
@ -582,20 +574,6 @@ int arch_phys_wc_index(int handle)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(arch_phys_wc_index);
|
||||
|
||||
/*
|
||||
* HACK ALERT!
|
||||
* These should be called implicitly, but we can't yet until all the initcall
|
||||
* stuff is done...
|
||||
*/
|
||||
static void __init init_ifs(void)
|
||||
{
|
||||
#ifndef CONFIG_X86_64
|
||||
amd_init_mtrr();
|
||||
cyrix_init_mtrr();
|
||||
centaur_init_mtrr();
|
||||
#endif
|
||||
}
|
||||
|
||||
/* The suspend/resume methods are only for CPU without MTRR. CPU using generic
|
||||
* MTRR driver doesn't require this
|
||||
*/
|
||||
|
@ -653,8 +631,6 @@ void __init mtrr_bp_init(void)
|
|||
{
|
||||
u32 phys_addr;
|
||||
|
||||
init_ifs();
|
||||
|
||||
phys_addr = 32;
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_MTRR)) {
|
||||
|
@ -695,21 +671,21 @@ void __init mtrr_bp_init(void)
|
|||
case X86_VENDOR_AMD:
|
||||
if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
|
||||
/* Pre-Athlon (K6) AMD CPU MTRRs */
|
||||
mtrr_if = mtrr_ops[X86_VENDOR_AMD];
|
||||
mtrr_if = &amd_mtrr_ops;
|
||||
size_or_mask = SIZE_OR_MASK_BITS(32);
|
||||
size_and_mask = 0;
|
||||
}
|
||||
break;
|
||||
case X86_VENDOR_CENTAUR:
|
||||
if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
|
||||
mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
|
||||
mtrr_if = &centaur_mtrr_ops;
|
||||
size_or_mask = SIZE_OR_MASK_BITS(32);
|
||||
size_and_mask = 0;
|
||||
}
|
||||
break;
|
||||
case X86_VENDOR_CYRIX:
|
||||
if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
|
||||
mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
|
||||
mtrr_if = &cyrix_mtrr_ops;
|
||||
size_or_mask = SIZE_OR_MASK_BITS(32);
|
||||
size_and_mask = 0;
|
||||
}
|
||||
|
|
|
@ -51,8 +51,6 @@ void fill_mtrr_var_range(unsigned int index,
|
|||
u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
|
||||
bool get_mtrr_state(void);
|
||||
|
||||
extern void __init set_mtrr_ops(const struct mtrr_ops *ops);
|
||||
|
||||
extern u64 size_or_mask, size_and_mask;
|
||||
extern const struct mtrr_ops *mtrr_if;
|
||||
|
||||
|
@ -66,10 +64,10 @@ void mtrr_state_warn(void);
|
|||
const char *mtrr_attrib_to_str(int x);
|
||||
void mtrr_wrmsr(unsigned, unsigned, unsigned);
|
||||
|
||||
/* CPU specific mtrr init functions */
|
||||
int amd_init_mtrr(void);
|
||||
int cyrix_init_mtrr(void);
|
||||
int centaur_init_mtrr(void);
|
||||
/* CPU specific mtrr_ops vectors. */
|
||||
extern const struct mtrr_ops amd_mtrr_ops;
|
||||
extern const struct mtrr_ops cyrix_mtrr_ops;
|
||||
extern const struct mtrr_ops centaur_mtrr_ops;
|
||||
|
||||
extern int changed_by_mtrr_cleanup;
|
||||
extern int mtrr_cleanup(unsigned address_bits);
|
||||
|
|
Loading…
Reference in a new issue