x86/CPU/AMD: Track SNP host status with cc_platform_*()

The host SNP worthiness can be determined only later, after alternatives have
been patched, in snp_rmptable_init(), depending on cmdline options like
iommu=pt which is incompatible with SNP, for example.

This means that one cannot use X86_FEATURE_SEV_SNP and instead needs a
special flag for that control.

Use the newly added CC_ATTR_HOST_SEV_SNP in the appropriate places.

Move kdump_sev_callback() to its rightful place, while at it.

Fixes: 216d106c7f ("x86/sev: Add SEV-SNP host initialization support")
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Tested-by: Srikanth Aithal <sraithal@amd.com>
Link: https://lore.kernel.org/r/20240327154317.29909-6-bp@alien8.de
commit 0ecaefb303, parent bc6f707fc0
Author: Borislav Petkov (AMD) <bp@alien8.de>
Date:   2024-03-27 16:43:17 +01:00
8 changed files with 49 additions and 39 deletions
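As an illustration only (not part of this commit), here is a minimal sketch of the check pattern the diff below converts host-side code to, assuming a kernel with CONFIG_ARCH_HAS_CC_PLATFORM enabled; the helper snp_host_op() is a made-up example consumer, not a function from the patch:

    #include <linux/cc_platform.h>	/* cc_platform_has(), CC_ATTR_HOST_SEV_SNP */
    #include <linux/errno.h>

    /*
     * Hypothetical host-side consumer: test the CC attribute, which is set
     * only once the host has actually been deemed SNP-capable (and cleared
     * again later, e.g. by snp_rmptable_init() or iommu_snp_enable(), if SNP
     * cannot be used), instead of testing the raw X86_FEATURE_SEV_SNP bit.
     */
    static int snp_host_op(void)
    {
    	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
    		return -ENODEV;

    	/* ... actual SNP host work would go here ... */
    	return 0;
    }

This mirrors the conversions done below in arch/x86/virt/svm/sev.c, drivers/crypto/ccp/sev-dev.c and the other call sites.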

arch/x86/include/asm/sev.h

@@ -228,7 +228,6 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct sn
 void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 u64 snp_get_unsupported_features(u64 status);
 u64 sev_get_status(void);
-void kdump_sev_callback(void);
 void sev_show_status(void);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
@@ -258,7 +257,6 @@ static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *in
 static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
 static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
 static inline u64 sev_get_status(void) { return 0; }
-static inline void kdump_sev_callback(void) { }
 static inline void sev_show_status(void) { }
 #endif
@@ -270,6 +268,7 @@ int psmash(u64 pfn);
 int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable);
 int rmp_make_shared(u64 pfn, enum pg_level level);
 void snp_leak_pages(u64 pfn, unsigned int npages);
+void kdump_sev_callback(void);
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
 static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
@@ -282,6 +281,7 @@ static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 as
 }
 static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
 static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
+static inline void kdump_sev_callback(void) { }
 #endif
 #endif

arch/x86/kernel/cpu/amd.c

@@ -345,6 +345,28 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
 #endif
 }
 
+static void bsp_determine_snp(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+	cc_vendor = CC_VENDOR_AMD;
+
+	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
+		/*
+		 * RMP table entry format is not architectural and is defined by the
+		 * per-processor PPR. Restrict SNP support on the known CPU models
+		 * for which the RMP table entry format is currently defined for.
+		 */
+		if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
+		    c->x86 >= 0x19 && snp_probe_rmptable_info()) {
+			cc_platform_set(CC_ATTR_HOST_SEV_SNP);
+		} else {
+			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
+			cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
+		}
+	}
+#endif
+}
+
 static void bsp_init_amd(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
@@ -452,21 +474,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		break;
 	}
 
-	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
-		/*
-		 * RMP table entry format is not architectural and it can vary by processor
-		 * and is defined by the per-processor PPR. Restrict SNP support on the
-		 * known CPU model and family for which the RMP table entry format is
-		 * currently defined for.
-		 */
-		if (!boot_cpu_has(X86_FEATURE_ZEN3) &&
-		    !boot_cpu_has(X86_FEATURE_ZEN4) &&
-		    !boot_cpu_has(X86_FEATURE_ZEN5))
-			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
-		else if (!snp_probe_rmptable_info())
-			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
-	}
+	bsp_determine_snp(c);
 
 	return;
 
 warn:

arch/x86/kernel/cpu/mtrr/generic.c

@@ -108,7 +108,7 @@ static inline void k8_check_syscfg_dram_mod_en(void)
 	      (boot_cpu_data.x86 >= 0x0f)))
 		return;
 
-	if (cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return;
 
 	rdmsr(MSR_AMD64_SYSCFG, lo, hi);

arch/x86/kernel/sev.c

@@ -2284,16 +2284,6 @@ static int __init snp_init_platform_device(void)
 }
 device_initcall(snp_init_platform_device);
 
-void kdump_sev_callback(void)
-{
-	/*
-	 * Do wbinvd() on remote CPUs when SNP is enabled in order to
-	 * safely do SNP_SHUTDOWN on the local CPU.
-	 */
-	if (cpu_feature_enabled(X86_FEATURE_SEV_SNP))
-		wbinvd();
-}
-
 void sev_show_status(void)
 {
 	int i;

arch/x86/kvm/svm/sev.c

@@ -3174,7 +3174,7 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
 	unsigned long pfn;
 	struct page *p;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 
 	/*

arch/x86/virt/svm/sev.c

@@ -77,7 +77,7 @@ static int __mfd_enable(unsigned int cpu)
 {
 	u64 val;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return 0;
 
 	rdmsrl(MSR_AMD64_SYSCFG, val);
@@ -98,7 +98,7 @@ static int __snp_enable(unsigned int cpu)
 {
 	u64 val;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return 0;
 
 	rdmsrl(MSR_AMD64_SYSCFG, val);
@@ -174,11 +174,11 @@ static int __init snp_rmptable_init(void)
 	u64 rmptable_size;
 	u64 val;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return 0;
 
 	if (!amd_iommu_snp_en)
-		return 0;
+		goto nosnp;
 
 	if (!probed_rmp_size)
 		goto nosnp;
@@ -225,7 +225,7 @@ skip_enable:
 	return 0;
 
 nosnp:
-	setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
+	cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
 	return -ENOSYS;
 }
@@ -246,7 +246,7 @@ static struct rmpentry *__snp_lookup_rmpentry(u64 pfn, int *level)
 {
 	struct rmpentry *large_entry, *entry;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return ERR_PTR(-ENODEV);
 
 	entry = get_rmpentry(pfn);
@@ -363,7 +363,7 @@ int psmash(u64 pfn)
 	unsigned long paddr = pfn << PAGE_SHIFT;
 	int ret;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return -ENODEV;
 
 	if (!pfn_valid(pfn))
@@ -472,7 +472,7 @@ static int rmpupdate(u64 pfn, struct rmp_state *state)
 	unsigned long paddr = pfn << PAGE_SHIFT;
 	int ret, level;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return -ENODEV;
 
 	level = RMP_TO_PG_LEVEL(state->pagesize);
@@ -558,3 +558,13 @@ void snp_leak_pages(u64 pfn, unsigned int npages)
 	spin_unlock(&snp_leaked_pages_list_lock);
 }
 EXPORT_SYMBOL_GPL(snp_leak_pages);
+
+void kdump_sev_callback(void)
+{
+	/*
+	 * Do wbinvd() on remote CPUs when SNP is enabled in order to
+	 * safely do SNP_SHUTDOWN on the local CPU.
+	 */
+	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+		wbinvd();
+}

drivers/crypto/ccp/sev-dev.c

@@ -1090,7 +1090,7 @@ static int __sev_snp_init_locked(int *error)
 	void *arg = &data;
 	int cmd, rc = 0;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return -ENODEV;
 
 	sev = psp->sev_data;

drivers/iommu/amd/init.c

@@ -3228,7 +3228,7 @@ out:
 static void iommu_snp_enable(void)
 {
 #ifdef CONFIG_KVM_AMD_SEV
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return;
 	/*
 	 * The SNP support requires that IOMMU must be enabled, and is
@@ -3236,12 +3236,14 @@ static void iommu_snp_enable(void)
 	 */
 	if (no_iommu || iommu_default_passthrough()) {
 		pr_err("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
+		cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
 		return;
 	}
 
 	amd_iommu_snp_en = check_feature(FEATURE_SNP);
 	if (!amd_iommu_snp_en) {
 		pr_err("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
+		cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
 		return;
 	}