x86/mm: Validate memory when changing the C-bit

Add the needed functionality to change page state from shared to private
and vice versa using the Page State Change VMGEXIT, as documented in the
GHCB specification.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20220307213356.2797205-22-brijesh.singh@amd.com

arch/x86/include/asm/sev-common.h

@@ -105,6 +105,28 @@ enum psc_op {
#define GHCB_HV_FT_SNP			BIT_ULL(0)

/* SNP Page State Change NAE event */
#define VMGEXIT_PSC_MAX_ENTRY		253

struct psc_hdr {
	u16 cur_entry;
	u16 end_entry;
	u32 reserved;
} __packed;

struct psc_entry {
	u64	cur_page	: 12,
		gfn		: 40,
		operation	: 4,
		pagesize	: 1,
		reserved	: 7;
} __packed;

struct snp_psc_desc {
	struct psc_hdr hdr;
	struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY];
} __packed;

#define GHCB_MSR_TERM_REQ		0x100
#define GHCB_MSR_TERM_REASON_SET_POS	12
#define GHCB_MSR_TERM_REASON_SET_MASK	0xf
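
For orientation, here is a minimal sketch (not part of the patch) of how one of these descriptors would be filled in to convert a single 4K page to private. The gfn value is made up; SNP_PAGE_STATE_PRIVATE comes from enum psc_op above and RMP_PG_SIZE_4K from sev.h:

/* Illustrative only: one-entry PSC request for a hypothetical gfn. */
static void build_single_entry_psc(struct snp_psc_desc *desc)
{
	memset(desc, 0, sizeof(*desc));

	desc->entries[0].gfn       = 0x1234;	/* hypothetical gfn */
	desc->entries[0].operation = SNP_PAGE_STATE_PRIVATE;
	desc->entries[0].pagesize  = RMP_PG_SIZE_4K;	/* 0 == 4K page */

	/* Process entry 0 through entry 0, i.e. exactly one entry. */
	desc->hdr.cur_entry = 0;
	desc->hdr.end_entry = 0;
}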

arch/x86/include/asm/sev.h

@@ -128,6 +128,8 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
					unsigned int npages);
void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op);
void snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
@@ -142,6 +144,8 @@ early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned
static inline void __init
early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned int npages) { }
static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { }
static inline void snp_set_memory_shared(unsigned long vaddr, unsigned int npages) { }
static inline void snp_set_memory_private(unsigned long vaddr, unsigned int npages) { }
#endif

#endif
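
These two exported helpers are not meant to be called by drivers directly; they sit behind the C-bit attribute-change hooks wired up in mem_encrypt_amd.c (last hunk below). As a hedged sketch of the intended call path through the existing set_memory API (share_page_with_hv() is a hypothetical helper, not part of the patch):

#include <asm/set_memory.h>

/*
 * Hypothetical example: make one page shared with the hypervisor.
 * set_memory_decrypted() clears the C-bit in the guest page tables
 * and, on an SNP guest, reaches snp_set_memory_shared() through the
 * amd_enc_status_change_prepare() hook shown in the last hunk.
 */
static int share_page_with_hv(void *buf)
{
	return set_memory_decrypted((unsigned long)buf, 1);
}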

arch/x86/include/uapi/asm/svm.h

@@ -108,6 +108,7 @@
#define SVM_VMGEXIT_AP_JUMP_TABLE		0x80000005
#define SVM_VMGEXIT_SET_AP_JUMP_TABLE		0
#define SVM_VMGEXIT_GET_AP_JUMP_TABLE		1
#define SVM_VMGEXIT_PSC				0x80000010
#define SVM_VMGEXIT_HV_FEATURES			0x8000fffd
#define SVM_VMGEXIT_UNSUPPORTED_EVENT		0x8000ffff
@@ -219,6 +220,7 @@
	{ SVM_VMGEXIT_NMI_COMPLETE,	"vmgexit_nmi_complete" }, \
	{ SVM_VMGEXIT_AP_HLT_LOOP,	"vmgexit_ap_hlt_loop" }, \
	{ SVM_VMGEXIT_AP_JUMP_TABLE,	"vmgexit_ap_jump_table" }, \
	{ SVM_VMGEXIT_PSC,		"vmgexit_page_state_change" }, \
	{ SVM_VMGEXIT_HV_FEATURES,	"vmgexit_hypervisor_feature" }, \
	{ SVM_EXIT_ERR,		"invalid_guest_state" }
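
SVM_VMGEXIT_PSC is the NAE event code the guest places in the GHCB's sw_exit_code field when issuing a Page State Change request; the sev.c hunk below passes it to sev_es_ghcb_hv_call(). The second hunk adds the matching string so the event is decoded as vmgexit_page_state_change in trace output.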

arch/x86/kernel/sev.c

@@ -655,6 +655,174 @@ void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op
		WARN(1, "invalid memory op %d\n", op);
}

static int vmgexit_psc(struct snp_psc_desc *desc)
{
	int cur_entry, end_entry, ret = 0;
	struct snp_psc_desc *data;
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	unsigned long flags;
	struct ghcb *ghcb;

	/*
	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
	 * a per-CPU GHCB.
	 */
	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);
	if (!ghcb) {
		ret = 1;
		goto out_unlock;
	}

	/* Copy the input desc into GHCB shared buffer */
	data = (struct snp_psc_desc *)ghcb->shared_buffer;
	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));

	/*
	 * As per the GHCB specification, the hypervisor can resume the guest
	 * before processing all the entries. Check whether all the entries
	 * are processed. If not, then keep retrying. Note, the hypervisor
	 * will update the data memory directly to indicate the status, so
	 * reference the data->hdr everywhere.
	 *
	 * The strategy here is to wait for the hypervisor to change the page
	 * state in the RMP table before the guest accesses the memory pages.
	 * If the page state change was not successful, then later memory
	 * access will result in a crash.
	 */
	cur_entry = data->hdr.cur_entry;
	end_entry = data->hdr.end_entry;

	while (data->hdr.cur_entry <= data->hdr.end_entry) {
		ghcb_set_sw_scratch(ghcb, (u64)__pa(data));

		/* This will advance the shared buffer that 'data' points to. */
		ret = sev_es_ghcb_hv_call(ghcb, true, &ctxt, SVM_VMGEXIT_PSC, 0, 0);

		/*
		 * Page State Change VMGEXIT can pass error code through
		 * exit_info_2.
		 */
		if (WARN(ret || ghcb->save.sw_exit_info_2,
			 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
			 ret, ghcb->save.sw_exit_info_2)) {
			ret = 1;
			goto out;
		}

		/* Verify that the reserved bit is not set */
		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
			ret = 1;
			goto out;
		}

		/*
		 * Sanity check that entry processing is not going backwards.
		 * This will happen only if the hypervisor is tricking us.
		 */
		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
			 "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
			ret = 1;
			goto out;
		}
	}

out:
	__sev_put_ghcb(&state);

out_unlock:
	local_irq_restore(flags);

	return ret;
}
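
As a concrete example of the retry semantics: if the hypervisor processes only entries 0 through 100 of a 253-entry descriptor before resuming the guest, data->hdr.cur_entry reads back as 101. That is still <= end_entry, so the loop re-issues the VMGEXIT with the same scratch GPA and the hypervisor continues from entry 101.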
static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
			      unsigned long vaddr_end, int op)
{
	struct psc_hdr *hdr;
	struct psc_entry *e;
	unsigned long pfn;
	int i;

	hdr = &data->hdr;
	e = data->entries;

	memset(data, 0, sizeof(*data));
	i = 0;

	while (vaddr < vaddr_end) {
		if (is_vmalloc_addr((void *)vaddr))
			pfn = vmalloc_to_pfn((void *)vaddr);
		else
			pfn = __pa(vaddr) >> PAGE_SHIFT;

		e->gfn = pfn;
		e->operation = op;
		hdr->end_entry = i;

		/*
		 * Current SNP implementation doesn't keep track of the RMP page
		 * size so use 4K for simplicity.
		 */
		e->pagesize = RMP_PG_SIZE_4K;

		vaddr = vaddr + PAGE_SIZE;
		e++;
		i++;
	}

	if (vmgexit_psc(data))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}
static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
{
	unsigned long vaddr_end, next_vaddr;
	struct snp_psc_desc *desc;

	desc = kmalloc(sizeof(*desc), GFP_KERNEL_ACCOUNT);
	if (!desc)
		panic("SNP: failed to allocate memory for PSC descriptor\n");

	vaddr = vaddr & PAGE_MASK;
	vaddr_end = vaddr + (npages << PAGE_SHIFT);

	while (vaddr < vaddr_end) {
		/* Calculate the last vaddr that fits in one struct snp_psc_desc. */
		next_vaddr = min_t(unsigned long, vaddr_end,
				   (VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE) + vaddr);

		__set_pages_state(desc, vaddr, next_vaddr, op);

		vaddr = next_vaddr;
	}

	kfree(desc);
}
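
As a worked example of the chunking: with VMGEXIT_PSC_MAX_ENTRY at 253, converting a 2MB region (512 4K pages) takes three descriptors of 253, 253 and 6 entries, i.e. three PSC VMGEXITs.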
void snp_set_memory_shared(unsigned long vaddr, unsigned int npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	pvalidate_pages(vaddr, npages, false);

	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
}

void snp_set_memory_private(unsigned long vaddr, unsigned int npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);

	pvalidate_pages(vaddr, npages, true);
}
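
Note the deliberate ordering asymmetry between the two helpers: pages being made shared are invalidated (PVALIDATE rescinded) before their RMP state changes, while pages being made private have their RMP state changed first and are validated afterwards. Either way, a page is never left validated while in the shared state, which is the invariant the mem_encrypt_amd.c hooks below depend on.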
int sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
{
	u16 startup_cs, startup_ip;

arch/x86/mm/mem_encrypt_amd.c

@@ -316,11 +316,24 @@ static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)

static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * To maintain the security guarantees of SEV-SNP guests, make sure
	 * to invalidate the memory before the encryption attribute is cleared.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
		snp_set_memory_shared(vaddr, npages);
}

/* Return true unconditionally: return value doesn't matter for the SEV side */
static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * After memory is mapped encrypted in the page table, validate it
	 * so that it is consistent with the page table updates.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && enc)
		snp_set_memory_private(vaddr, npages);

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		enc_dec_hypercall(vaddr, npages, enc);

	return true;
}