x86/paravirt: Allow hypervisor-specific VMMCALL handling under SEV-ES

Add two new paravirt callbacks to provide hypervisor-specific processor
state in the GHCB and to copy state from the hypervisor back to the
processor.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200907131613.12703-63-joro@8bytes.org
Author:    Joerg Roedel, 2020-09-07 15:16:03 +02:00
Committer: Borislav Petkov
Commit:    f6a9f8a458 (parent: cb1ad3ecea)
2 changed files with 27 additions and 1 deletion

arch/x86/include/asm/x86_init.h

@@ -4,8 +4,10 @@
 #include <asm/bootparam.h>
 
+struct ghcb;
 struct mpc_bus;
 struct mpc_cpu;
+struct pt_regs;
 struct mpc_table;
 struct cpuinfo_x86;
@@ -236,10 +238,22 @@ struct x86_legacy_features {
 /**
  * struct x86_hyper_runtime - x86 hypervisor specific runtime callbacks
  *
- * @pin_vcpu:			pin current vcpu to specified physical cpu (run rarely)
+ * @pin_vcpu:			pin current vcpu to specified physical
+ *				cpu (run rarely)
+ * @sev_es_hcall_prepare:	Load additional hypervisor-specific
+ *				state into the GHCB when doing a VMMCALL under
+ *				SEV-ES. Called from the #VC exception handler.
+ * @sev_es_hcall_finish:	Copies state from the GHCB back into the
+ *				processor (or pt_regs). Also runs checks on the
+ *				state returned from the hypervisor after a
+ *				VMMCALL under SEV-ES. Needs to return 'false'
+ *				if the checks fail. Called from the #VC
+ *				exception handler.
  */
 struct x86_hyper_runtime {
 	void (*pin_vcpu)(int cpu);
+	void (*sev_es_hcall_prepare)(struct ghcb *ghcb, struct pt_regs *regs);
+	bool (*sev_es_hcall_finish)(struct ghcb *ghcb, struct pt_regs *regs);
 };
 
 /**

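For illustration, a hypervisor's guest-support code could implement the two new callbacks roughly as sketched below. This is a minimal sketch and not part of the commit: the example_* names and the choice of rBX/rCX/rDX as the extra hypercall registers are assumptions; only the struct layout and the ghcb_set_*()/ghcb_*_is_valid() accessors come from the kernel headers.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/types.h>
#include <asm/ptrace.h>		/* struct pt_regs */
#include <asm/svm.h>		/* struct ghcb, ghcb_set_*(), ghcb_*_is_valid() */

/*
 * Load the extra register state this (hypothetical) hypercall ABI uses into
 * the GHCB. Only rAX and the CPL are forwarded by default, see
 * vc_handle_vmmcall() below.
 */
static void example_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
}

/*
 * Copy the hypervisor's results back into pt_regs and reject replies that
 * do not contain the expected state.
 */
static bool example_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	if (!(ghcb_rbx_is_valid(ghcb) &&
	      ghcb_rcx_is_valid(ghcb) &&
	      ghcb_rdx_is_valid(ghcb)))
		return false;

	regs->bx = ghcb->save.rbx;
	regs->cx = ghcb->save.rcx;
	regs->dx = ghcb->save.rdx;

	return true;
}

Such callbacks would then be wired up from the hypervisor's guest init code, typically through the .runtime.sev_es_hcall_prepare and .runtime.sev_es_hcall_finish members of its struct hypervisor_x86.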
arch/x86/kernel/sev-es.c

@@ -897,6 +897,9 @@ static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
 	ghcb_set_rax(ghcb, ctxt->regs->ax);
 	ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);
 
+	if (x86_platform.hyper.sev_es_hcall_prepare)
+		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
+
 	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
 	if (ret != ES_OK)
 		return ret;
@@ -906,6 +909,15 @@ static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
 
 	ctxt->regs->ax = ghcb->save.rax;
 
+	/*
+	 * Call sev_es_hcall_finish() after regs->ax is already set.
+	 * This allows the hypervisor handler to overwrite it again if
+	 * necessary.
+	 */
+	if (x86_platform.hyper.sev_es_hcall_finish &&
+	    !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
+		return ES_VMM_ERROR;
+
 	return ES_OK;
 }
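
For completeness, the guest side of such a hypercall could look like the sketch below. This is illustrative only, not part of the commit; the example_hypercall() name and the rAX/rBX calling convention are assumptions. Under SEV-ES the VMMCALL instruction is intercepted and raises a #VC exception, which is what ends up in vc_handle_vmmcall() above; any argument outside rAX only reaches the hypervisor if a sev_es_hcall_prepare() callback copies it into the GHCB.

/* Illustrative sketch only -- not part of this commit. */
static inline long example_hypercall(unsigned long nr, unsigned long arg)
{
	long ret;

	/*
	 * Under SEV-ES this VMMCALL is intercepted and triggers a #VC
	 * exception; vc_handle_vmmcall() then forwards the request to the
	 * hypervisor through the GHCB.
	 */
	asm volatile("vmmcall"
		     : "=a" (ret)
		     : "a" (nr), "b" (arg)
		     : "memory");

	return ret;
}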