KVM: SVM: Delay restoration of host MSR_TSC_AUX until return to userspace

Use KVM's "user return MSRs" framework to defer restoring the host's
MSR_TSC_AUX until the CPU returns to userspace.  Add/improve comments to
clarify why MSR_TSC_AUX is intercepted on both RDMSR and WRMSR, and why
it's safe for KVM to keep the guest's value loaded even if KVM is
scheduled out.
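
For reference, the adopted flow looks roughly like the sketch below.  Both
helpers are the existing x86-common user-return API and TSC_AUX_URET_SLOT is
the constant added by this patch, but the snippet is illustrative, not a
verbatim excerpt:

        /* Hardware setup: claim a user-return slot for MSR_TSC_AUX. */
        if (boot_cpu_has(X86_FEATURE_RDTSCP))
                kvm_define_user_return_msr(TSC_AUX_URET_SLOT, MSR_TSC_AUX);

        /*
         * Guest-state load: write the guest's value.  The framework tracks
         * the host's value and arms a user-return notifier, so the host MSR
         * is restored on the next actual return to userspace instead of on
         * every vCPU put.
         */
        if (static_cpu_has(X86_FEATURE_RDTSCP))
                kvm_set_user_return_msr(TSC_AUX_URET_SLOT, svm->tsc_aux, -1ull);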

Cc: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210423223404.3860547-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author:    Sean Christopherson <seanjc@google.com>
Date:      2021-04-23 15:34:04 -07:00
Committer: Paolo Bonzini
Parent:    dbd6127375
Commit:    844d69c26d

2 changed files with 24 additions and 36 deletions

arch/x86/kvm/svm/svm.c

@@ -213,6 +213,15 @@ struct kvm_ldttss_desc {
 
 DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
 
+/*
+ * Only MSR_TSC_AUX is switched via the user return hook.  EFER is switched via
+ * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
+ *
+ * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
+ * defer the restoration of TSC_AUX until the CPU returns to userspace.
+ */
+#define TSC_AUX_URET_SLOT 0
+
 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
 
 #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
@@ -958,6 +967,9 @@ static __init int svm_hardware_setup(void)
                kvm_tsc_scaling_ratio_frac_bits = 32;
        }
 
+       if (boot_cpu_has(X86_FEATURE_RDTSCP))
+               kvm_define_user_return_msr(TSC_AUX_URET_SLOT, MSR_TSC_AUX);
+
        /* Check for pause filtering support */
        if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
                pause_filter_count = 0;
@@ -1423,19 +1435,10 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
-       unsigned int i;
 
        if (svm->guest_state_loaded)
                return;
 
-       /*
-        * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save
-        * area (non-sev-es). Save ones that aren't so we can restore them
-        * individually later.
-        */
-       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-               rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
-
        /*
         * Save additional host state that will be restored on VMEXIT (sev-es)
         * or subsequent vmload of host save area.
@@ -1454,29 +1457,15 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
                }
        }
 
-       /* This assumes that the kernel never uses MSR_TSC_AUX */
        if (static_cpu_has(X86_FEATURE_RDTSCP))
-               wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
+               kvm_set_user_return_msr(TSC_AUX_URET_SLOT, svm->tsc_aux, -1ull);
 
        svm->guest_state_loaded = true;
 }
 
 static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
 {
-       struct vcpu_svm *svm = to_svm(vcpu);
-       unsigned int i;
-
-       if (!svm->guest_state_loaded)
-               return;
-
-       /*
-        * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save
-        * area (non-sev-es). Restore the ones that weren't.
-        */
-       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-               wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
-
-       svm->guest_state_loaded = false;
+       to_svm(vcpu)->guest_state_loaded = false;
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -2790,6 +2779,7 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
+       int r;
 
        u32 ecx = msr->index;
        u64 data = msr->data;
@@ -2910,11 +2900,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                        data = (u32)data;
 
                /*
-                * This is rare, so we update the MSR here instead of using
-                * direct_access_msrs.  Doing that would require a rdmsr in
-                * svm_vcpu_put.
+                * TSC_AUX is usually changed only during boot and never read
+                * directly.  Intercept TSC_AUX instead of exposing it to the
+                * guest via direct_access_msrs, and switch it via user return.
                 */
-               wrmsrl(MSR_TSC_AUX, data);
+               preempt_disable();
+               r = kvm_set_user_return_msr(TSC_AUX_URET_SLOT, data, -1ull);
+               preempt_enable();
+               if (r)
+                       return 1;
+
                svm->tsc_aux = data;
                break;
        case MSR_IA32_DEBUGCTLMSR:

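Why the preempt_disable()/preempt_enable() pair in svm_set_msr(): the
user-return values are tracked per-CPU, so the slot update and the WRMSR it
may perform must not be split across CPUs by a migration.  The -1ull argument
is a bit mask selecting which bits of the MSR to update; all-ones writes the
complete value.  A sketch of the common helper, paraphrased from kvm/x86.c of
this era (identifier spellings are from memory and may differ):

        int kvm_set_user_return_msr(unsigned int slot, u64 value, u64 mask)
        {
                struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
                int err;

                /* Merge the requested bits with the host's value for the rest. */
                value = (value & mask) | (msrs->values[slot].host & ~mask);
                if (value == msrs->values[slot].curr)
                        return 0;

                /* wrmsrl_safe() fails gracefully on #GP, letting the caller
                 * inject a fault into the guest instead of crashing the host. */
                err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
                if (err)
                        return 1;

                msrs->values[slot].curr = value;
                if (!msrs->registered) {
                        msrs->urn.on_user_return = kvm_on_user_return;
                        user_return_notifier_register(&msrs->urn);
                        msrs->registered = true;
                }
                return 0;
        }
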
arch/x86/kvm/svm/svm.h

@@ -23,11 +23,6 @@
 
 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
 
-static const u32 host_save_user_msrs[] = {
-       MSR_TSC_AUX,
-};
-#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
-
 #define IOPM_SIZE PAGE_SIZE * 3
 #define MSRPM_SIZE PAGE_SIZE * 2
@@ -129,8 +124,6 @@ struct vcpu_svm {
 
        u64 next_rip;
 
-       u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
-
        u64 spec_ctrl;
        /*
         * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
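
With host_user_msrs gone from vcpu_svm, the restore side lives entirely in
the common code: a user-return notifier writes the host values back when the
CPU actually returns to userspace, and skips the WRMSR entirely when the
guest never changed the value.  Paraphrased sketch (again, identifier
spellings may differ from the tree at this commit):

        static void kvm_on_user_return(struct user_return_notifier *urn)
        {
                struct kvm_user_return_msrs *msrs =
                        container_of(urn, struct kvm_user_return_msrs, urn);
                struct kvm_user_return_msr_values *values;
                unsigned int slot;

                for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
                        values = &msrs->values[slot];
                        /* Only pay for a WRMSR if the guest value differs. */
                        if (values->host != values->curr) {
                                wrmsrl(kvm_uret_msrs_list[slot], values->host);
                                values->curr = values->host;
                        }
                }
        }

This is also why the new comment in svm.c stresses that the kernel itself
never uses RDTSCP or RDPID: between VM-Exit and the next return to userspace,
MSR_TSC_AUX still holds the guest's value, which is only safe because nothing
in the kernel consumes it.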