KVM: x86: make Hyper-V PV TLB flush use tlb_flush_guest()

The Hyper-V PV TLB flush mechanism does the TLB flush on behalf of the
guest, so doing tlb_flush_all() is overkill; switch to using
tlb_flush_guest() instead (just like the KVM PV TLB flush mechanism).
Introduce KVM_REQ_HV_TLB_FLUSH to support the change.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author:    Vitaly Kuznetsov, 2020-03-25 12:28:24 -04:00
Committer: Paolo Bonzini
Commit:    0baedd7927 (parent e64419d991)

3 changed files with 12 additions and 3 deletions
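
For readers unfamiliar with KVM's request machinery, the sketch below models
the deferred-request pattern this patch extends: a requester sets a per-vCPU
request bit, and the vCPU consumes the bit on its next entry and performs the
matching flush. This is a minimal user-space illustration, not kernel code;
the names vcpu_state, make_request, check_request, flush_tlb_all and
flush_tlb_guest are illustrative stand-ins for the KVM equivalents
(kvm_make_vcpus_request_mask(), kvm_check_request(), and the
tlb_flush/tlb_flush_guest callbacks).

/*
 * Minimal user-space sketch of KVM's deferred-request pattern; the names
 * below (vcpu_state, make_request, check_request, flush_tlb_*) are
 * illustrative stand-ins, not KVM symbols.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REQ_TLB_FLUSH     (1u << 0)	/* flush everything */
#define REQ_HV_TLB_FLUSH  (1u << 1)	/* flush only guest-visible translations */

struct vcpu_state {
	atomic_uint requests;		/* per-vCPU pending request bits */
};

/* Requester side, analogous to kvm_make_vcpus_request_mask(): set a bit. */
static void make_request(struct vcpu_state *vcpu, unsigned int req)
{
	atomic_fetch_or(&vcpu->requests, req);
}

/* Consumer side, analogous to kvm_check_request(): test and clear a bit. */
static bool check_request(struct vcpu_state *vcpu, unsigned int req)
{
	if (!(atomic_load(&vcpu->requests) & req))
		return false;
	atomic_fetch_and(&vcpu->requests, ~req);
	return true;
}

static void flush_tlb_all(void)   { puts("full TLB flush"); }
static void flush_tlb_guest(void) { puts("guest-only TLB flush"); }

/* Entry path, mirroring the request checks in vcpu_enter_guest(). */
static void enter_guest(struct vcpu_state *vcpu)
{
	if (check_request(vcpu, REQ_TLB_FLUSH))
		flush_tlb_all();
	if (check_request(vcpu, REQ_HV_TLB_FLUSH))
		flush_tlb_guest();	/* the lighter, guest-scoped flush is enough */
}

int main(void)
{
	struct vcpu_state vcpu = { .requests = 0 };

	/* A Hyper-V TLB flush hypercall now queues only the guest-scoped request. */
	make_request(&vcpu, REQ_HV_TLB_FLUSH);
	enter_guest(&vcpu);
	return 0;
}

With the patch, the Hyper-V hypercall path queues KVM_REQ_HV_TLB_FLUSH instead
of KVM_REQ_TLB_FLUSH, so the vCPU performs the cheaper guest-scoped flush on
its next entry, as the diffs below show.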

arch/x86/include/asm/kvm_host.h

@@ -83,6 +83,8 @@
 #define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)
 #define KVM_REQ_APICV_UPDATE \
 	KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_HV_TLB_FLUSH \
+	KVM_ARCH_REQ_FLAGS(26, KVM_REQUEST_NO_WAKEUP)
 
 #define CR0_RESERVED_BITS \
 	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \

arch/x86/kvm/hyperv.c

@@ -1425,8 +1425,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
 	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
 	 * analyze it here, flush TLB regardless of the specified address space.
 	 */
-	kvm_make_vcpus_request_mask(kvm,
-				    KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
+	kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
 				    vcpu_mask, &hv_vcpu->tlb_flush);
 
 ret_success:

arch/x86/kvm/x86.c

@@ -2696,6 +2696,12 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 	kvm_x86_ops.tlb_flush(vcpu, invalidate_gpa);
 }
 
+static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
+{
+	++vcpu->stat.tlb_flush;
+	kvm_x86_ops.tlb_flush_guest(vcpu);
+}
+
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
 	struct kvm_host_map map;
@@ -2719,7 +2725,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
 			       st->preempted & KVM_VCPU_FLUSH_TLB);
 	if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
-		kvm_x86_ops.tlb_flush_guest(vcpu);
+		kvm_vcpu_flush_tlb_guest(vcpu);
 
 	vcpu->arch.st.preempted = 0;
@@ -8218,6 +8224,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			kvm_mmu_load_pgd(vcpu);
 		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 			kvm_vcpu_flush_tlb(vcpu, true);
+		if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
+			kvm_vcpu_flush_tlb_guest(vcpu);
 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
 			r = 0;