diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 77785587332e..7b088417d978 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3036,7 +3036,7 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fa
	 * and only if L1's MAXPHYADDR is inaccurate with respect to
	 * the hardware's).
	 */
-	if (unlikely(!shadow_mmio_value) ||
+	if (unlikely(!enable_mmio_caching) ||
	    unlikely(fault->gfn > kvm_mmu_max_gfn())) {
		*ret_val = RET_PF_EMULATE;
		return true;
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index aab78574e03d..3d611f07eee8 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -19,7 +19,7 @@
 #include <asm/memtype.h>
 #include <asm/vmx.h>
 
-static bool __read_mostly enable_mmio_caching = true;
+bool __read_mostly enable_mmio_caching = true;
 module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
 
 u64 __read_mostly shadow_host_writable_mask;
@@ -351,6 +351,9 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
	    WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
		mmio_value = 0;
 
+	if (!mmio_value)
+		enable_mmio_caching = false;
+
	shadow_mmio_value = mmio_value;
	shadow_mmio_mask = mmio_mask;
	shadow_mmio_access_mask = access_mask;
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index e4abeb5df1b1..43eceb821b31 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -5,6 +5,8 @@
 
 #include "mmu_internal.h"
 
+extern bool __read_mostly enable_mmio_caching;
+
 /*
  * A MMU present SPTE is backed by actual memory and may or may not be present
  * in hardware. E.g. MMIO SPTEs are not considered present. Use bit 11, as it
@@ -204,7 +206,7 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
 static inline bool is_mmio_spte(u64 spte)
 {
	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
-	       likely(shadow_mmio_value);
+	       likely(enable_mmio_caching);
 }
 
 static inline bool is_shadow_present_pte(u64 pte)
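
For context, below is a minimal standalone C sketch, not kernel code, of the invariant the patch establishes: MMIO caching is tracked by a single flag, enable_mmio_caching, which is force-cleared whenever the computed MMIO SPTE value ends up as zero, so readers such as is_mmio_spte() test the flag instead of inferring "caching disabled" from shadow_mmio_value == 0. The helper names mirror the patch only loosely, and the mask/value constants are arbitrary stand-ins chosen for illustration.

/*
 * Standalone sketch, NOT kernel code: a simplified model of the invariant
 * the patch establishes.  The setter force-clears enable_mmio_caching when
 * the MMIO SPTE value is 0, so the reader checks the flag rather than
 * inferring "caching disabled" from shadow_mmio_value == 0.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool enable_mmio_caching = true;	/* module param in the real code */
static uint64_t shadow_mmio_value;
static uint64_t shadow_mmio_mask;

/* Simplified stand-in for kvm_mmu_set_mmio_spte_mask(). */
static void set_mmio_spte_mask(uint64_t mmio_value, uint64_t mmio_mask)
{
	/* A zero value means there is no usable MMIO encoding: caching is off. */
	if (!mmio_value)
		enable_mmio_caching = false;

	shadow_mmio_value = mmio_value;
	shadow_mmio_mask = mmio_mask;
}

/* Simplified stand-in for is_mmio_spte(). */
static bool is_mmio_spte(uint64_t spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
	       enable_mmio_caching;
}

int main(void)
{
	/* Arbitrary mask/value pair, for the demo only. */
	set_mmio_spte_mask(0x300, 0x700);
	printf("MMIO SPTE recognized:   %d\n", is_mmio_spte(0x300));	/* 1 */

	/*
	 * With a zero value, the bare comparison "(spte & 0) == 0" would match
	 * every SPTE; the explicit flag keeps is_mmio_spte() from misfiring.
	 */
	set_mmio_spte_mask(0, 0);
	printf("after caching disabled: %d\n", is_mmio_spte(0x300));	/* 0 */
	return 0;
}

The second call is the interesting case: once the encoding is unusable, the mask/value comparison alone would match every SPTE, and the dedicated flag is what keeps MMIO SPTE recognition correct.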