mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-02 23:27:06 +00:00
KVM: x86/mmu: Rename page-track APIs to reflect the new reality
Rename the page-track APIs to capture that they're all about tracking writes, now that the facade of supporting multiple modes is gone. Opportunistically replace "slot" with "gfn" in anticipation of removing the @slot param from the external APIs. No functional change intended. Tested-by: Yongwei Ma <yongwei.ma@intel.com> Link: https://lore.kernel.org/r/20230729013535.1070024-25-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
338068b5be
commit
7b574863e7
5 changed files with 21 additions and 23 deletions
|
@ -4,10 +4,10 @@
|
||||||
|
|
||||||
#include <linux/kvm_types.h>
|
#include <linux/kvm_types.h>
|
||||||
|
|
||||||
void kvm_slot_page_track_add_page(struct kvm *kvm,
|
void kvm_write_track_add_gfn(struct kvm *kvm,
|
||||||
struct kvm_memory_slot *slot, gfn_t gfn);
|
struct kvm_memory_slot *slot, gfn_t gfn);
|
||||||
void kvm_slot_page_track_remove_page(struct kvm *kvm,
|
void kvm_write_track_remove_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
|
||||||
struct kvm_memory_slot *slot, gfn_t gfn);
|
gfn_t gfn);
|
||||||
|
|
||||||
#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
|
#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -831,7 +831,7 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
|
||||||
|
|
||||||
/* the non-leaf shadow pages are keeping readonly. */
|
/* the non-leaf shadow pages are keeping readonly. */
|
||||||
if (sp->role.level > PG_LEVEL_4K)
|
if (sp->role.level > PG_LEVEL_4K)
|
||||||
return kvm_slot_page_track_add_page(kvm, slot, gfn);
|
return kvm_write_track_add_gfn(kvm, slot, gfn);
|
||||||
|
|
||||||
kvm_mmu_gfn_disallow_lpage(slot, gfn);
|
kvm_mmu_gfn_disallow_lpage(slot, gfn);
|
||||||
|
|
||||||
|
@ -877,7 +877,7 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
|
||||||
slots = kvm_memslots_for_spte_role(kvm, sp->role);
|
slots = kvm_memslots_for_spte_role(kvm, sp->role);
|
||||||
slot = __gfn_to_memslot(slots, gfn);
|
slot = __gfn_to_memslot(slots, gfn);
|
||||||
if (sp->role.level > PG_LEVEL_4K)
|
if (sp->role.level > PG_LEVEL_4K)
|
||||||
return kvm_slot_page_track_remove_page(kvm, slot, gfn);
|
return kvm_write_track_remove_gfn(kvm, slot, gfn);
|
||||||
|
|
||||||
kvm_mmu_gfn_allow_lpage(slot, gfn);
|
kvm_mmu_gfn_allow_lpage(slot, gfn);
|
||||||
}
|
}
|
||||||
|
@ -2807,7 +2807,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
|
||||||
* track machinery is used to write-protect upper-level shadow pages,
|
* track machinery is used to write-protect upper-level shadow pages,
|
||||||
* i.e. this guards the role.level == 4K assertion below!
|
* i.e. this guards the role.level == 4K assertion below!
|
||||||
*/
|
*/
|
||||||
if (kvm_slot_page_track_is_active(kvm, slot, gfn))
|
if (kvm_gfn_is_write_tracked(kvm, slot, gfn))
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -4201,7 +4201,7 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
|
||||||
* guest is writing the page which is write tracked which can
|
* guest is writing the page which is write tracked which can
|
||||||
* not be fixed by page fault handler.
|
* not be fixed by page fault handler.
|
||||||
*/
|
*/
|
||||||
if (kvm_slot_page_track_is_active(vcpu->kvm, fault->slot, fault->gfn))
|
if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn))
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
|
|
|
@ -84,8 +84,8 @@ static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn,
|
||||||
* @slot: the @gfn belongs to.
|
* @slot: the @gfn belongs to.
|
||||||
* @gfn: the guest page.
|
* @gfn: the guest page.
|
||||||
*/
|
*/
|
||||||
void kvm_slot_page_track_add_page(struct kvm *kvm,
|
void kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
|
||||||
struct kvm_memory_slot *slot, gfn_t gfn)
|
gfn_t gfn)
|
||||||
{
|
{
|
||||||
if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm)))
|
if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm)))
|
||||||
return;
|
return;
|
||||||
|
@ -101,12 +101,11 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
|
||||||
if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
|
if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
|
||||||
kvm_flush_remote_tlbs(kvm);
|
kvm_flush_remote_tlbs(kvm);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
|
EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* remove the guest page from the tracking pool which stops the interception
|
* remove the guest page from the tracking pool which stops the interception
|
||||||
* of corresponding access on that page. It is the opposed operation of
|
* of corresponding access on that page.
|
||||||
* kvm_slot_page_track_add_page().
|
|
||||||
*
|
*
|
||||||
* It should be called under the protection both of mmu-lock and kvm->srcu
|
* It should be called under the protection both of mmu-lock and kvm->srcu
|
||||||
* or kvm->slots_lock.
|
* or kvm->slots_lock.
|
||||||
|
@ -115,8 +114,8 @@ EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
|
||||||
* @slot: the @gfn belongs to.
|
* @slot: the @gfn belongs to.
|
||||||
* @gfn: the guest page.
|
* @gfn: the guest page.
|
||||||
*/
|
*/
|
||||||
void kvm_slot_page_track_remove_page(struct kvm *kvm,
|
void kvm_write_track_remove_gfn(struct kvm *kvm,
|
||||||
struct kvm_memory_slot *slot, gfn_t gfn)
|
struct kvm_memory_slot *slot, gfn_t gfn)
|
||||||
{
|
{
|
||||||
if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm)))
|
if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm)))
|
||||||
return;
|
return;
|
||||||
|
@ -129,14 +128,13 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
|
||||||
*/
|
*/
|
||||||
kvm_mmu_gfn_allow_lpage(slot, gfn);
|
kvm_mmu_gfn_allow_lpage(slot, gfn);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);
|
EXPORT_SYMBOL_GPL(kvm_write_track_remove_gfn);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* check if the corresponding access on the specified guest page is tracked.
|
* check if the corresponding access on the specified guest page is tracked.
|
||||||
*/
|
*/
|
||||||
bool kvm_slot_page_track_is_active(struct kvm *kvm,
|
bool kvm_gfn_is_write_tracked(struct kvm *kvm,
|
||||||
const struct kvm_memory_slot *slot,
|
const struct kvm_memory_slot *slot, gfn_t gfn)
|
||||||
gfn_t gfn)
|
|
||||||
{
|
{
|
||||||
int index;
|
int index;
|
||||||
|
|
||||||
|
|
|
@ -15,8 +15,8 @@ int kvm_page_track_create_memslot(struct kvm *kvm,
|
||||||
struct kvm_memory_slot *slot,
|
struct kvm_memory_slot *slot,
|
||||||
unsigned long npages);
|
unsigned long npages);
|
||||||
|
|
||||||
bool kvm_slot_page_track_is_active(struct kvm *kvm,
|
bool kvm_gfn_is_write_tracked(struct kvm *kvm,
|
||||||
const struct kvm_memory_slot *slot, gfn_t gfn);
|
const struct kvm_memory_slot *slot, gfn_t gfn);
|
||||||
|
|
||||||
#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
|
#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
|
||||||
int kvm_page_track_init(struct kvm *kvm);
|
int kvm_page_track_init(struct kvm *kvm);
|
||||||
|
|
|
@ -1564,7 +1564,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
|
||||||
}
|
}
|
||||||
|
|
||||||
write_lock(&kvm->mmu_lock);
|
write_lock(&kvm->mmu_lock);
|
||||||
kvm_slot_page_track_add_page(kvm, slot, gfn);
|
kvm_write_track_add_gfn(kvm, slot, gfn);
|
||||||
write_unlock(&kvm->mmu_lock);
|
write_unlock(&kvm->mmu_lock);
|
||||||
|
|
||||||
srcu_read_unlock(&kvm->srcu, idx);
|
srcu_read_unlock(&kvm->srcu, idx);
|
||||||
|
@ -1593,7 +1593,7 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
|
||||||
}
|
}
|
||||||
|
|
||||||
write_lock(&kvm->mmu_lock);
|
write_lock(&kvm->mmu_lock);
|
||||||
kvm_slot_page_track_remove_page(kvm, slot, gfn);
|
kvm_write_track_remove_gfn(kvm, slot, gfn);
|
||||||
write_unlock(&kvm->mmu_lock);
|
write_unlock(&kvm->mmu_lock);
|
||||||
srcu_read_unlock(&kvm->srcu, idx);
|
srcu_read_unlock(&kvm->srcu, idx);
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue