KVM: PPC: Convert to the gfn-based MMU notifier callbacks

Move PPC to the gfn-based MMU notifier APIs, and update all 15 bajillion
PPC-internal hooks to work with gfns instead of hvas.

No meaningful functional change intended, though the exact order of
operations is slightly different since the memslot lookups occur before
calling into arch code.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210402005658.3024832-6-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Sean Christopherson 2021-04-01 17:56:53 -07:00 committed by Paolo Bonzini
parent d923ff2584
commit b1c5356e87
10 changed files with 95 additions and 173 deletions

View file

@@ -210,12 +210,12 @@ extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
unsigned int lpid); unsigned int lpid);
extern int kvmppc_radix_init(void); extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void); extern void kvmppc_radix_exit(void);
extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, extern bool kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn); unsigned long gfn);
extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn); unsigned long gfn);
extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn); unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm, extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
struct kvm_memory_slot *memslot, unsigned long *map); struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm, extern void kvmppc_radix_flush_memslot(struct kvm *kvm,

View file

@@ -55,6 +55,7 @@
#include <linux/mmu_notifier.h> #include <linux/mmu_notifier.h>
#define KVM_ARCH_WANT_MMU_NOTIFIER #define KVM_ARCH_WANT_MMU_NOTIFIER
#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
#define HPTEG_CACHE_NUM (1 << 15) #define HPTEG_CACHE_NUM (1 << 15)
#define HPTEG_HASH_BITS_PTE 13 #define HPTEG_HASH_BITS_PTE 13

View file

@@ -281,11 +281,10 @@ struct kvmppc_ops {
const struct kvm_memory_slot *old, const struct kvm_memory_slot *old,
const struct kvm_memory_slot *new, const struct kvm_memory_slot *new,
enum kvm_mr_change change); enum kvm_mr_change change);
int (*unmap_hva_range)(struct kvm *kvm, unsigned long start, bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
unsigned long end); bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end); bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
int (*test_age_hva)(struct kvm *kvm, unsigned long hva); bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
void (*free_memslot)(struct kvm_memory_slot *slot); void (*free_memslot)(struct kvm_memory_slot *slot);
int (*init_vm)(struct kvm *kvm); int (*init_vm)(struct kvm *kvm);
void (*destroy_vm)(struct kvm *kvm); void (*destroy_vm)(struct kvm *kvm);

View file

@@ -834,26 +834,24 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change); kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
} }
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
unsigned flags)
{ {
return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); return kvm->arch.kvm_ops->unmap_gfn_range(kvm, range);
} }
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{ {
return kvm->arch.kvm_ops->age_hva(kvm, start, end); return kvm->arch.kvm_ops->age_gfn(kvm, range);
} }
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{ {
return kvm->arch.kvm_ops->test_age_hva(kvm, hva); return kvm->arch.kvm_ops->test_age_gfn(kvm, range);
} }
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{ {
kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte); return kvm->arch.kvm_ops->set_spte_gfn(kvm, range);
return 0;
} }
int kvmppc_core_init_vm(struct kvm *kvm) int kvmppc_core_init_vm(struct kvm *kvm)

View file

@@ -9,12 +9,10 @@
extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm, extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
struct kvm_memory_slot *memslot); struct kvm_memory_slot *memslot);
extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, extern bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range);
unsigned long end); extern bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, extern bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
unsigned long end); extern bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu); extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);

View file

@@ -752,51 +752,6 @@ void kvmppc_rmap_reset(struct kvm *kvm)
srcu_read_unlock(&kvm->srcu, srcu_idx); srcu_read_unlock(&kvm->srcu, srcu_idx);
} }
typedef int (*hva_handler_fn)(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn);
static int kvm_handle_hva_range(struct kvm *kvm,
unsigned long start,
unsigned long end,
hva_handler_fn handler)
{
int ret;
int retval = 0;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots) {
unsigned long hva_start, hva_end;
gfn_t gfn, gfn_end;
hva_start = max(start, memslot->userspace_addr);
hva_end = min(end, memslot->userspace_addr +
(memslot->npages << PAGE_SHIFT));
if (hva_start >= hva_end)
continue;
/*
* {gfn(page) | page intersects with [hva_start, hva_end)} =
* {gfn, gfn+1, ..., gfn_end-1}.
*/
gfn = hva_to_gfn_memslot(hva_start, memslot);
gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
for (; gfn < gfn_end; ++gfn) {
ret = handler(kvm, memslot, gfn);
retval |= ret;
}
}
return retval;
}
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
hva_handler_fn handler)
{
return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
}
/* Must be called with both HPTE and rmap locked */ /* Must be called with both HPTE and rmap locked */
static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
struct kvm_memory_slot *memslot, struct kvm_memory_slot *memslot,
@@ -840,8 +795,8 @@ static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
} }
} }
static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn) unsigned long gfn)
{ {
unsigned long i; unsigned long i;
__be64 *hptep; __be64 *hptep;
@@ -874,16 +829,15 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
unlock_rmap(rmapp); unlock_rmap(rmapp);
__unlock_hpte(hptep, be64_to_cpu(hptep[0])); __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
} }
return 0; return false;
} }
int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end) bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{ {
hva_handler_fn handler; if (kvm_is_radix(kvm))
return kvm_unmap_radix(kvm, range->slot, range->start);
handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp; return kvm_unmap_rmapp(kvm, range->slot, range->start);
kvm_handle_hva_range(kvm, start, end, handler);
return 0;
} }
void kvmppc_core_flush_memslot_hv(struct kvm *kvm, void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
@@ -913,8 +867,8 @@ void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
} }
} }
static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn) unsigned long gfn)
{ {
struct revmap_entry *rev = kvm->arch.hpt.rev; struct revmap_entry *rev = kvm->arch.hpt.rev;
unsigned long head, i, j; unsigned long head, i, j;
@@ -968,26 +922,26 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
return ret; return ret;
} }
int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end) bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{ {
hva_handler_fn handler; if (kvm_is_radix(kvm))
kvm_age_radix(kvm, range->slot, range->start);
handler = kvm_is_radix(kvm) ? kvm_age_radix : kvm_age_rmapp; return kvm_age_rmapp(kvm, range->slot, range->start);
return kvm_handle_hva_range(kvm, start, end, handler);
} }
static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn) unsigned long gfn)
{ {
struct revmap_entry *rev = kvm->arch.hpt.rev; struct revmap_entry *rev = kvm->arch.hpt.rev;
unsigned long head, i, j; unsigned long head, i, j;
unsigned long *hp; unsigned long *hp;
int ret = 1; bool ret = true;
unsigned long *rmapp; unsigned long *rmapp;
rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
if (*rmapp & KVMPPC_RMAP_REFERENCED) if (*rmapp & KVMPPC_RMAP_REFERENCED)
return 1; return true;
lock_rmap(rmapp); lock_rmap(rmapp);
if (*rmapp & KVMPPC_RMAP_REFERENCED) if (*rmapp & KVMPPC_RMAP_REFERENCED)
@@ -1002,27 +956,27 @@ static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
goto out; goto out;
} while ((i = j) != head); } while ((i = j) != head);
} }
ret = 0; ret = false;
out: out:
unlock_rmap(rmapp); unlock_rmap(rmapp);
return ret; return ret;
} }
int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva) bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{ {
hva_handler_fn handler; if (kvm_is_radix(kvm))
kvm_test_age_radix(kvm, range->slot, range->start);
handler = kvm_is_radix(kvm) ? kvm_test_age_radix : kvm_test_age_rmapp; return kvm_test_age_rmapp(kvm, range->slot, range->start);
return kvm_handle_hva(kvm, hva, handler);
} }
void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte) bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{ {
hva_handler_fn handler; if (kvm_is_radix(kvm))
return kvm_unmap_radix(kvm, range->slot, range->start);
handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp; return kvm_unmap_rmapp(kvm, range->slot, range->start);
kvm_handle_hva(kvm, hva, handler);
} }
static int vcpus_running(struct kvm *kvm) static int vcpus_running(struct kvm *kvm)

View file

@@ -993,8 +993,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
} }
/* Called with kvm->mmu_lock held */ /* Called with kvm->mmu_lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, bool kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn) unsigned long gfn)
{ {
pte_t *ptep; pte_t *ptep;
unsigned long gpa = gfn << PAGE_SHIFT; unsigned long gpa = gfn << PAGE_SHIFT;
@@ -1002,24 +1002,24 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) { if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT); uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
return 0; return false;
} }
ptep = find_kvm_secondary_pte(kvm, gpa, &shift); ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep)) if (ptep && pte_present(*ptep))
kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
kvm->arch.lpid); kvm->arch.lpid);
return 0; return false;
} }
/* Called with kvm->mmu_lock held */ /* Called with kvm->mmu_lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn) unsigned long gfn)
{ {
pte_t *ptep; pte_t *ptep;
unsigned long gpa = gfn << PAGE_SHIFT; unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift; unsigned int shift;
int ref = 0; bool ref = false;
unsigned long old, *rmapp; unsigned long old, *rmapp;
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
@@ -1035,26 +1035,27 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0, kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
old & PTE_RPN_MASK, old & PTE_RPN_MASK,
1UL << shift); 1UL << shift);
ref = 1; ref = true;
} }
return ref; return ref;
} }
/* Called with kvm->mmu_lock held */ /* Called with kvm->mmu_lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn) unsigned long gfn)
{ {
pte_t *ptep; pte_t *ptep;
unsigned long gpa = gfn << PAGE_SHIFT; unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift; unsigned int shift;
int ref = 0; bool ref = false;
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return ref; return ref;
ptep = find_kvm_secondary_pte(kvm, gpa, &shift); ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep)) if (ptep && pte_present(*ptep) && pte_young(*ptep))
ref = 1; ref = true;
return ref; return ref;
} }

View file

@@ -4770,7 +4770,7 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
kvmhv_release_all_nested(kvm); kvmhv_release_all_nested(kvm);
kvmppc_rmap_reset(kvm); kvmppc_rmap_reset(kvm);
kvm->arch.process_table = 0; kvm->arch.process_table = 0;
/* Mutual exclusion with kvm_unmap_hva_range etc. */ /* Mutual exclusion with kvm_unmap_gfn_range etc. */
spin_lock(&kvm->mmu_lock); spin_lock(&kvm->mmu_lock);
kvm->arch.radix = 0; kvm->arch.radix = 0;
spin_unlock(&kvm->mmu_lock); spin_unlock(&kvm->mmu_lock);
@@ -4792,7 +4792,7 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
if (err) if (err)
return err; return err;
kvmppc_rmap_reset(kvm); kvmppc_rmap_reset(kvm);
/* Mutual exclusion with kvm_unmap_hva_range etc. */ /* Mutual exclusion with kvm_unmap_gfn_range etc. */
spin_lock(&kvm->mmu_lock); spin_lock(&kvm->mmu_lock);
kvm->arch.radix = 1; kvm->arch.radix = 1;
spin_unlock(&kvm->mmu_lock); spin_unlock(&kvm->mmu_lock);
@@ -5654,10 +5654,10 @@ static struct kvmppc_ops kvm_ops_hv = {
.flush_memslot = kvmppc_core_flush_memslot_hv, .flush_memslot = kvmppc_core_flush_memslot_hv,
.prepare_memory_region = kvmppc_core_prepare_memory_region_hv, .prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
.commit_memory_region = kvmppc_core_commit_memory_region_hv, .commit_memory_region = kvmppc_core_commit_memory_region_hv,
.unmap_hva_range = kvm_unmap_hva_range_hv, .unmap_gfn_range = kvm_unmap_gfn_range_hv,
.age_hva = kvm_age_hva_hv, .age_gfn = kvm_age_gfn_hv,
.test_age_hva = kvm_test_age_hva_hv, .test_age_gfn = kvm_test_age_gfn_hv,
.set_spte_hva = kvm_set_spte_hva_hv, .set_spte_gfn = kvm_set_spte_gfn_hv,
.free_memslot = kvmppc_core_free_memslot_hv, .free_memslot = kvmppc_core_free_memslot_hv,
.init_vm = kvmppc_core_init_vm_hv, .init_vm = kvmppc_core_init_vm_hv,
.destroy_vm = kvmppc_core_destroy_vm_hv, .destroy_vm = kvmppc_core_destroy_vm_hv,

View file

@@ -425,61 +425,39 @@ static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
} }
/************* MMU Notifiers *************/ /************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start, static bool do_kvm_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
unsigned long end)
{ {
long i; long i;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
slots = kvm_memslots(kvm); kvm_for_each_vcpu(i, vcpu, kvm)
kvm_for_each_memslot(memslot, slots) { kvmppc_mmu_pte_pflush(vcpu, range->start << PAGE_SHIFT,
unsigned long hva_start, hva_end; range->end << PAGE_SHIFT);
gfn_t gfn, gfn_end;
hva_start = max(start, memslot->userspace_addr); return false;
hva_end = min(end, memslot->userspace_addr +
(memslot->npages << PAGE_SHIFT));
if (hva_start >= hva_end)
continue;
/*
* {gfn(page) | page intersects with [hva_start, hva_end)} =
* {gfn, gfn+1, ..., gfn_end-1}.
*/
gfn = hva_to_gfn_memslot(hva_start, memslot);
gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
kvm_for_each_vcpu(i, vcpu, kvm)
kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
gfn_end << PAGE_SHIFT);
}
} }
static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start, static bool kvm_unmap_gfn_range_pr(struct kvm *kvm, struct kvm_gfn_range *range)
unsigned long end)
{ {
do_kvm_unmap_hva(kvm, start, end); return do_kvm_unmap_gfn(kvm, range);
return 0;
} }
static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start, static bool kvm_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range)
unsigned long end)
{ {
/* XXX could be more clever ;) */ /* XXX could be more clever ;) */
return 0; return false;
} }
static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva) static bool kvm_test_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range)
{ {
/* XXX could be more clever ;) */ /* XXX could be more clever ;) */
return 0; return false;
} }
static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte) static bool kvm_set_spte_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range)
{ {
/* The page will get remapped properly on its next fault */ /* The page will get remapped properly on its next fault */
do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE); return do_kvm_unmap_gfn(kvm, range);
} }
/*****************************************/ /*****************************************/
@@ -2079,10 +2057,10 @@ static struct kvmppc_ops kvm_ops_pr = {
.flush_memslot = kvmppc_core_flush_memslot_pr, .flush_memslot = kvmppc_core_flush_memslot_pr,
.prepare_memory_region = kvmppc_core_prepare_memory_region_pr, .prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
.commit_memory_region = kvmppc_core_commit_memory_region_pr, .commit_memory_region = kvmppc_core_commit_memory_region_pr,
.unmap_hva_range = kvm_unmap_hva_range_pr, .unmap_gfn_range = kvm_unmap_gfn_range_pr,
.age_hva = kvm_age_hva_pr, .age_gfn = kvm_age_gfn_pr,
.test_age_hva = kvm_test_age_hva_pr, .test_age_gfn = kvm_test_age_gfn_pr,
.set_spte_hva = kvm_set_spte_hva_pr, .set_spte_gfn = kvm_set_spte_gfn_pr,
.free_memslot = kvmppc_core_free_memslot_pr, .free_memslot = kvmppc_core_free_memslot_pr,
.init_vm = kvmppc_core_init_vm_pr, .init_vm = kvmppc_core_init_vm_pr,
.destroy_vm = kvmppc_core_destroy_vm_pr, .destroy_vm = kvmppc_core_destroy_vm_pr,

View file

@@ -721,43 +721,36 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
/************* MMU Notifiers *************/ /************* MMU Notifiers *************/
static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) static bool kvm_e500_mmu_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{ {
/* /*
* Flush all shadow tlb entries everywhere. This is slow, but * Flush all shadow tlb entries everywhere. This is slow, but
* we are 100% sure that we catch the to be unmapped page * we are 100% sure that we catch the to be unmapped page
*/ */
kvm_flush_remote_tlbs(kvm); return true;
return 0;
} }
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
unsigned flags)
{ {
/* kvm_unmap_hva flushes everything anyways */ return kvm_e500_mmu_unmap_gfn(kvm, range);
kvm_unmap_hva(kvm, start);
return 0;
} }
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{ {
/* XXX could be more clever ;) */ /* XXX could be more clever ;) */
return 0; return false;
} }
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{ {
/* XXX could be more clever ;) */ /* XXX could be more clever ;) */
return 0; return false;
} }
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{ {
/* The page will get remapped properly on its next fault */ /* The page will get remapped properly on its next fault */
kvm_unmap_hva(kvm, hva); return kvm_e500_mmu_unmap_gfn(kvm, range);
return 0;
} }
/*****************************************/ /*****************************************/