mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-20 09:31:09 +00:00
KVM: MMU: inline set_spte in FNAME(sync_page)
Since the two callers of set_spte do different things with the results, inlining it actually makes the code simpler to reason about. For example, FNAME(sync_page) already has a struct kvm_mmu_page *, but set_spte had to fish it back out of sptep's private page data. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
d786c7783b
commit
4758d47e0d
2 changed files with 12 additions and 30 deletions
|
@@ -2674,27 +2674,6 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
|
||||
unsigned int pte_access, int level,
|
||||
gfn_t gfn, kvm_pfn_t pfn, bool speculative,
|
||||
bool can_unsync, bool host_writable)
|
||||
{
|
||||
u64 spte;
|
||||
struct kvm_mmu_page *sp;
|
||||
int ret;
|
||||
|
||||
sp = sptep_to_sp(sptep);
|
||||
|
||||
ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
|
||||
can_unsync, host_writable, sp_ad_disabled(sp), &spte);
|
||||
|
||||
if (*sptep == spte)
|
||||
ret |= SET_SPTE_SPURIOUS;
|
||||
else if (mmu_spte_update(sptep, spte))
|
||||
ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
|
||||
unsigned int pte_access, bool write_fault, int level,
|
||||
gfn_t gfn, kvm_pfn_t pfn, bool speculative,
|
||||
|
|
|
@@ -1061,7 +1061,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
|
|||
int i;
|
||||
bool host_writable;
|
||||
gpa_t first_pte_gpa;
|
||||
int set_spte_ret = 0;
|
||||
bool flush = false;
|
||||
|
||||
/*
|
||||
* Ignore various flags when verifying that it's safe to sync a shadow
|
||||
|
@@ -1091,6 +1091,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
|
|||
first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
|
||||
|
||||
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
|
||||
u64 *sptep, spte;
|
||||
unsigned pte_access;
|
||||
pt_element_t gpte;
|
||||
gpa_t pte_gpa;
|
||||
|
@@ -1106,7 +1107,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
|
|||
return -1;
|
||||
|
||||
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
|
||||
set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
|
||||
flush = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@@ -1120,19 +1121,21 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
|
|||
|
||||
if (gfn != sp->gfns[i]) {
|
||||
drop_spte(vcpu->kvm, &sp->spt[i]);
|
||||
set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
|
||||
flush = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
host_writable = sp->spt[i] & shadow_host_writable_mask;
|
||||
sptep = &sp->spt[i];
|
||||
spte = *sptep;
|
||||
host_writable = spte & shadow_host_writable_mask;
|
||||
make_spte(vcpu, pte_access, PG_LEVEL_4K, gfn,
|
||||
spte_to_pfn(spte), spte, true, false,
|
||||
host_writable, sp_ad_disabled(sp), &spte);
|
||||
|
||||
set_spte_ret |= set_spte(vcpu, &sp->spt[i],
|
||||
pte_access, PG_LEVEL_4K,
|
||||
gfn, spte_to_pfn(sp->spt[i]),
|
||||
true, false, host_writable);
|
||||
flush |= mmu_spte_update(sptep, spte);
|
||||
}
|
||||
|
||||
return set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH;
|
||||
return flush;
|
||||
}
|
||||
|
||||
#undef pt_element_t
|
||||
|
|
Loading…
Reference in a new issue