KVM: MMU: change disallowed_hugepage_adjust() arguments to kvm_page_fault

Pass struct kvm_page_fault to disallowed_hugepage_adjust() instead of
extracting the arguments from the struct.  Tweak the conditions a bit
to avoid long lines.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 536f0e6ace
parent 73a3c65947
Author: Paolo Bonzini
Date:   2021-08-06 04:35:50 -04:00

4 changed files with 11 additions and 17 deletions
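At a glance, the change is the prototype swap sketched below. This is only an
illustrative sketch, not part of the patch: the _old suffix and the type stubs
exist solely so the two declarations can sit side by side in one snippet.

/*
 * Illustrative sketch only: old vs. new calling convention for the helper.
 * Kernel types are stubbed so the snippet stands alone; the real
 * struct kvm_page_fault is defined in arch/x86/kvm/mmu/mmu_internal.h.
 */
#include <stdint.h>

typedef uint64_t u64;          /* stand-in for the kernel typedef */
typedef uint8_t  u8;           /* stand-in */
typedef u64 gfn_t;             /* stand-in */
typedef u64 kvm_pfn_t;         /* stand-in */

struct kvm_page_fault;         /* opaque here; fields used: gfn, pfn, goal_level */

/* Old: the caller extracts scalars from the fault and passes output pointers
 * so the helper can write the adjusted pfn and goal level back.
 */
void disallowed_hugepage_adjust_old(u64 spte, gfn_t gfn, int cur_level,
                                    kvm_pfn_t *pfnp, u8 *goal_levelp);

/* New: the helper takes the fault itself, reads fault->gfn and
 * fault->goal_level, and updates fault->pfn and fault->goal_level in place.
 */
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte,
                                int cur_level);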

arch/x86/kvm/mmu/mmu.c

@@ -2957,12 +2957,10 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	fault->pfn &= ~mask;
 }
 
-void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
-				kvm_pfn_t *pfnp, u8 *goal_levelp)
+void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
 {
-	int level = *goal_levelp;
-
-	if (cur_level == level && level > PG_LEVEL_4K &&
+	if (cur_level > PG_LEVEL_4K &&
+	    cur_level == fault->goal_level &&
 	    is_shadow_present_pte(spte) &&
 	    !is_large_pte(spte)) {
 		/*
@@ -2972,10 +2970,10 @@ void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
 		 * patching back for them into pfn the next 9 bits of
 		 * the address.
 		 */
-		u64 page_mask = KVM_PAGES_PER_HPAGE(level) -
-				KVM_PAGES_PER_HPAGE(level - 1);
-		*pfnp |= gfn & page_mask;
-		(*goal_levelp)--;
+		u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
+				KVM_PAGES_PER_HPAGE(cur_level - 1);
+		fault->pfn |= fault->gfn & page_mask;
+		fault->goal_level--;
 	}
 }
@@ -2995,8 +2993,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		 * large page, as the leaf could be executable.
 		 */
 		if (fault->nx_huge_page_workaround_enabled)
-			disallowed_hugepage_adjust(*it.sptep, fault->gfn, it.level,
-						   &fault->pfn, &fault->goal_level);
+			disallowed_hugepage_adjust(fault, *it.sptep, it.level);
 
 		base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
 		if (it.level == fault->goal_level)

arch/x86/kvm/mmu/mmu_internal.h

@@ -159,8 +159,7 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
 			      const struct kvm_memory_slot *slot, gfn_t gfn,
 			      kvm_pfn_t pfn, int max_level);
 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
-void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
-				kvm_pfn_t *pfnp, u8 *goal_levelp);
+void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);
 
 void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

arch/x86/kvm/mmu/paging_tmpl.h

@@ -740,8 +740,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 		 * large page, as the leaf could be executable.
 		 */
 		if (fault->nx_huge_page_workaround_enabled)
-			disallowed_hugepage_adjust(*it.sptep, fault->gfn, it.level,
-						   &fault->pfn, &fault->goal_level);
+			disallowed_hugepage_adjust(fault, *it.sptep, it.level);
 
 		base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
 		if (it.level == fault->goal_level)

arch/x86/kvm/mmu/tdp_mmu.c

@@ -1001,8 +1001,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
 		if (fault->nx_huge_page_workaround_enabled)
-			disallowed_hugepage_adjust(iter.old_spte, fault->gfn,
-						   iter.level, &fault->pfn, &fault->goal_level);
+			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
 
 		if (iter.level == fault->goal_level)
 			break;