mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-27 12:57:53 +00:00
mm: call update_mmu_cache_range() in more page fault handling paths
Pass the vm_fault to the architecture to help it make smarter decisions about which PTEs to insert into the TLB.

Link: https://lkml.kernel.org/r/20230802151406.3735276-39-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
617c28ecab
commit
5003a2bdf6
1 changed file with 8 additions and 7 deletions
15
mm/memory.c
15
mm/memory.c
|
@ -2862,7 +2862,7 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src,
|
|||
|
||||
entry = pte_mkyoung(vmf->orig_pte);
|
||||
if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
|
||||
update_mmu_cache(vma, addr, vmf->pte);
|
||||
update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -3039,7 +3039,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
|
|||
entry = pte_mkyoung(vmf->orig_pte);
|
||||
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
|
||||
if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
|
||||
update_mmu_cache(vma, vmf->address, vmf->pte);
|
||||
update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
|
||||
pte_unmap_unlock(vmf->pte, vmf->ptl);
|
||||
count_vm_event(PGREUSE);
|
||||
}
|
||||
|
@ -3163,7 +3163,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
|
|||
*/
|
||||
BUG_ON(unshare && pte_write(entry));
|
||||
set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
|
||||
update_mmu_cache(vma, vmf->address, vmf->pte);
|
||||
update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
|
||||
if (old_folio) {
|
||||
/*
|
||||
* Only after switching the pte to the new page may
|
||||
|
@ -4046,7 +4046,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
|
|||
}
|
||||
|
||||
/* No need to invalidate - it was non-present before */
|
||||
update_mmu_cache(vma, vmf->address, vmf->pte);
|
||||
update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
|
||||
unlock:
|
||||
if (vmf->pte)
|
||||
pte_unmap_unlock(vmf->pte, vmf->ptl);
|
||||
|
@ -4170,7 +4170,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
|
|||
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
|
||||
|
||||
/* No need to invalidate - it was non-present before */
|
||||
update_mmu_cache(vma, vmf->address, vmf->pte);
|
||||
update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
|
||||
unlock:
|
||||
if (vmf->pte)
|
||||
pte_unmap_unlock(vmf->pte, vmf->ptl);
|
||||
|
@ -4859,7 +4859,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
|
|||
if (writable)
|
||||
pte = pte_mkwrite(pte);
|
||||
ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
|
||||
update_mmu_cache(vma, vmf->address, vmf->pte);
|
||||
update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
|
||||
pte_unmap_unlock(vmf->pte, vmf->ptl);
|
||||
goto out;
|
||||
}
|
||||
|
@ -5030,7 +5030,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
|
|||
entry = pte_mkyoung(entry);
|
||||
if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
|
||||
vmf->flags & FAULT_FLAG_WRITE)) {
|
||||
update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
|
||||
update_mmu_cache_range(vmf, vmf->vma, vmf->address,
|
||||
vmf->pte, 1);
|
||||
} else {
|
||||
/* Skip spurious TLB flush for retried page fault */
|
||||
if (vmf->flags & FAULT_FLAG_TRIED)
|
||||
|
|
Loading…
Reference in a new issue