mm/memory: pass PTE to copy_present_pte()
We already read it, let's just forward it.

This patch is based on work by Ryan Roberts.

[david@redhat.com: fix the hmm "exclusive_cow" selftest]
  Link: https://lkml.kernel.org/r/13f296b8-e882-47fd-b939-c2141dc28717@redhat.com
Link: https://lkml.kernel.org/r/20240129124649.189745-13-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Russell King (Oracle) <linux@armlinux.org.uk>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
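To illustrate the idea, here is a standalone C sketch of moving a read from the callee into the caller; it is not the kernel code, and the names read_entry, handle_entry_old, handle_entry_new and the simplified pte_t stand-in are hypothetical:

#include <stdio.h>

typedef unsigned long pte_t;            /* simplified stand-in for the kernel's pte_t */

/* Hypothetical stand-in for ptep_get(): one read of the page-table entry. */
static pte_t read_entry(const pte_t *ptep)
{
        return *ptep;
}

/* Before: the callee re-reads the entry the caller has just read. */
static int handle_entry_old(pte_t *ptep)
{
        pte_t pte = read_entry(ptep);   /* second read of the same entry */
        return (int)(pte & 1);          /* pretend bit 0 is the "present" bit */
}

/* After: the caller forwards the value it already holds. */
static int handle_entry_new(pte_t *ptep, pte_t pte)
{
        (void)ptep;                     /* pointer is still passed for later updates */
        return (int)(pte & 1);
}

int main(void)
{
        pte_t table[1] = { 0x5 };
        pte_t ptent = read_entry(&table[0]);            /* caller reads once... */

        printf("old=%d new=%d\n",
               handle_entry_old(&table[0]),             /* ...old path reads it again */
               handle_entry_new(&table[0], ptent));     /* ...new path forwards the value */
        return 0;
}

In the patch below, copy_pte_range() plays the caller role: it reads ptent once via ptep_get() and hands it to copy_present_pte(), which drops its own ptep_get() call.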
This commit is contained in:
parent 23ed190868
commit 53723298ba
1 changed file with 5 additions and 4 deletions
mm/memory.c
@@ -959,10 +959,9 @@ static inline void __copy_present_pte(struct vm_area_struct *dst_vma,
  */
 static inline int
 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
-                 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
-                 struct folio **prealloc)
+                 pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
+                 int *rss, struct folio **prealloc)
 {
-        pte_t pte = ptep_get(src_pte);
         struct page *page;
         struct folio *folio;
 
@@ -1094,6 +1093,8 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                         progress += 8;
                         continue;
                 }
+                ptent = ptep_get(src_pte);
+                VM_WARN_ON_ONCE(!pte_present(ptent));
 
                 /*
                  * Device exclusive entry restored, continue by copying
@@ -1103,7 +1104,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                 }
                 /* copy_present_pte() will clear `*prealloc' if consumed */
                 ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
-                                       addr, rss, &prealloc);
+                                       ptent, addr, rss, &prealloc);
                 /*
                  * If we need a pre-allocated page for this pte, drop the
                  * locks, allocate, and try again.