diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 2cfa3075d148..6fbbc0b6c05e 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -400,6 +400,42 @@ static inline int pud_same(pud_t pud_a, pud_t pud_b)
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
+#ifndef __HAVE_ARCH_DO_SWAP_PAGE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_do_swap_page() can restore this
+ * metadata when a page is swapped back in.
+ */
+static inline void arch_do_swap_page(struct mm_struct *mm,
+				     struct vm_area_struct *vma,
+				     unsigned long addr,
+				     pte_t pte, pte_t oldpte)
+{
+
+}
+#endif
+
+#ifndef __HAVE_ARCH_UNMAP_ONE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_unmap_one() can save this
+ * metadata on a swap-out of a page.
+ */
+static inline int arch_unmap_one(struct mm_struct *mm,
+				 struct vm_area_struct *vma,
+				 unsigned long addr,
+				 pte_t orig_pte)
+{
+	return 0;
+}
+#endif
+
 #ifndef __HAVE_ARCH_PGD_OFFSET_GATE
 #define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
 #endif
diff --git a/mm/memory.c b/mm/memory.c
index 5fcfc24904d1..aed37325d94e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3053,6 +3053,7 @@ int do_swap_page(struct vm_fault *vmf)
 	if (pte_swp_soft_dirty(vmf->orig_pte))
 		pte = pte_mksoft_dirty(pte);
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
+	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
 	vmf->orig_pte = pte;
 
 	/* ksm created a completely new copy */
diff --git a/mm/rmap.c b/mm/rmap.c
index 47db27f8049e..144c66e688a9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1497,6 +1497,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		    (flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
 			swp_entry_t entry;
 			pte_t swp_pte;
+
+			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
+				set_pte_at(mm, address, pvmw.pte, pteval);
+				ret = false;
+				page_vma_mapped_walk_done(&pvmw);
+				break;
+			}
+
 			/*
 			 * Store the pfn of the page in a special migration
 			 * pte. do_swap_page() will wait until the migration
@@ -1556,6 +1564,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
+			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
+				set_pte_at(mm, address, pvmw.pte, pteval);
+				ret = false;
+				page_vma_mapped_walk_done(&pvmw);
+				break;
+			}
 			if (list_empty(&mm->mmlist)) {
 				spin_lock(&mmlist_lock);
 				if (list_empty(&mm->mmlist))
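
For context, the generic definitions above are weak stubs: an architecture that carries per-page metadata is expected to define __HAVE_ARCH_DO_SWAP_PAGE / __HAVE_ARCH_UNMAP_ONE in its own pgtable.h and supply real implementations. The sketch below is an illustration only, not part of this patch and not the actual SPARC code; VM_ARCH_TAGGED, my_arch_save_tags() and my_arch_restore_tags() are made-up names for the sketch.

/*
 * Hypothetical arch-side override (illustration, not from this patch).
 * Lives in the architecture's asm/pgtable.h, included before the
 * generic header so the #ifndef stubs above are skipped.
 */
#define __HAVE_ARCH_UNMAP_ONE
static inline int arch_unmap_one(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr, pte_t orig_pte)
{
	/* Nothing to save for mappings that carry no tags. */
	if (!(vma->vm_flags & VM_ARCH_TAGGED))
		return 0;

	/*
	 * Save the tags keyed by the original pte.  A negative return
	 * makes try_to_unmap_one() restore the pte and abort the unmap,
	 * as added in mm/rmap.c above.
	 */
	return my_arch_save_tags(mm, addr, orig_pte);
}

#define __HAVE_ARCH_DO_SWAP_PAGE
static inline void arch_do_swap_page(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     pte_t pte, pte_t oldpte)
{
	if (!(vma->vm_flags & VM_ARCH_TAGGED))
		return;

	/* oldpte still holds the swap entry the tags were saved under. */
	my_arch_restore_tags(mm, vma, addr, pte, oldpte);
}

Note the ordering the patch establishes: do_swap_page() invokes arch_do_swap_page() right after set_pte_at(), so the new pte is already in place when the metadata is restored, while a failure from arch_unmap_one() simply leaves the page mapped rather than losing its tags.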