mm: Introduce vma_is_special_huge

For VM_PFNMAP and VM_MIXEDMAP vmas that want to support transhuge pages
and transhuge page-table entries, introduce vma_is_special_huge(), which
makes those vmas take the same code paths as vma_is_dax() vmas.
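
As an illustrative sketch only (not part of this patch): a driver that maps
device memory through a file mmap with VM_PFNMAP set could install such
transhuge entries from its huge_fault handler. my_drv_huge_fault() and
my_drv_pfn_for() are made-up names, and the sketch assumes the current
huge_fault()/vmf_insert_pfn_pmd() interfaces from <linux/mm.h> and
<linux/pfn_t.h>:

	/* Sketch: PMD-sized fault on a VM_PFNMAP mapping of device memory. */
	static vm_fault_t my_drv_huge_fault(struct vm_fault *vmf,
					    enum page_entry_size pe_size)
	{
		unsigned long pfn;

		if (pe_size != PE_SIZE_PMD)
			return VM_FAULT_FALLBACK;

		pfn = my_drv_pfn_for(vmf);	/* hypothetical device lookup */

		/*
		 * vmf->vma comes from a file mmap and has VM_PFNMAP set, so
		 * vma_is_special_huge() is true and the core mm will not look
		 * up a struct page for the entry inserted here.
		 */
		return vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn, PFN_DEV),
					  vmf->flags & FAULT_FLAG_WRITE);
	}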

The use of "special" follows the definition of vm_normal_page() in
mm/memory.c: "Special" mappings do not wish to be associated with a
"struct page" (either it doesn't exist, or it exists but they don't want
to touch it).

For PAGE_SIZE pages, "special" is determined per page table entry to be
able to deal with COW pages. But since we don't have huge COW pages,
we can classify a vma as either "special huge" or "normal huge".
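
A rough contrast of the two cases (illustrative only; example_normal_page()
is a made-up helper, and the PAGE_SIZE branch assumes
CONFIG_ARCH_HAS_PTE_SPECIAL):

	/* Whether a mapping may hand back a struct page: per entry vs. per vma. */
	static struct page *example_normal_page(struct vm_area_struct *vma,
						pte_t pte, pmd_t pmd, bool huge)
	{
		if (!huge)
			/* PAGE_SIZE: decided per entry, so COW'ed pages stay normal. */
			return pte_special(pte) ? NULL : pte_page(pte);

		/* Huge: there are no huge COW pages, so the whole vma decides. */
		return vma_is_special_huge(vma) ? NULL : pmd_page(pmd);
	}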

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Thomas Hellstrom (VMware) <thomas_os@shipmail.org>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
commit 2484ca9b6a
parent f05a3849f6
Author: Thomas Hellstrom (VMware)
Date:   2020-03-24 18:47:17 +01:00

2 changed files with 20 additions and 3 deletions

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2867,6 +2867,23 @@ extern long copy_huge_page_from_user(struct page *dst_page,
 				const void __user *usr_src,
 				unsigned int pages_per_huge_page,
 				bool allow_pagefault);
+
+/**
+ * vma_is_special_huge - Are transhuge page-table entries considered special?
+ * @vma: Pointer to the struct vm_area_struct to consider
+ *
+ * Whether transhuge page-table entries are considered "special" following
+ * the definition in vm_normal_page().
+ *
+ * Return: true if transhuge page-table entries should be considered special,
+ * false otherwise.
+ */
+static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
+{
+	return vma_is_dax(vma) || (vma->vm_file &&
+				   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
+}
+
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 #ifdef CONFIG_DEBUG_PAGEALLOC

--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1802,7 +1802,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
 						tlb->fullmm);
 	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
-	if (vma_is_dax(vma)) {
+	if (vma_is_special_huge(vma)) {
 		if (arch_needs_pgtable_deposit())
 			zap_deposited_table(tlb->mm, pmd);
 		spin_unlock(ptl);
@@ -2066,7 +2066,7 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	 */
 	pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
 	tlb_remove_pud_tlb_entry(tlb, pud, addr);
-	if (vma_is_dax(vma)) {
+	if (vma_is_special_huge(vma)) {
 		spin_unlock(ptl);
 		/* No zero page support yet */
 	} else {
@@ -2175,7 +2175,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	 */
 	if (arch_needs_pgtable_deposit())
 		zap_deposited_table(mm, pmd);
-	if (vma_is_dax(vma))
+	if (vma_is_special_huge(vma))
 		return;
 	page = pmd_page(_pmd);
 	if (!PageDirty(page) && pmd_dirty(_pmd))