mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-26 04:16:39 +00:00
mm/khugepaged: delete khugepaged_collapse_pte_mapped_thps()
Now that retract_page_tables() can retract page tables reliably, without
depending on trylocks, delete all the apparatus for khugepaged to try
again later: khugepaged_collapse_pte_mapped_thps() etc; and free up the
per-mm memory which was set aside for that in the khugepaged_mm_slot.

But one part of that is worth keeping: when hpage_collapse_scan_file()
found SCAN_PTE_MAPPED_HUGEPAGE, that address was noted in the mm_slot to
be tried for retraction later - catching, for example, page tables where
a reversible mprotect() of a portion had required splitting the pmd, but
now it can be recollapsed. Call collapse_pte_mapped_thp() directly in
this case (why was it deferred before? I assume an issue with needing
mmap_lock for write, but now it's only needed for read).

[hughd@google.com: fix mmap_locked handling]
Link: https://lkml.kernel.org/r/bfc6cab2-497f-32bf-dd5-98dc1987e4a9@google.com
Link: https://lkml.kernel.org/r/a5dce57-6dfa-5559-4698-e817eb2f993@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huang, Ying <ying.huang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zack Rusin <zackr@vmware.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
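In outline, SCAN_PTE_MAPPED_HUGEPAGE is now handled inline in
khugepaged_scan_mm_slot(), retaking mmap_lock for read and collapsing
the page table on the spot; a condensed view of the new code path
(taken from the final hunk of the diff below, surrounding scan loop
omitted):

	mmap_read_unlock(mm);
	mmap_locked = false;
	*result = hpage_collapse_scan_file(mm, khugepaged_scan.address,
					   file, pgoff, cc);
	fput(file);
	if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
		/*
		 * Collapse the page table immediately, instead of noting
		 * the address in the mm_slot for a later khugepaged pass:
		 * mmap_lock held for read is now sufficient.
		 */
		mmap_read_lock(mm);
		if (hpage_collapse_test_exit(mm))
			goto breakouterloop;
		*result = collapse_pte_mapped_thp(mm,
				khugepaged_scan.address, false);
		if (*result == SCAN_PMD_MAPPED)
			*result = SCAN_SUCCEED;	/* already pmd-mapped: fine */
		mmap_read_unlock(mm);
	}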
commit d50791c2be
parent 1043173eb5
1 changed file with 14 additions and 109 deletions
mm/khugepaged.c | 123
@@ -93,8 +93,6 @@ static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
 
 static struct kmem_cache *mm_slot_cache __read_mostly;
 
-#define MAX_PTE_MAPPED_THP 8
-
 struct collapse_control {
 	bool is_khugepaged;
 
@@ -108,15 +106,9 @@ struct collapse_control {
 /**
  * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
  * @slot: hash lookup from mm to mm_slot
- * @nr_pte_mapped_thp: number of pte mapped THP
- * @pte_mapped_thp: address array corresponding pte mapped THP
  */
 struct khugepaged_mm_slot {
 	struct mm_slot slot;
-
-	/* pte-mapped THP in this mm */
-	int nr_pte_mapped_thp;
-	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
 };
 
 /**
@@ -1441,50 +1433,6 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
 }
 
 #ifdef CONFIG_SHMEM
-/*
- * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
- * khugepaged should try to collapse the page table.
- *
- * Note that following race exists:
- * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
- *     emptying the A's ->pte_mapped_thp[] array.
- * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
- *     retract_page_tables() finds a VMA in mm_struct A mapping the same extent
- *     (at virtual address X) and adds an entry (for X) into mm_struct A's
- *     ->pte-mapped_thp[] array.
- * (3) khugepaged calls khugepaged_collapse_scan_file() for mm_struct A at X,
- *     sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
- *     (for X) into mm_struct A's ->pte-mapped_thp[] array.
- * Thus, it's possible the same address is added multiple times for the same
- * mm_struct. Should this happen, we'll simply attempt
- * collapse_pte_mapped_thp() multiple times for the same address, under the same
- * exclusive mmap_lock, and assuming the first call is successful, subsequent
- * attempts will return quickly (without grabbing any additional locks) when
- * a huge pmd is found in find_pmd_or_thp_or_none(). Since this is a cheap
- * check, and since this is a rare occurrence, the cost of preventing this
- * "multiple-add" is thought to be more expensive than just handling it, should
- * it occur.
- */
-static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
-					  unsigned long addr)
-{
-	struct khugepaged_mm_slot *mm_slot;
-	struct mm_slot *slot;
-	bool ret = false;
-
-	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
-
-	spin_lock(&khugepaged_mm_lock);
-	slot = mm_slot_lookup(mm_slots_hash, mm);
-	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
-	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
-		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
-		ret = true;
-	}
-	spin_unlock(&khugepaged_mm_lock);
-	return ret;
-}
-
 /* hpage must be locked, and mmap_lock must be held */
 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
 			pmd_t *pmdp, struct page *hpage)
@@ -1708,29 +1656,6 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	return result;
 }
 
-static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
-{
-	struct mm_slot *slot = &mm_slot->slot;
-	struct mm_struct *mm = slot->mm;
-	int i;
-
-	if (likely(mm_slot->nr_pte_mapped_thp == 0))
-		return;
-
-	if (!mmap_write_trylock(mm))
-		return;
-
-	if (unlikely(hpage_collapse_test_exit(mm)))
-		goto out;
-
-	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
-		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);
-
-out:
-	mm_slot->nr_pte_mapped_thp = 0;
-	mmap_write_unlock(mm);
-}
-
 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 {
 	struct vm_area_struct *vma;
@@ -2371,16 +2296,6 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 {
 	BUILD_BUG();
 }
-
-static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
-{
-}
-
-static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
-					  unsigned long addr)
-{
-	return false;
-}
 #endif
 
 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
@@ -2410,7 +2325,6 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 		khugepaged_scan.mm_slot = mm_slot;
 	}
 	spin_unlock(&khugepaged_mm_lock);
-	khugepaged_collapse_pte_mapped_thps(mm_slot);
 
 	mm = slot->mm;
 	/*
@@ -2463,36 +2377,27 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 						khugepaged_scan.address);
 
 				mmap_read_unlock(mm);
-				*result = hpage_collapse_scan_file(mm,
-								   khugepaged_scan.address,
-								   file, pgoff, cc);
 				mmap_locked = false;
+				*result = hpage_collapse_scan_file(mm,
+					khugepaged_scan.address, file, pgoff, cc);
 				fput(file);
+				if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
+					mmap_read_lock(mm);
+					if (hpage_collapse_test_exit(mm))
+						goto breakouterloop;
+					*result = collapse_pte_mapped_thp(mm,
+						khugepaged_scan.address, false);
+					if (*result == SCAN_PMD_MAPPED)
+						*result = SCAN_SUCCEED;
+					mmap_read_unlock(mm);
+				}
 			} else {
 				*result = hpage_collapse_scan_pmd(mm, vma,
-								  khugepaged_scan.address,
-								  &mmap_locked,
-								  cc);
+					khugepaged_scan.address, &mmap_locked, cc);
 			}
-			switch (*result) {
-			case SCAN_PTE_MAPPED_HUGEPAGE: {
-				pmd_t *pmd;
-
-				*result = find_pmd_or_thp_or_none(mm,
-								  khugepaged_scan.address,
-								  &pmd);
-				if (*result != SCAN_SUCCEED)
-					break;
-				if (!khugepaged_add_pte_mapped_thp(mm,
-						khugepaged_scan.address))
-					break;
-			} fallthrough;
-			case SCAN_SUCCEED:
-				if (*result == SCAN_SUCCEED)
-					++khugepaged_pages_collapsed;
-				break;
-			default:
-				break;
-			}
+			if (*result == SCAN_SUCCEED)
+				++khugepaged_pages_collapsed;
 
 			/* move to next address */
 			khugepaged_scan.address += HPAGE_PMD_SIZE;