mm: thp: convert to use common struct mm_slot

Rename the private struct mm_slot to struct khugepaged_mm_slot and convert
khugepaged to use the common struct mm_slot.  No functional change intended.
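
The conversion follows the usual container_of()-style embedding pattern:
the common part (hash, mm_node, mm) moves into an embedded struct mm_slot,
and the khugepaged-private state stays in the wrapper.  A minimal sketch,
using the helper names that appear in the diff below (their definitions
live in mm/mm_slot.h, introduced earlier in this series):

	struct khugepaged_mm_slot {
		struct mm_slot slot;	/* common part: hash, mm_node, mm */
		/* khugepaged-private state */
		int nr_pte_mapped_thp;
		unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
	};

	/* Look up the common part by mm, then recover the wrapper. */
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);

Note that the embedded slot is the first member, so a NULL result from
mm_slot_lookup() also yields a NULL wrapper pointer from mm_slot_entry(),
which the "if (mm_slot && ...)" checks in the callers rely on.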

[zhengqi.arch@bytedance.com: fix build error with CONFIG_SHMEM disabled]
  Link: https://lkml.kernel.org/r/639fa8d5-8e5b-2333-69dc-40ed46219364@bytedance.com
Link: https://lkml.kernel.org/r/20220831031951.43152-3-zhengqi.arch@bytedance.com
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -23,6 +23,7 @@
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
 #include "internal.h"
+#include "mm_slot.h"
 
 enum scan_result {
 	SCAN_FAIL,
@@ -99,17 +100,13 @@ struct collapse_control {
 };
 
 /**
- * struct mm_slot - hash lookup from mm to mm_slot
- * @hash: hash collision list
- * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
- * @mm: the mm that this information is valid for
+ * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
+ * @slot: hash lookup from mm to mm_slot
  * @nr_pte_mapped_thp: number of pte mapped THP
  * @pte_mapped_thp: address array corresponding pte mapped THP
  */
-struct mm_slot {
-	struct hlist_node hash;
-	struct list_head mm_node;
-	struct mm_struct *mm;
+struct khugepaged_mm_slot {
+	struct mm_slot slot;
 
 	/* pte-mapped THP in this mm */
 	int nr_pte_mapped_thp;
@@ -126,7 +123,7 @@ struct mm_slot {
  */
 struct khugepaged_scan {
 	struct list_head mm_head;
-	struct mm_slot *mm_slot;
+	struct khugepaged_mm_slot *mm_slot;
 	unsigned long address;
 };
 
@@ -390,8 +387,9 @@ int hugepage_madvise(struct vm_area_struct *vma,
 int __init khugepaged_init(void)
 {
 	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
-					  sizeof(struct mm_slot),
-					  __alignof__(struct mm_slot), 0, NULL);
+					  sizeof(struct khugepaged_mm_slot),
+					  __alignof__(struct khugepaged_mm_slot),
+					  0, NULL);
 	if (!mm_slot_cache)
 		return -ENOMEM;
@@ -408,36 +406,6 @@ void __init khugepaged_destroy(void)
 	kmem_cache_destroy(mm_slot_cache);
 }
 
-static inline struct mm_slot *alloc_mm_slot(void)
-{
-	if (!mm_slot_cache)	/* initialization failed */
-		return NULL;
-	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
-}
-
-static inline void free_mm_slot(struct mm_slot *mm_slot)
-{
-	kmem_cache_free(mm_slot_cache, mm_slot);
-}
-
-static struct mm_slot *get_mm_slot(struct mm_struct *mm)
-{
-	struct mm_slot *mm_slot;
-
-	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
-		if (mm == mm_slot->mm)
-			return mm_slot;
-
-	return NULL;
-}
-
-static void insert_to_mm_slots_hash(struct mm_struct *mm,
-				    struct mm_slot *mm_slot)
-{
-	mm_slot->mm = mm;
-	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
-}
-
 static inline int hpage_collapse_test_exit(struct mm_struct *mm)
 {
 	return atomic_read(&mm->mm_users) == 0;
@@ -445,28 +413,31 @@ static inline int hpage_collapse_test_exit(struct mm_struct *mm)
 
 void __khugepaged_enter(struct mm_struct *mm)
 {
-	struct mm_slot *mm_slot;
+	struct khugepaged_mm_slot *mm_slot;
+	struct mm_slot *slot;
 	int wakeup;
 
-	mm_slot = alloc_mm_slot();
+	mm_slot = mm_slot_alloc(mm_slot_cache);
 	if (!mm_slot)
 		return;
 
+	slot = &mm_slot->slot;
+
 	/* __khugepaged_exit() must not run from under us */
 	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
-		free_mm_slot(mm_slot);
+		mm_slot_free(mm_slot_cache, mm_slot);
 		return;
 	}
 
 	spin_lock(&khugepaged_mm_lock);
-	insert_to_mm_slots_hash(mm, mm_slot);
+	mm_slot_insert(mm_slots_hash, mm, slot);
 	/*
 	 * Insert just behind the scanning cursor, to let the area settle
 	 * down a little.
 	 */
 	wakeup = list_empty(&khugepaged_scan.mm_head);
-	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
+	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
 	spin_unlock(&khugepaged_mm_lock);
 
 	mmgrab(mm);
@@ -486,21 +457,23 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
 
 void __khugepaged_exit(struct mm_struct *mm)
 {
-	struct mm_slot *mm_slot;
+	struct khugepaged_mm_slot *mm_slot;
+	struct mm_slot *slot;
 	int free = 0;
 
 	spin_lock(&khugepaged_mm_lock);
-	mm_slot = get_mm_slot(mm);
+	slot = mm_slot_lookup(mm_slots_hash, mm);
+	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
-		hash_del(&mm_slot->hash);
-		list_del(&mm_slot->mm_node);
+		hash_del(&slot->hash);
+		list_del(&slot->mm_node);
 		free = 1;
 	}
 
 	spin_unlock(&khugepaged_mm_lock);
 	if (free) {
 		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
-		free_mm_slot(mm_slot);
+		mm_slot_free(mm_slot_cache, mm_slot);
 		mmdrop(mm);
 	} else if (mm_slot) {
 		/*
@@ -1318,16 +1291,17 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	return result;
 }
 
-static void collect_mm_slot(struct mm_slot *mm_slot)
+static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
 {
-	struct mm_struct *mm = mm_slot->mm;
+	struct mm_slot *slot = &mm_slot->slot;
+	struct mm_struct *mm = slot->mm;
 
 	lockdep_assert_held(&khugepaged_mm_lock);
 
 	if (hpage_collapse_test_exit(mm)) {
 		/* free mm_slot */
-		hash_del(&mm_slot->hash);
-		list_del(&mm_slot->mm_node);
+		hash_del(&slot->hash);
+		list_del(&slot->mm_node);
 
 		/*
 		 * Not strictly needed because the mm exited already.
@@ -1336,7 +1310,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 		 */
 
 		/* khugepaged_mm_lock actually not necessary for the below */
-		free_mm_slot(mm_slot);
+		mm_slot_free(mm_slot_cache, mm_slot);
 		mmdrop(mm);
 	}
 }
@@ -1349,12 +1323,14 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 static void khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
 					  unsigned long addr)
 {
-	struct mm_slot *mm_slot;
+	struct khugepaged_mm_slot *mm_slot;
+	struct mm_slot *slot;
 
 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
 
 	spin_lock(&khugepaged_mm_lock);
-	mm_slot = get_mm_slot(mm);
+	slot = mm_slot_lookup(mm_slots_hash, mm);
+	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
 	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
 		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
 	spin_unlock(&khugepaged_mm_lock);
@@ -1486,9 +1462,10 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
 	goto drop_hpage;
 }
 
-static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
+static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
 {
-	struct mm_struct *mm = mm_slot->mm;
+	struct mm_slot *slot = &mm_slot->slot;
+	struct mm_struct *mm = slot->mm;
 	int i;
 
 	if (likely(mm_slot->nr_pte_mapped_thp == 0))
@@ -2040,7 +2017,7 @@ static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
 	BUILD_BUG();
 }
 
-static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
+static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
 {
 }
 #endif
@@ -2051,7 +2028,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 	__acquires(&khugepaged_mm_lock)
 {
 	struct vma_iterator vmi;
-	struct mm_slot *mm_slot;
+	struct khugepaged_mm_slot *mm_slot;
+	struct mm_slot *slot;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	int progress = 0;
@@ -2060,18 +2038,20 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 	lockdep_assert_held(&khugepaged_mm_lock);
 	*result = SCAN_FAIL;
 
-	if (khugepaged_scan.mm_slot)
+	if (khugepaged_scan.mm_slot) {
 		mm_slot = khugepaged_scan.mm_slot;
-	else {
-		mm_slot = list_entry(khugepaged_scan.mm_head.next,
+		slot = &mm_slot->slot;
+	} else {
+		slot = list_entry(khugepaged_scan.mm_head.next,
 				     struct mm_slot, mm_node);
+		mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
 		khugepaged_scan.address = 0;
 		khugepaged_scan.mm_slot = mm_slot;
 	}
 	spin_unlock(&khugepaged_mm_lock);
 	khugepaged_collapse_pte_mapped_thps(mm_slot);
 
-	mm = mm_slot->mm;
+	mm = slot->mm;
 	/*
 	 * Don't wait for semaphore (to avoid long wait times).  Just move to
 	 * the next mm on the list.
@@ -2166,10 +2146,11 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 		 * khugepaged runs here, khugepaged_exit will find
 		 * mm_slot not pointing to the exiting mm.
 		 */
-		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
-			khugepaged_scan.mm_slot = list_entry(
-				mm_slot->mm_node.next,
-				struct mm_slot, mm_node);
+		if (slot->mm_node.next != &khugepaged_scan.mm_head) {
+			slot = list_entry(slot->mm_node.next,
+					  struct mm_slot, mm_node);
+			khugepaged_scan.mm_slot =
+				mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
 			khugepaged_scan.address = 0;
 		} else {
 			khugepaged_scan.mm_slot = NULL;
@@ -2264,7 +2245,7 @@ static void khugepaged_wait_work(void)
 
 static int khugepaged(void *none)
 {
-	struct mm_slot *mm_slot;
+	struct khugepaged_mm_slot *mm_slot;
 
 	set_freezable();
 	set_user_nice(current, MAX_NICE);