mm/compaction: add support for >0 order folio memory compaction.

Before the previous commit, memory compaction only migrates order-0 folios
and skips >0 order folios.  The previous commit splits all >0 order folios
during compaction.  This commit migrates >0 order folios during compaction
by keeping isolated free pages at their original sizes, without splitting
them into order-0 pages, and using them directly during the migration
process.

What is different from the prior implementation:
1. All isolated free pages are kept in a NR_PAGE_ORDERS array of page
   lists, where each page list stores free pages of the same order.
2. Isolated free pages are not processed by post_alloc_hook() and are not
   buddy pages, although their orders are stored in the first page's
   private field, like buddy pages.
3. During migration, at new page allocation time (i.e., in
   compaction_alloc()), free pages are processed by post_alloc_hook().
   When migration fails and a new page is returned (i.e., in
   compaction_free()), free pages are restored by reversing the
   post_alloc_hook() operations using the newly exported
   free_pages_prepare().

Step 3 is done for a later optimization, so that splitting and/or merging
free pages during compaction becomes easier.
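
For illustration, the following is a minimal, self-contained userspace
sketch of the bookkeeping in steps 1-3.  It is not the kernel
implementation: struct fake_page, put_free_page(), take_free_page() and
return_free_page() are invented names for this sketch only, and the
"prepared" flag merely stands in for the post_alloc_hook() /
free_pages_prepare() state transitions.

/* Build with: cc -Wall sketch.c */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define NR_PAGE_ORDERS 11	/* MAX_PAGE_ORDER + 1 on a default config */

struct fake_page {
	unsigned int private;		/* order, as on the first buddy page */
	bool prepared;			/* stands in for post_alloc_hook() state */
	struct fake_page *next;		/* stands in for the lru list linkage */
};

/* One list head per order, mirroring cc->freepages[NR_PAGE_ORDERS]. */
static struct fake_page *freepages[NR_PAGE_ORDERS];

/* Steps 1+2: store the page at its original order, unprepared. */
static void put_free_page(struct fake_page *page, unsigned int order)
{
	page->private = order;
	page->prepared = false;
	page->next = freepages[order];
	freepages[order] = page;
}

/* Step 3, allocation side: prepare the page only when it is handed out. */
static struct fake_page *take_free_page(unsigned int order)
{
	struct fake_page *page = freepages[order];

	if (!page)
		return NULL;		/* no free page of this exact order */
	freepages[order] = page->next;
	page->prepared = true;		/* post_alloc_hook() equivalent */
	return page;
}

/* Step 3, failure side: reverse the preparation and put the page back. */
static void return_free_page(struct fake_page *page)
{
	page->prepared = false;		/* free_pages_prepare() equivalent */
	put_free_page(page, page->private);
}

int main(void)
{
	struct fake_page p = { 0 };

	put_free_page(&p, 3);
	assert(take_free_page(3) == &p && p.prepared);
	return_free_page(&p);		/* migration failed, take it back */
	assert(freepages[3] == &p && !p.prepared);
	return 0;
}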

Note: without splitting free pages, compaction can end prematurely because
migration will return -ENOMEM even if there are free pages.  This happens
when no order-0 free page exists and compaction_alloc() returns NULL.
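
As a standalone illustration of that premature end (again not kernel code;
pages_of[], alloc_exact_order() and ENOMEM_LIKE are made-up names for this
sketch), a request for an order with no isolated free pages fails outright
even while pages of other orders are still available:

#include <stdio.h>

#define NR_PAGE_ORDERS 11	/* MAX_PAGE_ORDER + 1 on a default config */
#define ENOMEM_LIKE    (-12)	/* same numeric value as -ENOMEM on Linux */

static int pages_of[NR_PAGE_ORDERS];	/* isolated free pages per order */

static int alloc_exact_order(unsigned int order)
{
	if (!pages_of[order])
		return ENOMEM_LIKE;	/* compaction_alloc() would return NULL */
	pages_of[order]--;
	return 0;
}

int main(void)
{
	pages_of[2] = 4;	/* only order-2 free pages were isolated */

	/* An order-0 source folio cannot be placed: migration sees an
	 * allocation failure and compaction ends prematurely, despite
	 * four order-2 free pages still sitting on the list. */
	printf("order-0 migration: %d\n", alloc_exact_order(0));
	printf("order-2 migration: %d\n", alloc_exact_order(2));
	return 0;
}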

Link: https://lkml.kernel.org/r/20240220183220.1451315-4-zi.yan@sent.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Adam Manzanares <a.manzanares@samsung.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 733aea0b3a (parent ee6f62fd34)
Author: Zi Yan, 2024-02-20 13:32:19 -05:00; committed by Andrew Morton
3 changed files with 86 additions and 66 deletions

mm/compaction.c

@@ -79,45 +79,56 @@ static inline bool is_via_compact_memory(int order) { return false; }
#define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT)
#endif
static unsigned long release_freepages(struct list_head *freelist)
static void split_map_pages(struct list_head *freepages)
{
struct page *page, *next;
unsigned long high_pfn = 0;
list_for_each_entry_safe(page, next, freelist, lru) {
unsigned long pfn = page_to_pfn(page);
list_del(&page->lru);
__free_page(page);
if (pfn > high_pfn)
high_pfn = pfn;
}
return high_pfn;
}
static void split_map_pages(struct list_head *list)
{
unsigned int i, order, nr_pages;
unsigned int i, order;
struct page *page, *next;
LIST_HEAD(tmp_list);
list_for_each_entry_safe(page, next, list, lru) {
list_del(&page->lru);
for (order = 0; order < NR_PAGE_ORDERS; order++) {
list_for_each_entry_safe(page, next, &freepages[order], lru) {
unsigned int nr_pages;
order = page_private(page);
nr_pages = 1 << order;
list_del(&page->lru);
post_alloc_hook(page, order, __GFP_MOVABLE);
if (order)
split_page(page, order);
nr_pages = 1 << order;
for (i = 0; i < nr_pages; i++) {
list_add(&page->lru, &tmp_list);
page++;
post_alloc_hook(page, order, __GFP_MOVABLE);
if (order)
split_page(page, order);
for (i = 0; i < nr_pages; i++) {
list_add(&page->lru, &tmp_list);
page++;
}
}
list_splice_init(&tmp_list, &freepages[0]);
}
}
static unsigned long release_free_list(struct list_head *freepages)
{
int order;
unsigned long high_pfn = 0;
for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct page *page, *next;
list_for_each_entry_safe(page, next, &freepages[order], lru) {
unsigned long pfn = page_to_pfn(page);
list_del(&page->lru);
/*
* Convert free pages into post allocation pages, so
* that we can free them via __free_page.
*/
post_alloc_hook(page, order, __GFP_MOVABLE);
__free_pages(page, order);
if (pfn > high_pfn)
high_pfn = pfn;
}
}
list_splice(&tmp_list, list);
return high_pfn;
}
#ifdef CONFIG_COMPACTION
@@ -670,7 +681,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
nr_scanned += isolated - 1;
total_isolated += isolated;
cc->nr_freepages += isolated;
list_add_tail(&page->lru, freelist);
list_add_tail(&page->lru, &freelist[order]);
if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
blockpfn += isolated;
@@ -735,7 +746,11 @@ isolate_freepages_range(struct compact_control *cc,
unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
LIST_HEAD(freelist);
int order;
struct list_head tmp_freepages[NR_PAGE_ORDERS];
for (order = 0; order < NR_PAGE_ORDERS; order++)
INIT_LIST_HEAD(&tmp_freepages[order]);
pfn = start_pfn;
block_start_pfn = pageblock_start_pfn(pfn);
@@ -766,7 +781,7 @@ isolate_freepages_range(struct compact_control *cc,
break;
isolated = isolate_freepages_block(cc, &isolate_start_pfn,
block_end_pfn, &freelist, 0, true);
block_end_pfn, tmp_freepages, 0, true);
/*
* In strict mode, isolate_freepages_block() returns 0 if
@@ -783,15 +798,15 @@ isolate_freepages_range(struct compact_control *cc,
*/
}
/* __isolate_free_page() does not map the pages */
split_map_pages(&freelist);
if (pfn < end_pfn) {
/* Loop terminated early, cleanup. */
release_freepages(&freelist);
release_free_list(tmp_freepages);
return 0;
}
/* __isolate_free_page() does not map the pages */
split_map_pages(tmp_freepages);
/* We don't use freelists for anything. */
return pfn;
}
@@ -1518,7 +1533,7 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn)
if (!page)
return;
isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
isolate_freepages_block(cc, &start_pfn, end_pfn, cc->freepages, 1, false);
/* Skip this pageblock in the future as it's full or nearly full */
if (start_pfn == end_pfn && !cc->no_set_skip_hint)
@@ -1647,7 +1662,7 @@ static void fast_isolate_freepages(struct compact_control *cc)
nr_scanned += nr_isolated - 1;
total_isolated += nr_isolated;
cc->nr_freepages += nr_isolated;
list_add_tail(&page->lru, &cc->freepages);
list_add_tail(&page->lru, &cc->freepages[order]);
count_compact_events(COMPACTISOLATED, nr_isolated);
} else {
/* If isolation fails, abort the search */
@@ -1724,13 +1739,12 @@ static void isolate_freepages(struct compact_control *cc)
unsigned long isolate_start_pfn; /* exact pfn we start at */
unsigned long block_end_pfn; /* end of current pageblock */
unsigned long low_pfn; /* lowest pfn scanner is able to scan */
struct list_head *freelist = &cc->freepages;
unsigned int stride;
/* Try a small search of the free lists for a candidate */
fast_isolate_freepages(cc);
if (cc->nr_freepages)
goto splitmap;
return;
/*
* Initialise the free scanner. The starting point is where we last
@@ -1790,7 +1804,7 @@ static void isolate_freepages(struct compact_control *cc)
/* Found a block suitable for isolating free pages from. */
nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
block_end_pfn, freelist, stride, false);
block_end_pfn, cc->freepages, stride, false);
/* Update the skip hint if the full pageblock was scanned */
if (isolate_start_pfn == block_end_pfn)
@@ -1831,10 +1845,6 @@ static void isolate_freepages(struct compact_control *cc)
* and the loop terminated due to isolate_start_pfn < low_pfn
*/
cc->free_pfn = isolate_start_pfn;
splitmap:
/* __isolate_free_page() does not map the pages */
split_map_pages(freelist);
}
/*
@@ -1845,24 +1855,22 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data)
{
struct compact_control *cc = (struct compact_control *)data;
struct folio *dst;
int order = folio_order(src);
/* this makes migrate_pages() split the source page and retry */
if (folio_test_large(src))
return NULL;
if (list_empty(&cc->freepages)) {
if (list_empty(&cc->freepages[order])) {
isolate_freepages(cc);
if (list_empty(&cc->freepages))
if (list_empty(&cc->freepages[order]))
return NULL;
}
dst = list_entry(cc->freepages.next, struct folio, lru);
dst = list_first_entry(&cc->freepages[order], struct folio, lru);
list_del(&dst->lru);
cc->nr_freepages--;
cc->nr_migratepages--;
return dst;
post_alloc_hook(&dst->page, order, __GFP_MOVABLE);
if (order)
prep_compound_page(&dst->page, order);
cc->nr_freepages -= 1 << order;
cc->nr_migratepages -= 1 << order;
return page_rmappable_folio(&dst->page);
}
/*
@@ -1873,10 +1881,19 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data)
static void compaction_free(struct folio *dst, unsigned long data)
{
struct compact_control *cc = (struct compact_control *)data;
int order = folio_order(dst);
struct page *page = &dst->page;
list_add(&dst->lru, &cc->freepages);
cc->nr_freepages++;
cc->nr_migratepages++;
if (folio_put_testzero(dst)) {
free_pages_prepare(page, order);
list_add(&dst->lru, &cc->freepages[order]);
cc->nr_freepages += 1 << order;
}
cc->nr_migratepages += 1 << order;
/*
* someone else has referenced the page, we cannot take it back to our
* free list.
*/
}
/* possible outcome of isolate_migratepages */
@@ -2489,6 +2506,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
const bool sync = cc->mode != MIGRATE_ASYNC;
bool update_cached;
unsigned int nr_succeeded = 0, nr_migratepages;
int order;
/*
* These counters track activities during zone compaction. Initialize
@@ -2498,7 +2516,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
cc->total_free_scanned = 0;
cc->nr_migratepages = 0;
cc->nr_freepages = 0;
INIT_LIST_HEAD(&cc->freepages);
for (order = 0; order < NR_PAGE_ORDERS; order++)
INIT_LIST_HEAD(&cc->freepages[order]);
INIT_LIST_HEAD(&cc->migratepages);
cc->migratetype = gfp_migratetype(cc->gfp_mask);
@@ -2690,7 +2709,7 @@ out:
* so we don't leave any returned pages behind in the next attempt.
*/
if (cc->nr_freepages > 0) {
unsigned long free_pfn = release_freepages(&cc->freepages);
unsigned long free_pfn = release_free_list(cc->freepages);
cc->nr_freepages = 0;
VM_BUG_ON(free_pfn == 0);
@@ -2709,7 +2728,6 @@ out:
trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret);
VM_BUG_ON(!list_empty(&cc->freepages));
VM_BUG_ON(!list_empty(&cc->migratepages));
return ret;

mm/internal.h

@@ -447,6 +447,8 @@ extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);
extern int user_min_free_kbytes;
extern void free_unref_page(struct page *page, unsigned int order);
@@ -481,7 +483,7 @@ int split_free_page(struct page *free_page,
* completes when free_pfn <= migrate_pfn
*/
struct compact_control {
struct list_head freepages; /* List of free pages to migrate to */
struct list_head freepages[NR_PAGE_ORDERS]; /* List of free pages to migrate to */
struct list_head migratepages; /* List of pages being migrated */
unsigned int nr_freepages; /* Number of isolated free pages */
unsigned int nr_migratepages; /* Number of pages to migrate */

mm/page_alloc.c

@@ -1080,7 +1080,7 @@ static void kernel_init_pages(struct page *page, int numpages)
kasan_enable_current();
}
static __always_inline bool free_pages_prepare(struct page *page,
__always_inline bool free_pages_prepare(struct page *page,
unsigned int order)
{
int bad = 0;