mm/page_alloc: explicitly record high-order atomic allocations in alloc_flags

A high-order ALLOC_HARDER allocation is assumed to be atomic.  While that
is accurate, it changes later in the series.  In preparation, explicitly
record high-order atomic allocations in gfp_to_alloc_flags().
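
For context, this is the kind of request being tagged (an illustrative kernel-style fragment, not part of the patch): GFP_ATOMIC cannot sleep, and at order > 0 it is exactly a high-order atomic allocation, so gfp_to_alloc_flags() will now set ALLOC_HIGHATOMIC for it.

	/* Illustrative only: an order-2 (4-page) atomic allocation. */
	struct page *page = alloc_pages(GFP_ATOMIC, 2);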

Link: https://lkml.kernel.org/r/20230113111217.14134-4-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: NeilBrown <neilb@suse.de>
Cc: Thierry Reding <thierry.reding@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

mm/internal.h

@@ -789,6 +789,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 #else
 #define ALLOC_NOFRAGMENT	0x0
 #endif
+#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
 
 enum ttu_flags;
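
As a sanity check on the chosen bit, a standalone C11 sketch (the ALLOC_NOFRAGMENT value below is the non-zero variant from the surrounding header and is an assumption here, as is the check itself):

#include <assert.h>

#define ALLOC_NOFRAGMENT	0x100	/* assumed: non-zero variant from mm/internal.h */
#define ALLOC_HIGHATOMIC	0x200	/* new flag from this patch */
#define ALLOC_KSWAPD		0x800	/* from the context line above */

static_assert((ALLOC_HIGHATOMIC & (ALLOC_NOFRAGMENT | ALLOC_KSWAPD)) == 0,
	      "ALLOC_HIGHATOMIC must occupy its own bit");

int main(void) { return 0; }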

mm/page_alloc.c

@@ -3724,10 +3724,20 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
		 * reserved for high-order atomic allocation, so order-0
		 * request should skip it.
		 */
-		if (order > 0 && alloc_flags & ALLOC_HARDER)
+		if (alloc_flags & ALLOC_HIGHATOMIC)
			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
		if (!page) {
			page = __rmqueue(zone, order, migratetype, alloc_flags);
+
+			/*
+			 * If the allocation fails, allow OOM handling access
+			 * to HIGHATOMIC reserves as failing now is worse than
+			 * failing a high-order atomic allocation in the
+			 * future.
+			 */
+			if (!page && (alloc_flags & ALLOC_OOM))
+				page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
+
			if (!page) {
				spin_unlock_irqrestore(&zone->lock, flags);
				return NULL;
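
A compilable userspace model of the resulting attempt order in rmqueue_buddy() (the helper names and the ALLOC_OOM value are made up for illustration; the real code holds the zone lock and retries):

#include <stdio.h>

#define ALLOC_HIGHATOMIC	0x200
#define ALLOC_OOM		0x8	/* assumed value */

struct page { const char *source; };

static struct page highatomic_page = { "MIGRATE_HIGHATOMIC reserve" };
static struct page regular_page = { "regular free lists" };
static int freelists_empty;	/* toggle to simulate memory pressure */

static struct page *take_from_highatomic(unsigned int order)
{
	(void)order;
	return &highatomic_page;
}

static struct page *take_from_freelists(unsigned int order)
{
	(void)order;
	return freelists_empty ? NULL : &regular_page;
}

/* Models the fallback sequence after this patch. */
static struct page *model_rmqueue_buddy(unsigned int order, unsigned int alloc_flags)
{
	struct page *page = NULL;

	/* 1. Flagged high-order atomic requests try the reserve first. */
	if (alloc_flags & ALLOC_HIGHATOMIC)
		page = take_from_highatomic(order);
	if (!page) {
		/* 2. Everyone falls back to the regular free lists. */
		page = take_from_freelists(order);
		/* 3. New: OOM handling may raid the reserve as a last resort. */
		if (!page && (alloc_flags & ALLOC_OOM))
			page = take_from_highatomic(order);
	}
	return page;
}

int main(void)
{
	freelists_empty = 1;
	struct page *page = model_rmqueue_buddy(1, ALLOC_OOM);
	printf("OOM path: %s\n", page ? page->source : "allocation failed");
	return 0;
}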
@@ -4041,8 +4051,10 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			return true;
		}
 #endif
-		if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
+		if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
+		    !free_area_empty(area, MIGRATE_HIGHATOMIC)) {
			return true;
+		}
	}
	return false;
 }
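
The rewritten predicate reads as: only requests that may legitimately consume the reserve (explicitly recorded high-order atomics, or OOM victims) get credit for it in the high-order watermark check. A minimal model, with the flag values assumed as above:

#include <stdbool.h>

#define ALLOC_HIGHATOMIC	0x200
#define ALLOC_OOM		0x8	/* assumed value */

/* May a non-empty MIGRATE_HIGHATOMIC free area satisfy this request's
 * high-order watermark check? */
static bool highatomic_counts(unsigned int alloc_flags, bool area_empty)
{
	return (alloc_flags & (ALLOC_HIGHATOMIC | ALLOC_OOM)) && !area_empty;
}

int main(void)
{
	/* An ordinary request (neither flag set) gets no credit. */
	return highatomic_counts(0, false) ? 1 : 0;
}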
@@ -4304,7 +4316,7 @@ try_this_zone:
			 * If this is a high-order atomic allocation then check
			 * if the pageblock should be reserved for the future
			 */
-			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
+			if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
				reserve_highatomic_pageblock(page, zone, order);
 
			return page;
@@ -4831,7 +4843,7 @@ static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
 }
 
 static inline unsigned int
-gfp_to_alloc_flags(gfp_t gfp_mask)
+gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
 {
	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 
@@ -4857,8 +4869,13 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
		 * if it can't schedule.
		 */
-		if (!(gfp_mask & __GFP_NOMEMALLOC))
+		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
			alloc_flags |= ALLOC_HARDER;
+
+			if (order > 0)
+				alloc_flags |= ALLOC_HIGHATOMIC;
+		}
+
		/*
		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
		 * comment for __cpuset_node_allowed().
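
Putting the gfp_to_alloc_flags() change together, a standalone model (the gfp bit values and ALLOC_HARDER's value are stand-ins, not the kernel's):

#include <stdio.h>

#define ALLOC_HARDER		0x10	/* assumed value */
#define ALLOC_HIGHATOMIC	0x200

#define __GFP_ATOMIC		0x1	/* simplified stand-in gfp bits */
#define __GFP_NOMEMALLOC	0x2

/* Models gfp_to_alloc_flags(gfp_mask, order) after this patch. */
static unsigned int model_gfp_to_alloc_flags(unsigned int gfp_mask, unsigned int order)
{
	unsigned int alloc_flags = 0;

	if ((gfp_mask & __GFP_ATOMIC) && !(gfp_mask & __GFP_NOMEMALLOC)) {
		alloc_flags |= ALLOC_HARDER;
		/* New: record high-order atomic allocations explicitly. */
		if (order > 0)
			alloc_flags |= ALLOC_HIGHATOMIC;
	}
	return alloc_flags;
}

int main(void)
{
	printf("order-0 atomic: %#x\n", model_gfp_to_alloc_flags(__GFP_ATOMIC, 0));	/* 0x10 */
	printf("order-2 atomic: %#x\n", model_gfp_to_alloc_flags(__GFP_ATOMIC, 2));	/* 0x210 */
	return 0;
}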
@@ -5066,7 +5083,7 @@ restart:
	 * kswapd needs to be woken up, and to avoid the cost of setting up
	 * alloc_flags precisely. So we do that now.
	 */
-	alloc_flags = gfp_to_alloc_flags(gfp_mask);
+	alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
 
	/*
	 * We need to recalculate the starting point for the zonelist iterator