mm/page_alloc: explicitly define what alloc flags deplete min reserves

As there are now multiple ALLOC_ flags that affect the reserves, explicitly
define which flags deplete the min reserves and clarify the effect of each flag.

Link: https://lkml.kernel.org/r/20230113111217.14134-5-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: NeilBrown <neilb@suse.de>
Cc: Thierry Reding <thierry.reding@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit ab35088543 (parent eb2e2b425c)
Mel Gorman, 2023-01-13 11:12:15 +00:00, committed by Andrew Morton
2 changed files with 26 additions and 13 deletions

mm/internal.h

@@ -792,6 +792,9 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
 
+/* Flags that allow allocations below the min watermark. */
+#define ALLOC_RESERVES (ALLOC_HARDER|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
+
 enum ttu_flags;
 struct tlbflush_unmap_batch;
 
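
To make the new classification concrete, here is a minimal userspace model of
the mask, not part of the patch. The values of ALLOC_OOM, ALLOC_HARDER and
ALLOC_MIN_RESERVE below are illustrative placeholders (only the 0x200 and
0x800 values appear in this hunk); the composition of ALLOC_RESERVES mirrors
the patch.

#include <stdbool.h>
#include <stdio.h>

#define ALLOC_OOM         0x08  /* illustrative value */
#define ALLOC_HARDER      0x10  /* illustrative value */
#define ALLOC_MIN_RESERVE 0x20  /* illustrative value */
#define ALLOC_HIGHATOMIC  0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_KSWAPD      0x800 /* allow waking of kswapd */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_HARDER|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)

/* Hypothetical helper for this sketch: does this allocation context
 * have the right to dip below the min watermark at all? */
static bool may_dip_below_min(unsigned int alloc_flags)
{
	return alloc_flags & ALLOC_RESERVES;
}

int main(void)
{
	printf("%d\n", may_dip_below_min(ALLOC_KSWAPD));      /* 0: waking kswapd grants no reserves */
	printf("%d\n", may_dip_below_min(ALLOC_MIN_RESERVE)); /* 1: __GFP_HIGH may go below min */
	return 0;
}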

mm/page_alloc.c

@@ -3967,15 +3967,14 @@ ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
 static inline long __zone_watermark_unusable_free(struct zone *z,
 		unsigned int order, unsigned int alloc_flags)
 {
-	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
 	long unusable_free = (1 << order) - 1;
 
 	/*
-	 * If the caller does not have rights to ALLOC_HARDER then subtract
-	 * the high-atomic reserves. This will over-estimate the size of the
-	 * atomic reserve but it avoids a search.
+	 * If the caller does not have rights to reserves below the min
+	 * watermark then subtract the high-atomic reserves. This will
+	 * over-estimate the size of the atomic reserve but it avoids a search.
 	 */
-	if (likely(!alloc_harder))
+	if (likely(!(alloc_flags & ALLOC_RESERVES)))
 		unusable_free += z->nr_reserved_highatomic;
 
 #ifdef CONFIG_CMA
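
The same mask drives the unusable-free estimate above. A hedged sketch of the
rewritten helper, with nr_reserved_highatomic passed as a plain parameter
instead of read from struct zone, and the CONFIG_CMA branch omitted:

/* Reuses the illustrative ALLOC_* values from the earlier sketch. */
#define ALLOC_RESERVES (0x10|0x20|0x200|0x08) /* HARDER|MIN_RESERVE|HIGHATOMIC|OOM */

long unusable_free_model(unsigned int order, unsigned int alloc_flags,
			 long nr_reserved_highatomic)
{
	/* An order-N request needs 2^N pages; charge the watermark
	 * comparison for all of them beyond the first page. */
	long unusable_free = (1 << order) - 1;

	/* Callers without rights to the reserves must also leave the
	 * high-atomic reserve untouched. Counting the whole reserve as
	 * unusable over-estimates it, but avoids searching for the
	 * exact amount. */
	if (!(alloc_flags & ALLOC_RESERVES))
		unusable_free += nr_reserved_highatomic;

	return unusable_free;
}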
@@ -3999,25 +3998,36 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 {
 	long min = mark;
 	int o;
-	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
 
 	/* free_pages may go negative - that's OK */
 	free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
 
-	if (alloc_flags & ALLOC_MIN_RESERVE)
-		min -= min / 2;
-
-	if (unlikely(alloc_harder)) {
+	if (unlikely(alloc_flags & ALLOC_RESERVES)) {
 		/*
-		 * OOM victims can try even harder than normal ALLOC_HARDER
+		 * __GFP_HIGH allows access to 50% of the min reserve as well
+		 * as OOM.
+		 */
+		if (alloc_flags & ALLOC_MIN_RESERVE)
+			min -= min / 2;
+
+		/*
+		 * Non-blocking allocations can access some of the reserve
+		 * with more access if also __GFP_HIGH. The reasoning is that
+		 * a non-blocking caller may incur a more severe penalty
+		 * if it cannot get memory quickly, particularly if it's
+		 * also __GFP_HIGH.
+		 */
+		if (alloc_flags & ALLOC_HARDER)
+			min -= min / 4;
+
+		/*
+		 * OOM victims can try even harder than the normal reserve
 		 * users on the grounds that it's definitely going to be in
 		 * the exit path shortly and free memory. Any allocation it
 		 * makes during the free path will be small and short-lived.
 		 */
 		if (alloc_flags & ALLOC_OOM)
 			min -= min / 2;
-		else
-			min -= min / 4;
 	}
 
 	/*
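
Because the three deductions now apply one after another to an already-reduced
min, the combined effect is easy to misread. A small userspace model of the
new arithmetic, not kernel code, with the same illustrative flag values as
above:

#include <stdio.h>

#define ALLOC_OOM         0x08  /* illustrative value */
#define ALLOC_HARDER      0x10  /* illustrative value */
#define ALLOC_MIN_RESERVE 0x20  /* illustrative value */
#define ALLOC_RESERVES (ALLOC_HARDER|ALLOC_MIN_RESERVE|ALLOC_OOM)

/* Model of the min-watermark depletion introduced by this hunk. Each
 * step divides the min that remains after the previous step, so
 * ALLOC_MIN_RESERVE|ALLOC_HARDER leaves 3/8 of min, not 1/4. */
static long effective_min(long min, unsigned int alloc_flags)
{
	if (alloc_flags & ALLOC_RESERVES) {
		if (alloc_flags & ALLOC_MIN_RESERVE)
			min -= min / 2; /* __GFP_HIGH: 50% of the reserve */
		if (alloc_flags & ALLOC_HARDER)
			min -= min / 4; /* non-blocking: a further 25% */
		if (alloc_flags & ALLOC_OOM)
			min -= min / 2; /* OOM victim: a further 50% */
	}
	return min;
}

int main(void)
{
	long min = 1024;

	printf("plain:            %ld\n", effective_min(min, 0));                               /* 1024 */
	printf("__GFP_HIGH:       %ld\n", effective_min(min, ALLOC_MIN_RESERVE));               /* 512 */
	printf("non-blocking:     %ld\n", effective_min(min, ALLOC_HARDER));                    /* 768 */
	printf("high + non-block: %ld\n", effective_min(min, ALLOC_MIN_RESERVE|ALLOC_HARDER));  /* 384 */
	printf("OOM victim:       %ld\n", effective_min(min, ALLOC_OOM));                       /* 512 */
	return 0;
}

Note that ALLOC_HIGHATOMIC is part of ALLOC_RESERVES but takes no deduction
here; its benefit comes from __zone_watermark_unusable_free() no longer
counting the high-atomic reserve against the caller.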