mm: compaction: remove compaction result helpers

Patch series "mm: compaction: cleanups & simplifications".

These compaction cleanups are split out from the huge page allocator
series[1], in response to reviewer feedback.

[1] https://lore.kernel.org/linux-mm/20230418191313.268131-1-hannes@cmpxchg.org/


This patch (of 5):

The compaction result helpers encode quirks that are specific to the
allocator's retry logic.  E.g.  COMPACT_SUCCESS and COMPACT_COMPLETE
actually represent allocation failures that the caller may want to
retry, and so on.  I frequently found myself pulling up the helper
implementations in order to understand and work on the retry logic.
They're not quite clean abstractions; rather, they split the retry
logic across two locations.

Remove the helpers and inline the checks.  Then comment on the result
interpretations directly where the decision making happens.
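
To make the decision table easier to see at a glance, here is a minimal,
self-contained userspace sketch of the retry choices that
should_compact_retry() now makes directly on the raw compact_result
values.  The enum is redefined locally and the strings are only
descriptive, so none of this is kernel code or part of the patch itself.

/*
 * Standalone sketch only -- not kernel code and not part of this patch.
 * The enum is redefined locally (values and order are illustrative) so
 * the example compiles on its own.
 */
#include <stdio.h>

enum compact_result {
	COMPACT_SKIPPED,		/* too few free order-0 pages to work with */
	COMPACT_DEFERRED,		/* backed off after recent failures */
	COMPACT_CONTENDED,		/* async compaction hit lock contention */
	COMPACT_PARTIAL_SKIPPED,	/* scanners met before covering full zones */
	COMPACT_COMPLETE,		/* full scan, nothing suitable coalesced */
	COMPACT_SUCCESS,		/* progress made, but the allocation raced */
};

/* Mirrors the decisions should_compact_retry() makes on the raw result */
static const char *retry_decision(enum compact_result result)
{
	switch (result) {
	case COMPACT_SUCCESS:
		return "count one more retry and try the allocation again";
	case COMPACT_COMPLETE:
		return "give up unless a higher compaction priority is left";
	case COMPACT_SKIPPED:
		return "retry only if reclaim can free up order-0 pages";
	case COMPACT_DEFERRED:
	case COMPACT_CONTENDED:
	case COMPACT_PARTIAL_SKIPPED:
		return "retry with increased compaction priority";
	}
	return "unknown result";
}

int main(void)
{
	printf("COMPACT_SUCCESS  -> %s\n", retry_decision(COMPACT_SUCCESS));
	printf("COMPACT_SKIPPED  -> %s\n", retry_decision(COMPACT_SKIPPED));
	printf("COMPACT_DEFERRED -> %s\n", retry_decision(COMPACT_DEFERRED));
	return 0;
}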

Link: https://lkml.kernel.org/r/20230519123959.77335-1-hannes@cmpxchg.org
Link: https://lkml.kernel.org/r/20230519123959.77335-2-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit ecd8b2928f (parent 62069aace1)
Johannes Weiner, 2023-05-19 14:39:55 +02:00, committed by Andrew Morton
3 changed files with 19 additions and 107 deletions

include/linux/compaction.h

@@ -95,78 +95,6 @@ extern enum compact_result compaction_suitable(struct zone *zone, int order,
extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
/* Compaction has made some progress and retrying makes sense */
static inline bool compaction_made_progress(enum compact_result result)
{
/*
* Even though this might sound confusing this in fact tells us
* that the compaction successfully isolated and migrated some
* pageblocks.
*/
if (result == COMPACT_SUCCESS)
return true;
return false;
}
/* Compaction has failed and it doesn't make much sense to keep retrying. */
static inline bool compaction_failed(enum compact_result result)
{
/* All zones were scanned completely and still not result. */
if (result == COMPACT_COMPLETE)
return true;
return false;
}
/* Compaction needs reclaim to be performed first, so it can continue. */
static inline bool compaction_needs_reclaim(enum compact_result result)
{
/*
* Compaction backed off due to watermark checks for order-0
* so the regular reclaim has to try harder and reclaim something.
*/
if (result == COMPACT_SKIPPED)
return true;
return false;
}
/*
* Compaction has backed off for some reason after doing some work or none
* at all. It might be throttling or lock contention. Retrying might be still
* worthwhile, but with a higher priority if allowed.
*/
static inline bool compaction_withdrawn(enum compact_result result)
{
/*
* If compaction is deferred for high-order allocations, it is
* because sync compaction recently failed. If this is the case
* and the caller requested a THP allocation, we do not want
* to heavily disrupt the system, so we fail the allocation
* instead of entering direct reclaim.
*/
if (result == COMPACT_DEFERRED)
return true;
/*
* If compaction in async mode encounters contention or blocks higher
* priority task we back off early rather than cause stalls.
*/
if (result == COMPACT_CONTENDED)
return true;
/*
* Page scanners have met but we haven't scanned full zones so this
* is a back off in fact.
*/
if (result == COMPACT_PARTIAL_SKIPPED)
return true;
return false;
}
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
int alloc_flags);
@@ -185,26 +113,6 @@ static inline enum compact_result compaction_suitable(struct zone *zone, int ord
return COMPACT_SKIPPED;
}
static inline bool compaction_made_progress(enum compact_result result)
{
return false;
}
static inline bool compaction_failed(enum compact_result result)
{
return false;
}
static inline bool compaction_needs_reclaim(enum compact_result result)
{
return false;
}
static inline bool compaction_withdrawn(enum compact_result result)
{
return true;
}
static inline void kcompactd_run(int nid)
{
}

include/trace/events/mmflags.h

@@ -223,8 +223,8 @@ IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
#define compact_result_to_feedback(result) \
({ \
enum compact_result __result = result; \
(compaction_failed(__result)) ? COMPACTION_FAILED : \
(compaction_withdrawn(__result)) ? COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \
(__result == COMPACT_COMPLETE) ? COMPACTION_FAILED : \
(__result == COMPACT_SUCCESS) ? COMPACTION_PROGRESS : COMPACTION_WITHDRAWN; \
})
#define COMPACTION_FEEDBACK \

mm/page_alloc.c

@@ -3469,35 +3469,39 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
if (fatal_signal_pending(current))
return false;
if (compaction_made_progress(compact_result))
/*
* Compaction managed to coalesce some page blocks, but the
* allocation failed presumably due to a race. Retry some.
*/
if (compact_result == COMPACT_SUCCESS)
(*compaction_retries)++;
/*
* compaction considers all the zone as desperately out of memory
* so it doesn't really make much sense to retry except when the
* All zones were scanned completely and still no result. It
* doesn't really make much sense to retry except when the
* failure could be caused by insufficient priority
*/
if (compaction_failed(compact_result))
if (compact_result == COMPACT_COMPLETE)
goto check_priority;
/*
* compaction was skipped because there are not enough order-0 pages
* to work with, so we retry only if it looks like reclaim can help.
* Compaction was skipped due to a lack of free order-0
* migration targets. Continue if reclaim can help.
*/
if (compaction_needs_reclaim(compact_result)) {
if (compact_result == COMPACT_SKIPPED) {
ret = compaction_zonelist_suitable(ac, order, alloc_flags);
goto out;
}
/*
* make sure the compaction wasn't deferred or didn't bail out early
* due to locks contention before we declare that we should give up.
* But the next retry should use a higher priority if allowed, so
* we don't just keep bailing out endlessly.
* If compaction backed due to being deferred, due to
* contended locks in async mode, or due to scanners meeting
* after a partial scan, retry with increased priority.
*/
if (compaction_withdrawn(compact_result)) {
if (compact_result == COMPACT_DEFERRED ||
compact_result == COMPACT_CONTENDED ||
compact_result == COMPACT_PARTIAL_SKIPPED)
goto check_priority;
}
/*
* !costly requests are much more important than __GFP_RETRY_MAYFAIL