mm: vmscan: harmonize writeback congestion tracking for nodes & memcgs

The current writeback congestion tracking has separate flags for kswapd
reclaim (node level) and cgroup limit reclaim (memcg-node level).  This is
unnecessarily complicated: the lruvec is an existing abstraction layer for
that node-memcg intersection.

Introduce lruvec->flags and LRUVEC_CONGESTED.  Then track that at the
reclaim root level, which is either the NUMA node for global reclaim, or
the cgroup-node intersection for cgroup reclaim.
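
In code terms, the resulting pattern looks roughly like the following sketch (simplified and condensed from the vmscan.c hunks below, not the patch text itself; all identifiers are the patch's own):

	/*
	 * Sketch: congestion is tracked on the lruvec of the reclaim root,
	 * i.e. root_mem_cgroup for global/kswapd reclaim (target memcg is
	 * NULL) or the target memcg for cgroup limit reclaim.
	 */
	struct lruvec *target_lruvec;

	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);

	/* all dirty pages scanned this cycle sit on a congested BDI */
	if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
		set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);

	/* direct reclaim stalls on it; it is cleared when reclaim finishes */
	if (test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
		wait_iff_congested(BLK_RW_ASYNC, HZ/10);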

Link: http://lkml.kernel.org/r/20191022144803.302233-9-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Johannes Weiner 2019-11-30 17:55:52 -08:00 committed by Linus Torvalds
parent 0f6a5cff43
commit 1b05117df7
3 changed files with 37 additions and 64 deletions

include/linux/memcontrol.h

@@ -132,9 +132,6 @@ struct mem_cgroup_per_node {
unsigned long usage_in_excess;/* Set to the value by which */
/* the soft limit is exceeded*/
bool on_tree;
bool congested; /* memcg has many dirty pages */
/* backed by a congested BDI */
struct mem_cgroup *memcg; /* Back pointer, we cannot */
/* use container_of */
};
@@ -403,6 +400,9 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
goto out;
}
if (!memcg)
memcg = root_mem_cgroup;
mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
lruvec = &mz->lruvec;
out:
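
With the !memcg fallback added above, mem_cgroup_lruvec(NULL, pgdat) now resolves to the node's root lruvec, so global reclaim and kswapd can use the same lookup as cgroup reclaim. A minimal illustration (mirroring the clear_pgdat_congested() change at the end of this patch):

	/* Global (kswapd) reclaim: a NULL memcg resolves to root_mem_cgroup */
	struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);

	clear_bit(LRUVEC_CONGESTED, &lruvec->flags);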

include/linux/mmzone.h

@@ -296,6 +296,12 @@ struct zone_reclaim_stat {
unsigned long recent_scanned[2];
};
enum lruvec_flags {
LRUVEC_CONGESTED, /* lruvec has many dirty pages
* backed by a congested BDI
*/
};
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;
@@ -303,6 +309,8 @@ struct lruvec {
atomic_long_t inactive_age;
/* Refaults at the time of last reclaim cycle */
unsigned long refaults;
/* Various lruvec state flags (enum lruvec_flags) */
unsigned long flags;
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
@@ -572,9 +580,6 @@ struct zone {
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
PGDAT_CONGESTED, /* pgdat has many dirty pages backed by
* a congested BDI
*/
PGDAT_DIRTY, /* reclaim scanning has recently found
* many dirty file pages at the tail
* of the LRU.

mm/vmscan.c

@@ -267,29 +267,6 @@ static bool writeback_throttling_sane(struct scan_control *sc)
#endif
return false;
}
static void set_memcg_congestion(pg_data_t *pgdat,
struct mem_cgroup *memcg,
bool congested)
{
struct mem_cgroup_per_node *mn;
if (!memcg)
return;
mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
WRITE_ONCE(mn->congested, congested);
}
static bool memcg_congested(pg_data_t *pgdat,
struct mem_cgroup *memcg)
{
struct mem_cgroup_per_node *mn;
mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
return READ_ONCE(mn->congested);
}
#else
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
@@ -309,18 +286,6 @@ static bool writeback_throttling_sane(struct scan_control *sc)
{
return true;
}
static inline void set_memcg_congestion(struct pglist_data *pgdat,
struct mem_cgroup *memcg, bool congested)
{
}
static inline bool memcg_congested(struct pglist_data *pgdat,
struct mem_cgroup *memcg)
{
return false;
}
#endif
/*
@@ -2716,12 +2681,6 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
return inactive_lru_pages > pages_for_compaction;
}
static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
{
return test_bit(PGDAT_CONGESTED, &pgdat->flags) ||
(memcg && memcg_congested(pgdat, memcg));
}
static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
{
struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
@@ -2783,10 +2742,12 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
{
struct reclaim_state *reclaim_state = current->reclaim_state;
struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
unsigned long nr_reclaimed, nr_scanned;
struct lruvec *target_lruvec;
bool reclaimable = false;
target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
again:
memset(&sc->nr, 0, sizeof(sc->nr));
@@ -2801,7 +2762,7 @@ again:
}
/* Record the subtree's reclaim efficiency */
vmpressure(sc->gfp_mask, target_memcg, true,
vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
sc->nr_scanned - nr_scanned,
sc->nr_reclaimed - nr_reclaimed);
@@ -2829,14 +2790,6 @@ again:
if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
set_bit(PGDAT_WRITEBACK, &pgdat->flags);
/*
* Tag a node as congested if all the dirty pages
* scanned were backed by a congested BDI and
* wait_iff_congested will stall.
*/
if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
set_bit(PGDAT_CONGESTED, &pgdat->flags);
/* Allow kswapd to start writing pages during reclaim.*/
if (sc->nr.unqueued_dirty == sc->nr.file_taken)
set_bit(PGDAT_DIRTY, &pgdat->flags);
@@ -2852,12 +2805,17 @@ again:
}
/*
* Tag a node/memcg as congested if all the dirty pages
* scanned were backed by a congested BDI and
* wait_iff_congested will stall.
*
* Legacy memcg will stall in page writeback so avoid forcibly
* stalling in wait_iff_congested().
*/
if (cgroup_reclaim(sc) && writeback_throttling_sane(sc) &&
if ((current_is_kswapd() ||
(cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
set_memcg_congestion(pgdat, target_memcg, true);
set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);
/*
* Stall direct reclaim for IO completions if underlying BDIs
@@ -2865,9 +2823,9 @@ again:
* starts encountering unqueued dirty pages or cycling through
* the LRU too quickly.
*/
if (!sc->hibernation_mode && !current_is_kswapd() &&
current_may_throttle() &&
pgdat_memcg_congested(pgdat, target_memcg))
if (!current_is_kswapd() && current_may_throttle() &&
!sc->hibernation_mode &&
test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
wait_iff_congested(BLK_RW_ASYNC, HZ/10);
if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
@@ -3081,8 +3039,16 @@ retry:
if (zone->zone_pgdat == last_pgdat)
continue;
last_pgdat = zone->zone_pgdat;
snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
set_memcg_congestion(last_pgdat, sc->target_mem_cgroup, false);
if (cgroup_reclaim(sc)) {
struct lruvec *lruvec;
lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
zone->zone_pgdat);
clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
}
}
delayacct_freepages_end();
@@ -3450,7 +3416,9 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
/* Clear pgdat state for congested, dirty or under writeback. */
static void clear_pgdat_congested(pg_data_t *pgdat)
{
clear_bit(PGDAT_CONGESTED, &pgdat->flags);
struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
clear_bit(PGDAT_DIRTY, &pgdat->flags);
clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
}