mm: memcontrol: simplify lruvec_holds_page_lru_lock

We already have the helper lruvec_memcg() to get the memcg from a
lruvec, so there is no need to open-code that lookup in
lruvec_holds_page_lru_lock(); use lruvec_memcg() instead.  Also, when
mem_cgroup_disabled() returns false, page_memcg() of an LRU page cannot
be NULL, so the odd "memcg = page_memcg(page) ? : root_mem_cgroup"
fallback can be dropped.  Finally, use lruvec_pgdat() to simplify the
code.  With these changes a single definition of the function works for
!CONFIG_MEMCG, CONFIG_MEMCG + mem_cgroup_disabled(), and CONFIG_MEMCG.

Link: https://lkml.kernel.org/r/20210417043538.9793-5-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Roman Gushchin <guro@fb.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit f2e4d28dd9
parent a984226f45
Author: Muchun Song, 2021-06-28 19:37:56 -07:00
Committed by: Linus Torvalds
1 changed file with 7 additions and 24 deletions

include/linux/memcontrol.h
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h

@@ -755,22 +755,6 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
 	return mem_cgroup_lruvec(memcg, pgdat);
 }
 
-static inline bool lruvec_holds_page_lru_lock(struct page *page,
-					      struct lruvec *lruvec)
-{
-	pg_data_t *pgdat = page_pgdat(page);
-	const struct mem_cgroup *memcg;
-	struct mem_cgroup_per_node *mz;
-
-	if (mem_cgroup_disabled())
-		return lruvec == &pgdat->__lruvec;
-
-	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-	memcg = page_memcg(page) ? : root_mem_cgroup;
-
-	return lruvec->pgdat == pgdat && mz->memcg == memcg;
-}
-
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
@@ -1227,14 +1211,6 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
 	return &pgdat->__lruvec;
 }
 
-static inline bool lruvec_holds_page_lru_lock(struct page *page,
-					      struct lruvec *lruvec)
-{
-	pg_data_t *pgdat = page_pgdat(page);
-
-	return lruvec == &pgdat->__lruvec;
-}
-
 static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
 {
 }
@@ -1516,6 +1492,13 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
 	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 }
 
+static inline bool lruvec_holds_page_lru_lock(struct page *page,
+					      struct lruvec *lruvec)
+{
+	return lruvec_pgdat(lruvec) == page_pgdat(page) &&
+	       lruvec_memcg(lruvec) == page_memcg(page);
+}
+
 /* Don't lock again iff page's lruvec locked */
 static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
 						    struct lruvec *locked_lruvec)
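
For readers who want to see why the single definition is sufficient, here is
a minimal userspace sketch.  It is not the real kernel code: the struct
layouts and helper bodies are simplified stand-ins, and only the final
lruvec_holds_page_lru_lock() body is taken verbatim from the patch.  The
point it illustrates is that when memcg is disabled (or not compiled in),
both lruvec_memcg() and page_memcg() return NULL, so the new check
degenerates to the pgdat comparison that the old !CONFIG_MEMCG /
mem_cgroup_disabled() variants performed.

/*
 * Minimal userspace sketch, NOT the real kernel definitions: the struct
 * layouts and helper bodies below are simplified stand-ins used only to
 * show why one definition of lruvec_holds_page_lru_lock() is enough.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mem_cgroup { int id; };
struct pglist_data;

struct lruvec {
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;	/* NULL when memcg is disabled/absent */
};

struct pglist_data { struct lruvec __lruvec; };

struct page {
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;	/* NULL for uncharged pages */
};

/* Stand-ins for the real helpers used by the new definition. */
static struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) { return lruvec->pgdat; }
static struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) { return lruvec->memcg; }
static struct pglist_data *page_pgdat(struct page *page) { return page->pgdat; }
static struct mem_cgroup *page_memcg(struct page *page) { return page->memcg; }

/* The single definition added by the patch. */
static bool lruvec_holds_page_lru_lock(struct page *page, struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == page_pgdat(page) &&
	       lruvec_memcg(lruvec) == page_memcg(page);
}

int main(void)
{
	struct pglist_data node = { { NULL, NULL } };
	struct mem_cgroup cg = { 1 };

	node.__lruvec.pgdat = &node;

	/*
	 * memcg disabled (or !CONFIG_MEMCG): lruvec_memcg() and page_memcg()
	 * both return NULL, so the check reduces to the pgdat comparison --
	 * the same result as the old "lruvec == &pgdat->__lruvec" test.
	 */
	struct page uncharged = { &node, NULL };
	printf("disabled: %d\n", lruvec_holds_page_lru_lock(&uncharged, &node.__lruvec));

	/*
	 * memcg enabled: an LRU page is always charged, so page_memcg() is
	 * non-NULL and must match the memcg that owns the lruvec.
	 */
	struct lruvec memcg_lruvec = { &node, &cg };
	struct page charged = { &node, &cg };
	printf("enabled:  %d\n", lruvec_holds_page_lru_lock(&charged, &memcg_lruvec));

	return 0;
}

Built with any C99 compiler, both checks report 1, matching the behaviour of
the three per-configuration variants that the patch removes.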