mm: memcg/slab: cache page number in memcg_(un)charge_slab()

There are many places in memcg_charge_slab() and memcg_uncharge_slab()
which calculate the number of pages to charge, css references to grab,
etc., depending on the order of the slab page.

Let's simplify the code by calculating this number once and caching it
in a local variable.
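
For illustration, the repeated pattern and its replacement look roughly
like this (a simplified sketch drawn from the hunks below, not a
verbatim excerpt):

	/* before: the page count is recomputed from the order at each use */
	mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
	percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
	css_put_many(&memcg->css, 1 << order);

	/* after: compute it once and reuse the cached value */
	unsigned int nr_pages = 1 << order;

	mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages);
	percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
	css_put_many(&memcg->css, nr_pages);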

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Link: http://lkml.kernel.org/r/20200109202659.752357-6-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 9c315e4d7d
parent 92d0510c35
Author:     Roman Gushchin <guro@fb.com>
AuthorDate: 2020-04-01 21:06:53 -0700
Commit:     Linus Torvalds <torvalds@linux-foundation.org>

--- a/mm/slab.h
+++ b/mm/slab.h
@@ -348,6 +348,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
 					     gfp_t gfp, int order,
 					     struct kmem_cache *s)
 {
+	unsigned int nr_pages = 1 << order;
 	struct mem_cgroup *memcg;
 	struct lruvec *lruvec;
 	int ret;
@@ -360,21 +361,21 @@ static __always_inline int memcg_charge_slab(struct page *page,
 	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
-				    (1 << order));
-		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
+				    nr_pages);
+		percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
 		return 0;
 	}
 
-	ret = memcg_kmem_charge_memcg(memcg, gfp, 1 << order);
+	ret = memcg_kmem_charge_memcg(memcg, gfp, nr_pages);
 	if (ret)
 		goto out;
 
 	lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
-	mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
+	mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages);
 
 	/* transfer try_charge() page references to kmem_cache */
-	percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
-	css_put_many(&memcg->css, 1 << order);
+	percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
+	css_put_many(&memcg->css, nr_pages);
 out:
 	css_put(&memcg->css);
 	return ret;
@@ -387,6 +388,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 						struct kmem_cache *s)
 {
+	unsigned int nr_pages = 1 << order;
 	struct mem_cgroup *memcg;
 	struct lruvec *lruvec;
@@ -394,15 +396,15 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 	memcg = READ_ONCE(s->memcg_params.memcg);
 	if (likely(!mem_cgroup_is_root(memcg))) {
 		lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
-		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
-		memcg_kmem_uncharge_memcg(memcg, order);
+		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -nr_pages);
+		memcg_kmem_uncharge_memcg(memcg, nr_pages);
 	} else {
 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
-				    -(1 << order));
+				    -nr_pages);
 	}
 	rcu_read_unlock();
 
-	percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
+	percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
 }
 
 extern void slab_init_memcg_params(struct kmem_cache *);
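
For readability, the uncharge path after this change reads roughly as
follows. This is a sketch assembled from the hunks above; the lines
between the hunks (e.g. rcu_read_lock()) are filled in from context, so
treat it as illustrative rather than a verbatim copy of mm/slab.h:

	static __always_inline void memcg_uncharge_slab(struct page *page, int order,
							struct kmem_cache *s)
	{
		unsigned int nr_pages = 1 << order;	/* pages in this slab page */
		struct mem_cgroup *memcg;
		struct lruvec *lruvec;

		rcu_read_lock();
		memcg = READ_ONCE(s->memcg_params.memcg);
		if (likely(!mem_cgroup_is_root(memcg))) {
			/* charged to a real memcg: update its stats and uncharge it */
			lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
			mod_lruvec_state(lruvec, cache_vmstat_idx(s), -nr_pages);
			memcg_kmem_uncharge_memcg(memcg, nr_pages);
		} else {
			/* root memcg: only the node-level vmstat counter changes */
			mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
					    -nr_pages);
		}
		rcu_read_unlock();

		percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
	}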