memcg: don't uncharge in mem_cgroup_move_account()

Now, all callers pass 'false' for 'bool uncharge' so remove this argument.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Glauber Costa <glommer@parallels.com>
Reviewed-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
KAMEZAWA Hiroyuki 2012-05-29 15:07:04 -07:00 committed by Linus Torvalds
parent cc926f7842
commit 2f3479b147
1 changed file with 6 additions and 14 deletions

View File

@@ -2592,23 +2592,19 @@ void mem_cgroup_split_huge_fixup(struct page *head)
* @pc: page_cgroup of the page. * @pc: page_cgroup of the page.
* @from: mem_cgroup which the page is moved from. * @from: mem_cgroup which the page is moved from.
* @to: mem_cgroup which the page is moved to. @from != @to. * @to: mem_cgroup which the page is moved to. @from != @to.
* @uncharge: whether we should call uncharge and css_put against @from.
* *
* The caller must confirm following. * The caller must confirm following.
* - page is not on LRU (isolate_page() is useful.) * - page is not on LRU (isolate_page() is useful.)
* - compound_lock is held when nr_pages > 1 * - compound_lock is held when nr_pages > 1
* *
* This function doesn't do "charge" nor css_get to new cgroup. It should be * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
* done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is * from old cgroup.
* true, this function does "uncharge" from old cgroup, but it doesn't if
* @uncharge is false, so a caller should do "uncharge".
*/ */
static int mem_cgroup_move_account(struct page *page, static int mem_cgroup_move_account(struct page *page,
unsigned int nr_pages, unsigned int nr_pages,
struct page_cgroup *pc, struct page_cgroup *pc,
struct mem_cgroup *from, struct mem_cgroup *from,
struct mem_cgroup *to, struct mem_cgroup *to)
bool uncharge)
{ {
unsigned long flags; unsigned long flags;
int ret; int ret;
@@ -2642,9 +2638,6 @@ static int mem_cgroup_move_account(struct page *page,
preempt_enable(); preempt_enable();
} }
mem_cgroup_charge_statistics(from, anon, -nr_pages); mem_cgroup_charge_statistics(from, anon, -nr_pages);
if (uncharge)
/* This is not "cancel", but cancel_charge does all we need. */
__mem_cgroup_cancel_charge(from, nr_pages);
/* caller should have done css_get */ /* caller should have done css_get */
pc->mem_cgroup = to; pc->mem_cgroup = to;
@@ -2706,7 +2699,7 @@ static int mem_cgroup_move_parent(struct page *page,
flags = compound_lock_irqsave(page); flags = compound_lock_irqsave(page);
ret = mem_cgroup_move_account(page, nr_pages, ret = mem_cgroup_move_account(page, nr_pages,
pc, child, parent, false); pc, child, parent);
if (!ret) if (!ret)
__mem_cgroup_cancel_local_charge(child, nr_pages); __mem_cgroup_cancel_local_charge(child, nr_pages);
@@ -5474,8 +5467,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
if (!isolate_lru_page(page)) { if (!isolate_lru_page(page)) {
pc = lookup_page_cgroup(page); pc = lookup_page_cgroup(page);
if (!mem_cgroup_move_account(page, HPAGE_PMD_NR, if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
pc, mc.from, mc.to, pc, mc.from, mc.to)) {
false)) {
mc.precharge -= HPAGE_PMD_NR; mc.precharge -= HPAGE_PMD_NR;
mc.moved_charge += HPAGE_PMD_NR; mc.moved_charge += HPAGE_PMD_NR;
} }
@@ -5505,7 +5497,7 @@ retry:
goto put; goto put;
pc = lookup_page_cgroup(page); pc = lookup_page_cgroup(page);
if (!mem_cgroup_move_account(page, 1, pc, if (!mem_cgroup_move_account(page, 1, pc,
mc.from, mc.to, false)) { mc.from, mc.to)) {
mc.precharge--; mc.precharge--;
/* we uncharge from mc.from later. */ /* we uncharge from mc.from later. */
mc.moved_charge++; mc.moved_charge++;