mm/swap_slots.c: simplify alloc_swap_slot_cache()

Patch series "clean up some functions in mm/swap_slots.c".

When I studied the code of mm/swap_slots.c, I found a few places that can be
improved.

This patch (of 3):

Both "slots" and "slots_ret" are only need to be freed when cache already
allocated.  Make them closer, seems more clear.

No functional change.

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Tim Chen <tim.c.chen@linux.intel.com>
Link: http://lkml.kernel.org/r/20200430061143.450-1-thunder.leizhen@huawei.com
Link: http://lkml.kernel.org/r/20200430061143.450-2-thunder.leizhen@huawei.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
@@ -136,9 +136,16 @@ static int alloc_swap_slot_cache(unsigned int cpu)
 
 	mutex_lock(&swap_slots_cache_mutex);
 	cache = &per_cpu(swp_slots, cpu);
-	if (cache->slots || cache->slots_ret)
+	if (cache->slots || cache->slots_ret) {
 		/* cache already allocated */
-		goto out;
+		mutex_unlock(&swap_slots_cache_mutex);
+
+		kvfree(slots);
+		kvfree(slots_ret);
+
+		return 0;
+	}
+
 	if (!cache->lock_initialized) {
 		mutex_init(&cache->alloc_lock);
 		spin_lock_init(&cache->free_lock);
@@ -155,15 +162,8 @@ static int alloc_swap_slot_cache(unsigned int cpu)
 	 */
 	mb();
 	cache->slots = slots;
-	slots = NULL;
 	cache->slots_ret = slots_ret;
-	slots_ret = NULL;
-out:
 	mutex_unlock(&swap_slots_cache_mutex);
-	if (slots)
-		kvfree(slots);
-	if (slots_ret)
-		kvfree(slots_ret);
 	return 0;
 }
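
For readers less familiar with this file, the shape kept by this patch is:
allocate the buffers outside the mutex, then free them only in the "cache
already allocated" branch instead of NULL-ing the locals and falling through
to an out: label.  A minimal user-space sketch of that shape follows; the
names (struct slot_cache, cache_fill) are hypothetical, and pthread_mutex,
calloc and free stand in for the kernel mutex and kvzalloc/kvfree APIs.  It
is an illustration of the pattern, not the actual mm/swap_slots.c code.

	/* Hypothetical sketch of the "free only when already allocated" pattern. */
	#include <pthread.h>
	#include <stdlib.h>

	#define CACHE_SIZE 64

	struct slot_cache {
		pthread_mutex_t lock;
		int *slots;
		int *slots_ret;
	};

	static struct slot_cache cache = { .lock = PTHREAD_MUTEX_INITIALIZER };

	static int cache_fill(void)
	{
		/* Allocate outside the lock, mirroring the kernel code. */
		int *slots = calloc(CACHE_SIZE, sizeof(int));
		int *slots_ret = calloc(CACHE_SIZE, sizeof(int));

		if (!slots || !slots_ret) {
			free(slots);
			free(slots_ret);
			return -1;
		}

		pthread_mutex_lock(&cache.lock);
		if (cache.slots || cache.slots_ret) {
			/* cache already allocated: unlock, free, return early */
			pthread_mutex_unlock(&cache.lock);
			free(slots);
			free(slots_ret);
			return 0;
		}
		cache.slots = slots;
		cache.slots_ret = slots_ret;
		pthread_mutex_unlock(&cache.lock);
		return 0;
	}

	int main(void)
	{
		return cache_fill();
	}

The early unlock-free-return makes the ownership of "slots" and "slots_ret"
obvious at the point where they are given up, and removes the need for the
out: label and the NULL assignments on the success path.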