From f90eae2a0f8ec43081c2d590004da9dd55a64791 Mon Sep 17 00:00:00 2001
From: Zhen Lei
Date: Thu, 6 Aug 2020 23:20:05 -0700
Subject: [PATCH] mm/swap_slots.c: simplify alloc_swap_slot_cache()

Patch series "clean up some functions in mm/swap_slots.c".

When I studied the code of mm/swap_slots.c, I found some places that can
be improved.

This patch (of 3):

Both "slots" and "slots_ret" only need to be freed when the cache has
already been allocated.  Move their cleanup next to that check, which
makes the logic clearer.

No functional change.

Signed-off-by: Zhen Lei
Signed-off-by: Andrew Morton
Acked-by: Tim Chen
Link: http://lkml.kernel.org/r/20200430061143.450-1-thunder.leizhen@huawei.com
Link: http://lkml.kernel.org/r/20200430061143.450-2-thunder.leizhen@huawei.com
Signed-off-by: Linus Torvalds
---
 mm/swap_slots.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 0975adc72253..01609b5f0c55 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -136,9 +136,16 @@ static int alloc_swap_slot_cache(unsigned int cpu)
 
 	mutex_lock(&swap_slots_cache_mutex);
 	cache = &per_cpu(swp_slots, cpu);
-	if (cache->slots || cache->slots_ret)
+	if (cache->slots || cache->slots_ret) {
 		/* cache already allocated */
-		goto out;
+		mutex_unlock(&swap_slots_cache_mutex);
+
+		kvfree(slots);
+		kvfree(slots_ret);
+
+		return 0;
+	}
+
 	if (!cache->lock_initialized) {
 		mutex_init(&cache->alloc_lock);
 		spin_lock_init(&cache->free_lock);
@@ -155,15 +162,8 @@ static int alloc_swap_slot_cache(unsigned int cpu)
 	 */
 	mb();
 	cache->slots = slots;
-	slots = NULL;
 	cache->slots_ret = slots_ret;
-	slots_ret = NULL;
-out:
 	mutex_unlock(&swap_slots_cache_mutex);
-	if (slots)
-		kvfree(slots);
-	if (slots_ret)
-		kvfree(slots_ret);
 
 	return 0;
 }
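
For reference, the resulting pattern is "allocate before taking the lock,
free on the already-populated path": if another CPU raced in and populated
the per-cpu cache first, the function drops the mutex, frees its own
buffers and returns.  The early-return path can call kvfree()
unconditionally because both allocations are known to have succeeded by
the time the lock is taken, so the old "out" label and the NULL-ing of the
local pointers are no longer needed.  The following is a minimal userspace
sketch of that shape, not the kernel code itself: pthread_mutex_t and
calloc()/free() stand in for swap_slots_cache_mutex and
kvcalloc()/kvfree(), and struct slot_cache / cache_alloc() are names
invented for this illustration.

/*
 * Userspace sketch of the control flow after this patch (illustrative
 * only; types and names here are not the kernel's).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_SIZE 64

struct slot_cache {
	pthread_mutex_t lock;
	unsigned long *slots;
	unsigned long *slots_ret;
};

static int cache_alloc(struct slot_cache *cache)
{
	unsigned long *slots, *slots_ret;

	/* Allocate outside the lock, as alloc_swap_slot_cache() does. */
	slots = calloc(CACHE_SIZE, sizeof(*slots));
	if (!slots)
		return -1;

	slots_ret = calloc(CACHE_SIZE, sizeof(*slots_ret));
	if (!slots_ret) {
		free(slots);
		return -1;
	}

	pthread_mutex_lock(&cache->lock);
	if (cache->slots || cache->slots_ret) {
		/* cache already allocated: unlock, free our buffers, done */
		pthread_mutex_unlock(&cache->lock);

		free(slots);
		free(slots_ret);

		return 0;
	}

	cache->slots = slots;
	cache->slots_ret = slots_ret;
	pthread_mutex_unlock(&cache->lock);

	return 0;
}

int main(void)
{
	struct slot_cache cache = { .lock = PTHREAD_MUTEX_INITIALIZER };

	/* The second call hits the early-return path and frees its buffers. */
	if (cache_alloc(&cache) || cache_alloc(&cache))
		return 1;

	printf("cache populated once, duplicate allocation released\n");
	free(cache.slots);
	free(cache.slots_ret);
	return 0;
}

Compared with the old "goto out" version, the local pointers never have to
be set to NULL just so a shared cleanup path can tell whether to free them.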