mm/zswap: change zswap_pool kref to percpu_ref

Every zswap entry takes a reference on its zswap_pool in zswap_store() and
drops it when the entry is freed.  Changing this refcount from a kref to a
percpu_ref scales much better on that hot path.

A percpu_ref does use a bit more memory, but that is acceptable for this
use case, since there is normally only a single zswap_pool in use.  The
performance gain is on the zswap_store()/zswap_load() hot paths.
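
For reference, this is the standard kref -> percpu_ref conversion pattern.
A minimal sketch follows; the "my_pool" names are illustrative stand-ins,
not the zswap symbols (zswap's real callback schedules release work rather
than freeing directly):

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_pool {
	struct percpu_ref ref;		/* was: struct kref kref; */
};

static void my_pool_empty(struct percpu_ref *ref)
{
	struct my_pool *pool = container_of(ref, struct my_pool, ref);

	kfree(pool);	/* last ref gone; zswap schedules release work here */
}

static int my_pool_init(struct my_pool *pool)
{
	/* ALLOW_REINIT permits percpu_ref_resurrect() after a kill */
	return percpu_ref_init(&pool->ref, my_pool_empty,
			       PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
}

/* hot path: a per-CPU increment instead of one contended shared atomic */
static bool my_pool_get(struct my_pool *pool)
{
	return percpu_ref_tryget(&pool->ref);	/* was: kref_get_unless_zero() */
}

static void my_pool_put(struct my_pool *pool)
{
	percpu_ref_put(&pool->ref);		/* was: kref_put() */
}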

Tested with a kernel build (32 threads) in tmpfs with memory.max=2GB,
zswap shrinker and writeback enabled, one 50GB swapfile, on a 128-CPU
x86-64 machine; the table below shows the average of 5 runs (seconds):

        mm-unstable  zswap-global-lru
real    63.20        63.12
user    1061.75      1062.95
sys     268.74       264.44

[chengming.zhou@linux.dev: fix zswap_pools_lock usages after changing to percpu_ref]
  Link: https://lkml.kernel.org/r/20240228154954.3028626-1-chengming.zhou@linux.dev
Link: https://lkml.kernel.org/r/20240210-zswap-global-lru-v3-2-200495333595@bytedance.com
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Authored by Chengming Zhou on 2024-02-16 08:55:05 +00:00; committed by Andrew Morton
parent bf9b7df23c
commit 94ace3fec8
1 changed file with 33 additions and 15 deletions

--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -173,7 +173,7 @@ struct crypto_acomp_ctx {
 struct zswap_pool {
 	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
 	struct crypto_acomp_ctx __percpu *acomp_ctx;
-	struct kref kref;
+	struct percpu_ref ref;
 	struct list_head list;
 	struct work_struct release_work;
 	struct hlist_node node;
@@ -305,6 +305,7 @@ static void zswap_update_total_size(void)
 /*********************************
 * pool functions
 **********************************/
+static void __zswap_pool_empty(struct percpu_ref *ref);
 
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
@@ -358,13 +359,18 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 	/* being the current pool takes 1 ref; this func expects the
 	 * caller to always add the new pool as the current pool
 	 */
-	kref_init(&pool->kref);
+	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
+			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+	if (ret)
+		goto ref_fail;
 	INIT_LIST_HEAD(&pool->list);
 
 	zswap_pool_debug("created", pool);
 
 	return pool;
 
+ref_fail:
+	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 error:
 	if (pool->acomp_ctx)
 		free_percpu(pool->acomp_ctx);
@@ -437,8 +443,9 @@ static void __zswap_pool_release(struct work_struct *work)
 
 	synchronize_rcu();
 
-	/* nobody should have been able to get a kref... */
-	WARN_ON(kref_get_unless_zero(&pool->kref));
+	/* nobody should have been able to get a ref... */
+	WARN_ON(!percpu_ref_is_zero(&pool->ref));
+	percpu_ref_exit(&pool->ref);
 
 	/* pool is now off zswap_pools list and has no references. */
 	zswap_pool_destroy(pool);
@@ -446,13 +453,13 @@ static void __zswap_pool_release(struct work_struct *work)
 
 static struct zswap_pool *zswap_pool_current(void);
 
-static void __zswap_pool_empty(struct kref *kref)
+static void __zswap_pool_empty(struct percpu_ref *ref)
 {
 	struct zswap_pool *pool;
 
-	pool = container_of(kref, typeof(*pool), kref);
+	pool = container_of(ref, typeof(*pool), ref);
 
-	spin_lock(&zswap_pools_lock);
+	spin_lock_bh(&zswap_pools_lock);
 
 	WARN_ON(pool == zswap_pool_current());
 
@@ -461,7 +468,7 @@ static void __zswap_pool_empty(struct kref *kref)
 	INIT_WORK(&pool->release_work, __zswap_pool_release);
 	schedule_work(&pool->release_work);
 
-	spin_unlock(&zswap_pools_lock);
+	spin_unlock_bh(&zswap_pools_lock);
 }
 
 static int __must_check zswap_pool_get(struct zswap_pool *pool)
@@ -469,12 +476,12 @@ static int __must_check zswap_pool_get(struct zswap_pool *pool)
 	if (!pool)
 		return 0;
 
-	return kref_get_unless_zero(&pool->kref);
+	return percpu_ref_tryget(&pool->ref);
 }
 
 static void zswap_pool_put(struct zswap_pool *pool)
 {
-	kref_put(&pool->kref, __zswap_pool_empty);
+	percpu_ref_put(&pool->ref);
 }
 
 static struct zswap_pool *__zswap_pool_current(void)
@@ -591,7 +598,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 		return -EINVAL;
 	}
 
-	spin_lock(&zswap_pools_lock);
+	spin_lock_bh(&zswap_pools_lock);
 
 	pool = zswap_pool_find_get(type, compressor);
 	if (pool) {
@@ -600,17 +607,28 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 		list_del_rcu(&pool->list);
 	}
 
-	spin_unlock(&zswap_pools_lock);
+	spin_unlock_bh(&zswap_pools_lock);
 
 	if (!pool)
 		pool = zswap_pool_create(type, compressor);
+	else {
+		/*
+		 * Restore the initial ref dropped by percpu_ref_kill()
+		 * when the pool was decommissioned and switch it again
+		 * to percpu mode.
+		 */
+		percpu_ref_resurrect(&pool->ref);
+
+		/* Drop the ref from zswap_pool_find_get(). */
+		zswap_pool_put(pool);
+	}
 
 	if (pool)
 		ret = param_set_charp(s, kp);
 	else
 		ret = -EINVAL;
 
-	spin_lock(&zswap_pools_lock);
+	spin_lock_bh(&zswap_pools_lock);
 
 	if (!ret) {
 		put_pool = zswap_pool_current();
@@ -625,7 +643,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 		put_pool = pool;
 	}
 
-	spin_unlock(&zswap_pools_lock);
+	spin_unlock_bh(&zswap_pools_lock);
 
 	if (!zswap_has_pool && !pool) {
 		/* if initial pool creation failed, and this pool creation also
@@ -642,7 +660,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 	 * or the new pool we failed to add
 	 */
 	if (put_pool)
-		zswap_pool_put(put_pool);
+		percpu_ref_kill(&put_pool->ref);
 
 	return ret;
 }
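
A sketch of how the kill/resurrect/exit calls in the hunks above pair up
over a pool's lifetime, continuing the illustrative "my_pool" example
from the commit message (these wrappers are not zswap symbols):

/* Retire the current pool: drop the initial ref and switch the counter
 * to atomic mode; my_pool_empty() fires once all readers drain.  This
 * is the role percpu_ref_kill() takes over from the old zswap_pool_put().
 */
static void my_pool_decommission(struct my_pool *pool)
{
	percpu_ref_kill(&pool->ref);
}

/* Revive a pool that was killed but not yet released, as
 * __zswap_param_set() now does when switching back to an outgoing pool:
 * restore the initial ref and return to percpu mode.  This is why the
 * ref is created with PERCPU_REF_ALLOW_REINIT.
 */
static void my_pool_recommission(struct my_pool *pool)
{
	percpu_ref_resurrect(&pool->ref);
}

/* Final teardown from release work, once the ref is confirmed zero. */
static void my_pool_release(struct my_pool *pool)
{
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);	/* free the per-CPU counter */
}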