mm: zswap: remove shrink from zpool interface

Now that all three zswap backends have removed their shrink code, it is
no longer necessary for the zpool interface to include shrink/writeback
endpoints.

Link: https://lkml.kernel.org/r/20230612093815.133504-6-cerasuolodomenico@gmail.com
Signed-off-by: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Nhat Pham <nphamcs@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Domenico Cerasuolo 2023-06-12 11:38:13 +02:00 committed by Andrew Morton
parent b3067742ae
commit 35499e2b79
6 changed files with 14 additions and 93 deletions

View File

@@ -14,10 +14,6 @@
struct zpool; struct zpool;
struct zpool_ops {
int (*evict)(struct zpool *pool, unsigned long handle);
};
/* /*
* Control how a handle is mapped. It will be ignored if the * Control how a handle is mapped. It will be ignored if the
* implementation does not support it. Its use is optional. * implementation does not support it. Its use is optional.
@@ -39,8 +35,7 @@ enum zpool_mapmode {
bool zpool_has_pool(char *type); bool zpool_has_pool(char *type);
struct zpool *zpool_create_pool(const char *type, const char *name, struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp);
gfp_t gfp, const struct zpool_ops *ops);
const char *zpool_get_type(struct zpool *pool); const char *zpool_get_type(struct zpool *pool);
@@ -53,9 +48,6 @@ int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
void zpool_free(struct zpool *pool, unsigned long handle); void zpool_free(struct zpool *pool, unsigned long handle);
int zpool_shrink(struct zpool *pool, unsigned int pages,
unsigned int *reclaimed);
void *zpool_map_handle(struct zpool *pool, unsigned long handle, void *zpool_map_handle(struct zpool *pool, unsigned long handle,
enum zpool_mapmode mm); enum zpool_mapmode mm);
@@ -72,7 +64,6 @@ u64 zpool_get_total_size(struct zpool *pool);
* @destroy: destroy a pool. * @destroy: destroy a pool.
* @malloc: allocate mem from a pool. * @malloc: allocate mem from a pool.
* @free: free mem from a pool. * @free: free mem from a pool.
* @shrink: shrink the pool.
* @sleep_mapped: whether zpool driver can sleep during map. * @sleep_mapped: whether zpool driver can sleep during map.
* @map: map a handle. * @map: map a handle.
* @unmap: unmap a handle. * @unmap: unmap a handle.
@@ -87,10 +78,7 @@ struct zpool_driver {
atomic_t refcount; atomic_t refcount;
struct list_head list; struct list_head list;
void *(*create)(const char *name, void *(*create)(const char *name, gfp_t gfp);
gfp_t gfp,
const struct zpool_ops *ops,
struct zpool *zpool);
void (*destroy)(void *pool); void (*destroy)(void *pool);
bool malloc_support_movable; bool malloc_support_movable;
@@ -98,9 +86,6 @@ struct zpool_driver {
unsigned long *handle); unsigned long *handle);
void (*free)(void *pool, unsigned long handle); void (*free)(void *pool, unsigned long handle);
int (*shrink)(void *pool, unsigned int pages,
unsigned int *reclaimed);
bool sleep_mapped; bool sleep_mapped;
void *(*map)(void *pool, unsigned long handle, void *(*map)(void *pool, unsigned long handle,
enum zpool_mapmode mm); enum zpool_mapmode mm);
@@ -113,7 +98,6 @@ void zpool_register_driver(struct zpool_driver *driver);
int zpool_unregister_driver(struct zpool_driver *driver); int zpool_unregister_driver(struct zpool_driver *driver);
bool zpool_evictable(struct zpool *pool);
bool zpool_can_sleep_mapped(struct zpool *pool); bool zpool_can_sleep_mapped(struct zpool *pool);
#endif #endif

View File

@@ -1364,9 +1364,7 @@ static const struct movable_operations z3fold_mops = {
* zpool * zpool
****************/ ****************/
static void *z3fold_zpool_create(const char *name, gfp_t gfp, static void *z3fold_zpool_create(const char *name, gfp_t gfp)
const struct zpool_ops *zpool_ops,
struct zpool *zpool)
{ {
return z3fold_create_pool(name, gfp); return z3fold_create_pool(name, gfp);
} }

View File

@@ -380,9 +380,7 @@ static u64 zbud_get_pool_size(struct zbud_pool *pool)
* zpool * zpool
****************/ ****************/
static void *zbud_zpool_create(const char *name, gfp_t gfp, static void *zbud_zpool_create(const char *name, gfp_t gfp)
const struct zpool_ops *zpool_ops,
struct zpool *zpool)
{ {
return zbud_create_pool(gfp); return zbud_create_pool(gfp);
} }

View File

@@ -133,7 +133,6 @@ EXPORT_SYMBOL(zpool_has_pool);
* @type: The type of the zpool to create (e.g. zbud, zsmalloc) * @type: The type of the zpool to create (e.g. zbud, zsmalloc)
* @name: The name of the zpool (e.g. zram0, zswap) * @name: The name of the zpool (e.g. zram0, zswap)
* @gfp: The GFP flags to use when allocating the pool. * @gfp: The GFP flags to use when allocating the pool.
* @ops: The optional ops callback.
* *
* This creates a new zpool of the specified type. The gfp flags will be * This creates a new zpool of the specified type. The gfp flags will be
* used when allocating memory, if the implementation supports it. If the * used when allocating memory, if the implementation supports it. If the
@@ -145,8 +144,7 @@ EXPORT_SYMBOL(zpool_has_pool);
* *
* Returns: New zpool on success, NULL on failure. * Returns: New zpool on success, NULL on failure.
*/ */
struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp)
const struct zpool_ops *ops)
{ {
struct zpool_driver *driver; struct zpool_driver *driver;
struct zpool *zpool; struct zpool *zpool;
@@ -173,7 +171,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
} }
zpool->driver = driver; zpool->driver = driver;
zpool->pool = driver->create(name, gfp, ops, zpool); zpool->pool = driver->create(name, gfp);
if (!zpool->pool) { if (!zpool->pool) {
pr_err("couldn't create %s pool\n", type); pr_err("couldn't create %s pool\n", type);
@@ -279,30 +277,6 @@ void zpool_free(struct zpool *zpool, unsigned long handle)
zpool->driver->free(zpool->pool, handle); zpool->driver->free(zpool->pool, handle);
} }
/**
* zpool_shrink() - Shrink the pool size
* @zpool: The zpool to shrink.
* @pages: The number of pages to shrink the pool.
* @reclaimed: The number of pages successfully evicted.
*
* This attempts to shrink the actual memory size of the pool
* by evicting currently used handle(s). If the pool was
* created with no zpool_ops, or the evict call fails for any
* of the handles, this will fail. If non-NULL, the @reclaimed
* parameter will be set to the number of pages reclaimed,
* which may be more than the number of pages requested.
*
* Implementations must guarantee this to be thread-safe.
*
* Returns: 0 on success, negative value on error/failure.
*/
int zpool_shrink(struct zpool *zpool, unsigned int pages,
unsigned int *reclaimed)
{
return zpool->driver->shrink ?
zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL;
}
/** /**
* zpool_map_handle() - Map a previously allocated handle into memory * zpool_map_handle() - Map a previously allocated handle into memory
* @zpool: The zpool that the handle was allocated from * @zpool: The zpool that the handle was allocated from
@@ -359,24 +333,6 @@ u64 zpool_get_total_size(struct zpool *zpool)
return zpool->driver->total_size(zpool->pool); return zpool->driver->total_size(zpool->pool);
} }
/**
* zpool_evictable() - Test if zpool is potentially evictable
* @zpool: The zpool to test
*
* Zpool is only potentially evictable when it's created with struct
* zpool_ops.evict and its driver implements struct zpool_driver.shrink.
*
* However, it doesn't necessarily mean driver will use zpool_ops.evict
* in its implementation of zpool_driver.shrink. It could do internal
* defragmentation instead.
*
* Returns: true if potentially evictable; false otherwise.
*/
bool zpool_evictable(struct zpool *zpool)
{
return zpool->driver->shrink;
}
/** /**
* zpool_can_sleep_mapped - Test if zpool can sleep when do mapped. * zpool_can_sleep_mapped - Test if zpool can sleep when do mapped.
* @zpool: The zpool to test * @zpool: The zpool to test

View File

@@ -351,9 +351,7 @@ static void record_obj(unsigned long handle, unsigned long obj)
#ifdef CONFIG_ZPOOL #ifdef CONFIG_ZPOOL
static void *zs_zpool_create(const char *name, gfp_t gfp, static void *zs_zpool_create(const char *name, gfp_t gfp)
const struct zpool_ops *zpool_ops,
struct zpool *zpool)
{ {
/* /*
* Ignore global gfp flags: zs_malloc() may be invoked from * Ignore global gfp flags: zs_malloc() may be invoked from

View File

@@ -258,10 +258,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool); static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool); static void zswap_pool_put(struct zswap_pool *pool);
static const struct zpool_ops zswap_zpool_ops = {
.evict = zswap_writeback_entry
};
static bool zswap_is_full(void) static bool zswap_is_full(void)
{ {
return totalram_pages() * zswap_max_pool_percent / 100 < return totalram_pages() * zswap_max_pool_percent / 100 <
@@ -379,12 +375,9 @@ static void zswap_free_entry(struct zswap_entry *entry)
if (!entry->length) if (!entry->length)
atomic_dec(&zswap_same_filled_pages); atomic_dec(&zswap_same_filled_pages);
else { else {
/* zpool_evictable will be removed once all 3 backends have migrated */ spin_lock(&entry->pool->lru_lock);
if (!zpool_evictable(entry->pool->zpool)) { list_del(&entry->lru);
spin_lock(&entry->pool->lru_lock); spin_unlock(&entry->pool->lru_lock);
list_del(&entry->lru);
spin_unlock(&entry->pool->lru_lock);
}
zpool_free(entry->pool->zpool, entry->handle); zpool_free(entry->pool->zpool, entry->handle);
zswap_pool_put(entry->pool); zswap_pool_put(entry->pool);
} }
@@ -665,12 +658,8 @@ static void shrink_worker(struct work_struct *w)
shrink_work); shrink_work);
int ret, failures = 0; int ret, failures = 0;
/* zpool_evictable will be removed once all 3 backends have migrated */
do { do {
if (zpool_evictable(pool->zpool)) ret = zswap_reclaim_entry(pool);
ret = zpool_shrink(pool->zpool, 1, NULL);
else
ret = zswap_reclaim_entry(pool);
if (ret) { if (ret) {
zswap_reject_reclaim_fail++; zswap_reject_reclaim_fail++;
if (ret != -EAGAIN) if (ret != -EAGAIN)
@@ -708,7 +697,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
/* unique name for each pool specifically required by zsmalloc */ /* unique name for each pool specifically required by zsmalloc */
snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count)); snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops); pool->zpool = zpool_create_pool(type, name, gfp);
if (!pool->zpool) { if (!pool->zpool) {
pr_err("%s zpool not available\n", type); pr_err("%s zpool not available\n", type);
goto error; goto error;
@@ -1394,8 +1383,7 @@ insert_entry:
zswap_entry_put(tree, dupentry); zswap_entry_put(tree, dupentry);
} }
} while (ret == -EEXIST); } while (ret == -EEXIST);
/* zpool_evictable will be removed once all 3 backends have migrated */ if (entry->length) {
if (entry->length && !zpool_evictable(entry->pool->zpool)) {
spin_lock(&entry->pool->lru_lock); spin_lock(&entry->pool->lru_lock);
list_add(&entry->lru, &entry->pool->lru); list_add(&entry->lru, &entry->pool->lru);
spin_unlock(&entry->pool->lru_lock); spin_unlock(&entry->pool->lru_lock);
@@ -1514,8 +1502,7 @@ freeentry:
if (!ret && zswap_exclusive_loads_enabled) { if (!ret && zswap_exclusive_loads_enabled) {
zswap_invalidate_entry(tree, entry); zswap_invalidate_entry(tree, entry);
*exclusive = true; *exclusive = true;
} else if (entry->length && !zpool_evictable(entry->pool->zpool)) { } else if (entry->length) {
/* zpool_evictable will be removed once all 3 backends have migrated */
spin_lock(&entry->pool->lru_lock); spin_lock(&entry->pool->lru_lock);
list_move(&entry->lru, &entry->pool->lru); list_move(&entry->lru, &entry->pool->lru);
spin_unlock(&entry->pool->lru_lock); spin_unlock(&entry->pool->lru_lock);