zswap: only save zswap header when necessary

We waste sizeof(swp_entry_t) for zswap header when using zsmalloc as
zpool driver because zsmalloc doesn't support eviction.

Add zpool_evictable() to detect if zpool is potentially evictable, and
use it in zswap to avoid wasting memory on the zswap header.

[yuzhao@google.com: the "zpool->" prefix is a result of copy & paste]
  Link: http://lkml.kernel.org/r/20180110225626.110330-1-yuzhao@google.com
Link: http://lkml.kernel.org/r/20180110224741.83751-1-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Acked-by: Dan Streetman <ddstreet@ieee.org>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Yu Zhao 2018-01-31 16:19:59 -08:00 committed by Linus Torvalds
parent a7ab400d6f
commit 9c3760eb80
4 changed files with 35 additions and 19 deletions

View File

@ -108,4 +108,6 @@ void zpool_register_driver(struct zpool_driver *driver);
int zpool_unregister_driver(struct zpool_driver *driver); int zpool_unregister_driver(struct zpool_driver *driver);
bool zpool_evictable(struct zpool *pool);
#endif #endif

View File

@ -21,6 +21,7 @@ struct zpool {
struct zpool_driver *driver; struct zpool_driver *driver;
void *pool; void *pool;
const struct zpool_ops *ops; const struct zpool_ops *ops;
bool evictable;
struct list_head list; struct list_head list;
}; };
@ -142,7 +143,7 @@ EXPORT_SYMBOL(zpool_has_pool);
* *
* This creates a new zpool of the specified type. The gfp flags will be * This creates a new zpool of the specified type. The gfp flags will be
* used when allocating memory, if the implementation supports it. If the * used when allocating memory, if the implementation supports it. If the
* ops param is NULL, then the created zpool will not be shrinkable. * ops param is NULL, then the created zpool will not be evictable.
* *
* Implementations must guarantee this to be thread-safe. * Implementations must guarantee this to be thread-safe.
* *
@ -180,6 +181,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
zpool->driver = driver; zpool->driver = driver;
zpool->pool = driver->create(name, gfp, ops, zpool); zpool->pool = driver->create(name, gfp, ops, zpool);
zpool->ops = ops; zpool->ops = ops;
zpool->evictable = driver->shrink && ops && ops->evict;
if (!zpool->pool) { if (!zpool->pool) {
pr_err("couldn't create %s pool\n", type); pr_err("couldn't create %s pool\n", type);
@ -296,7 +298,8 @@ void zpool_free(struct zpool *zpool, unsigned long handle)
int zpool_shrink(struct zpool *zpool, unsigned int pages, int zpool_shrink(struct zpool *zpool, unsigned int pages,
unsigned int *reclaimed) unsigned int *reclaimed)
{ {
return zpool->driver->shrink(zpool->pool, pages, reclaimed); return zpool->driver->shrink ?
zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL;
} }
/** /**
@ -355,6 +358,24 @@ u64 zpool_get_total_size(struct zpool *zpool)
return zpool->driver->total_size(zpool->pool); return zpool->driver->total_size(zpool->pool);
} }
/**
* zpool_evictable() - Test if zpool is potentially evictable
 * @zpool:	The zpool to test
*
* Zpool is only potentially evictable when it's created with struct
* zpool_ops.evict and its driver implements struct zpool_driver.shrink.
*
* However, it doesn't necessarily mean driver will use zpool_ops.evict
* in its implementation of zpool_driver.shrink. It could do internal
* defragmentation instead.
*
* Returns: true if potentially evictable; false otherwise.
*/
bool zpool_evictable(struct zpool *zpool)
{
return zpool->evictable;
}
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("Common API for compressed memory storage"); MODULE_DESCRIPTION("Common API for compressed memory storage");

View File

@ -404,12 +404,6 @@ static void zs_zpool_free(void *pool, unsigned long handle)
zs_free(pool, handle); zs_free(pool, handle);
} }
static int zs_zpool_shrink(void *pool, unsigned int pages,
unsigned int *reclaimed)
{
return -EINVAL;
}
static void *zs_zpool_map(void *pool, unsigned long handle, static void *zs_zpool_map(void *pool, unsigned long handle,
enum zpool_mapmode mm) enum zpool_mapmode mm)
{ {
@ -447,7 +441,6 @@ static struct zpool_driver zs_zpool_driver = {
.destroy = zs_zpool_destroy, .destroy = zs_zpool_destroy,
.malloc = zs_zpool_malloc, .malloc = zs_zpool_malloc,
.free = zs_zpool_free, .free = zs_zpool_free,
.shrink = zs_zpool_shrink,
.map = zs_zpool_map, .map = zs_zpool_map,
.unmap = zs_zpool_unmap, .unmap = zs_zpool_unmap,
.total_size = zs_zpool_total_size, .total_size = zs_zpool_total_size,

View File

@ -1001,11 +1001,11 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
struct zswap_entry *entry, *dupentry; struct zswap_entry *entry, *dupentry;
struct crypto_comp *tfm; struct crypto_comp *tfm;
int ret; int ret;
unsigned int dlen = PAGE_SIZE, len; unsigned int hlen, dlen = PAGE_SIZE;
unsigned long handle, value; unsigned long handle, value;
char *buf; char *buf;
u8 *src, *dst; u8 *src, *dst;
struct zswap_header *zhdr; struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
if (!zswap_enabled || !tree) { if (!zswap_enabled || !tree) {
ret = -ENODEV; ret = -ENODEV;
@ -1063,8 +1063,8 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
} }
/* store */ /* store */
len = dlen + sizeof(struct zswap_header); hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
ret = zpool_malloc(entry->pool->zpool, len, ret = zpool_malloc(entry->pool->zpool, hlen + dlen,
__GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM, __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
&handle); &handle);
if (ret == -ENOSPC) { if (ret == -ENOSPC) {
@ -1075,10 +1075,9 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
zswap_reject_alloc_fail++; zswap_reject_alloc_fail++;
goto put_dstmem; goto put_dstmem;
} }
zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW); buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
zhdr->swpentry = swp_entry(type, offset); memcpy(buf, &zhdr, hlen);
buf = (u8 *)(zhdr + 1); memcpy(buf + hlen, dst, dlen);
memcpy(buf, dst, dlen);
zpool_unmap_handle(entry->pool->zpool, handle); zpool_unmap_handle(entry->pool->zpool, handle);
put_cpu_var(zswap_dstmem); put_cpu_var(zswap_dstmem);
@ -1149,8 +1148,9 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
/* decompress */ /* decompress */
dlen = PAGE_SIZE; dlen = PAGE_SIZE;
src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle, src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
ZPOOL_MM_RO) + sizeof(struct zswap_header); if (zpool_evictable(entry->pool->zpool))
src += sizeof(struct zswap_header);
dst = kmap_atomic(page); dst = kmap_atomic(page);
tfm = *get_cpu_ptr(entry->pool->tfm); tfm = *get_cpu_ptr(entry->pool->tfm);
ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen); ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);