mm: zswap: kill zswap_get_swap_cache_page()

The __read_swap_cache_async() interface isn't more difficult to understand
than what the helper abstracts.  Save the indirection and a level of
indentation for the primary work of the writeback function.

Link: https://lkml.kernel.org/r/20230727162343.1415598-4-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: Barry Song <song.bao.hua@hisilicon.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
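
For reference, the helper's three return codes collapse onto the two
outputs of __read_swap_cache_async(): the returned page pointer and the
page_was_allocated flag. A minimal sketch of the mapping, for
illustration only (not part of the patch):

	page = __read_swap_cache_async(entry, GFP_KERNEL, NULL, 0,
				       &page_was_allocated);
	if (!page)			/* was ZSWAP_SWAPCACHE_FAIL */
		return -ENOMEM;		/* no memory, or invalidate raced */
	if (!page_was_allocated) {	/* was ZSWAP_SWAPCACHE_EXIST */
		put_page(page);		/* lost race with load/swapin */
		return -EEXIST;
	}
	/* was ZSWAP_SWAPCACHE_NEW: new page, locked, added to swapcache */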

@@ -1039,43 +1039,6 @@ static int zswap_enabled_param_set(const char *val,
 /*********************************
 * writeback code
 **********************************/
-/* return enum for zswap_get_swap_cache_page */
-enum zswap_get_swap_ret {
-	ZSWAP_SWAPCACHE_NEW,
-	ZSWAP_SWAPCACHE_EXIST,
-	ZSWAP_SWAPCACHE_FAIL,
-};
-
-/*
- * zswap_get_swap_cache_page
- *
- * This is an adaption of read_swap_cache_async()
- *
- * This function tries to find a page with the given swap entry
- * in the swapper_space address space (the swap cache). If the page
- * is found, it is returned in retpage. Otherwise, a page is allocated,
- * added to the swap cache, and returned in retpage.
- *
- * If success, the swap cache page is returned in retpage
- * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
- * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
- *	the new page is added to swapcache and locked
- * Returns ZSWAP_SWAPCACHE_FAIL on error
- */
-static int zswap_get_swap_cache_page(swp_entry_t entry,
-				struct page **retpage)
-{
-	bool page_was_allocated;
-
-	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
-			NULL, 0, &page_was_allocated);
-	if (page_was_allocated)
-		return ZSWAP_SWAPCACHE_NEW;
-	if (!*retpage)
-		return ZSWAP_SWAPCACHE_FAIL;
-	return ZSWAP_SWAPCACHE_EXIST;
-}
-
 /*
  * Attempts to free an entry by adding a page to the swap cache,
  * decompressing the entry data into the page, and issuing a
@@ -1096,7 +1059,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
 	struct zpool *pool = zswap_find_zpool(entry);
-
+	bool page_was_allocated;
 	u8 *src, *tmp = NULL;
 	unsigned int dlen;
 	int ret;
@@ -1111,66 +1074,67 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	}
 
 	/* try to allocate swap cache page */
-	switch (zswap_get_swap_cache_page(swpentry, &page)) {
-	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
+	page = __read_swap_cache_async(swpentry, GFP_KERNEL, NULL, 0,
+				       &page_was_allocated);
+	if (!page) {
 		ret = -ENOMEM;
 		goto fail;
+	}
 
-	case ZSWAP_SWAPCACHE_EXIST:
-		/* page is already in the swap cache, ignore for now */
+	/* Found an existing page, we raced with load/swapin */
+	if (!page_was_allocated) {
 		put_page(page);
 		ret = -EEXIST;
 		goto fail;
+	}
 
-	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
-		/*
-		 * Having a local reference to the zswap entry doesn't exclude
-		 * swapping from invalidating and recycling the swap slot. Once
-		 * the swapcache is secured against concurrent swapping to and
-		 * from the slot, recheck that the entry is still current before
-		 * writing.
-		 */
-		spin_lock(&tree->lock);
-		if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
-			spin_unlock(&tree->lock);
-			delete_from_swap_cache(page_folio(page));
-			ret = -ENOMEM;
-			goto fail;
-		}
-		spin_unlock(&tree->lock);
+	/*
+	 * Page is locked, and the swapcache is now secured against
+	 * concurrent swapping to and from the slot. Verify that the
+	 * swap entry hasn't been invalidated and recycled behind our
+	 * backs (our zswap_entry reference doesn't prevent that), to
+	 * avoid overwriting a new swap page with old compressed data.
+	 */
+	spin_lock(&tree->lock);
+	if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
+		spin_unlock(&tree->lock);
+		delete_from_swap_cache(page_folio(page));
+		ret = -ENOMEM;
+		goto fail;
+	}
+	spin_unlock(&tree->lock);
 
-		/* decompress */
-		acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
-		dlen = PAGE_SIZE;
+	/* decompress */
+	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+	dlen = PAGE_SIZE;
 
-		src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
-		if (!zpool_can_sleep_mapped(pool)) {
-			memcpy(tmp, src, entry->length);
-			src = tmp;
-			zpool_unmap_handle(pool, entry->handle);
-		}
+	src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
+	if (!zpool_can_sleep_mapped(pool)) {
+		memcpy(tmp, src, entry->length);
+		src = tmp;
+		zpool_unmap_handle(pool, entry->handle);
+	}
 
-		mutex_lock(acomp_ctx->mutex);
-		sg_init_one(&input, src, entry->length);
-		sg_init_table(&output, 1);
-		sg_set_page(&output, page, PAGE_SIZE, 0);
-		acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
-		ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
-		dlen = acomp_ctx->req->dlen;
-		mutex_unlock(acomp_ctx->mutex);
+	mutex_lock(acomp_ctx->mutex);
+	sg_init_one(&input, src, entry->length);
+	sg_init_table(&output, 1);
+	sg_set_page(&output, page, PAGE_SIZE, 0);
+	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
+	ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
+	dlen = acomp_ctx->req->dlen;
+	mutex_unlock(acomp_ctx->mutex);
 
-		if (!zpool_can_sleep_mapped(pool))
-			kfree(tmp);
-		else
-			zpool_unmap_handle(pool, entry->handle);
+	if (!zpool_can_sleep_mapped(pool))
+		kfree(tmp);
+	else
+		zpool_unmap_handle(pool, entry->handle);
 
-		BUG_ON(ret);
-		BUG_ON(dlen != PAGE_SIZE);
+	BUG_ON(ret);
+	BUG_ON(dlen != PAGE_SIZE);
 
-		/* page is up to date */
-		SetPageUptodate(page);
-	}
+	/* page is up to date */
+	SetPageUptodate(page);
 
 	/* move it to the tail of the inactive list after end_writeback */
 	SetPageReclaim(page);
@@ -1180,16 +1144,16 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	zswap_written_back_pages++;
 
 	return ret;
+
 fail:
 	if (!zpool_can_sleep_mapped(pool))
 		kfree(tmp);
 
 	/*
-	* if we get here due to ZSWAP_SWAPCACHE_EXIST
-	* a load may be happening concurrently.
-	* it is safe and okay to not free the entry.
-	* it is also okay to return !0
-	*/
+	 * If we get here because the page is already in swapcache, a
+	 * load may be happening concurrently. It is safe and okay to
+	 * not free the entry. It is also okay to return !0.
+	 */
 	return ret;
 }
 