mm: zswap: move more same-filled pages checks outside of zswap_store()

Currently, zswap_store() checks zswap_same_filled_pages_enabled, kmaps the
folio, then calls zswap_is_page_same_filled() to check the folio contents.
Move this logic into zswap_is_page_same_filled() as well (and rename it
to use 'folio' while we are at it).

This makes zswap_store() cleaner, and makes following changes to that
logic contained within the helper.

While we are at it:
- Rename the insert_entry label to store_entry to match xa_store().
- Add comment headers for same-filled functions and the main API
  functions (load, store, invalidate, swapon, swapoff).

No functional change intended.

Link: https://lkml.kernel.org/r/20240413022407.785696-4-yosryahmed@google.com
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Maciej S. Szmigiero" <mail@maciej.szmigiero.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Yosry Ahmed 2024-04-13 02:24:06 +00:00 committed by Andrew Morton
parent 82e0f8e47b
commit e87b881489

View file

@@ -1383,26 +1383,35 @@ static void shrink_worker(struct work_struct *w)
 	} while (zswap_total_pages() > thr);
 }
 
-static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
+/*********************************
+* same-filled functions
+**********************************/
+static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
 {
 	unsigned long *page;
 	unsigned long val;
 	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
+	bool ret = false;
 
-	page = (unsigned long *)ptr;
+	if (!zswap_same_filled_pages_enabled)
+		return false;
+
+	page = kmap_local_folio(folio, 0);
 	val = page[0];
 
 	if (val != page[last_pos])
-		return 0;
+		goto out;
 
 	for (pos = 1; pos < last_pos; pos++) {
 		if (val != page[pos])
-			return 0;
+			goto out;
 	}
 
 	*value = val;
-
-	return 1;
+	ret = true;
+out:
+	kunmap_local(page);
+	return ret;
 }
 
 static void zswap_fill_page(void *ptr, unsigned long value)
@@ -1413,6 +1422,9 @@ static void zswap_fill_page(void *ptr, unsigned long value)
 	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
 }
 
+/*********************************
+* main API
+**********************************/
 bool zswap_store(struct folio *folio)
 {
 	swp_entry_t swp = folio->swap;
@@ -1421,6 +1433,7 @@ bool zswap_store(struct folio *folio)
 	struct zswap_entry *entry, *old;
 	struct obj_cgroup *objcg = NULL;
 	struct mem_cgroup *memcg = NULL;
+	unsigned long value;
 
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
@@ -1453,19 +1466,11 @@ bool zswap_store(struct folio *folio)
 		goto reject;
 	}
 
-	if (zswap_same_filled_pages_enabled) {
-		unsigned long value;
-		u8 *src;
-
-		src = kmap_local_folio(folio, 0);
-		if (zswap_is_page_same_filled(src, &value)) {
-			kunmap_local(src);
-			entry->length = 0;
-			entry->value = value;
-			atomic_inc(&zswap_same_filled_pages);
-			goto insert_entry;
-		}
-		kunmap_local(src);
+	if (zswap_is_folio_same_filled(folio, &value)) {
+		entry->length = 0;
+		entry->value = value;
+		atomic_inc(&zswap_same_filled_pages);
+		goto store_entry;
 	}
 
 	if (!zswap_non_same_filled_pages_enabled)
@@ -1488,7 +1493,7 @@ bool zswap_store(struct folio *folio)
 	if (!zswap_compress(folio, entry))
 		goto put_pool;
 
-insert_entry:
+store_entry:
 	entry->swpentry = swp;
 	entry->objcg = objcg;