linux-stable/arch/arm64/mm/mteswap.c
commit cfeed8ffe5 ("mm/swap: stop using page->private on tail pages for THP_SWAP")
Author: David Hildenbrand <david@redhat.com>
Patch series "mm/swap: stop using page->private on tail pages for THP_SWAP + cleanups".

This series stops using page->private on tail pages for THP_SWAP, replaces
folio->private by folio->swap for swapcache folios, and starts using
"new_folio" for tail pages that we are splitting to remove the usage of
page->private for swapcache handling completely.
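
The key structural change (a simplified sketch of struct folio after the
series; the real definition in mm_types.h has more members and layout
constraints) is that the head page's private field gains a typed alias
for the swapcache case, so only the folio ever stores a swap entry:

struct folio {
	/* ... */
	union {
		void *private;		/* owner-private data */
		swp_entry_t swap;	/* swap entry, if in swapcache */
	};
	/* ... */
};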


This patch (of 4):

Let's stop using page->private on tail pages, making it possible to just
unconditionally reuse that field in the tail pages of large folios.

The remaining usage of the private field for THP_SWAP is in the THP
splitting code (mm/huge_memory.c), which we'll handle separately later.

Update the THP_SWAP documentation and sanity checks in mm_types.h and
__split_huge_page_tail().
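
Per-subpage lookups then go through a small helper that computes a tail
page's swap entry from the folio's entry rather than reading
page->private (sketched here from mm/swap.h as introduced by this patch;
simplified):

static inline swp_entry_t page_swap_entry(struct page *page)
{
	struct folio *folio = page_folio(page);
	swp_entry_t entry = folio_swap_entry(folio);

	/* Swap entries of a large folio's pages are contiguous. */
	entry.val += folio_page_idx(folio, page);
	return entry;
}

This helper is what the arm64 code below relies on in mte_save_tags().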

[david@redhat.com: stop using page->private on tail pages for THP_SWAP]
  Link: https://lkml.kernel.org/r/6f0a82a3-6948-20d9-580b-be1dbf415701@redhat.com
Link: https://lkml.kernel.org/r/20230821160849.531668-1-david@redhat.com
Link: https://lkml.kernel.org/r/20230821160849.531668-2-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>	[arm64]
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Date:   2023-08-24 16:20:28 -07:00


// SPDX-License-Identifier: GPL-2.0-only

#include <linux/pagemap.h>
#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/mte.h>

/* Maps a swap entry value to the tag storage saved for that page. */
static DEFINE_XARRAY(mte_pages);

void *mte_allocate_tag_storage(void)
{
	/* tags granule is 16 bytes, 2 tags stored per byte */
	return kmalloc(MTE_PAGE_TAG_STORAGE, GFP_KERNEL);
}

void mte_free_tag_storage(char *storage)
{
	kfree(storage);
}

int mte_save_tags(struct page *page)
{
	void *tag_storage, *ret;

	if (!page_mte_tagged(page))
		return 0;

	tag_storage = mte_allocate_tag_storage();
	if (!tag_storage)
		return -ENOMEM;

	mte_save_page_tags(page_address(page), tag_storage);

	/* lookup the swap entry.val from the page */
	ret = xa_store(&mte_pages, page_swap_entry(page).val, tag_storage,
		       GFP_KERNEL);
	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
		mte_free_tag_storage(tag_storage);
		return xa_err(ret);
	} else if (ret) {
		/* Entry is being replaced, free the old entry */
		mte_free_tag_storage(ret);
	}

	return 0;
}

void mte_restore_tags(swp_entry_t entry, struct page *page)
{
	void *tags = xa_load(&mte_pages, entry.val);

	if (!tags)
		return;

	if (try_page_mte_tagging(page)) {
		mte_restore_page_tags(page_address(page), tags);
		set_page_mte_tagged(page);
	}
}

void mte_invalidate_tags(int type, pgoff_t offset)
{
	swp_entry_t entry = swp_entry(type, offset);
	void *tags = xa_erase(&mte_pages, entry.val);

	/* kfree(NULL) is a no-op, so a missing entry is harmless. */
	mte_free_tag_storage(tags);
}

void mte_invalidate_tags_area(int type)
{
	swp_entry_t entry = swp_entry(type, 0);
	swp_entry_t last_entry = swp_entry(type + 1, 0);
	void *tags;

	XA_STATE(xa_state, &mte_pages, entry.val);

	/* Walk all entries belonging to swap device 'type' and free them. */
	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, last_entry.val - 1) {
		__xa_erase(&mte_pages, xa_state.xa_index);
		mte_free_tag_storage(tags);
	}
	xa_unlock(&mte_pages);
}
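
For context, these helpers are not called directly by the swap core; arm64
exposes them through the generic arch swap hooks. A hedged sketch based on
arch/arm64/include/asm/pgtable.h around this kernel version (exact
signatures may differ across releases):

#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	/* Save tags before the page goes to swap (MTE systems only). */
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}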