mm/secretmem: use a folio in secretmem_fault()

Saves four implicit calls to compound_head().
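
To make the saving concrete, below is a minimal userspace model of the
pattern; the types and helpers are simplified stand-ins for illustration,
not the kernel's real definitions. Every page-based helper such as
put_page() must first resolve its argument to a folio via an implicit
compound_head() lookup, while a folio-based helper skips that step
because the caller already holds the folio. In this patch, the four
page-based calls replaced (the two put_page() error paths,
__SetPageUptodate(), and add_to_page_cache_lru()) each perform that
lookup internally.

#include <stdio.h>

/* Simplified stand-ins for the kernel types, for illustration only. */
struct page {
	unsigned long compound_head;	/* tail pages: head pointer | 1 */
};

struct folio {
	struct page page;		/* a folio is a head page */
};

/* Model of compound_head(): map a (possibly tail) page to its head. */
static struct page *compound_head(struct page *page)
{
	if (page->compound_head & 1)
		return (struct page *)(page->compound_head - 1);
	return page;
}

/* Model of page_folio(): the lookup every page-based helper pays for. */
static struct folio *page_folio(struct page *page)
{
	return (struct folio *)compound_head(page);
}

/* Page API: must resolve the folio first. */
static void put_page(struct page *page)
{
	struct folio *folio = page_folio(page);	/* implicit compound_head() */
	printf("put folio %p\n", (void *)folio);
}

/* Folio API: no lookup, the caller already has the folio. */
static void folio_put(struct folio *folio)
{
	printf("put folio %p\n", (void *)folio);
}

int main(void)
{
	struct folio folio = { .page = { .compound_head = 0 } };

	put_page(&folio.page);	/* pays for one compound_head() */
	folio_put(&folio);	/* pays for none */
	return 0;
}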

Link: https://lkml.kernel.org/r/20230812062612.3184990-1-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 7e2fca52ef (parent 1b6754fea4)

 mm/secretmem.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -55,6 +55,7 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
 	gfp_t gfp = vmf->gfp_mask;
 	unsigned long addr;
 	struct page *page;
+	struct folio *folio;
 	vm_fault_t ret;
 	int err;
 
@@ -66,23 +67,24 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
 retry:
 	page = find_lock_page(mapping, offset);
 	if (!page) {
-		page = alloc_page(gfp | __GFP_ZERO);
-		if (!page) {
+		folio = folio_alloc(gfp | __GFP_ZERO, 0);
+		if (!folio) {
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
 
+		page = &folio->page;
 		err = set_direct_map_invalid_noflush(page);
 		if (err) {
-			put_page(page);
+			folio_put(folio);
 			ret = vmf_error(err);
 			goto out;
 		}
 
-		__SetPageUptodate(page);
-		err = add_to_page_cache_lru(page, mapping, offset, gfp);
+		__folio_mark_uptodate(folio);
+		err = filemap_add_folio(mapping, folio, offset, gfp);
 		if (unlikely(err)) {
-			put_page(page);
+			folio_put(folio);
 			/*
 			 * If a split of large page was required, it
 			 * already happened when we marked the page invalid