mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-27 21:03:32 +00:00
6564b014af
commit 65291dcfcf
upstream. folio_is_secretmem() currently relies on secretmem folios being LRU folios, to save some cycles. However, folios might reside in a folio batch without the LRU flag set, or temporarily have their LRU flag cleared. Consequently, the LRU flag is unreliable for this purpose. In particular, this is the case when secretmem_fault() allocates a fresh page and calls filemap_add_folio()->folio_add_lru(). The folio might be added to the per-cpu folio batch and won't get the LRU flag set until the batch was drained using e.g., lru_add_drain(). Consequently, folio_is_secretmem() might not detect secretmem folios and GUP-fast can succeed in grabbing a secretmem folio, crashing the kernel when we would later try reading/writing to the folio, because the folio has been unmapped from the directmap. Fix it by removing that unreliable check. Link: https://lkml.kernel.org/r/20240326143210.291116-2-david@redhat.com Fixes: 1507f51255
("mm: introduce memfd_secret system call to create "secret" memory areas") Signed-off-by: David Hildenbrand <david@redhat.com> Reported-by: xingwei lee <xrivendell7@gmail.com> Reported-by: yue sun <samsun1006219@gmail.com> Closes: https://lore.kernel.org/lkml/CABOYnLyevJeravW=QrH0JUPYEcDN160aZFb7kwndm-J2rmz0HQ@mail.gmail.com/ Debugged-by: Miklos Szeredi <miklos@szeredi.hu> Tested-by: Miklos Szeredi <mszeredi@redhat.com> Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org> Cc: Lorenzo Stoakes <lstoakes@gmail.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: David Hildenbrand <david@redhat.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
54 lines
1.1 KiB
C
54 lines
1.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
|
#ifndef _LINUX_SECRETMEM_H
|
|
#define _LINUX_SECRETMEM_H
|
|
|
|
#ifdef CONFIG_SECRETMEM
|
|
|
|
extern const struct address_space_operations secretmem_aops;
|
|
|
|
static inline bool page_is_secretmem(struct page *page)
|
|
{
|
|
struct address_space *mapping;
|
|
|
|
/*
|
|
* Using page_mapping() is quite slow because of the actual call
|
|
* instruction and repeated compound_head(page) inside the
|
|
* page_mapping() function.
|
|
* We know that secretmem pages are not compound, so we can
|
|
* save a couple of cycles here.
|
|
*/
|
|
if (PageCompound(page))
|
|
return false;
|
|
|
|
mapping = (struct address_space *)
|
|
((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
|
|
|
|
if (!mapping || mapping != page->mapping)
|
|
return false;
|
|
|
|
return mapping->a_ops == &secretmem_aops;
|
|
}
|
|
|
|
bool vma_is_secretmem(struct vm_area_struct *vma);
|
|
bool secretmem_active(void);
|
|
|
|
#else
|
|
|
|
/* CONFIG_SECRETMEM=n stub: no VMA can ever be backed by secretmem. */
static inline bool vma_is_secretmem(struct vm_area_struct *vma)
{
	return false;
}
|
|
|
|
/* CONFIG_SECRETMEM=n stub: no page can ever belong to secretmem. */
static inline bool page_is_secretmem(struct page *page)
{
	return false;
}
|
|
|
|
/* CONFIG_SECRETMEM=n stub: the secretmem facility is never active. */
static inline bool secretmem_active(void)
{
	return false;
}
|
|
|
|
#endif /* CONFIG_SECRETMEM */
|
|
|
|
#endif /* _LINUX_SECRETMEM_H */
|