linux-stable/include/linux/page_table_check.h
Pasha Tatashin df4e817b71 mm: page table check
Check user page table entries at the time they are added and removed.

Allows us to synchronously catch memory corruption issues related to
double mapping.

When a pte for an anonymous page is added into a page table, we verify
that this pte does not already point to a file backed page, and vice
versa: if a file backed page is being added, we verify that this page
does not already have an anonymous mapping.

We also enforce that read-only sharing for anonymous pages is allowed
(i.e.  cow after fork).  All other sharing must be for file pages.

Page table check makes it possible to protect and debug cases where
"struct page" metadata became corrupted for some reason.  For example,
when refcnt or mapcount become invalid.

Link: https://lkml.kernel.org/r/20211221154650.1047963-4-pasha.tatashin@soleen.com
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Greg Thelen <gthelen@google.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Slaby <jirislaby@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kees Cook <keescook@chromium.org>
Cc: Masahiro Yamada <masahiroy@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Paul Turner <pjt@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sami Tolvanen <samitolvanen@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wei Xu <weixugc@google.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2022-01-15 16:30:28 +02:00

147 lines
3.8 KiB
C

/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2021, Google LLC.
* Pasha Tatashin <pasha.tatashin@soleen.com>
*/
#ifndef __LINUX_PAGE_TABLE_CHECK_H
#define __LINUX_PAGE_TABLE_CHECK_H
#ifdef CONFIG_PAGE_TABLE_CHECK
#include <linux/jump_label.h>
/*
 * Static key consulted by every inline wrapper below: when set, all
 * page table checks are skipped.  Defined out of line — presumably in
 * mm/page_table_check.c; confirm against the implementation.
 */
extern struct static_key_true page_table_check_disabled;
/* page_ext client ops for this checker — NOTE(review): per-page state
 * is presumably stored via page_ext; verify against the registration. */
extern struct page_ext_operations page_table_check_ops;
/* Out-of-line slow paths.  Reached only when checking is enabled
 * (i.e. the static key above is not set). */
void __page_table_check_zero(struct page *page, unsigned int order);
void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t pte);
void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
pmd_t pmd);
void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
pud_t pud);
void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud);
/*
 * Hook invoked when a page is allocated for page-table use: a no-op
 * while the page_table_check_disabled static key is set, otherwise it
 * delegates the actual check to __page_table_check_zero().
 */
static inline void page_table_check_alloc(struct page *page, unsigned int order)
{
        if (!static_branch_likely(&page_table_check_disabled))
                __page_table_check_zero(page, order);
}
/*
 * Hook invoked when a page-table page is freed.  Uses the same
 * __page_table_check_zero() slow path as the allocation hook, guarded
 * by the page_table_check_disabled static key.
 */
static inline void page_table_check_free(struct page *page, unsigned int order)
{
        if (!static_branch_likely(&page_table_check_disabled))
                __page_table_check_zero(page, order);
}
/*
 * Validate @pte as it is cleared from @mm at @addr.  Compiles down to a
 * single static-branch test when checking is disabled; otherwise
 * forwards to the out-of-line __page_table_check_pte_clear().
 */
static inline void page_table_check_pte_clear(struct mm_struct *mm,
					      unsigned long addr, pte_t pte)
{
        if (!static_branch_likely(&page_table_check_disabled))
                __page_table_check_pte_clear(mm, addr, pte);
}
/*
 * Validate @pmd as it is cleared from @mm at @addr.  No-op unless the
 * page_table_check_disabled static key indicates checking is enabled;
 * the real work lives in __page_table_check_pmd_clear().
 */
static inline void page_table_check_pmd_clear(struct mm_struct *mm,
					      unsigned long addr, pmd_t pmd)
{
        if (!static_branch_likely(&page_table_check_disabled))
                __page_table_check_pmd_clear(mm, addr, pmd);
}
/*
 * Validate @pud as it is cleared from @mm at @addr.  Same pattern as
 * the pte/pmd variants: static-key fast path, out-of-line slow path in
 * __page_table_check_pud_clear().
 */
static inline void page_table_check_pud_clear(struct mm_struct *mm,
					      unsigned long addr, pud_t pud)
{
        if (!static_branch_likely(&page_table_check_disabled))
                __page_table_check_pud_clear(mm, addr, pud);
}
/*
 * Validate @pte before it is installed at @ptep in @mm.  A no-op while
 * the page_table_check_disabled static key is set; otherwise forwards
 * all arguments to __page_table_check_pte_set().
 */
static inline void page_table_check_pte_set(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    pte_t pte)
{
        if (!static_branch_likely(&page_table_check_disabled))
                __page_table_check_pte_set(mm, addr, ptep, pte);
}
/*
 * Validate @pmd before it is installed at @pmdp in @mm.  Static-key
 * guarded wrapper around the out-of-line __page_table_check_pmd_set().
 */
static inline void page_table_check_pmd_set(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp,
					    pmd_t pmd)
{
        if (!static_branch_likely(&page_table_check_disabled))
                __page_table_check_pmd_set(mm, addr, pmdp, pmd);
}
/*
 * Validate @pud before it is installed at @pudp in @mm.  Static-key
 * guarded wrapper around the out-of-line __page_table_check_pud_set().
 */
static inline void page_table_check_pud_set(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp,
					    pud_t pud)
{
        if (!static_branch_likely(&page_table_check_disabled))
                __page_table_check_pud_set(mm, addr, pudp, pud);
}
#else
/* No-op stub: CONFIG_PAGE_TABLE_CHECK is disabled. */
static inline void page_table_check_alloc(struct page *page, unsigned int order)
{
}
/* No-op stub: CONFIG_PAGE_TABLE_CHECK is disabled. */
static inline void page_table_check_free(struct page *page, unsigned int order)
{
}
/* No-op stub: CONFIG_PAGE_TABLE_CHECK is disabled. */
static inline void page_table_check_pte_clear(struct mm_struct *mm,
unsigned long addr, pte_t pte)
{
}
/* No-op stub: CONFIG_PAGE_TABLE_CHECK is disabled. */
static inline void page_table_check_pmd_clear(struct mm_struct *mm,
unsigned long addr, pmd_t pmd)
{
}
/* No-op stub: CONFIG_PAGE_TABLE_CHECK is disabled. */
static inline void page_table_check_pud_clear(struct mm_struct *mm,
unsigned long addr, pud_t pud)
{
}
/* No-op stub: CONFIG_PAGE_TABLE_CHECK is disabled. */
static inline void page_table_check_pte_set(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
pte_t pte)
{
}
/* No-op stub: CONFIG_PAGE_TABLE_CHECK is disabled. */
static inline void page_table_check_pmd_set(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp,
pmd_t pmd)
{
}
/* No-op stub: CONFIG_PAGE_TABLE_CHECK is disabled. */
static inline void page_table_check_pud_set(struct mm_struct *mm,
unsigned long addr, pud_t *pudp,
pud_t pud)
{
}
#endif /* CONFIG_PAGE_TABLE_CHECK */
#endif /* __LINUX_PAGE_TABLE_CHECK_H */