linux-stable/mm/page_table_check.c
Charan Teja Kalla b1d5488a25 mm: fix use-after-free of page_ext after race with memory-offline
The below is one path where a race between page_ext access and the
offlining of the respective memory block causes a use-after-free on the
access of the page_ext structure.

process1                              process2
---------                             ---------
a) doing /proc/page_owner             doing memory offline
                                      through offline_pages.

b) PageBuddy check fails,
   thus proceed to get the
   page_owner information
   through page_ext access:
   page_ext = lookup_page_ext(page);

                                      migrate_pages();
                                      .................
                                      Since all pages are successfully
                                      migrated as part of the offline
                                      operation, send the MEM_OFFLINE
                                      notification, where for page_ext
                                      it calls:
                                      offline_page_ext() -->
                                        __free_page_ext() -->
                                          free_page_ext() -->
                                            vfree(ms->page_ext)
                                      mem_section->page_ext = NULL

c) The check of the PAGE_EXT
   flags through the
   page_ext->flags access
   results in a use-after-free
   (leading to translation
   faults).

As mentioned above, there is really no synchronization between the
page_ext access and its freeing during memory offline.

The memory offline steps (roughly) on a memory block are as below:

1) Isolate all the pages.

2) while (1)
     try to free the pages to buddy (->free_list[MIGRATE_ISOLATE]).

3) Delete the pages from this buddy list.

4) Then free page_ext.  (Note: the struct page is still alive, as it is
   freed only during hot-remove of the memory, which frees the memmap; a
   step the user might never perform.)

This design leads to a state where the struct page is alive but the
struct page_ext is freed, although the latter is conceptually part of the
former and merely extends the page flags (check [3] for why this design
was chosen).

The above-mentioned race is just one example, but the problem persists in
the other paths involving page_ext->flags access too (e.g.
page_is_idle()).
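
Schematically, every such reader does some variant of the below, and
nothing prevents the vfree() in the offline path from running between the
lookup and the dereference (a minimal sketch: PAGE_EXT_IDLE is just one
example flag, and the body is illustrative rather than the exact
page_idle code):

        /* racy reader, before this patch */
        struct page_ext *page_ext = lookup_page_ext(page);

        if (page_ext)
                return test_bit(PAGE_EXT_IDLE, &page_ext->flags); /* UAF window */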

Fix all the paths where offline races with the page_ext access by
maintaining synchronization with the RCU lock; this is achieved in 3
steps (sketched in code after the note below):

1) Invalidate all the page_ext's of the sections of a memory block by
   storing a flag in the LSB of mem_section->page_ext.

2) Wait until all the existing readers finish working with the
   ->page_ext's, using synchronize_rcu().  Any parallel process that
   starts after this call will not get a page_ext, through
   lookup_page_ext(), for the block on which the parallel offline
   operation is being performed.

3) Now safely free all the sections' ->page_ext's of the block on which
   the offline operation is being performed.

Note: if synchronize_rcu() takes time, optimizations can be made in this
path through call_rcu() [2].
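
Put together, the scheme looks roughly like the sketch below.
page_ext_get()/page_ext_put() are the accessors this patch adds (and
which mm/page_table_check.c below already uses); __invalidate_page_ext(),
__free_page_ext() and PAGE_EXT_INVALID name the offline-side helpers as
in this patch, but the bodies here are schematic, not the exact code:

        /* Reader side: pin the section's page_ext under the RCU read lock. */
        struct page_ext *page_ext_get(struct page *page)
        {
                struct page_ext *page_ext;

                rcu_read_lock();
                page_ext = lookup_page_ext(page); /* NULL once invalidated */
                if (!page_ext) {
                        rcu_read_unlock();
                        return NULL;
                }
                return page_ext;
        }

        void page_ext_put(struct page_ext *page_ext)
        {
                if (unlikely(!page_ext))
                        return;
                rcu_read_unlock();
        }

        /* Offline side, over every section of the memory block: */
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __invalidate_page_ext(pfn); /* 1) set PAGE_EXT_INVALID in the LSB */

        synchronize_rcu();                  /* 2) wait out the existing readers */

        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_ext(pfn);       /* 3) now safe to vfree() */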

Thanks to David Hildenbrand for his views/suggestions on the initial
discussion [1] and to Pavan Kondeti for various inputs on this patch.

[1] https://lore.kernel.org/linux-mm/59edde13-4167-8550-86f0-11fc67882107@quicinc.com/
[2] https://lore.kernel.org/all/a26ce299-aed1-b8ad-711e-a49e82bdd180@quicinc.com/T/#u
[3] https://lore.kernel.org/all/6fa6b7aa-731e-891c-3efb-a03d6a700efa@redhat.com/

[quic_charante@quicinc.com: rename label `loop' to `ext_put_continue' per David]
  Link: https://lkml.kernel.org/r/1661496993-11473-1-git-send-email-quic_charante@quicinc.com
Link: https://lkml.kernel.org/r/1660830600-9068-1-git-send-email-quic_charante@quicinc.com
Signed-off-by: Charan Teja Kalla <quic_charante@quicinc.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Suggested-by: Michal Hocko <mhocko@suse.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Fernand Sieber <sieberf@amazon.com>
Cc: Minchan Kim <minchan@google.com>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Pavan Kondeti <quic_pkondeti@quicinc.com>
Cc: SeongJae Park <sjpark@amazon.de>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2022-09-11 20:25:57 -07:00


// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt
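
/*
 * Per-page counters, kept in this page's page_ext area: how many times the
 * page is currently mapped as anonymous memory and as file-backed memory.
 */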
struct page_table_check {
        atomic_t anon_map_count;
        atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
                                IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

static int __init early_page_table_check_param(char *buf)
{
        return strtobool(buf, &__page_table_check_enabled);
}
early_param("page_table_check", early_page_table_check_param);

static bool __init need_page_table_check(void)
{
        return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
        if (!__page_table_check_enabled)
                return;
        static_branch_disable(&page_table_check_disabled);
}
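
/*
 * Registered with the page_ext machinery: .size reserves room for one
 * struct page_table_check per page in the page_ext area, while .need and
 * .init hook boot-time setup.
 */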
struct page_ext_operations page_table_check_ops = {
        .size = sizeof(struct page_table_check),
        .need = need_page_table_check,
        .init = init_page_table_check,
};

static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
        BUG_ON(!page_ext);
        return (void *)(page_ext) + page_table_check_ops.offset;
}

/*
 * An entry is removed from the page table: decrement the counters for that
 * page, verify that it is of the correct type, and that the counters do not
 * become negative.
 */
static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
                                   unsigned long pfn, unsigned long pgcnt)
{
        struct page_ext *page_ext;
        struct page *page;
        unsigned long i;
        bool anon;

        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        page_ext = page_ext_get(page);
        anon = PageAnon(page);

        for (i = 0; i < pgcnt; i++) {
                struct page_table_check *ptc = get_page_table_check(page_ext);

                if (anon) {
                        BUG_ON(atomic_read(&ptc->file_map_count));
                        BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
                } else {
                        BUG_ON(atomic_read(&ptc->anon_map_count));
                        BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
                }
                page_ext = page_ext_next(page_ext);
        }
        page_ext_put(page_ext);
}

/*
 * A new entry is added to the page table: increment the counters for that
 * page, verify that it is of the correct type, and is not being mapped with
 * a different type to a different process.
 */
static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
                                 unsigned long pfn, unsigned long pgcnt,
                                 bool rw)
{
        struct page_ext *page_ext;
        struct page *page;
        unsigned long i;
        bool anon;

        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        page_ext = page_ext_get(page);
        anon = PageAnon(page);

        for (i = 0; i < pgcnt; i++) {
                struct page_table_check *ptc = get_page_table_check(page_ext);

                if (anon) {
                        BUG_ON(atomic_read(&ptc->file_map_count));
                        BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
                } else {
                        BUG_ON(atomic_read(&ptc->anon_map_count));
                        BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
                }
                page_ext = page_ext_next(page_ext);
        }
        page_ext_put(page_ext);
}

/*
 * The page is on the free list, or is being allocated: verify that the
 * counters are zeroes, and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
        struct page_ext *page_ext;
        unsigned long i;

        page_ext = page_ext_get(page);
        BUG_ON(!page_ext);
        for (i = 0; i < (1ul << order); i++) {
                struct page_table_check *ptc = get_page_table_check(page_ext);

                BUG_ON(atomic_read(&ptc->anon_map_count));
                BUG_ON(atomic_read(&ptc->file_map_count));
                page_ext = page_ext_next(page_ext);
        }
        page_ext_put(page_ext);
}
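
/*
 * The hooks below are called by the architecture code when page table
 * entries are cleared or set.  Kernel (init_mm) mappings are skipped, and
 * only user-accessible pages are tracked.
 */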
void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
                                  pte_t pte)
{
        if (&init_mm == mm)
                return;

        if (pte_user_accessible_page(pte)) {
                page_table_check_clear(mm, addr, pte_pfn(pte),
                                       PAGE_SIZE >> PAGE_SHIFT);
        }
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
                                  pmd_t pmd)
{
        if (&init_mm == mm)
                return;

        if (pmd_user_accessible_page(pmd)) {
                page_table_check_clear(mm, addr, pmd_pfn(pmd),
                                       PMD_SIZE >> PAGE_SHIFT);
        }
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
                                  pud_t pud)
{
        if (&init_mm == mm)
                return;

        if (pud_user_accessible_page(pud)) {
                page_table_check_clear(mm, addr, pud_pfn(pud),
                                       PUD_SIZE >> PAGE_SHIFT);
        }
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte)
{
        if (&init_mm == mm)
                return;

        __page_table_check_pte_clear(mm, addr, *ptep);
        if (pte_user_accessible_page(pte)) {
                page_table_check_set(mm, addr, pte_pfn(pte),
                                     PAGE_SIZE >> PAGE_SHIFT,
                                     pte_write(pte));
        }
}
EXPORT_SYMBOL(__page_table_check_pte_set);

void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
                                pmd_t *pmdp, pmd_t pmd)
{
        if (&init_mm == mm)
                return;

        __page_table_check_pmd_clear(mm, addr, *pmdp);
        if (pmd_user_accessible_page(pmd)) {
                page_table_check_set(mm, addr, pmd_pfn(pmd),
                                     PMD_SIZE >> PAGE_SHIFT,
                                     pmd_write(pmd));
        }
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
                                pud_t *pudp, pud_t pud)
{
        if (&init_mm == mm)
                return;

        __page_table_check_pud_clear(mm, addr, *pudp);
        if (pud_user_accessible_page(pud)) {
                page_table_check_set(mm, addr, pud_pfn(pud),
                                     PUD_SIZE >> PAGE_SHIFT,
                                     pud_write(pud));
        }
}
EXPORT_SYMBOL(__page_table_check_pud_set);

void __page_table_check_pte_clear_range(struct mm_struct *mm,
                                        unsigned long addr,
                                        pmd_t pmd)
{
        if (&init_mm == mm)
                return;

        if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
                pte_t *ptep = pte_offset_map(&pmd, addr);
                unsigned long i;

                for (i = 0; i < PTRS_PER_PTE; i++) {
                        __page_table_check_pte_clear(mm, addr, *ptep);
                        addr += PAGE_SIZE;
                        ptep++;
                }
                pte_unmap(ptep - PTRS_PER_PTE);
        }
}