linux-stable/arch/s390/mm/kasan_init.c
Vasily Gorbik af0735269b s390/mem_detect: do not truncate online memory ranges info
Commit bf64f0517e ("s390/mem_detect: handle online memory limit
just once") introduced truncation of the mem_detect online ranges
based on the identity mapping size. For the kdump case, however, the
full set of online memory ranges has to be fed into
memblock_physmem_add() so that the crashed system's memory can be
extracted.

Instead of truncating, introduce a "usable limit" which is respected
by the mem_detect API. Also add an extra online memory ranges
iterator which still provides the full set of online memory ranges,
disregarding the "usable limit".
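
For example (a sketch only: for_each_mem_detect_usable_block() is the
iterator used by the code below, while the name of the full-range
iterator and its kdump usage are assumptions for illustration):

	/* ranges capped by the "usable limit", e.g. for kasan shadow */
	for_each_mem_detect_usable_block(i, &start, &end)
		kasan_early_pgtable_populate(__sha(start), __sha(end), POPULATE_MAP);

	/* full set of online ranges, e.g. to register crashed system memory */
	for_each_mem_detect_block(i, &start, &end)
		memblock_physmem_add(start, end - start);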

Fixes: bf64f0517e ("s390/mem_detect: handle online memory limit just once")
Reported-by: Alexander Egorenkov <egorenar@linux.ibm.com>
Tested-by: Alexander Egorenkov <egorenar@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2023-02-14 11:45:40 +01:00

// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uv.h>

static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;
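
/*
 * Translate a kernel virtual address into its kasan shadow address:
 * shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET.
 */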
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait();
}
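
/*
 * Early page tables and shadow memory are carved top-down out of an area
 * reserved by earlier boot code: page allocations move pgalloc_pos down
 * towards pgalloc_low, segment (1 MB block) allocations move segment_pos
 * down towards segment_low, and running out of either area is fatal.
 */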
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;
	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");
	return __va(segment_pos);
}

static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);
	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");
	return __va(pgalloc_pos);
}

static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}
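
/*
 * An s390 page table (_PAGE_TABLE_SIZE) is 2 KB, half of a 4 KB page, so
 * each allocated page provides two pte tables; pte_leftover caches the
 * second half for the next call.
 */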
static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}
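
/*
 * POPULATE_MAP backs shadow with newly allocated, writable pages;
 * POPULATE_ZERO_SHADOW maps shadow read-only onto the shared kasan zero
 * page/tables; POPULATE_SHALLOW populates the top-level tables only,
 * leaving lower levels to be filled on demand (vmalloc/modules shadow).
 */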
enum populate_mode {
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW,
	POPULATE_SHALLOW
};

static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
{
	return __pgprot(pgprot_val(pgprot) & ~bit);
}
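
/*
 * Populate the shadow range [address, end) according to "mode", installing
 * the shared zero-shadow tables at the largest possible granularity and
 * using 1 MB segment mappings for real shadow when EDAT is available.
 */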
static void __init kasan_early_pgtable_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	pgprot_t pgt_prot_zero = PAGE_KERNEL_RO;
	pgprot_t pgt_prot = PAGE_KERNEL;
	pgprot_t sgt_prot = SEGMENT_KERNEL;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pmd_t pmd;
	pte_t pte;

	if (!has_nx) {
		pgt_prot_zero = pgprot_clear_bit(pgt_prot_zero, _PAGE_NOEXEC);
		pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
		sgt_prot = pgprot_clear_bit(sgt_prot, _SEGMENT_ENTRY_NOEXEC);
	}

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
					     kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		if (mode == POPULATE_SHALLOW) {
			address = (address + P4D_SIZE) & P4D_MASK;
			continue;
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
					     kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
					     kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				if (mode == POPULATE_ZERO_SHADOW) {
					pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				} else if (has_edat) {
					void *page = kasan_early_alloc_segment();

					memset(page, 0, _SEGMENT_SIZE);
					pmd = __pmd(__pa(page));
					pmd = set_pmd_bit(pmd, sgt_prot);
					set_pmd(pm_dir, pmd);
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				}
			}
			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte = __pte(__pa(page));
				pte = set_pte_bit(pte, pgt_prot);
				set_pte(pt_dir, pte);
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				pte = __pte(__pa(page));
				pte = set_pte_bit(pte, pgt_prot_zero);
				set_pte(pt_dir, pte);
				break;
			case POPULATE_SHALLOW:
				/* should never happen */
				break;
			}
		}
		address += PAGE_SIZE;
	}
}
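
/*
 * Facility 8 is EDAT-1 (enables 1 MB segment mappings, switched on via
 * control register 0 bit 23); facility 130 is instruction-execution
 * protection, which backs the NOEXEC semantics (control register 0 bit 20).
 */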
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}
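
/*
 * Build the early shadow mapping: real shadow for all usable online
 * memory, shallowly populated shadow for the vmalloc and modules areas
 * when CONFIG_KASAN_VMALLOC is enabled, and the shared zero shadow for
 * everything else; finally enable reporting by resetting kasan_depth.
 */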
void __init kasan_early_init(void)
{
	pte_t pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
	unsigned long untracked_end = MODULES_VADDR;
	unsigned long shadow_alloc_size;
	unsigned long start, end;
	int i;

	kasan_early_detect_facilities();
	if (!has_nx)
		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));

	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
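
	/*
	 * With EDAT the shadow for usable memory is mapped with 1 MB
	 * segments, so reserve 1/8 of the usable memory size as a
	 * segment-aligned allocation area directly below pgalloc_pos.
	 */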
	if (has_edat) {
		shadow_alloc_size = get_mem_detect_usable_total() >> KASAN_SHADOW_SCALE_SHIFT;
		segment_pos = round_down(pgalloc_pos, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		segment_low = round_down(segment_low, _SEGMENT_SIZE);
		pgalloc_pos = segment_low;
	}
	/*
	 * Current memory layout:
	 * +- 0 -------------+         +- shadow start -+
	 * |1:1 ident mapping|        /|1/8 of ident map|
	 * |                 |       / |                |
	 * +-end of ident map+      /  +----------------+
	 * | ... gap ...     |     /   | kasan          |
	 * |                 |    /    | zero page      |
	 * +- vmalloc area  -+   /     | mapping        |
	 * | vmalloc_size    |  /      | (untracked)    |
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb             |/        | unmapped       | allocated per module
	 * +- shadow start  -+         +----------------+
	 * | 1/8 addr space  |         | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+         +- shadow start -+
	 * |1:1 ident mapping|        /|1/8 of ident map|
	 * |                 |       / |                |
	 * +-end of ident map+      /  +----------------+
	 * | ... gap ...     |     /   | kasan zero page| (untracked)
	 * |                 |    /    | mapping        |
	 * +- vmalloc area  -+   /     +----------------+
	 * | vmalloc_size    |  /      |shallow populate|
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb             |/        |shallow populate|
	 * +- shadow start  -+         +----------------+
	 * | 1/8 addr space  |         | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	for_each_mem_detect_usable_block(i, &start, &end)
		kasan_early_pgtable_populate(__sha(start), __sha(end), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_end = VMALLOC_START;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
					     POPULATE_SHALLOW);
	}
	/* populate kasan shadow for untracked memory */
	kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_end),
				     POPULATE_ZERO_SHADOW);
	kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
				     POPULATE_ZERO_SHADOW);
	/* enable kasan */
	init_task.kasan_depth = 0;
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}