s390/mm: Uncouple physical vs virtual address spaces

Uncoupling the physical and virtual address spaces brings
the following benefits to s390:

- virtual memory layout flexibility;
- closes the address gap between kernel and modules, which
  caused s390-only problems in the past (e.g. 'perf' bugs);
- allows getting rid of trampolines used for module calls
  into the kernel;
- allows simplifying the BPF trampoline;
- minor performance improvement in branch prediction;
- kernel randomization entropy is orders of magnitude bigger,
  as it is derived from the amount of available virtual, not
  physical, memory;

The whole change can be described by the two pictures below:
before and after the change.

Some aspects of the virtual memory layout setup are not
clarified (number of page levels, alignment, DMA memory),
since they are either not part of this change or secondary
with regard to how the uncoupling itself is implemented.

The focus of the pictures is to explain why the __va() and __pa()
macros are implemented the way they are.

        Memory layout in V==R mode:

|    Physical      |    Virtual       |
+- 0 --------------+- 0 --------------+ identity mapping start
|                  | S390_lowcore     | Low-address memory
|                  +- 8 KB -----------+
|                  |                  |
|                  | identity         | phys == virt
|                  | mapping          | virt == phys
|                  |                  |
+- AMODE31_START --+- AMODE31_START --+ .amode31 rand. phys/virt start
|.amode31 text/data|.amode31 text/data|
+- AMODE31_END ----+- AMODE31_END ----+ .amode31 rand. phys/virt end
|                  |                  |
|                  |                  |
+- __kaslr_offset, __kaslr_offset_phys| kernel rand. phys/virt start
|                  |                  |
| kernel text/data | kernel text/data | phys == kvirt
|                  |                  |
+------------------+------------------+ kernel phys/virt end
|                  |                  |
|                  |                  |
|                  |                  |
|                  |                  |
+- ident_map_size -+- ident_map_size -+ identity mapping end
                   |                  |
                   |  ... unused gap  |
                   |                  |
                   +---- vmemmap -----+ 'struct page' array start
                   |                  |
                   | virtually mapped |
                   | memory map       |
                   |                  |
                   +- __abs_lowcore --+
                   |                  |
                   | Absolute Lowcore |
                   |                  |
                   +- __memcpy_real_area
                   |                  |
                   |  Real Memory Copy|
                   |                  |
                   +- VMALLOC_START --+ vmalloc area start
                   |                  |
                   |  vmalloc area    |
                   |                  |
                   +- MODULES_VADDR --+ modules area start
                   |                  |
                   |  modules area    |
                   |                  |
                   +------------------+ UltraVisor Secure Storage limit
                   |                  |
                   |  ... unused gap  |
                   |                  |
                   +KASAN_SHADOW_START+ KASAN shadow memory start
                   |                  |
                   |   KASAN shadow   |
                   |                  |
                   +------------------+ ASCE limit

        Memory layout in V!=R mode:

|    Physical      |    Virtual       |
+- 0 --------------+- 0 --------------+
|                  | S390_lowcore     | Low-address memory
|                  +- 8 KB -----------+
|                  |                  |
|                  |                  |
|                  | ... unused gap   |
|                  |                  |
+- AMODE31_START --+- AMODE31_START --+ .amode31 rand. phys/virt start
|.amode31 text/data|.amode31 text/data|
+- AMODE31_END ----+- AMODE31_END ----+ .amode31 rand. phys/virt end (<2GB)
|                  |                  |
|                  |                  |
+- __kaslr_offset_phys		     | kernel rand. phys start
|                  |                  |
| kernel text/data |                  |
|                  |                  |
+------------------+		     | kernel phys end
|                  |                  |
|                  |                  |
|                  |                  |
|                  |                  |
+- ident_map_size -+		     |
                   |                  |
                   |  ... unused gap  |
                   |                  |
                   +- __identity_base + identity mapping start (>= 2GB)
                   |                  |
                   | identity         | phys == virt - __identity_base
                   | mapping          | virt == phys + __identity_base
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   |                  |
                   +---- vmemmap -----+ 'struct page' array start
                   |                  |
                   | virtually mapped |
                   | memory map       |
                   |                  |
                   +- __abs_lowcore --+
                   |                  |
                   | Absolute Lowcore |
                   |                  |
                   +- __memcpy_real_area
                   |                  |
                   |  Real Memory Copy|
                   |                  |
                   +- VMALLOC_START --+ vmalloc area start
                   |                  |
                   |  vmalloc area    |
                   |                  |
                   +- MODULES_VADDR --+ modules area start
                   |                  |
                   |  modules area    |
                   |                  |
                   +- __kaslr_offset -+ kernel rand. virt start
                   |                  |
                   | kernel text/data | phys == (kvirt - __kaslr_offset) +
                   |                  |         __kaslr_offset_phys
                   +- kernel .bss end + kernel rand. virt end
                   |                  |
                   |  ... unused gap  |
                   |                  |
                   +------------------+ UltraVisor Secure Storage limit
                   |                  |
                   |  ... unused gap  |
                   |                  |
                   +KASAN_SHADOW_START+ KASAN shadow memory start
                   |                  |
                   |   KASAN shadow   |
                   |                  |
                   +------------------+ ASCE limit

Unused gaps in the virtual memory layout may or may not be
present, depending on how a particular system is configured.
No page tables are created for the unused gaps.

The relative order of the vmalloc, modules and kernel image
areas in virtual memory is defined by the following considerations:

- the start of the modules area and the end of the kernel should
  reside within 4GB to accommodate relative 32-bit jumps. The best
  way to achieve that is to place the kernel next to the modules area;

- the vmalloc and modules areas should be located next to each
  other to prevent failures and extra rework in user-level tools
  (makedumpfile, crash, etc.) which treat vmalloc and module
  addresses similarly;

- the kernel needs to be the last area in the virtual memory
  layout so that kernel and non-kernel virtual addresses can be
  easily distinguished. That is needed to (again) simplify the
  handling of addresses in user-level tools and to make the __pa()
  macro faster (see below);

Based on the above, the relative order of the considered
virtual areas in memory is: vmalloc - modules - kernel.
Therefore, the only change to the current memory layout is
moving the kernel to the end of the virtual address space.

With that approach the implementation of the __pa() macro is
straightforward - all linear virtual addresses below the
kernel base are considered identity-mapping addresses:

	phys == virt - __identity_base

All addresses at or above the kernel base are kernel addresses:

	phys == (kvirt - __kaslr_offset) + __kaslr_offset_phys

By contrast, the __va() macro deals only with identity-mapping
addresses:

	virt == phys + __identity_base

The .amode31 section is mapped separately and is not covered by
the __pa() macro. In fact, it could easily have been handled by
checking whether a virtual address is within that section or not,
but there is no need for it. Thus, the __pa() code is kept down
to as few machine cycles as possible.
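
For illustration, here is a minimal standalone sketch of the two
conversions. The helper names and the variable values are made up for
the example; in the kernel these are the __pa()/__va() macros and the
globals set up by the decompressor, as shown in the hunks further down.

	/*
	 * Standalone sketch of the new __pa()/__va() logic. All values
	 * below are example-only; in the kernel they are globals set up
	 * by the decompressor.
	 */
	#include <stdio.h>

	static unsigned long __identity_base     = 0x80000000UL;     /* example only */
	static unsigned long __kaslr_offset      = 0x3ff000000000UL; /* example only */
	static unsigned long __kaslr_offset_phys = 0x15000000UL;     /* example only */

	/* __pa(): linear addresses below the kernel base are identity-mapped */
	static unsigned long pa_sketch(unsigned long x)
	{
		if (x < __kaslr_offset)
			return x - __identity_base;
		return x - __kaslr_offset + __kaslr_offset_phys;
	}

	/* __va(): only ever produces identity-mapping addresses */
	static void *va_sketch(unsigned long x)
	{
		return (void *)(x + __identity_base);
	}

	int main(void)
	{
		unsigned long phys = 0x12345000UL;
		void *virt = va_sketch(phys);

		printf("va(%#lx) = %p\n", phys, virt);
		printf("pa(va)   = %#lx\n", pa_sketch((unsigned long)virt));
		return 0;
	}

The round trip prints the original physical address back, which is the
point of the scheme: identity-mapping and kernel-image addresses are
told apart purely by a single comparison against the kernel base.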

The KASAN shadow memory is located at the very end of the
virtual memory layout, at addresses higher than the kernel.
However, that is not a linear mapping and no code other than
KASAN instrumentation or API is expected to access it.

When KASLR mode is enabled the kernel base address is randomized
within a memory window that spans the whole unused virtual address
space. The size of that window depends on the amount of physical
memory available to the system, the limit imposed by UltraVisor
(if present) and the vmalloc area size as provided by the vmalloc=
kernel command line parameter.

In case the virtual memory is exhausted, the minimum size of
the randomization window is forcefully set to 2GB, which
amounts to 15 bits of entropy if KASAN is enabled or 17 bits
of entropy in the default configuration.
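
As a back-of-the-envelope check of those numbers: the randomization
granularity is THREAD_SIZE (see the slot computation in the
setup_kernel_memory_layout() hunk below), and assuming THREAD_SIZE is
16KB in the default configuration and 64KB with KASAN (values that are
not part of this change), the 2GB window yields:

	/* Entropy estimate for the minimum 2GB randomization window. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long window = 1UL << 31;            /* 2GB window */
		unsigned long slots_default = window >> 14;  /* 16KB slots */
		unsigned long slots_kasan   = window >> 16;  /* 64KB slots */

		printf("default: %lu slots (~17 bits)\n", slots_default);
		printf("kasan:   %lu slots (~15 bits)\n", slots_kasan);
		return 0;
	}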

The default kernel offset 0x100000 is used as a magic value
both in the decompressor code and in the vmlinux linker script,
but it will be removed with a follow-up change.

Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Alexander Gordeev 2024-03-01 07:15:22 +01:00
parent f4cac27dc0
commit c98d2ecae0
9 changed files with 241 additions and 68 deletions


@ -8,6 +8,7 @@ s390 Architecture
cds
3270
driver-model
mm
monreader
qeth
s390dbf


@ -0,0 +1,111 @@
.. SPDX-License-Identifier: GPL-2.0
=================
Memory Management
=================
Virtual memory layout
=====================
.. note::
- Some aspects of the virtual memory layout setup are not
clarified (number of page levels, alignment, DMA memory).
- Unused gaps in the virtual memory layout could be present
or not - depending on how a particular system is configured.
No page tables are created for the unused gaps.
- The virtual memory regions are tracked or untracked by KASAN
instrumentation, and the KASAN shadow memory itself is created,
only when the CONFIG_KASAN configuration option is enabled.
::
=============================================================================
| Physical | Virtual | VM area description
=============================================================================
+- 0 --------------+- 0 --------------+
| | S390_lowcore | Low-address memory
| +- 8 KB -----------+
| | |
| | |
| | ... unused gap | KASAN untracked
| | |
+- AMODE31_START --+- AMODE31_START --+ .amode31 rand. phys/virt start
|.amode31 text/data|.amode31 text/data| KASAN untracked
+- AMODE31_END ----+- AMODE31_END ----+ .amode31 rand. phys/virt end (<2GB)
| | |
| | |
+- __kaslr_offset_phys | kernel rand. phys start
| | |
| kernel text/data | |
| | |
+------------------+ | kernel phys end
| | |
| | |
| | |
| | |
+- ident_map_size -+ |
| |
| ... unused gap | KASAN untracked
| |
+- __identity_base + identity mapping start (>= 2GB)
| |
| identity | phys == virt - __identity_base
| mapping | virt == phys + __identity_base
| |
| | KASAN tracked
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
+---- vmemmap -----+ 'struct page' array start
| |
| virtually mapped |
| memory map | KASAN untracked
| |
+- __abs_lowcore --+
| |
| Absolute Lowcore | KASAN untracked
| |
+- __memcpy_real_area
| |
| Real Memory Copy| KASAN untracked
| |
+- VMALLOC_START --+ vmalloc area start
| | KASAN untracked or
| vmalloc area | KASAN shallowly populated in case
| | CONFIG_KASAN_VMALLOC=y
+- MODULES_VADDR --+ modules area start
| | KASAN allocated per module or
| modules area | KASAN shallowly populated in case
| | CONFIG_KASAN_VMALLOC=y
+- __kaslr_offset -+ kernel rand. virt start
| | KASAN tracked
| kernel text/data | phys == (kvirt - __kaslr_offset) +
| | __kaslr_offset_phys
+- kernel .bss end + kernel rand. virt end
| |
| ... unused gap | KASAN untracked
| |
+------------------+ UltraVisor Secure Storage limit
| |
| ... unused gap | KASAN untracked
| |
+KASAN_SHADOW_START+ KASAN shadow memory start
| |
| KASAN shadow | KASAN untracked
| |
+------------------+ ASCE limit


@ -74,10 +74,11 @@ void sclp_early_setup_buffer(void);
void print_pgm_check_info(void);
unsigned long randomize_within_range(unsigned long size, unsigned long align,
unsigned long min, unsigned long max);
void setup_vmem(unsigned long asce_limit);
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit);
void __printf(1, 2) decompressor_printk(const char *fmt, ...);
void print_stacktrace(unsigned long sp);
void error(char *m);
int get_random(unsigned long limit, unsigned long *value);
extern struct machine_info machine;
@ -98,6 +99,10 @@ extern struct vmlinux_info _vmlinux_info;
#define vmlinux _vmlinux_info
#define __abs_lowcore_pa(x) (((unsigned long)(x) - __abs_lowcore) % sizeof(struct lowcore))
#define __kernel_va(x) ((void *)((unsigned long)(x) - __kaslr_offset_phys + __kaslr_offset))
#define __kernel_pa(x) ((unsigned long)(x) - __kaslr_offset + __kaslr_offset_phys)
#define __identity_va(x) ((void *)((unsigned long)(x) + __identity_base))
#define __identity_pa(x) ((unsigned long)(x) - __identity_base)
static inline bool intersects(unsigned long addr0, unsigned long size0,
unsigned long addr1, unsigned long size1)


@ -43,7 +43,7 @@ static int check_prng(void)
return PRNG_MODE_TDES;
}
static int get_random(unsigned long limit, unsigned long *value)
int get_random(unsigned long limit, unsigned long *value)
{
struct prng_parm prng = {
/* initial parameter block for tdes mode, copied from libica */


@ -203,7 +203,7 @@ static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
/* Adjust R_390_64 relocations */
for (reloc = vmlinux_relocs_64_start; reloc < vmlinux_relocs_64_end; reloc++) {
loc = (long)*reloc + offset;
loc = (long)*reloc + phys_offset;
if (loc < min_addr || loc > max_addr)
error("64-bit relocation outside of kernel!\n");
*(u64 *)loc += offset;
@ -263,8 +263,25 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
#endif
}
static unsigned long setup_kernel_memory_layout(void)
#define FIXMAP_SIZE round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))
static unsigned long get_vmem_size(unsigned long identity_size,
unsigned long vmemmap_size,
unsigned long vmalloc_size,
unsigned long rte_size)
{
unsigned long max_mappable, vsize;
max_mappable = max(identity_size, MAX_DCSS_ADDR);
vsize = round_up(SZ_2G + max_mappable, rte_size) +
round_up(vmemmap_size, rte_size) +
FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
return size_add(vsize, vmalloc_size);
}
static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
{
unsigned long kernel_start, kernel_end;
unsigned long vmemmap_start;
unsigned long asce_limit;
unsigned long rte_size;
@ -277,12 +294,11 @@ static unsigned long setup_kernel_memory_layout(void)
vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
/* choose kernel address space layout: 4 or 3 levels. */
vsize = round_up(ident_map_size, _REGION3_SIZE) + vmemmap_size +
MODULES_LEN + MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE;
vsize = size_add(vsize, vmalloc_size);
vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
if (IS_ENABLED(CONFIG_KASAN) || (vsize > _REGION2_SIZE)) {
asce_limit = _REGION1_SIZE;
rte_size = _REGION2_SIZE;
vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
} else {
asce_limit = _REGION2_SIZE;
rte_size = _REGION3_SIZE;
@ -298,12 +314,26 @@ static unsigned long setup_kernel_memory_layout(void)
/* force vmalloc and modules below kasan shadow */
vmax = min(vmax, KASAN_SHADOW_START);
#endif
MODULES_END = round_down(vmax, _SEGMENT_SIZE);
kernel_end = vmax;
if (kaslr_enabled()) {
unsigned long kaslr_len, slots, pos;
vsize = min(vsize, vmax);
kaslr_len = max(KASLR_LEN, vmax - vsize);
slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
if (get_random(slots, &pos))
pos = 0;
kernel_end -= pos * THREAD_SIZE;
}
kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
__kaslr_offset = kernel_start;
MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
vsize = (VMALLOC_END - (MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE)) / 2;
vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
vsize = round_down(vsize, _SEGMENT_SIZE);
vmalloc_size = min(vmalloc_size, vsize);
VMALLOC_START = VMALLOC_END - vmalloc_size;
@ -330,6 +360,7 @@ static unsigned long setup_kernel_memory_layout(void)
BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
max_mappable = min(max_mappable, vmemmap_start);
__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
return asce_limit;
}
@ -358,7 +389,6 @@ static void setup_vmalloc_size(void)
static void kaslr_adjust_vmlinux_info(unsigned long offset)
{
*(unsigned long *)(&vmlinux.entry) += offset;
vmlinux.bootdata_off += offset;
vmlinux.bootdata_preserved_off += offset;
#ifdef CONFIG_PIE_BUILD
@ -386,6 +416,7 @@ void startup_kernel(void)
unsigned long max_physmem_end;
unsigned long vmlinux_lma = 0;
unsigned long amode31_lma = 0;
unsigned long kernel_size;
unsigned long asce_limit;
unsigned long safe_addr;
void *img;
@ -417,7 +448,8 @@ void startup_kernel(void)
max_physmem_end = detect_max_physmem_end();
setup_ident_map_size(max_physmem_end);
setup_vmalloc_size();
asce_limit = setup_kernel_memory_layout();
kernel_size = vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
asce_limit = setup_kernel_memory_layout(kernel_size);
/* got final ident_map_size, physmem allocations could be performed now */
physmem_set_usable_limit(ident_map_size);
detect_physmem_online_ranges(max_physmem_end);
@ -432,7 +464,6 @@ void startup_kernel(void)
if (vmlinux_lma) {
__kaslr_offset_phys = vmlinux_lma - vmlinux.default_lma;
kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
__kaslr_offset = __kaslr_offset_phys;
}
}
vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
@ -472,7 +503,7 @@ void startup_kernel(void)
__kaslr_offset, __kaslr_offset_phys);
kaslr_adjust_got(__kaslr_offset);
free_relocs();
setup_vmem(asce_limit);
setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
copy_bootdata();
/*
@ -484,7 +515,7 @@ void startup_kernel(void)
/*
* Jump to the decompressed kernel entry point and switch DAT mode on.
*/
psw.addr = vmlinux.entry;
psw.addr = __kaslr_offset + vmlinux.entry;
psw.mask = PSW_KERNEL_BITS;
__load_psw(psw);
}


@ -27,6 +27,8 @@ enum populate_mode {
POPULATE_NONE,
POPULATE_DIRECT,
POPULATE_ABS_LOWCORE,
POPULATE_IDENTITY,
POPULATE_KERNEL,
#ifdef CONFIG_KASAN
POPULATE_KASAN_MAP_SHADOW,
POPULATE_KASAN_ZERO_SHADOW,
@ -54,7 +56,7 @@ static inline void kasan_populate(unsigned long start, unsigned long end, enum p
pgtable_populate(start, end, mode);
}
static void kasan_populate_shadow(void)
static void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end)
{
pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
@ -76,44 +78,20 @@ static void kasan_populate_shadow(void)
__arch_set_page_dat(kasan_early_shadow_pmd, 1UL << CRST_ALLOC_ORDER);
__arch_set_page_dat(kasan_early_shadow_pte, 1);
/*
* Current memory layout:
* +- 0 -------------+ +- shadow start -+
* |1:1 ident mapping| /|1/8 of ident map|
* | | / | |
* +-end of ident map+ / +----------------+
* | ... gap ... | / | kasan |
* | | / | zero page |
* +- vmalloc area -+ / | mapping |
* | vmalloc_size | / | (untracked) |
* +- modules vaddr -+ / +----------------+
* | 2Gb |/ | unmapped | allocated per module
* +- shadow start -+ +----------------+
* | 1/8 addr space | | zero pg mapping| (untracked)
* +- shadow end ----+---------+- shadow end ---+
*
* Current memory layout (KASAN_VMALLOC):
* +- 0 -------------+ +- shadow start -+
* |1:1 ident mapping| /|1/8 of ident map|
* | | / | |
* +-end of ident map+ / +----------------+
* | ... gap ... | / | kasan zero page| (untracked)
* | | / | mapping |
* +- vmalloc area -+ / +----------------+
* | vmalloc_size | / |shallow populate|
* +- modules vaddr -+ / +----------------+
* | 2Gb |/ |shallow populate|
* +- shadow start -+ +----------------+
* | 1/8 addr space | | zero pg mapping| (untracked)
* +- shadow end ----+---------+- shadow end ---+
*/
for_each_physmem_usable_range(i, &start, &end) {
kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
if (memgap_start && physmem_info.info_source == MEM_DETECT_DIAG260)
kasan_populate(memgap_start, start, POPULATE_KASAN_ZERO_SHADOW);
kasan_populate((unsigned long)__identity_va(start),
(unsigned long)__identity_va(end),
POPULATE_KASAN_MAP_SHADOW);
if (memgap_start && physmem_info.info_source == MEM_DETECT_DIAG260) {
kasan_populate((unsigned long)__identity_va(memgap_start),
(unsigned long)__identity_va(start),
POPULATE_KASAN_ZERO_SHADOW);
}
memgap_start = end;
}
kasan_populate(kernel_start, kernel_end, POPULATE_KASAN_MAP_SHADOW);
kasan_populate(0, (unsigned long)__identity_va(0), POPULATE_KASAN_ZERO_SHADOW);
kasan_populate(AMODE31_START, AMODE31_END, POPULATE_KASAN_ZERO_SHADOW);
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_end = VMALLOC_START;
/* shallowly populate kasan shadow for vmalloc and modules */
@ -122,8 +100,9 @@ static void kasan_populate_shadow(void)
untracked_end = MODULES_VADDR;
}
/* populate kasan shadow for untracked memory */
kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
kasan_populate((unsigned long)__identity_va(ident_map_size), untracked_end,
POPULATE_KASAN_ZERO_SHADOW);
kasan_populate(kernel_end, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
}
static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
@ -180,7 +159,9 @@ static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
}
#else
static inline void kasan_populate_shadow(void) {}
static inline void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end)
{
}
static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
unsigned long end, enum populate_mode mode)
@ -263,6 +244,10 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
return addr;
case POPULATE_ABS_LOWCORE:
return __abs_lowcore_pa(addr);
case POPULATE_KERNEL:
return __kernel_pa(addr);
case POPULATE_IDENTITY:
return __identity_pa(addr);
#ifdef CONFIG_KASAN
case POPULATE_KASAN_MAP_SHADOW:
addr = physmem_alloc_top_down(RR_VMEM, size, size);
@ -274,15 +259,22 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
}
}
static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end)
static bool large_allowed(enum populate_mode mode)
{
return machine.has_edat2 &&
return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY);
}
static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
return machine.has_edat2 && large_allowed(mode) &&
IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
}
static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end)
static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
return machine.has_edat1 &&
return machine.has_edat1 && large_allowed(mode) &&
IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
}
@ -322,7 +314,7 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
if (pmd_none(*pmd)) {
if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
continue;
if (can_large_pmd(pmd, addr, next)) {
if (can_large_pmd(pmd, addr, next, mode)) {
entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
entry = set_pmd_bit(entry, SEGMENT_KERNEL);
if (!machine.has_nx)
@ -355,7 +347,7 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
if (pud_none(*pud)) {
if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
continue;
if (can_large_pud(pud, addr, next)) {
if (can_large_pud(pud, addr, next, mode)) {
entry = __pud(_pa(addr, _REGION3_SIZE, mode));
entry = set_pud_bit(entry, REGION3_KERNEL);
if (!machine.has_nx)
@ -418,11 +410,12 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
}
}
void setup_vmem(unsigned long asce_limit)
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit)
{
unsigned long start, end;
unsigned long asce_type;
unsigned long asce_bits;
pgd_t *init_mm_pgd;
int i;
/*
@ -433,6 +426,15 @@ void setup_vmem(unsigned long asce_limit)
for_each_physmem_online_range(i, &start, &end)
__arch_set_page_nodat((void *)start, (end - start) >> PAGE_SHIFT);
/*
* init_mm->pgd contains virtual address of swapper_pg_dir.
* It is unusable at this stage since DAT is yet off. Swap
* it for physical address of swapper_pg_dir and restore
* the virtual address after all page tables are created.
*/
init_mm_pgd = init_mm.pgd;
init_mm.pgd = (pgd_t *)swapper_pg_dir;
if (asce_limit == _REGION1_SIZE) {
asce_type = _REGION2_ENTRY_EMPTY;
asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
@ -451,17 +453,25 @@ void setup_vmem(unsigned long asce_limit)
* To allow prefixing the lowcore must be mapped with 4KB pages.
* To prevent creation of a large page at address 0 first map
* the lowcore and create the identity mapping only afterwards.
*
* Skip 0x100000 bytes for kernel pgtables, as per the linker script:
* . = 0x100000;
*/
pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
for_each_physmem_usable_range(i, &start, &end)
pgtable_populate(start, end, POPULATE_DIRECT);
for_each_physmem_usable_range(i, &start, &end) {
pgtable_populate((unsigned long)__identity_va(start),
(unsigned long)__identity_va(end),
POPULATE_IDENTITY);
}
pgtable_populate(kernel_start + 0x100000, kernel_end, POPULATE_KERNEL);
pgtable_populate(AMODE31_START, AMODE31_END, POPULATE_DIRECT);
pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
POPULATE_ABS_LOWCORE);
pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
POPULATE_NONE);
memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);
memcpy_real_ptep = __identity_va(__virt_to_kpte(__memcpy_real_area));
kasan_populate_shadow();
kasan_populate_shadow(kernel_start + 0x100000, kernel_end);
S390_lowcore.kernel_asce.val = swapper_pg_dir | asce_bits;
S390_lowcore.user_asce = s390_invalid_asce;
@ -471,4 +481,5 @@ void setup_vmem(unsigned long asce_limit)
local_ctl_load(13, &S390_lowcore.kernel_asce);
init_mm.context.asce = S390_lowcore.kernel_asce.val;
init_mm.pgd = init_mm_pgd;
}


@ -208,16 +208,22 @@ static inline int kaslr_enabled(void)
#define __PAGE_OFFSET __identity_base
#define PAGE_OFFSET __PAGE_OFFSET
#define __pa_nodebug(x) ((unsigned long)(x))
#ifdef __DECOMPRESSOR
#define __pa_nodebug(x) ((unsigned long)(x))
#define __pa(x) __pa_nodebug(x)
#define __pa32(x) __pa(x)
#define __va(x) ((void *)(unsigned long)(x))
#else /* __DECOMPRESSOR */
static inline unsigned long __pa_nodebug(unsigned long x)
{
if (x < __kaslr_offset)
return x - __identity_base;
return x - __kaslr_offset + __kaslr_offset_phys;
}
#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x, bool is_31bit);
@ -233,7 +239,7 @@ static inline unsigned long __phys_addr(unsigned long x, bool is_31bit)
#define __pa(x) __phys_addr((unsigned long)(x), false)
#define __pa32(x) __phys_addr((unsigned long)(x), true)
#define __va(x) ((void *)(unsigned long)(x))
#define __va(x) ((void *)((unsigned long)(x) + __identity_base))
#endif /* __DECOMPRESSOR */
@ -258,7 +264,7 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
#define virt_addr_valid(kaddr) pfn_valid(phys_to_pfn(__pa_nodebug(kaddr)))
#define virt_addr_valid(kaddr) pfn_valid(phys_to_pfn(__pa_nodebug((unsigned long)(kaddr))))
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC


@ -107,6 +107,12 @@ static inline int is_module_addr(void *addr)
return 1;
}
#ifdef CONFIG_RANDOMIZE_BASE
#define KASLR_LEN (1UL << 31)
#else
#define KASLR_LEN 0UL
#endif
/*
* A 64 bit pagetable entry of S390 has following format:
* | PFRA |0IPC| OS |


@ -15,6 +15,7 @@
#include <asm/page-states.h>
#include <asm/abs_lowcore.h>
#include <asm/cacheflush.h>
#include <asm/maccess.h>
#include <asm/nospec-branch.h>
#include <asm/ctlreg.h>
#include <asm/pgalloc.h>
@ -22,6 +23,7 @@
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
#include <asm/physmem_info.h>
static DEFINE_MUTEX(vmem_mutex);