s390 updates for 6.4
 - Use correct type for size of memory allocated for ELF core header
   on kernel crash.

 - Fix insecure W+X mapping warning when KASAN shadow memory range is
   not aligned on page boundary.

 - Avoid allocation of short by one page KASAN shadow memory when the
   original memory range is less than (PAGE_SIZE << 3).

 - Fix virtual vs physical address confusion in physical memory
   enumerator. It is not a real issue, since virtual and physical
   addresses are currently the same.

 - Set CONFIG_NET_TC_SKB_EXT=y in s390 config files as it is required
   for offloading TC as well as bridges on switchdev capable ConnectX
   devices.

-----BEGIN PGP SIGNATURE-----

iI0EABYIADUWIQQrtrZiYVkVzKQcYivNdxKlNrRb8AUCZJWgRRccYWdvcmRlZXZA
bGludXguaWJtLmNvbQAKCRDNdxKlNrRb8LuPAQDaL4mjPxKvleuiTeH1hf4id48X
i5UZSf6mAirKwyo4WAEA0sAIvhdJlC8+MBEC1QtYkIJyoxmSg6AsMH5MJL+61wA=
=mJ2Q
-----END PGP SIGNATURE-----

Merge tag 's390-6.4-4' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Alexander Gordeev:

 - Use correct type for size of memory allocated for ELF core header
   on kernel crash.

 - Fix insecure W+X mapping warning when KASAN shadow memory range is
   not aligned on page boundary.

 - Avoid allocation of short by one page KASAN shadow memory when the
   original memory range is less than (PAGE_SIZE << 3).

 - Fix virtual vs physical address confusion in physical memory
   enumerator. It is not a real issue, since virtual and physical
   addresses are currently the same.

 - Set CONFIG_NET_TC_SKB_EXT=y in s390 config files as it is required
   for offloading TC as well as bridges on switchdev capable ConnectX
   devices.

* tag 's390-6.4-4' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/defconfigs: set CONFIG_NET_TC_SKB_EXT=y
  s390/boot: fix physmem_info virtual vs physical address confusion
  s390/kasan: avoid short by one page shadow memory
  s390/kasan: fix insecure W+X mapping warning
  s390/crash: use the correct type for memory allocation
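Note: the two KASAN items above come down to shadow-scale arithmetic. Generic KASAN tracks every 8 bytes of memory with 1 shadow byte, so a memory range smaller than PAGE_SIZE << 3 (32 KiB with 4 KiB pages) maps to less than one full shadow page. A minimal user-space sketch of that arithmetic (illustrative only, not kernel code; the page size and scale shift are assumed values):

/*
 * Shows why a range below PAGE_SIZE << 3 needs less than one
 * shadow page: shadow size is the range shifted right by 3.
 */
#include <stdio.h>

#define PAGE_SIZE		 4096UL
#define KASAN_SHADOW_SCALE_SHIFT 3	/* 1 shadow byte per 8 bytes */

int main(void)
{
	unsigned long range = PAGE_SIZE << 2;	/* 16 KiB of memory */
	unsigned long shadow = range >> KASAN_SHADOW_SCALE_SHIFT;

	/* 16 KiB of memory needs only 2 KiB of shadow: half a page */
	printf("range %lu -> shadow %lu bytes (%.2f pages)\n",
	       range, shadow, (double)shadow / PAGE_SIZE);
	return 0;
}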
commit 9d9a9bf07e

6 changed files with 27 additions and 11 deletions
arch/s390/boot/vmem.c:

@@ -45,6 +45,13 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
 
 static pte_t pte_z;
 
+static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
+{
+	start = PAGE_ALIGN_DOWN(__sha(start));
+	end = PAGE_ALIGN(__sha(end));
+	pgtable_populate(start, end, mode);
+}
+
 static void kasan_populate_shadow(void)
 {
 	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
@@ -95,17 +102,17 @@ static void kasan_populate_shadow(void)
 	 */
 
 	for_each_physmem_usable_range(i, &start, &end)
-		pgtable_populate(__sha(start), __sha(end), POPULATE_KASAN_MAP_SHADOW);
+		kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
 		untracked_end = VMALLOC_START;
 		/* shallowly populate kasan shadow for vmalloc and modules */
-		pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END), POPULATE_KASAN_SHALLOW);
+		kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
 	} else {
 		untracked_end = MODULES_VADDR;
 	}
 	/* populate kasan shadow for untracked memory */
-	pgtable_populate(__sha(ident_map_size), __sha(untracked_end), POPULATE_KASAN_ZERO_SHADOW);
-	pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE), POPULATE_KASAN_ZERO_SHADOW);
+	kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
+	kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
 }
 
 static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
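For review context: kasan_populate() rounds the shadow start down and the shadow end up to page boundaries before calling pgtable_populate(), which is what avoids the short-by-one-page shadow. A user-space sketch of that rounding, with __sha() mocked as a plain >> 3 shift (the real helper also adds the shadow offset):

#include <stdio.h>

#define PAGE_SIZE	   4096UL
#define PAGE_MASK	   (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	   (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define PAGE_ALIGN_DOWN(x) ((x) & PAGE_MASK)
/* mocked shadow translation; the kernel's __sha() adds an offset too */
#define __sha(x)	   ((x) >> 3)

int main(void)
{
	unsigned long start = 0x10000, end = 0x14000;	/* 16 KiB range */

	/* raw shadow bounds 0x2000..0x2800 do not end on a page boundary */
	printf("raw shadow:     %#lx..%#lx\n", __sha(start), __sha(end));
	/* rounded as in kasan_populate(): one full page, 0x2000..0x3000 */
	printf("aligned shadow: %#lx..%#lx\n",
	       PAGE_ALIGN_DOWN(__sha(start)), PAGE_ALIGN(__sha(end)));
	return 0;
}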
arch/s390/configs/debug_defconfig:

@@ -116,6 +116,7 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_NET_TC_SKB_EXT=y
 CONFIG_SMC=m
 CONFIG_SMC_DIAG=m
 CONFIG_INET=y
arch/s390/configs/defconfig:

@@ -107,6 +107,7 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_NET_TC_SKB_EXT=y
 CONFIG_SMC=m
 CONFIG_SMC_DIAG=m
 CONFIG_INET=y
arch/s390/include/asm/physmem_info.h:

@@ -3,6 +3,7 @@
 #define _ASM_S390_MEM_DETECT_H
 
 #include <linux/types.h>
+#include <asm/page.h>
 
 enum physmem_info_source {
 	MEM_DETECT_NONE = 0,
@@ -133,7 +134,7 @@ static inline const char *get_rr_type_name(enum reserved_range_type t)
 
 #define for_each_physmem_reserved_type_range(t, range, p_start, p_end)		\
 	for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end;	\
-	     range && range->end; range = range->chain,						\
+	     range && range->end; range = range->chain ? __va(range->chain) : NULL,	\
 	     *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
 
 static inline struct reserved_range *__physmem_reserved_next(enum reserved_range_type *t,
@@ -145,7 +146,7 @@ static inline struct reserved_range *__physmem_reserved_next(enum reserved_range
 		return range;
 	}
 	if (range->chain)
-		return range->chain;
+		return __va(range->chain);
 	while (++*t < RR_MAX) {
 		range = &physmem_info.reserved[*t];
 		if (range->end)
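The header fix treats range->chain as the physical address it actually holds and converts it with __va() before dereferencing. A hedged sketch of the traversal pattern with mocked types (on current s390 the identity mapping makes __va() effectively a cast, which is why the confusion was harmless in practice):

#include <stddef.h>
#include <stdio.h>

/* mocked: with an identity mapping, physical == virtual */
#define __va(pa) ((void *)(unsigned long)(pa))

struct reserved_range {
	unsigned long start;
	unsigned long end;
	unsigned long chain;	/* physical address of next range, or 0 */
};

int main(void)
{
	struct reserved_range second = { 0x2000, 0x3000, 0 };
	struct reserved_range first = { 0x1000, 0x2000,
					(unsigned long)&second };

	/* walk the chain, converting each physical link to a pointer */
	for (struct reserved_range *r = &first; r;
	     r = r->chain ? __va(r->chain) : NULL)
		printf("range %#lx..%#lx\n", r->start, r->end);
	return 0;
}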
arch/s390/kernel/crash_dump.c:

@@ -568,9 +568,9 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
 int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 {
 	Elf64_Phdr *phdr_notes, *phdr_loads;
+	size_t alloc_size;
 	int mem_chunk_cnt;
 	void *ptr, *hdr;
-	u32 alloc_size;
 	u64 hdr_off;
 
 	/* If we are not in kdump or zfcp/nvme dump mode return */
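The crash_dump.c fix matches the variable type to get_elfcorehdr_size(), which returns size_t. A minimal sketch (hypothetical size, not a value the kernel would normally see) of what a 32-bit variable does to a large 64-bit size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical allocation size just above 4 GiB */
	size_t alloc_size = (1ULL << 32) + 4096;
	uint32_t truncated = alloc_size;	/* the old u32 variable */

	/* prints 4294971392 vs 4096: the top bits are silently lost */
	printf("size_t: %zu, u32: %u\n", alloc_size, truncated);
	return 0;
}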
arch/s390/mm/vmem.c:

@@ -667,7 +667,15 @@ static void __init memblock_region_swap(void *a, void *b, int size)
 
 #ifdef CONFIG_KASAN
 #define __sha(x)	((unsigned long)kasan_mem_to_shadow((void *)x))
+
+static inline int set_memory_kasan(unsigned long start, unsigned long end)
+{
+	start = PAGE_ALIGN_DOWN(__sha(start));
+	end = PAGE_ALIGN(__sha(end));
+	return set_memory_rwnx(start, (end - start) >> PAGE_SHIFT);
+}
+
 #endif
 
 /*
  * map whole physical memory to virtual memory (identity mapping)
  * we reserve enough space in the vmalloc area for vmemmap to hotplug
@@ -737,10 +745,8 @@ void __init vmem_map_init(void)
 	}
 
 #ifdef CONFIG_KASAN
-	for_each_mem_range(i, &base, &end) {
-		set_memory_rwnx(__sha(base),
-				(__sha(end) - __sha(base)) >> PAGE_SHIFT);
-	}
+	for_each_mem_range(i, &base, &end)
+		set_memory_kasan(base, end);
 #endif
 	set_memory_rox((unsigned long)_stext,
 		       (unsigned long)(_etext - _stext) >> PAGE_SHIFT);
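This rework exists because the kernel's W+X checker warns about any mapping that is both writable and executable; aligning the shadow bounds inside set_memory_kasan() ensures no edge page of the shadow is left in that state. A toy version of such a W+X scan over mocked protection flags (not the kernel's ptdump code):

#include <stdbool.h>
#include <stdio.h>

/* mocked protection bits, loosely modeled on page-table flags */
#define PROT_WRITE 0x1
#define PROT_EXEC  0x2

struct mapping {
	const char *name;
	unsigned int prot;
};

/* flag any mapping that is simultaneously writable and executable */
static bool check_wx(const struct mapping *m)
{
	if ((m->prot & PROT_WRITE) && (m->prot & PROT_EXEC)) {
		printf("W+X mapping found: %s\n", m->name);
		return true;
	}
	return false;
}

int main(void)
{
	struct mapping maps[] = {
		{ "kasan shadow (rwnx)", PROT_WRITE },
		{ "kernel text (rox)",   PROT_EXEC },
		{ "stray page (rwx)",    PROT_WRITE | PROT_EXEC },
	};

	for (size_t i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
		check_wx(&maps[i]);
	return 0;
}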