mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
443cbaf9e2
Now move the relevant codes into separate files: kernel/crash_reserve.c, include/linux/crash_reserve.h. And add config item CRASH_RESERVE to control its enabling.

And also update the old ifdeffery of CONFIG_CRASH_CORE, including of <linux/crash_core.h> and config item dependency on CRASH_CORE accordingly.

And also do renaming as follows:
 - arch/xxx/kernel/{crash_core.c => vmcore_info.c}
because they are only related to vmcoreinfo exporting on x86, arm64, riscv.

And also Remove config item CRASH_CORE, and rely on CONFIG_KEXEC_CORE to decide if build in crash_core.c.

[yang.lee@linux.alibaba.com: remove duplicated include in vmcore_info.c]
Link: https://lkml.kernel.org/r/20240126005744.16561-1-yang.lee@linux.alibaba.com
Link: https://lkml.kernel.org/r/20240124051254.67105-3-bhe@redhat.com
Signed-off-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Yang Li <yang.lee@linux.alibaba.com>
Acked-by: Hari Bathini <hbathini@linux.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Pingfan Liu <piliu@redhat.com>
Cc: Klara Modin <klarasmodin@gmail.com>
Cc: Michael Kelley <mhklinux@outlook.com>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Yang Li <yang.lee@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
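The "ifdeffery" update the commit message talks about boils down to switching guarded includes and config checks from the old umbrella symbol to the split-out ones. A minimal sketch of that pattern for a hypothetical call site that only needs the crashkernel reservation helpers (the actual files and guards touched by the commit are not reproduced here):

	/* Before the split: everything sat behind the umbrella CONFIG_CRASH_CORE. */
	#ifdef CONFIG_CRASH_CORE
	#include <linux/crash_core.h>
	#endif

	/*
	 * After the split (sketch): crashkernel reservation users are guarded by
	 * the new CONFIG_CRASH_RESERVE and pull in the dedicated header instead,
	 * while vmcoreinfo-only arch code moves to vmcore_info.c (see the file below).
	 */
	#ifdef CONFIG_CRASH_RESERVE
	#include <linux/crash_reserve.h>
	#endif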
39 lines
1.3 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 */

#include <linux/vmcore_info.h>
#include <asm/cpufeature.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h>

static inline u64 get_tcr_el1_t1sz(void);

static inline u64 get_tcr_el1_t1sz(void)
{
	return (read_sysreg(tcr_el1) & TCR_T1SZ_MASK) >> TCR_T1SZ_OFFSET;
}

void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_NUMBER(VA_BITS);
	/* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
	vmcoreinfo_append_str("NUMBER(MODULES_VADDR)=0x%lx\n", MODULES_VADDR);
	vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END);
	vmcoreinfo_append_str("NUMBER(VMALLOC_END)=0x%lx\n", VMALLOC_END);
	vmcoreinfo_append_str("NUMBER(VMEMMAP_START)=0x%lx\n", VMEMMAP_START);
	vmcoreinfo_append_str("NUMBER(VMEMMAP_END)=0x%lx\n", VMEMMAP_END);
	vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
						kimage_voffset);
	vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
						PHYS_OFFSET);
	vmcoreinfo_append_str("NUMBER(TCR_EL1_T1SZ)=0x%llx\n",
						get_tcr_el1_t1sz());
	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
	vmcoreinfo_append_str("NUMBER(KERNELPACMASK)=0x%llx\n",
						system_supports_address_auth() ?
						ptrauth_kernel_pac_mask() : 0);
}
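For readers unfamiliar with the helpers used above: VMCOREINFO_NUMBER() and vmcoreinfo_append_str() emit "key=value" text lines into the vmcoreinfo ELF note that dump tools such as crash and makedumpfile parse from the captured kernel. A rough sketch of what the VA_BITS line turns into, based on the macro definition in include/linux/vmcore_info.h (formerly <linux/crash_core.h>); exact formatting may vary across kernel versions:

	/*
	 * Sketch: the macro stringizes its argument and prints the value in
	 * decimal, which is what the "%d, not %x" comment in the file alludes to.
	 */
	#define VMCOREINFO_NUMBER(name) \
		vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)

	/*
	 * So VMCOREINFO_NUMBER(VA_BITS) contributes a note line such as
	 *   NUMBER(VA_BITS)=48
	 * while the explicit vmcoreinfo_append_str() calls above emit hex, e.g.
	 *   NUMBER(PHYS_OFFSET)=0x40000000
	 * (the example values are illustrative, not taken from a real dump).
	 */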