From 094a3684b9b67758ccedf0e6068d90f22f2942d9 Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:30:59 +0000 Subject: [PATCH 01/15] arm64: kernel: add helper for booted at EL2 and not VHE Replace places that contain logic like this: is_hyp_mode_available() && !is_kernel_in_hyp_mode() With a dedicated boolean function is_hyp_nvhe(). This will be needed later in kexec in order to switch back to EL2 sooner. Suggested-by: James Morse Signed-off-by: Pasha Tatashin Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-2-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/virt.h | 5 +++++ arch/arm64/kernel/cpu-reset.h | 3 +-- arch/arm64/kernel/hibernate.c | 2 +- arch/arm64/kernel/sdei.c | 2 +- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 7379f35ae2c6..a9457e96203c 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -128,6 +128,11 @@ static __always_inline bool is_protected_kvm_enabled(void) return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE); } +static inline bool is_hyp_nvhe(void) +{ + return is_hyp_mode_available() && !is_kernel_in_hyp_mode(); +} + #endif /* __ASSEMBLY__ */ #endif /* ! __ASM__VIRT_H */ diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h index 9a7b1262ef17..81b3d0fe7a63 100644 --- a/arch/arm64/kernel/cpu-reset.h +++ b/arch/arm64/kernel/cpu-reset.h @@ -20,8 +20,7 @@ static inline void __noreturn __nocfi cpu_soft_restart(unsigned long entry, { typeof(__cpu_soft_restart) *restart; - unsigned long el2_switch = !is_kernel_in_hyp_mode() && - is_hyp_mode_available(); + unsigned long el2_switch = is_hyp_nvhe(); restart = (void *)__pa_symbol(function_nocfi(__cpu_soft_restart)); cpu_install_idmap(); diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 46a0b4d6e251..4c9533f4c0c4 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -49,7 +49,7 @@ extern int in_suspend; /* Do we need to reset el2? */ -#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) +#define el2_reset_needed() (is_hyp_nvhe()) /* temporary el2 vectors in the __hibernate_exit_text section. */ extern char hibernate_el2_vectors[]; diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 47f77d1234cb..d20620a1c51a 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -202,7 +202,7 @@ unsigned long sdei_arch_get_entry_point(int conduit) * dropped to EL1 because we don't support VHE, then we can't support * SDEI. */ - if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) { + if (is_hyp_nvhe()) { pr_err("Not supported on this hardware/boot configuration\n"); goto out_err; } From 788bfdd97434982b6d575062581e8e72eea755af Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:00 +0000 Subject: [PATCH 02/15] arm64: trans_pgd: hibernate: Add trans_pgd_copy_el2_vectors Users of trans_pgd may also need a copy of the vector table, because it may also be overwritten if the linear map can be overwritten. Move the setup of the EL2 vectors from hibernate to trans_pgd, so that it can later be shared with kexec as well.
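In essence, a trans_pgd user is expected to follow the pattern below (a sketch of the hibernate conversion in this patch; kexec will follow the same shape later):

	phys_addr_t el2_vectors;

	if (el2_reset_needed()) {
		rc = trans_pgd_copy_el2_vectors(&trans_info, &el2_vectors);
		if (rc)
			return rc;
	}
	...
	if (el2_reset_needed())
		__hyp_set_vectors(el2_vectors);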
Signed-off-by: Pasha Tatashin Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-3-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/trans_pgd.h | 9 +++-- arch/arm64/include/asm/virt.h | 2 ++ arch/arm64/kernel/hibernate-asm.S | 52 --------------------------- arch/arm64/kernel/hibernate.c | 26 ++++++-------- arch/arm64/mm/Makefile | 1 + arch/arm64/mm/trans_pgd-asm.S | 58 ++++++++++++++++++++++++++++++ arch/arm64/mm/trans_pgd.c | 27 ++++++++++++-- 7 files changed, 103 insertions(+), 72 deletions(-) create mode 100644 arch/arm64/mm/trans_pgd-asm.S diff --git a/arch/arm64/include/asm/trans_pgd.h b/arch/arm64/include/asm/trans_pgd.h index 5d08e5adf3d5..7b04d32b102c 100644 --- a/arch/arm64/include/asm/trans_pgd.h +++ b/arch/arm64/include/asm/trans_pgd.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2020, Microsoft Corporation. - * Pavel Tatashin + * Copyright (c) 2021, Microsoft Corporation. + * Pasha Tatashin */ #ifndef _ASM_TRANS_TABLE_H @@ -36,4 +36,9 @@ int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd, int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0, unsigned long *t0sz, void *page); +int trans_pgd_copy_el2_vectors(struct trans_pgd_info *info, + phys_addr_t *el2_vectors); + +extern char trans_pgd_stub_vectors[]; + #endif /* _ASM_TRANS_TABLE_H */ diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index a9457e96203c..3c8af033a997 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -67,6 +67,8 @@ */ extern u32 __boot_cpu_mode[2]; +#define ARM64_VECTOR_TABLE_LEN SZ_2K + void __hyp_set_vectors(phys_addr_t phys_vector_base); void __hyp_reset_vectors(void); diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S index 81c0186a5e32..a30a2c3f905e 100644 --- a/arch/arm64/kernel/hibernate-asm.S +++ b/arch/arm64/kernel/hibernate-asm.S @@ -112,56 +112,4 @@ alternative_insn "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE hvc #0 3: ret SYM_CODE_END(swsusp_arch_suspend_exit) - -/* - * Restore the hyp stub. - * This must be done before the hibernate page is unmapped by _cpu_resume(), - * but happens before any of the hyp-stub's code is cleaned to PoC. - * - * x24: The physical address of __hyp_stub_vectors - */ -SYM_CODE_START_LOCAL(el1_sync) - msr vbar_el2, x24 - eret -SYM_CODE_END(el1_sync) - -.macro invalid_vector label -SYM_CODE_START_LOCAL(\label) - b \label -SYM_CODE_END(\label) -.endm - - invalid_vector el2_sync_invalid - invalid_vector el2_irq_invalid - invalid_vector el2_fiq_invalid - invalid_vector el2_error_invalid - invalid_vector el1_sync_invalid - invalid_vector el1_irq_invalid - invalid_vector el1_fiq_invalid - invalid_vector el1_error_invalid - -/* el2 vectors - switch el2 here while we restore the memory image. 
*/ - .align 11 -SYM_CODE_START(hibernate_el2_vectors) - ventry el2_sync_invalid // Synchronous EL2t - ventry el2_irq_invalid // IRQ EL2t - ventry el2_fiq_invalid // FIQ EL2t - ventry el2_error_invalid // Error EL2t - - ventry el2_sync_invalid // Synchronous EL2h - ventry el2_irq_invalid // IRQ EL2h - ventry el2_fiq_invalid // FIQ EL2h - ventry el2_error_invalid // Error EL2h - - ventry el1_sync // Synchronous 64-bit EL1 - ventry el1_irq_invalid // IRQ 64-bit EL1 - ventry el1_fiq_invalid // FIQ 64-bit EL1 - ventry el1_error_invalid // Error 64-bit EL1 - - ventry el1_sync_invalid // Synchronous 32-bit EL1 - ventry el1_irq_invalid // IRQ 32-bit EL1 - ventry el1_fiq_invalid // FIQ 32-bit EL1 - ventry el1_error_invalid // Error 32-bit EL1 -SYM_CODE_END(hibernate_el2_vectors) - .popsection diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 4c9533f4c0c4..b96ef9060e4c 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -51,9 +51,6 @@ extern int in_suspend; /* Do we need to reset el2? */ #define el2_reset_needed() (is_hyp_nvhe()) -/* temporary el2 vectors in the __hibernate_exit_text section. */ -extern char hibernate_el2_vectors[]; - /* hyp-stub vectors, used to restore el2 during resume from hibernate. */ extern char __hyp_stub_vectors[]; @@ -434,6 +431,7 @@ int swsusp_arch_resume(void) void *zero_page; size_t exit_size; pgd_t *tmp_pg_dir; + phys_addr_t el2_vectors; void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *, void *, phys_addr_t, phys_addr_t); struct trans_pgd_info trans_info = { @@ -461,6 +459,14 @@ int swsusp_arch_resume(void) return -ENOMEM; } + if (el2_reset_needed()) { + rc = trans_pgd_copy_el2_vectors(&trans_info, &el2_vectors); + if (rc) { + pr_err("Failed to setup el2 vectors\n"); + return rc; + } + } + exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start; /* * Copy swsusp_arch_suspend_exit() to a safe page. This will generate @@ -473,26 +479,14 @@ int swsusp_arch_resume(void) return rc; } - /* - * The hibernate exit text contains a set of el2 vectors, that will - * be executed at el2 with the mmu off in order to reload hyp-stub. - */ - dcache_clean_inval_poc((unsigned long)hibernate_exit, - (unsigned long)hibernate_exit + exit_size); - /* * KASLR will cause the el2 vectors to be in a different location in * the resumed kernel. Load hibernate's temporary copy into el2. * * We can skip this step if we booted at EL1, or are running with VHE. 
*/ - if (el2_reset_needed()) { - phys_addr_t el2_vectors = (phys_addr_t)hibernate_exit; - el2_vectors += hibernate_el2_vectors - - __hibernate_exit_text_start; /* offset */ - + if (el2_reset_needed()) __hyp_set_vectors(el2_vectors); - } hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1, resume_hdr.reenter_kernel, restore_pblist, diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index f188c9092696..ff1e800ba7a1 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_PTDUMP_CORE) += ptdump.o obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o obj-$(CONFIG_TRANS_TABLE) += trans_pgd.o +obj-$(CONFIG_TRANS_TABLE) += trans_pgd-asm.o obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o obj-$(CONFIG_ARM64_MTE) += mteswap.o KASAN_SANITIZE_physaddr.o += n diff --git a/arch/arm64/mm/trans_pgd-asm.S b/arch/arm64/mm/trans_pgd-asm.S new file mode 100644 index 000000000000..8c4bffe3089d --- /dev/null +++ b/arch/arm64/mm/trans_pgd-asm.S @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* + * Copyright (c) 2021, Microsoft Corporation. + * Pasha Tatashin + */ + +#include +#include +#include + +.macro invalid_vector label +SYM_CODE_START_LOCAL(\label) + .align 7 + b \label +SYM_CODE_END(\label) +.endm + +.macro el1_sync_vector +SYM_CODE_START_LOCAL(el1_sync) + .align 7 + cmp x0, #HVC_SET_VECTORS /* Called from hibernate */ + b.ne 1f + msr vbar_el2, x1 + mov x0, xzr + eret +1: /* Unexpected argument, set an error */ + mov_q x0, HVC_STUB_ERR + eret +SYM_CODE_END(el1_sync) +.endm + +SYM_CODE_START(trans_pgd_stub_vectors) + invalid_vector hyp_stub_el2t_sync_invalid // Synchronous EL2t + invalid_vector hyp_stub_el2t_irq_invalid // IRQ EL2t + invalid_vector hyp_stub_el2t_fiq_invalid // FIQ EL2t + invalid_vector hyp_stub_el2t_error_invalid // Error EL2t + + invalid_vector hyp_stub_el2h_sync_invalid // Synchronous EL2h + invalid_vector hyp_stub_el2h_irq_invalid // IRQ EL2h + invalid_vector hyp_stub_el2h_fiq_invalid // FIQ EL2h + invalid_vector hyp_stub_el2h_error_invalid // Error EL2h + + el1_sync_vector // Synchronous 64-bit EL1 + invalid_vector hyp_stub_el1_irq_invalid // IRQ 64-bit EL1 + invalid_vector hyp_stub_el1_fiq_invalid // FIQ 64-bit EL1 + invalid_vector hyp_stub_el1_error_invalid // Error 64-bit EL1 + + invalid_vector hyp_stub_32b_el1_sync_invalid // Synchronous 32-bit EL1 + invalid_vector hyp_stub_32b_el1_irq_invalid // IRQ 32-bit EL1 + invalid_vector hyp_stub_32b_el1_fiq_invalid // FIQ 32-bit EL1 + invalid_vector hyp_stub_32b_el1_error_invalid // Error 32-bit EL1 + .align 11 +SYM_INNER_LABEL(__trans_pgd_stub_vectors_end, SYM_L_LOCAL) +SYM_CODE_END(trans_pgd_stub_vectors) + +# Check the trans_pgd_stub_vectors didn't overflow +.org . - (__trans_pgd_stub_vectors_end - trans_pgd_stub_vectors) + SZ_2K diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c index 527f0a39c3da..26bd8f2d95af 100644 --- a/arch/arm64/mm/trans_pgd.c +++ b/arch/arm64/mm/trans_pgd.c @@ -5,8 +5,8 @@ * * This file derived from: arch/arm64/kernel/hibernate.c * - * Copyright (c) 2020, Microsoft Corporation. - * Pavel Tatashin + * Copyright (c) 2021, Microsoft Corporation. + * Pasha Tatashin * */ @@ -322,3 +322,26 @@ int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0, return 0; } + +/* + * Create a copy of the vector table so we can call HVC_SET_VECTORS or + * HVC_SOFT_RESTART from contexts where the table may be overwritten. 
+ */ +int trans_pgd_copy_el2_vectors(struct trans_pgd_info *info, + phys_addr_t *el2_vectors) +{ + void *hyp_stub = trans_alloc(info); + + if (!hyp_stub) + return -ENOMEM; + *el2_vectors = virt_to_phys(hyp_stub); + memcpy(hyp_stub, &trans_pgd_stub_vectors, ARM64_VECTOR_TABLE_LEN); + caches_clean_inval_pou((unsigned long)hyp_stub, + (unsigned long)hyp_stub + + ARM64_VECTOR_TABLE_LEN); + dcache_clean_inval_poc((unsigned long)hyp_stub, + (unsigned long)hyp_stub + + ARM64_VECTOR_TABLE_LEN); + + return 0; +} From a347f601452ff3e7cc15bc31307915cea3b3f3f5 Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:01 +0000 Subject: [PATCH 03/15] arm64: hibernate: abstract ttbr0 setup function Currently, only hibernate sets a custom ttbr0 with a safe idmapped function. Kexec is also going to use this functionality once its relocation code is idmapped. Move the setup sequence to a dedicated cpu_install_ttbr0() helper for installing a custom ttbr0. Suggested-by: James Morse Signed-off-by: Pasha Tatashin Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-4-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/mmu_context.h | 24 ++++++++++++++++++++++++ arch/arm64/kernel/hibernate.c | 21 +-------------------- 2 files changed, 25 insertions(+), 20 deletions(-) diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index f4ba93d4ffeb..6770667b34a3 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -115,6 +115,30 @@ static inline void cpu_install_idmap(void) cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm); } +/* + * Load our new page tables. A strict BBM approach requires that we ensure that + * TLBs are free of any entries that may overlap with the global mappings we are + * about to install. + * + * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero + * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime + * services), while for a userspace-driven test_resume cycle it points to + * userspace page tables (and we must point it at a zero page ourselves). + * + * We change T0SZ as part of installing the idmap. This is undone by + * cpu_uninstall_idmap() in __cpu_suspend_exit(). + */ +static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz) +{ + cpu_set_reserved_ttbr0(); + local_flush_tlb_all(); + __cpu_set_tcr_t0sz(t0sz); + + /* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */ + write_sysreg(ttbr0, ttbr0_el1); + isb(); +} + /* * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD, * avoiding the possibility of conflicting TLB entries being allocated. diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index b96ef9060e4c..2758f75d6809 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -212,26 +212,7 @@ static int create_safe_exec_page(void *src_start, size_t length, if (rc) return rc; - /* - * Load our new page tables. A strict BBM approach requires that we - * ensure that TLBs are free of any entries that may overlap with the - * global mappings we are about to install. - * - * For a real hibernate/resume cycle TTBR0 currently points to a zero - * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI - * runtime services), while for a userspace-driven test_resume cycle it - * points to userspace page tables (and we must point it at a zero page - * ourselves).
- * - * We change T0SZ as part of installing the idmap. This is undone by - * cpu_uninstall_idmap() in __cpu_suspend_exit(). - */ - cpu_set_reserved_ttbr0(); - local_flush_tlb_all(); - __cpu_set_tcr_t0sz(t0sz); - write_sysreg(trans_ttbr0, ttbr0_el1); - isb(); - + cpu_install_ttbr0(trans_ttbr0, t0sz); *phys_dst_addr = virt_to_phys(page); return 0; From 0d8732e461d6b4dc2c625a69225f20e24da4dd79 Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:02 +0000 Subject: [PATCH 04/15] arm64: kexec: flush image and lists during kexec load time Currently, during kexec load we are copying relocation function and flushing it. However, we can also flush kexec relocation buffers and if new kernel image is already in place (i.e. crash kernel), we can also flush the new kernel image itself. Signed-off-by: Pasha Tatashin Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-5-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/kernel/machine_kexec.c | 58 ++++++++++++++----------------- 1 file changed, 26 insertions(+), 32 deletions(-) diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 213d56c14f60..b6d5a02cba2e 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -60,29 +60,6 @@ void machine_kexec_cleanup(struct kimage *kimage) /* Empty routine needed to avoid build errors. */ } -int machine_kexec_post_load(struct kimage *kimage) -{ - void *reloc_code = page_to_virt(kimage->control_code_page); - - memcpy(reloc_code, arm64_relocate_new_kernel, - arm64_relocate_new_kernel_size); - kimage->arch.kern_reloc = __pa(reloc_code); - kexec_image_info(kimage); - - /* - * For execution with the MMU off, reloc_code needs to be cleaned to the - * PoC and invalidated from the I-cache. - */ - dcache_clean_inval_poc((unsigned long)reloc_code, - (unsigned long)reloc_code + - arm64_relocate_new_kernel_size); - icache_inval_pou((uintptr_t)reloc_code, - (uintptr_t)reloc_code + - arm64_relocate_new_kernel_size); - - return 0; -} - /** * machine_kexec_prepare - Prepare for a kexec reboot. * @@ -163,6 +140,32 @@ static void kexec_segment_flush(const struct kimage *kimage) } } +int machine_kexec_post_load(struct kimage *kimage) +{ + void *reloc_code = page_to_virt(kimage->control_code_page); + + /* If in place flush new kernel image, else flush lists and buffers */ + if (kimage->head & IND_DONE) + kexec_segment_flush(kimage); + else + kexec_list_flush(kimage); + + memcpy(reloc_code, arm64_relocate_new_kernel, + arm64_relocate_new_kernel_size); + kimage->arch.kern_reloc = __pa(reloc_code); + kexec_image_info(kimage); + + /* Flush the reloc_code in preparation for its execution. */ + dcache_clean_inval_poc((unsigned long)reloc_code, + (unsigned long)reloc_code + + arm64_relocate_new_kernel_size); + icache_inval_pou((uintptr_t)reloc_code, + (uintptr_t)reloc_code + + arm64_relocate_new_kernel_size); + + return 0; +} + /** * machine_kexec - Do the kexec reboot. * @@ -180,13 +183,6 @@ void machine_kexec(struct kimage *kimage) WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()), "Some CPUs may be stale, kdump will be unreliable.\n"); - /* Flush the kimage list and its buffers. */ - kexec_list_flush(kimage); - - /* Flush the new image if already in place. 
*/ - if ((kimage != kexec_crash_image) && (kimage->head & IND_DONE)) - kexec_segment_flush(kimage); - pr_info("Bye!\n"); local_daif_mask(); @@ -261,8 +257,6 @@ void arch_kexec_protect_crashkres(void) { int i; - kexec_segment_flush(kexec_crash_image); - for (i = 0; i < kexec_crash_image->nr_segments; i++) set_memory_valid( __phys_to_virt(kexec_crash_image->segment[i].mem), From 5bb6834fc2900052a377df79b9ab065a698bf70b Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:03 +0000 Subject: [PATCH 05/15] arm64: kexec: skip relocation code for inplace kexec In case of kdump or when segments are already in place the relocation is not needed, therefore the setup of relocation function and call to it can be skipped. Signed-off-by: Pasha Tatashin Suggested-by: James Morse Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-6-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/kernel/machine_kexec.c | 34 ++++++++++++++++++----------- arch/arm64/kernel/relocate_kernel.S | 3 --- 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index b6d5a02cba2e..7f1cb5a2a463 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -144,16 +144,16 @@ int machine_kexec_post_load(struct kimage *kimage) { void *reloc_code = page_to_virt(kimage->control_code_page); - /* If in place flush new kernel image, else flush lists and buffers */ - if (kimage->head & IND_DONE) + /* If in place, relocation is not used, only flush next kernel */ + if (kimage->head & IND_DONE) { kexec_segment_flush(kimage); - else - kexec_list_flush(kimage); + kexec_image_info(kimage); + return 0; + } memcpy(reloc_code, arm64_relocate_new_kernel, arm64_relocate_new_kernel_size); kimage->arch.kern_reloc = __pa(reloc_code); - kexec_image_info(kimage); /* Flush the reloc_code in preparation for its execution. */ dcache_clean_inval_poc((unsigned long)reloc_code, @@ -162,6 +162,8 @@ int machine_kexec_post_load(struct kimage *kimage) icache_inval_pou((uintptr_t)reloc_code, (uintptr_t)reloc_code + arm64_relocate_new_kernel_size); + kexec_list_flush(kimage); + kexec_image_info(kimage); return 0; } @@ -188,19 +190,25 @@ void machine_kexec(struct kimage *kimage) local_daif_mask(); /* - * cpu_soft_restart will shutdown the MMU, disable data caches, then - * transfer control to the kern_reloc which contains a copy of - * the arm64_relocate_new_kernel routine. arm64_relocate_new_kernel - * uses physical addressing to relocate the new image to its final - * position and transfers control to the image entry point when the - * relocation is complete. + * Both restart and cpu_soft_restart will shutdown the MMU, disable data + * caches. However, restart will start new kernel or purgatory directly, + * cpu_soft_restart will transfer control to arm64_relocate_new_kernel * In kexec case, kimage->start points to purgatory assuming that * kernel entry and dtb address are embedded in purgatory by * userspace (kexec-tools). * In kexec_file case, the kernel starts directly without purgatory. 
*/ - cpu_soft_restart(kimage->arch.kern_reloc, kimage->head, kimage->start, - kimage->arch.dtb_mem); + if (kimage->head & IND_DONE) { + typeof(__cpu_soft_restart) *restart; + + cpu_install_idmap(); + restart = (void *)__pa_symbol(function_nocfi(__cpu_soft_restart)); + restart(is_hyp_nvhe(), kimage->start, kimage->arch.dtb_mem, + 0, 0); + } else { + cpu_soft_restart(kimage->arch.kern_reloc, kimage->head, + kimage->start, kimage->arch.dtb_mem); + } BUG(); /* Should never get here. */ } diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index b78ea5de97a4..8058fabe0a76 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -32,8 +32,6 @@ SYM_CODE_START(arm64_relocate_new_kernel) mov x16, x0 /* x16 = kimage_head */ mov x14, xzr /* x14 = entry ptr */ mov x13, xzr /* x13 = copy dest */ - /* Check if the new image needs relocation. */ - tbnz x16, IND_DONE_BIT, .Ldone raw_dcache_line_size x15, x1 /* x15 = dcache line size */ .Lloop: and x12, x16, PAGE_MASK /* x12 = addr */ @@ -65,7 +63,6 @@ SYM_CODE_START(arm64_relocate_new_kernel) .Lnext: ldr x16, [x14], #8 /* entry = *ptr++ */ tbz x16, IND_DONE_BIT, .Lloop /* while (!(entry & DONE)) */ -.Ldone: /* wait for writes from copy_page to finish */ dsb nsh ic iallu From 3036ec599332cdfb406249270e50ad3f1a5c5940 Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:04 +0000 Subject: [PATCH 06/15] arm64: kexec: Use dcache ops macros instead of open-coding kexec does dcache maintenance when it re-writes all memory. Our dcache_by_line_op macro depends on reading the sanitized DminLine from memory. Kexec may have overwritten this, so open-codes the sequence. dcache_by_line_op is a whole set of macros, it uses dcache_line_size which uses read_ctr for the sanitsed DminLine. Reading the DminLine is the first thing the dcache_by_line_op does. Rename dcache_by_line_op dcache_by_myline_op and take DminLine as an argument. Kexec can now use the slightly smaller macro. This makes up-coming changes to the dcache maintenance easier on the eye. Code generated by the existing callers is unchanged. Suggested-by: James Morse Signed-off-by: Pasha Tatashin Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-7-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 30 ++++++++++++++++++++++------- arch/arm64/kernel/relocate_kernel.S | 13 +++---------- 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index bfa58409a4d4..d5281f75a58d 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -405,19 +405,19 @@ alternative_endif /* * Macro to perform a data cache maintenance for the interval - * [start, end) + * [start, end) with dcache line size explicitly provided. 
* * op: operation passed to dc instruction * domain: domain used in dsb instruciton * start: starting virtual address of the region * end: end virtual address of the region + * linesz: dcache line size * fixup: optional label to branch to on user fault - * Corrupts: start, end, tmp1, tmp2 + * Corrupts: start, end, tmp */ - .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup - dcache_line_size \tmp1, \tmp2 - sub \tmp2, \tmp1, #1 - bic \start, \start, \tmp2 + .macro dcache_by_myline_op op, domain, start, end, linesz, tmp, fixup + sub \tmp, \linesz, #1 + bic \start, \start, \tmp .Ldcache_op\@: .ifc \op, cvau __dcache_op_workaround_clean_cache \op, \start @@ -436,7 +436,7 @@ alternative_endif .endif .endif .endif - add \start, \start, \tmp1 + add \start, \start, \linesz cmp \start, \end b.lo .Ldcache_op\@ dsb \domain @@ -444,6 +444,22 @@ alternative_endif _cond_extable .Ldcache_op\@, \fixup .endm +/* + * Macro to perform a data cache maintenance for the interval + * [start, end) + * + * op: operation passed to dc instruction + * domain: domain used in dsb instruciton + * start: starting virtual address of the region + * end: end virtual address of the region + * fixup: optional label to branch to on user fault + * Corrupts: start, end, tmp1, tmp2 + */ + .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup + dcache_line_size \tmp1, \tmp2 + dcache_by_myline_op \op, \domain, \start, \end, \tmp1, \tmp2, \fixup + .endm + /* * Macro to perform an instruction cache maintenance for the interval * [start, end) diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index 8058fabe0a76..8c43779e8cc6 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -41,16 +41,9 @@ SYM_CODE_START(arm64_relocate_new_kernel) tbz x16, IND_SOURCE_BIT, .Ltest_indirection /* Invalidate dest page to PoC. */ - mov x2, x13 - add x20, x2, #PAGE_SIZE - sub x1, x15, #1 - bic x2, x2, x1 -2: dc ivac, x2 - add x2, x2, x15 - cmp x2, x20 - b.lo 2b - dsb sy - + mov x2, x13 + add x1, x2, #PAGE_SIZE + dcache_by_myline_op ivac, sy, x2, x1, x15, x20 copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8 b .Lnext .Ltest_indirection: From 878fdbd704864352b9b11e29805e92ffa182904e Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:05 +0000 Subject: [PATCH 07/15] arm64: kexec: pass kimage as the only argument to relocation function Currently, the kexec relocation function (arm64_relocate_new_kernel) accepts the following arguments: head: start of array that contains relocation information. entry: entry point for new kernel or purgatory. dtb_mem: first and only argument to entry. The number of arguments cannot be easily expanded, because this function is also called from HVC_SOFT_RESTART, which preserves only three arguments. Also, arm64_relocate_new_kernel is written in assembly and is called without a stack, so there is no place to move extra arguments beyond the free registers. Soon, we will need to pass more arguments: once we enable the MMU we will need to pass information about page tables. Pass kimage to arm64_relocate_new_kernel, and teach it to get the required fields from kimage.
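With this change the C side of the call collapses to a single kimage argument (sketch; the exact call site is in the machine_kexec() hunk below):

	cpu_soft_restart(kimage->arch.kern_reloc, virt_to_phys(kimage), 0, 0);

and arm64_relocate_new_kernel fetches what it needs from the kimage via asm-offsets, for example:

	ldr	x16, [x0, #KIMAGE_HEAD]		/* x16 = kimage_head */
	...
	ldr	x4, [x0, #KIMAGE_START]		/* relocation start */
	ldr	x0, [x0, #KIMAGE_ARCH_DTB_MEM]	/* dtb address */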
Signed-off-by: Pasha Tatashin Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-8-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/kernel/asm-offsets.c | 7 +++++++ arch/arm64/kernel/machine_kexec.c | 7 +++++-- arch/arm64/kernel/relocate_kernel.S | 10 ++++------ 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 551427ae8cc5..1d3319c7518e 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -170,6 +171,12 @@ int main(void) DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia)); #endif BLANK(); +#endif +#ifdef CONFIG_KEXEC_CORE + DEFINE(KIMAGE_ARCH_DTB_MEM, offsetof(struct kimage, arch.dtb_mem)); + DEFINE(KIMAGE_HEAD, offsetof(struct kimage, head)); + DEFINE(KIMAGE_START, offsetof(struct kimage, start)); + BLANK(); #endif return 0; } diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 7f1cb5a2a463..e210b19592c6 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -84,6 +84,9 @@ static void kexec_list_flush(struct kimage *kimage) { kimage_entry_t *entry; + dcache_clean_inval_poc((unsigned long)kimage, + (unsigned long)kimage + sizeof(*kimage)); + for (entry = &kimage->head; ; entry++) { unsigned int flag; unsigned long addr; @@ -206,8 +209,8 @@ void machine_kexec(struct kimage *kimage) restart(is_hyp_nvhe(), kimage->start, kimage->arch.dtb_mem, 0, 0); } else { - cpu_soft_restart(kimage->arch.kern_reloc, kimage->head, - kimage->start, kimage->arch.dtb_mem); + cpu_soft_restart(kimage->arch.kern_reloc, virt_to_phys(kimage), + 0, 0); } BUG(); /* Should never get here. */ diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index 8c43779e8cc6..63ea19868f63 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -27,9 +27,7 @@ */ SYM_CODE_START(arm64_relocate_new_kernel) /* Setup the list loop variables. */ - mov x18, x2 /* x18 = dtb address */ - mov x17, x1 /* x17 = kimage_start */ - mov x16, x0 /* x16 = kimage_head */ + ldr x16, [x0, #KIMAGE_HEAD] /* x16 = kimage_head */ mov x14, xzr /* x14 = entry ptr */ mov x13, xzr /* x13 = copy dest */ raw_dcache_line_size x15, x1 /* x15 = dcache line size */ @@ -63,12 +61,12 @@ SYM_CODE_START(arm64_relocate_new_kernel) isb /* Start new image. */ - mov x0, x18 + ldr x4, [x0, #KIMAGE_START] /* relocation start */ + ldr x0, [x0, #KIMAGE_ARCH_DTB_MEM] /* dtb address */ mov x1, xzr mov x2, xzr mov x3, xzr - br x17 - + br x4 SYM_CODE_END(arm64_relocate_new_kernel) .align 3 /* To keep the 64-bit values below naturally aligned. */ From 08eae0ef618f34a813c1478200eb351d4416f3ca Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:06 +0000 Subject: [PATCH 08/15] arm64: kexec: configure EL2 vectors for kexec If we have a EL2 mode without VHE, the EL2 vectors are needed in order to switch to EL2 and jump to new world with hypervisor privileges. In preparation to MMU enabled relocation, configure our EL2 table now. Kexec uses #HVC_SOFT_RESTART to branch to the new world, so extend el1_sync vector that is provided by trans_pgd_copy_el2_vectors() to support this case. 
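The load-time part of this amounts to the following (sketch of the machine_kexec_post_load() hunk below):

	kimage->arch.el2_vectors = 0;
	if (is_hyp_nvhe()) {
		int rc = trans_pgd_copy_el2_vectors(&info,
						    &kimage->arch.el2_vectors);
		if (rc)
			return rc;
	}

At kexec time the copied table is installed with __hyp_set_vectors(kimage->arch.el2_vectors) before cpu_soft_restart() is called, so the HVC_SOFT_RESTART handler added to el1_sync can take us back to EL2.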
Signed-off-by: Pasha Tatashin Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-9-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 2 +- arch/arm64/include/asm/kexec.h | 1 + arch/arm64/kernel/asm-offsets.c | 1 + arch/arm64/kernel/machine_kexec.c | 31 +++++++++++++++++++++++++++++++ arch/arm64/mm/trans_pgd-asm.S | 9 ++++++++- 5 files changed, 42 insertions(+), 2 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 5c7ae4c3954b..552a057b40af 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1135,7 +1135,7 @@ config CRASH_DUMP config TRANS_TABLE def_bool y - depends on HIBERNATION + depends on HIBERNATION || KEXEC_CORE config XEN_DOM0 def_bool y diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 00dbcc71aeb2..753a1c398898 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -96,6 +96,7 @@ struct kimage_arch { void *dtb; phys_addr_t dtb_mem; phys_addr_t kern_reloc; + phys_addr_t el2_vectors; }; #ifdef CONFIG_KEXEC_FILE diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 1d3319c7518e..6a2b8b1a4872 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -174,6 +174,7 @@ int main(void) #endif #ifdef CONFIG_KEXEC_CORE DEFINE(KIMAGE_ARCH_DTB_MEM, offsetof(struct kimage, arch.dtb_mem)); + DEFINE(KIMAGE_ARCH_EL2_VECTORS, offsetof(struct kimage, arch.el2_vectors)); DEFINE(KIMAGE_HEAD, offsetof(struct kimage, head)); DEFINE(KIMAGE_START, offsetof(struct kimage, start)); BLANK(); diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index e210b19592c6..59a4b4172b68 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "cpu-reset.h" @@ -43,7 +44,9 @@ static void _kexec_image_info(const char *func, int line, pr_debug(" start: %lx\n", kimage->start); pr_debug(" head: %lx\n", kimage->head); pr_debug(" nr_segments: %lu\n", kimage->nr_segments); + pr_debug(" dtb_mem: %pa\n", &kimage->arch.dtb_mem); pr_debug(" kern_reloc: %pa\n", &kimage->arch.kern_reloc); + pr_debug(" el2_vectors: %pa\n", &kimage->arch.el2_vectors); for (i = 0; i < kimage->nr_segments; i++) { pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n", @@ -143,9 +146,27 @@ static void kexec_segment_flush(const struct kimage *kimage) } } +/* Allocates pages for kexec page table */ +static void *kexec_page_alloc(void *arg) +{ + struct kimage *kimage = (struct kimage *)arg; + struct page *page = kimage_alloc_control_pages(kimage, 0); + + if (!page) + return NULL; + + memset(page_address(page), 0, PAGE_SIZE); + + return page_address(page); +} + int machine_kexec_post_load(struct kimage *kimage) { void *reloc_code = page_to_virt(kimage->control_code_page); + struct trans_pgd_info info = { + .trans_alloc_page = kexec_page_alloc, + .trans_alloc_arg = kimage, + }; /* If in place, relocation is not used, only flush next kernel */ if (kimage->head & IND_DONE) { @@ -154,6 +175,14 @@ int machine_kexec_post_load(struct kimage *kimage) return 0; } + kimage->arch.el2_vectors = 0; + if (is_hyp_nvhe()) { + int rc = trans_pgd_copy_el2_vectors(&info, + &kimage->arch.el2_vectors); + if (rc) + return rc; + } + memcpy(reloc_code, arm64_relocate_new_kernel, arm64_relocate_new_kernel_size); kimage->arch.kern_reloc = __pa(reloc_code); @@ -209,6 +238,8 @@ void machine_kexec(struct kimage *kimage) restart(is_hyp_nvhe(), 
kimage->start, kimage->arch.dtb_mem, 0, 0); } else { + if (is_hyp_nvhe()) + __hyp_set_vectors(kimage->arch.el2_vectors); cpu_soft_restart(kimage->arch.kern_reloc, virt_to_phys(kimage), 0, 0); } diff --git a/arch/arm64/mm/trans_pgd-asm.S b/arch/arm64/mm/trans_pgd-asm.S index 8c4bffe3089d..021c31573bcb 100644 --- a/arch/arm64/mm/trans_pgd-asm.S +++ b/arch/arm64/mm/trans_pgd-asm.S @@ -24,7 +24,14 @@ SYM_CODE_START_LOCAL(el1_sync) msr vbar_el2, x1 mov x0, xzr eret -1: /* Unexpected argument, set an error */ +1: cmp x0, #HVC_SOFT_RESTART /* Called from kexec */ + b.ne 2f + mov x0, x2 + mov x2, x4 + mov x4, x1 + mov x1, x3 + br x4 +2: /* Unexpected argument, set an error */ mov_q x0, HVC_STUB_ERR eret SYM_CODE_END(el1_sync) From ba959fe96a1bbb98765762da20ecb3a6eb9c9d39 Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:07 +0000 Subject: [PATCH 09/15] arm64: kexec: relocate in EL1 mode Since we are going to keep MMU enabled during relocation, we need to keep EL1 mode throughout the relocation. Keep EL1 enabled, and switch EL2 only before entering the new world. Suggested-by: James Morse Signed-off-by: Pasha Tatashin Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-10-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu-reset.h | 3 +-- arch/arm64/kernel/machine_kexec.c | 4 ++-- arch/arm64/kernel/relocate_kernel.S | 13 +++++++++++-- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h index 81b3d0fe7a63..296abbac7192 100644 --- a/arch/arm64/kernel/cpu-reset.h +++ b/arch/arm64/kernel/cpu-reset.h @@ -20,11 +20,10 @@ static inline void __noreturn __nocfi cpu_soft_restart(unsigned long entry, { typeof(__cpu_soft_restart) *restart; - unsigned long el2_switch = is_hyp_nvhe(); restart = (void *)__pa_symbol(function_nocfi(__cpu_soft_restart)); cpu_install_idmap(); - restart(el2_switch, entry, arg0, arg1, arg2); + restart(0, entry, arg0, arg1, arg2); unreachable(); } diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 59a4b4172b68..cf5d6f22a041 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -240,8 +240,8 @@ void machine_kexec(struct kimage *kimage) } else { if (is_hyp_nvhe()) __hyp_set_vectors(kimage->arch.el2_vectors); - cpu_soft_restart(kimage->arch.kern_reloc, virt_to_phys(kimage), - 0, 0); + cpu_soft_restart(kimage->arch.kern_reloc, + virt_to_phys(kimage), 0, 0); } BUG(); /* Should never get here. */ diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index 63ea19868f63..b4fb97312a80 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -13,6 +13,7 @@ #include #include #include +#include /* * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it. @@ -61,12 +62,20 @@ SYM_CODE_START(arm64_relocate_new_kernel) isb /* Start new image. 
*/ + ldr x1, [x0, #KIMAGE_ARCH_EL2_VECTORS] /* relocation start */ + cbz x1, .Lel1 + ldr x1, [x0, #KIMAGE_START] /* relocation start */ + ldr x2, [x0, #KIMAGE_ARCH_DTB_MEM] /* dtb address */ + mov x3, xzr + mov x4, xzr + mov x0, #HVC_SOFT_RESTART + hvc #0 /* Jumps from el2 */ +.Lel1: ldr x4, [x0, #KIMAGE_START] /* relocation start */ ldr x0, [x0, #KIMAGE_ARCH_DTB_MEM] /* dtb address */ - mov x1, xzr mov x2, xzr mov x3, xzr - br x4 + br x4 /* Jumps from el1 */ SYM_CODE_END(arm64_relocate_new_kernel) .align 3 /* To keep the 64-bit values below naturally aligned. */ From 19a046f07ce5a5c34ebb6432192d98cfdb38444f Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:08 +0000 Subject: [PATCH 10/15] arm64: kexec: use ld script for relocation function Currently, relocation code declares start and end variables which are used to compute its size. The better way to do this is to use ld script, and put relocation function in its own section. Signed-off-by: Pasha Tatashin Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-11-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/sections.h | 1 + arch/arm64/kernel/machine_kexec.c | 16 ++++++---------- arch/arm64/kernel/relocate_kernel.S | 14 +------------- arch/arm64/kernel/vmlinux.lds.S | 19 +++++++++++++++++++ 4 files changed, 27 insertions(+), 23 deletions(-) diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index e4ad9db53af1..152cb35bf9df 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -21,5 +21,6 @@ extern char __exittext_begin[], __exittext_end[]; extern char __irqentry_text_start[], __irqentry_text_end[]; extern char __mmuoff_data_start[], __mmuoff_data_end[]; extern char __entry_tramp_text_start[], __entry_tramp_text_end[]; +extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[]; #endif /* __ASM_SECTIONS_H */ diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index cf5d6f22a041..320442d35811 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -21,14 +21,11 @@ #include #include #include +#include #include #include "cpu-reset.h" -/* Global variables for the arm64_relocate_new_kernel routine. */ -extern const unsigned char arm64_relocate_new_kernel[]; -extern const unsigned long arm64_relocate_new_kernel_size; - /** * kexec_image_info - For debugging output. */ @@ -163,6 +160,7 @@ static void *kexec_page_alloc(void *arg) int machine_kexec_post_load(struct kimage *kimage) { void *reloc_code = page_to_virt(kimage->control_code_page); + long reloc_size; struct trans_pgd_info info = { .trans_alloc_page = kexec_page_alloc, .trans_alloc_arg = kimage, @@ -183,17 +181,15 @@ int machine_kexec_post_load(struct kimage *kimage) return rc; } - memcpy(reloc_code, arm64_relocate_new_kernel, - arm64_relocate_new_kernel_size); + reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start; + memcpy(reloc_code, __relocate_new_kernel_start, reloc_size); kimage->arch.kern_reloc = __pa(reloc_code); /* Flush the reloc_code in preparation for its execution. 
*/ dcache_clean_inval_poc((unsigned long)reloc_code, - (unsigned long)reloc_code + - arm64_relocate_new_kernel_size); + (unsigned long)reloc_code + reloc_size); icache_inval_pou((uintptr_t)reloc_code, - (uintptr_t)reloc_code + - arm64_relocate_new_kernel_size); + (uintptr_t)reloc_code + reloc_size); kexec_list_flush(kimage); kexec_image_info(kimage); diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index b4fb97312a80..2227741b96fa 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -15,6 +15,7 @@ #include #include +.section ".kexec_relocate.text", "ax" /* * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it. * @@ -77,16 +78,3 @@ SYM_CODE_START(arm64_relocate_new_kernel) mov x3, xzr br x4 /* Jumps from el1 */ SYM_CODE_END(arm64_relocate_new_kernel) - -.align 3 /* To keep the 64-bit values below naturally aligned. */ - -.Lcopy_end: -.org KEXEC_CONTROL_PAGE_SIZE - -/* - * arm64_relocate_new_kernel_size - Number of bytes to copy to the - * control_code_page. - */ -.globl arm64_relocate_new_kernel_size -arm64_relocate_new_kernel_size: - .quad .Lcopy_end - arm64_relocate_new_kernel diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index f6b1a88245db..0760331af85c 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -63,6 +63,7 @@ #include #include #include +#include #include #include @@ -100,6 +101,16 @@ jiffies = jiffies_64; #define HIBERNATE_TEXT #endif +#ifdef CONFIG_KEXEC_CORE +#define KEXEC_TEXT \ + . = ALIGN(SZ_4K); \ + __relocate_new_kernel_start = .; \ + *(.kexec_relocate.text) \ + __relocate_new_kernel_end = .; +#else +#define KEXEC_TEXT +#endif + #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 #define TRAMP_TEXT \ . = ALIGN(PAGE_SIZE); \ @@ -160,6 +171,7 @@ SECTIONS HYPERVISOR_TEXT IDMAP_TEXT HIBERNATE_TEXT + KEXEC_TEXT TRAMP_TEXT *(.fixup) *(.gnu.warning) @@ -348,3 +360,10 @@ ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET, ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET, "TRAMP_SWAPPER_OFFSET is wrong!") #endif + +#ifdef CONFIG_KEXEC_CORE +/* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */ +ASSERT(__relocate_new_kernel_end - (__relocate_new_kernel_start & ~(SZ_4K - 1)) + <= SZ_4K, "kexec relocation code is too big or misaligned") +ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken") +#endif From 3744b5280e67f54579abe92576deec0079242323 Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:09 +0000 Subject: [PATCH 11/15] arm64: kexec: install a copy of the linear-map To perform the kexec relocation with the MMU enabled, we need a copy of the linear map. Create one, and install it from the relocation code. This has to be done from the assembly code as it will be idmapped with TTBR0. The kernel runs in TTRB1, so can't use the break-before-make sequence on the mapping it is executing from. The makes no difference yet as the relocation code runs with the MMU disabled. 
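At load time the copy is built with the existing trans_pgd API (sketch of the machine_kexec_post_load() hunk below):

	trans_pgd = kexec_page_alloc(kimage);
	if (!trans_pgd)
		return -ENOMEM;
	rc = trans_pgd_create_copy(&info, &trans_pgd, PAGE_OFFSET, PAGE_END);
	if (rc)
		return rc;
	kimage->arch.ttbr1 = __pa(trans_pgd);
	kimage->arch.zero_page = __pa(empty_zero_page);

The relocation code then installs it with the break_before_make_ttbr_switch macro that this patch moves from hibernate-asm.S into assembler.h.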
Suggested-by: James Morse Signed-off-by: Pasha Tatashin Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-12-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 19 +++++++++++++++++++ arch/arm64/include/asm/kexec.h | 2 ++ arch/arm64/kernel/asm-offsets.c | 2 ++ arch/arm64/kernel/hibernate-asm.S | 20 -------------------- arch/arm64/kernel/machine_kexec.c | 16 ++++++++++++++-- arch/arm64/kernel/relocate_kernel.S | 3 +++ 6 files changed, 40 insertions(+), 22 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index d5281f75a58d..5da176dd37a9 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -483,6 +483,25 @@ alternative_endif _cond_extable .Licache_op\@, \fixup .endm +/* + * To prevent the possibility of old and new partial table walks being visible + * in the tlb, switch the ttbr to a zero page when we invalidate the old + * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i + * Even switching to our copied tables will cause a changed output address at + * each stage of the walk. + */ + .macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2 + phys_to_ttbr \tmp, \zero_page + msr ttbr1_el1, \tmp + isb + tlbi vmalle1 + dsb nsh + phys_to_ttbr \tmp, \page_table + offset_ttbr1 \tmp, \tmp2 + msr ttbr1_el1, \tmp + isb + .endm + /* * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present */ diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 753a1c398898..d678f0ceb7ee 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -97,6 +97,8 @@ struct kimage_arch { phys_addr_t dtb_mem; phys_addr_t kern_reloc; phys_addr_t el2_vectors; + phys_addr_t ttbr1; + phys_addr_t zero_page; }; #ifdef CONFIG_KEXEC_FILE diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 6a2b8b1a4872..1f565224dafd 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -175,6 +175,8 @@ int main(void) #ifdef CONFIG_KEXEC_CORE DEFINE(KIMAGE_ARCH_DTB_MEM, offsetof(struct kimage, arch.dtb_mem)); DEFINE(KIMAGE_ARCH_EL2_VECTORS, offsetof(struct kimage, arch.el2_vectors)); + DEFINE(KIMAGE_ARCH_ZERO_PAGE, offsetof(struct kimage, arch.zero_page)); + DEFINE(KIMAGE_ARCH_TTBR1, offsetof(struct kimage, arch.ttbr1)); DEFINE(KIMAGE_HEAD, offsetof(struct kimage, head)); DEFINE(KIMAGE_START, offsetof(struct kimage, start)); BLANK(); diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S index a30a2c3f905e..0e1d9c3c6a93 100644 --- a/arch/arm64/kernel/hibernate-asm.S +++ b/arch/arm64/kernel/hibernate-asm.S @@ -15,26 +15,6 @@ #include #include -/* - * To prevent the possibility of old and new partial table walks being visible - * in the tlb, switch the ttbr to a zero page when we invalidate the old - * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i - * Even switching to our copied tables will cause a changed output address at - * each stage of the walk. 
- */ -.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2 - phys_to_ttbr \tmp, \zero_page - msr ttbr1_el1, \tmp - isb - tlbi vmalle1 - dsb nsh - phys_to_ttbr \tmp, \page_table - offset_ttbr1 \tmp, \tmp2 - msr ttbr1_el1, \tmp - isb -.endm - - /* * Resume from hibernate * diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 320442d35811..fbff545565f1 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -159,6 +159,8 @@ static void *kexec_page_alloc(void *arg) int machine_kexec_post_load(struct kimage *kimage) { + int rc; + pgd_t *trans_pgd; void *reloc_code = page_to_virt(kimage->control_code_page); long reloc_size; struct trans_pgd_info info = { @@ -175,12 +177,22 @@ int machine_kexec_post_load(struct kimage *kimage) kimage->arch.el2_vectors = 0; if (is_hyp_nvhe()) { - int rc = trans_pgd_copy_el2_vectors(&info, - &kimage->arch.el2_vectors); + rc = trans_pgd_copy_el2_vectors(&info, + &kimage->arch.el2_vectors); if (rc) return rc; } + /* Create a copy of the linear map */ + trans_pgd = kexec_page_alloc(kimage); + if (!trans_pgd) + return -ENOMEM; + rc = trans_pgd_create_copy(&info, &trans_pgd, PAGE_OFFSET, PAGE_END); + if (rc) + return rc; + kimage->arch.ttbr1 = __pa(trans_pgd); + kimage->arch.zero_page = __pa(empty_zero_page); + reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start; memcpy(reloc_code, __relocate_new_kernel_start, reloc_size); kimage->arch.kern_reloc = __pa(reloc_code); diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index 2227741b96fa..2b80232246f7 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -29,10 +29,13 @@ */ SYM_CODE_START(arm64_relocate_new_kernel) /* Setup the list loop variables. */ + ldr x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */ + ldr x17, [x0, #KIMAGE_ARCH_TTBR1] /* x17 = linear map copy */ ldr x16, [x0, #KIMAGE_HEAD] /* x16 = kimage_head */ mov x14, xzr /* x14 = entry ptr */ mov x13, xzr /* x13 = copy dest */ raw_dcache_line_size x15, x1 /* x15 = dcache line size */ + break_before_make_ttbr_switch x18, x17, x1, x2 /* set linear map */ .Lloop: and x12, x16, PAGE_MASK /* x12 = addr */ From efc2d0f20a9dab2d0e92a271dc4b8e3496377739 Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:10 +0000 Subject: [PATCH 12/15] arm64: kexec: keep MMU enabled during kexec relocation Now, that we have linear map page tables configured, keep MMU enabled to allow faster relocation of segments to final destination. Cavium ThunderX2: Kernel Image size: 38M Iniramfs size: 46M Total relocation size: 84M MMU-disabled: relocation 7.489539915s MMU-enabled: relocation 0.03946095s Broadcom Stingray: The performance data: for a moderate size kernel + initramfs: 25M the relocation was taking 0.382s, with enabled MMU it now takes 0.019s only or x20 improvement. The time is proportional to the size of relocation, therefore if initramfs is larger, 100M it could take over a second. 
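The relocation path of machine_kexec() now boils down to (sketch; the full hunk is below):

	if (is_hyp_nvhe())
		__hyp_set_vectors(kimage->arch.el2_vectors);
	cpu_install_ttbr0(kimage->arch.ttbr0, kimage->arch.t0sz);
	kernel_reloc = (void *)kimage->arch.kern_reloc;
	kernel_reloc(kimage);

i.e. arm64_relocate_new_kernel runs from the idmapped control page with the MMU enabled, and only turns the MMU off (via the new turn_off_mmu macro) just before branching to the new kernel or the EL2 stub.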
Signed-off-by: Pasha Tatashin Tested-by: Pingfan Liu Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-13-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/kexec.h | 3 +++ arch/arm64/kernel/asm-offsets.c | 1 + arch/arm64/kernel/machine_kexec.c | 16 +++++++++++---- arch/arm64/kernel/relocate_kernel.S | 31 +++++++++++++++++++---------- 4 files changed, 36 insertions(+), 15 deletions(-) diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index d678f0ceb7ee..dca6dedc3b25 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -97,8 +97,11 @@ struct kimage_arch { phys_addr_t dtb_mem; phys_addr_t kern_reloc; phys_addr_t el2_vectors; + phys_addr_t ttbr0; phys_addr_t ttbr1; phys_addr_t zero_page; + unsigned long phys_offset; + unsigned long t0sz; }; #ifdef CONFIG_KEXEC_FILE diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 1f565224dafd..2124357c2075 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -176,6 +176,7 @@ int main(void) DEFINE(KIMAGE_ARCH_DTB_MEM, offsetof(struct kimage, arch.dtb_mem)); DEFINE(KIMAGE_ARCH_EL2_VECTORS, offsetof(struct kimage, arch.el2_vectors)); DEFINE(KIMAGE_ARCH_ZERO_PAGE, offsetof(struct kimage, arch.zero_page)); + DEFINE(KIMAGE_ARCH_PHYS_OFFSET, offsetof(struct kimage, arch.phys_offset)); DEFINE(KIMAGE_ARCH_TTBR1, offsetof(struct kimage, arch.ttbr1)); DEFINE(KIMAGE_HEAD, offsetof(struct kimage, head)); DEFINE(KIMAGE_START, offsetof(struct kimage, start)); diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index fbff545565f1..1e9a2a45e016 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -196,6 +196,11 @@ int machine_kexec_post_load(struct kimage *kimage) reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start; memcpy(reloc_code, __relocate_new_kernel_start, reloc_size); kimage->arch.kern_reloc = __pa(reloc_code); + rc = trans_pgd_idmap_page(&info, &kimage->arch.ttbr0, + &kimage->arch.t0sz, reloc_code); + if (rc) + return rc; + kimage->arch.phys_offset = virt_to_phys(kimage) - (long)kimage; /* Flush the reloc_code in preparation for its execution. */ dcache_clean_inval_poc((unsigned long)reloc_code, @@ -230,9 +235,9 @@ void machine_kexec(struct kimage *kimage) local_daif_mask(); /* - * Both restart and cpu_soft_restart will shutdown the MMU, disable data + * Both restart and kernel_reloc will shutdown the MMU, disable data * caches. However, restart will start new kernel or purgatory directly, - * cpu_soft_restart will transfer control to arm64_relocate_new_kernel + * kernel_reloc contains the body of arm64_relocate_new_kernel * In kexec case, kimage->start points to purgatory assuming that * kernel entry and dtb address are embedded in purgatory by * userspace (kexec-tools). @@ -246,10 +251,13 @@ void machine_kexec(struct kimage *kimage) restart(is_hyp_nvhe(), kimage->start, kimage->arch.dtb_mem, 0, 0); } else { + void (*kernel_reloc)(struct kimage *kimage); + if (is_hyp_nvhe()) __hyp_set_vectors(kimage->arch.el2_vectors); - cpu_soft_restart(kimage->arch.kern_reloc, - virt_to_phys(kimage), 0, 0); + cpu_install_ttbr0(kimage->arch.ttbr0, kimage->arch.t0sz); + kernel_reloc = (void *)kimage->arch.kern_reloc; + kernel_reloc(kimage); } BUG(); /* Should never get here. 
*/ diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index 2b80232246f7..f0a3df9e18a3 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -4,6 +4,8 @@ * * Copyright (C) Linaro. * Copyright (C) Huawei Futurewei Technologies. + * Copyright (C) 2021, Microsoft Corporation. + * Pasha Tatashin */ #include @@ -15,6 +17,13 @@ #include #include +.macro turn_off_mmu tmp1, tmp2 + mov_q \tmp1, INIT_SCTLR_EL1_MMU_OFF + pre_disable_mmu_workaround + msr sctlr_el1, \tmp1 + isb +.endm + .section ".kexec_relocate.text", "ax" /* * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it. @@ -32,22 +41,21 @@ SYM_CODE_START(arm64_relocate_new_kernel) ldr x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */ ldr x17, [x0, #KIMAGE_ARCH_TTBR1] /* x17 = linear map copy */ ldr x16, [x0, #KIMAGE_HEAD] /* x16 = kimage_head */ - mov x14, xzr /* x14 = entry ptr */ - mov x13, xzr /* x13 = copy dest */ + ldr x22, [x0, #KIMAGE_ARCH_PHYS_OFFSET] /* x22 phys_offset */ raw_dcache_line_size x15, x1 /* x15 = dcache line size */ break_before_make_ttbr_switch x18, x17, x1, x2 /* set linear map */ .Lloop: and x12, x16, PAGE_MASK /* x12 = addr */ - + sub x12, x12, x22 /* Convert x12 to virt */ /* Test the entry flags. */ .Ltest_source: tbz x16, IND_SOURCE_BIT, .Ltest_indirection /* Invalidate dest page to PoC. */ - mov x2, x13 - add x1, x2, #PAGE_SIZE - dcache_by_myline_op ivac, sy, x2, x1, x15, x20 + mov x19, x13 copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8 + add x1, x19, #PAGE_SIZE + dcache_by_myline_op civac, sy, x19, x1, x15, x20 b .Lnext .Ltest_indirection: tbz x16, IND_INDIRECTION_BIT, .Ltest_destination @@ -64,19 +72,20 @@ SYM_CODE_START(arm64_relocate_new_kernel) ic iallu dsb nsh isb + ldr x4, [x0, #KIMAGE_START] /* relocation start */ + ldr x1, [x0, #KIMAGE_ARCH_EL2_VECTORS] /* relocation start */ + ldr x0, [x0, #KIMAGE_ARCH_DTB_MEM] /* dtb address */ + turn_off_mmu x12, x13 /* Start new image. */ - ldr x1, [x0, #KIMAGE_ARCH_EL2_VECTORS] /* relocation start */ cbz x1, .Lel1 - ldr x1, [x0, #KIMAGE_START] /* relocation start */ - ldr x2, [x0, #KIMAGE_ARCH_DTB_MEM] /* dtb address */ + mov x1, x4 /* relocation start */ + mov x2, x0 /* dtb address */ mov x3, xzr mov x4, xzr mov x0, #HVC_SOFT_RESTART hvc #0 /* Jumps from el2 */ .Lel1: - ldr x4, [x0, #KIMAGE_START] /* relocation start */ - ldr x0, [x0, #KIMAGE_ARCH_DTB_MEM] /* dtb address */ mov x2, xzr mov x3, xzr br x4 /* Jumps from el1 */ From 939f1b9564c6aa2bd0f4e4e336ac74379692c38b Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:11 +0000 Subject: [PATCH 13/15] arm64: kexec: remove the pre-kexec PoC maintenance Now that kexec does its relocations with the MMU enabled, we no longer need to clean the relocation data to the PoC. Suggested-by: James Morse Signed-off-by: Pasha Tatashin Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-14-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/kernel/machine_kexec.c | 43 ------------------------------- 1 file changed, 43 deletions(-) diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 1e9a2a45e016..559d47a3c59c 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -77,48 +77,6 @@ int machine_kexec_prepare(struct kimage *kimage) return 0; } -/** - * kexec_list_flush - Helper to flush the kimage list and source pages to PoC. 
- */ -static void kexec_list_flush(struct kimage *kimage) -{ - kimage_entry_t *entry; - - dcache_clean_inval_poc((unsigned long)kimage, - (unsigned long)kimage + sizeof(*kimage)); - - for (entry = &kimage->head; ; entry++) { - unsigned int flag; - unsigned long addr; - - /* flush the list entries. */ - dcache_clean_inval_poc((unsigned long)entry, - (unsigned long)entry + - sizeof(kimage_entry_t)); - - flag = *entry & IND_FLAGS; - if (flag == IND_DONE) - break; - - addr = (unsigned long)phys_to_virt(*entry & PAGE_MASK); - - switch (flag) { - case IND_INDIRECTION: - /* Set entry point just before the new list page. */ - entry = (kimage_entry_t *)addr - 1; - break; - case IND_SOURCE: - /* flush the source pages. */ - dcache_clean_inval_poc(addr, addr + PAGE_SIZE); - break; - case IND_DESTINATION: - break; - default: - BUG(); - } - } -} - /** * kexec_segment_flush - Helper to flush the kimage segments to PoC. */ @@ -207,7 +165,6 @@ int machine_kexec_post_load(struct kimage *kimage) (unsigned long)reloc_code + reloc_size); icache_inval_pou((uintptr_t)reloc_code, (uintptr_t)reloc_code + reloc_size); - kexec_list_flush(kimage); kexec_image_info(kimage); return 0; From 7a2512fa649397c68127a480ef8fdd9dcf323045 Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:12 +0000 Subject: [PATCH 14/15] arm64: kexec: remove cpu-reset.h This header contains only cpu_soft_restart() which is never used directly anymore. So, remove this header, and rename the helper to be cpu_soft_restart(). Suggested-by: James Morse Signed-off-by: Pasha Tatashin Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-15-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/kexec.h | 6 ++++++ arch/arm64/kernel/cpu-reset.S | 7 +++---- arch/arm64/kernel/cpu-reset.h | 30 ------------------------------ arch/arm64/kernel/machine_kexec.c | 6 ++---- 4 files changed, 11 insertions(+), 38 deletions(-) delete mode 100644 arch/arm64/kernel/cpu-reset.h diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index dca6dedc3b25..9839bfc163d7 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -90,6 +90,12 @@ static inline void crash_prepare_suspend(void) {} static inline void crash_post_resume(void) {} #endif +#if defined(CONFIG_KEXEC_CORE) +void cpu_soft_restart(unsigned long el2_switch, unsigned long entry, + unsigned long arg0, unsigned long arg1, + unsigned long arg2); +#endif + #define ARCH_HAS_KIMAGE_ARCH struct kimage_arch { diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index d47ff63a5b66..48a8af97faa9 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S @@ -16,8 +16,7 @@ .pushsection .idmap.text, "awx" /* - * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for - * cpu_soft_restart. + * cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) * * @el2_switch: Flag to indicate a switch to EL2 is needed. * @entry: Location to jump to for soft reset. @@ -29,7 +28,7 @@ * branch to what would be the reset vector. It must be executed with the * flat identity mapping. 
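For context, this is the caller-side idiom that replaces the removed inline wrapper (a sketch taken from the machine_kexec() hunk further down, not new code):

	typeof(cpu_soft_restart) *restart;

	cpu_install_idmap();
	restart = (void *)__pa_symbol(function_nocfi(cpu_soft_restart));
	restart(is_hyp_nvhe(), kimage->start, kimage->arch.dtb_mem, 0, 0);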
*/ -SYM_CODE_START(__cpu_soft_restart) +SYM_CODE_START(cpu_soft_restart) mov_q x12, INIT_SCTLR_EL1_MMU_OFF pre_disable_mmu_workaround /* @@ -48,6 +47,6 @@ SYM_CODE_START(__cpu_soft_restart) mov x1, x3 // arg1 mov x2, x4 // arg2 br x8 -SYM_CODE_END(__cpu_soft_restart) +SYM_CODE_END(cpu_soft_restart) .popsection diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h deleted file mode 100644 index 296abbac7192..000000000000 --- a/arch/arm64/kernel/cpu-reset.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * CPU reset routines - * - * Copyright (C) 2015 Huawei Futurewei Technologies. - */ - -#ifndef _ARM64_CPU_RESET_H -#define _ARM64_CPU_RESET_H - -#include - -void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry, - unsigned long arg0, unsigned long arg1, unsigned long arg2); - -static inline void __noreturn __nocfi cpu_soft_restart(unsigned long entry, - unsigned long arg0, - unsigned long arg1, - unsigned long arg2) -{ - typeof(__cpu_soft_restart) *restart; - - restart = (void *)__pa_symbol(function_nocfi(__cpu_soft_restart)); - - cpu_install_idmap(); - restart(0, entry, arg0, arg1, arg2); - unreachable(); -} - -#endif diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 559d47a3c59c..1038494135c8 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -24,8 +24,6 @@ #include #include -#include "cpu-reset.h" - /** * kexec_image_info - For debugging output. */ @@ -201,10 +199,10 @@ void machine_kexec(struct kimage *kimage) * In kexec_file case, the kernel starts directly without purgatory. */ if (kimage->head & IND_DONE) { - typeof(__cpu_soft_restart) *restart; + typeof(cpu_soft_restart) *restart; cpu_install_idmap(); - restart = (void *)__pa_symbol(function_nocfi(__cpu_soft_restart)); + restart = (void *)__pa_symbol(function_nocfi(cpu_soft_restart)); restart(is_hyp_nvhe(), kimage->start, kimage->arch.dtb_mem, 0, 0); } else { From 6091dd9eaf8e77311548b616281c1a9c67e6ca40 Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 30 Sep 2021 14:31:13 +0000 Subject: [PATCH 15/15] arm64: trans_pgd: remove trans_pgd_map_page() The intend of trans_pgd_map_page() was to map contiguous range of VA memory to the memory that is getting relocated during kexec. However, since we are now using linear map instead of contiguous range this function is not needed Suggested-by: Pingfan Liu Signed-off-by: Pasha Tatashin Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210930143113.1502553-16-pasha.tatashin@soleen.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/trans_pgd.h | 5 +-- arch/arm64/mm/trans_pgd.c | 57 ------------------------------ 2 files changed, 1 insertion(+), 61 deletions(-) diff --git a/arch/arm64/include/asm/trans_pgd.h b/arch/arm64/include/asm/trans_pgd.h index 7b04d32b102c..033d400a4ea4 100644 --- a/arch/arm64/include/asm/trans_pgd.h +++ b/arch/arm64/include/asm/trans_pgd.h @@ -15,7 +15,7 @@ /* * trans_alloc_page * - Allocator that should return exactly one zeroed page, if this - * allocator fails, trans_pgd_create_copy() and trans_pgd_map_page() + * allocator fails, trans_pgd_create_copy() and trans_pgd_idmap_page() * return -ENOMEM error. 
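For orientation, the trans_pgd interface that remains once trans_pgd_map_page() is dropped looks roughly like this (a sketch assembled from the headers touched earlier in the series, not new declarations):

	int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **trans_pgd,
				  unsigned long start, unsigned long end);

	int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
				 unsigned long *t0sz, void *page);

	int trans_pgd_copy_el2_vectors(struct trans_pgd_info *info,
				       phys_addr_t *el2_vectors);

kexec now relies on trans_pgd_create_copy() for the linear-map copy that the relocation code runs under, and on trans_pgd_idmap_page() to identity-map the relocation code itself, as seen in the machine_kexec_post_load() hunk earlier in this series.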
* * trans_alloc_arg @@ -30,9 +30,6 @@ struct trans_pgd_info { int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **trans_pgd, unsigned long start, unsigned long end); -int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd, - void *page, unsigned long dst_addr, pgprot_t pgprot); - int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0, unsigned long *t0sz, void *page); diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c index 26bd8f2d95af..d7da8ca40d2e 100644 --- a/arch/arm64/mm/trans_pgd.c +++ b/arch/arm64/mm/trans_pgd.c @@ -217,63 +217,6 @@ int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **dst_pgdp, return rc; } -/* - * Add map entry to trans_pgd for a base-size page at PTE level. - * info: contains allocator and its argument - * trans_pgd: page table in which new map is added. - * page: page to be mapped. - * dst_addr: new VA address for the page - * pgprot: protection for the page. - * - * Returns 0 on success, and -ENOMEM on failure. - */ -int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd, - void *page, unsigned long dst_addr, pgprot_t pgprot) -{ - pgd_t *pgdp; - p4d_t *p4dp; - pud_t *pudp; - pmd_t *pmdp; - pte_t *ptep; - - pgdp = pgd_offset_pgd(trans_pgd, dst_addr); - if (pgd_none(READ_ONCE(*pgdp))) { - p4dp = trans_alloc(info); - if (!pgdp) - return -ENOMEM; - pgd_populate(NULL, pgdp, p4dp); - } - - p4dp = p4d_offset(pgdp, dst_addr); - if (p4d_none(READ_ONCE(*p4dp))) { - pudp = trans_alloc(info); - if (!pudp) - return -ENOMEM; - p4d_populate(NULL, p4dp, pudp); - } - - pudp = pud_offset(p4dp, dst_addr); - if (pud_none(READ_ONCE(*pudp))) { - pmdp = trans_alloc(info); - if (!pmdp) - return -ENOMEM; - pud_populate(NULL, pudp, pmdp); - } - - pmdp = pmd_offset(pudp, dst_addr); - if (pmd_none(READ_ONCE(*pmdp))) { - ptep = trans_alloc(info); - if (!ptep) - return -ENOMEM; - pmd_populate_kernel(NULL, pmdp, ptep); - } - - ptep = pte_offset_kernel(pmdp, dst_addr); - set_pte(ptep, pfn_pte(virt_to_pfn(page), pgprot)); - - return 0; -} - /* * The page we want to idmap may be outside the range covered by VA_BITS that * can be built using the kernel's p?d_populate() helpers. As a one off, for a