linux-stable/arch/arm64/kernel/entry-common.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */
#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;
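
	/*
	 * When taken from the idle task, RCU may be in an extended quiescent
	 * state, so tell RCU about the IRQ entry via ct_irq_enter() and
	 * record that this must be balanced by ct_irq_exit() on return.
	 */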
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}
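
/*
 * On entry from kernel mode we additionally check for any asynchronous MTE
 * tag-check faults recorded in TFSR_EL1 and update PSTATE.TCO for the
 * current task; see mte_check_tfsr_entry() and mte_disable_tco_entry().
 */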
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			ct_irq_exit();
	}
}

static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
	mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
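
/*
 * Handle any pending exit-to-user work (signals, rescheduling, etc.) with
 * all DAIF exceptions masked, so that the final read of the thread flags
 * cannot race with new work being queued.
 */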
static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long flags;

	local_daif_mask();

	flags = read_thread_flags();
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);

	lockdep_sys_exit();
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode_prepare(regs);
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}
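
/*
 * Entry point for the EL0 exit work when invoked from assembly;
 * ret_from_fork is the one remaining assembly caller.
 */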
asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
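	/*
	 * Save lockdep's view of the hardirq state; the NMI handler runs
	 * with hardirqs marked off, and arm64_exit_nmi() uses this snapshot
	 * to restore the state lockdep saw before the NMI was taken.
	 */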
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	ct_nmi_enter();

	trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}
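
/*
 * With PREEMPT_DYNAMIC, the preemption model is chosen at boot time: the
 * static key below is switched by the common code in kernel/sched/core.c,
 * so need_irq_preemption() reduces to a single patched branch. Otherwise
 * the decision is fixed at build time by CONFIG_PREEMPTION.
 */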
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption()	(IS_ENABLED(CONFIG_PREEMPTION))
#endif

static void __sched arm64_preempt_schedule_irq(void)
{
	if (!need_irq_preemption())
		return;

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
		return;

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}
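
/*
 * Invoke an IRQ handler with the IRQ regs installed, switching to the
 * per-CPU IRQ stack if the exception was taken while on the task stack.
 */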
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}
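
/*
 * Root IRQ and FIQ handlers; the irqchip driver installs these via
 * set_handle_irq() and set_handle_fiq().
 */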
extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));
	__show_regs(regs);

	panic("Unhandled exception");
}
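
/*
 * Stamp out a panicking handler for each exception vector the kernel
 * never expects to take legitimately; e.g. UNHANDLED(el1t, 64, sync)
 * defines el1t_64_sync_handler(), which reports "64-bit el1t sync".
 */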
#define UNHANDLED(el, regsize, vector)						\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{										\
	const char *desc = #regsize "-bit " #el " " #vector;			\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));			\
}
#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;
	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
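
	/*
	 * Trigger a dummy single-step exception: set MDSCR_EL1.SS and
	 * MDSCR_EL1.KDE and unmask debug exceptions (PSTATE.D), so that the
	 * step is taken on the ISB below, after which MDSCR_EL1 is restored.
	 */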
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */
	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
arm64: syscall: unmask DAIF earlier for SVCs For a number of historical reasons, when handling SVCs we don't unmask DAIF in el0_svc() or el0_svc_compat(), and instead do so later in el0_svc_common(). This is unfortunate and makes it harder to make changes to the DAIF management in entry-common.c as we'd like to do as cleanup and preparation for FEAT_NMI support. We can move the DAIF unmasking to entry-common.c as long as we also hoist the fp_user_discard() logic, as reasoned below. We converted the syscall trace logic from assembly to C in commit: f37099b6992a0b81 ("arm64: convert syscall trace logic to C") ... which was intended to have no functional change, and mirrored the existing assembly logic to avoid the risk of any functional regression. With the logic in C, it's clear that there is currently no reason to unmask DAIF so late within el0_svc_common(): * The thread flags are read prior to unmasking DAIF, but are not consumed until after DAIF is unmasked, and we don't perform a read-modify-write sequence of the thread flags for which we might need to serialize against an IPI modifying the flags. Similarly, for any thread flags set by other threads, whether DAIF is masked or not has no impact. The read_thread_flags() helper performs a single-copy-atomic read of the flags, and so this can safely be moved after unmasking DAIF. * The pt_regs::orig_x0 and pt_regs::syscallno fields are neither consumed nor modified by the handler for any DAIF exception (e.g. these do not exist in the `perf_event_arm_regs` enum and are not sampled by perf in its IRQ handler). Thus, the manipulation of pt_regs::orig_x0 and pt_regs::syscallno can safely be moved after unmasking DAIF. Given the above, we can safely hoist unmasking of DAIF out of el0_svc_common(), and into its immediate callers: do_el0_svc() and do_el0_svc_compat(). Further: * In do_el0_svc(), we sample the syscall number from pt_regs::regs[8]. This is not modified by the handler for any DAIF exception, and thus can safely be moved after unmasking DAIF. As fp_user_discard() operates on the live FP/SVE/SME register state, this needs to occur before we clear DAIF.IF, as interrupts could result in preemption which would cause this state to become foreign. As fp_user_discard() is the first function called within do_el0_svc(), it has no dependency on other parts of do_el0_svc() and can be moved earlier so long as it is called prior to unmasking DAIF.IF. * In do_el0_svc_compat(), we sample the syscall number from pt_regs::regs[7]. This is not modified by the handler for any DAIF exception, and thus can safely be moved after unmasking DAIF. Compat threads cannot use SVE or SME, so there's no need for el0_svc_compat() to call fp_user_discard(). Given the above, we can safely hoist the unmasking of DAIF out of do_el0_svc() and do_el0_svc_compat(), and into their immediate callers: el0_svc() and el0_svc_compat(), so long as we also hoist fp_user_discard() into el0_svc(). Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Marc Zyngier <maz@kernel.org> Cc: Mark Brown <broonie@kernel.org> Cc: Will Deacon <will@kernel.org> Reviewed-by: Mark Brown <broonie@kernel.org> Link: https://lore.kernel.org/r/20230808101148.1064172-1-mark.rutland@arm.com Signed-off-by: Will Deacon <will@kernel.org>
2023-08-08 10:11:48 +00:00
/*
* As per the ABI, exit SME streaming mode and clear the SVE state not
* shared with FPSIMD on syscall entry.
*/
static inline void fp_user_discard(void)
{
/*
* If SME is active then exit streaming mode. If ZA is active
* then flush the SVE registers but leave userspace access to
* both SVE and SME enabled, otherwise disable SME for the
* task and fall through to disabling SVE too. This means
* that after a syscall we never have any streaming mode
* register state to track, if this changes the KVM code will
* need updating.
*/
if (system_supports_sme())
sme_smstop_sm();
if (!system_supports_sve())
return;
if (test_thread_flag(TIF_SVE)) {
unsigned int sve_vq_minus_one;
sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
sve_flush_live(true, sve_vq_minus_one);
}
}
arm64: entry: handle all vectors with C We have 16 architectural exception vectors, and depending on kernel configuration we handle 8 or 12 of these with C code, with the remaining 8 or 4 of these handled as special cases in the entry assembly. It would be nicer if the entry assembly were uniform for all exceptions, and we deferred any specific handling of the exceptions to C code. This way the entry assembly can be more easily templated without ifdeffery or special cases, and it's easier to modify the handling of these cases in future (e.g. to dump additional registers or other context). This patch reworks the entry code so that we always have a C handler for every architectural exception vector, with the entry assembly being completely uniform. We now have to handle exceptions from EL1t and EL1h, and also have to handle exceptions from AArch32 even when the kernel is built without CONFIG_COMPAT. To make this clear and to simplify templating, we rename the top-level exception handlers with a consistent naming scheme: asm: <el+sp>_<regsize>_<type> c: <el+sp>_<regsize>_<type>_handler .. where: <el+sp> is `el1t`, `el1h`, or `el0t` <regsize> is `64` or `32` <type> is `sync`, `irq`, `fiq`, or `error` ... e.g. asm: el1h_64_sync c: el1h_64_sync_handler ... with lower-level handlers simply using "el1" and "compat" as today. For unexpected exceptions, this information is passed to __panic_unhandled(), so it can report the specific vector an unexpected exception was taken from, e.g. | Unhandled 64-bit el1t sync exception For vectors we never expect to enter legitimately, the C code is generated using a macro to avoid code duplication. The exceptions are handled via __panic_unhandled(), replacing bad_mode() (which is removed). The `kernel_ventry` and `entry_handler` assembly macros are updated to handle the new naming scheme. In theory it should be possible to generate the entry functions at the same time as the vectors using a single table, but this will require reworking the linker script to split the two into separate sections, so for now we have separate tables. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Acked-by: Catalin Marinas <catalin.marinas@arm.com> Acked-by: Marc Zyngier <maz@kernel.org> Reviewed-by: Joey Gouly <joey.gouly@arm.com> Cc: James Morse <james.morse@arm.com> Cc: Will Deacon <will@kernel.org> Link: https://lore.kernel.org/r/20210607094624.34689-15-mark.rutland@arm.com Signed-off-by: Will Deacon <will@kernel.org>
2021-06-07 09:46:18 +00:00
UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)
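/*
 * Synchronous aborts taken from EL1: sample FAR_EL1 before it can be
 * clobbered, then handle the abort with DAIF inherited from the
 * interrupted context.
 */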
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_mem_abort(far, esr, regs);
local_daif_mask();
exit_to_kernel_mode(regs);
}
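/* SP/PC alignment fault taken from EL1. */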
static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_sp_pc_abort(far, esr, regs);
local_daif_mask();
exit_to_kernel_mode(regs);
}
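/* UNDEFINED instruction or trapped system instruction execution at EL1. */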
static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
arm64: split EL0/EL1 UNDEF handlers In general, exceptions taken from EL1 need to be handled separately from exceptions taken from EL0, as the logic to handle the two cases can be significantly divergent, and exceptions taken from EL1 typically have more stringent requirements on locking and instrumentation. Subsequent patches will rework the way EL1 UNDEFs are handled in order to address longstanding soundness issues with instrumentation and RCU. In preparation for that rework, this patch splits the existing do_undefinstr() handler into separate do_el0_undef() and do_el1_undef() handlers. Prior to this patch, do_undefinstr() was marked with NOKPROBE_SYMBOL(), preventing instrumentation via kprobes. However, do_undefinstr() invokes other code which can be instrumented, and: * For UNDEFINED exceptions taken from EL0, there is no risk of recursion within kprobes. Therefore it is safe for do_el0_undef to be instrumented with kprobes, and it does not need to be marked with NOKPROBE_SYMBOL(). * For UNDEFINED exceptions taken from EL1, either: (a) The exception has been taken when manipulating SSBS; these cases are limited and do not occur within code that can be invoked recursively via kprobes. Hence, in these cases instrumentation with kprobes is benign. (b) The exception has been taken for an unknown reason, as other than manipulating SSBS we do not expect to take UNDEFINED exceptions from EL1. Any handling of these exceptions is best-effort. ... and in either case, marking do_el1_undef() with NOKPROBE_SYMBOL() isn't sufficient to prevent recursion via kprobes as functions it calls (including die()) are instrumentable via kprobes. Hence, it's not worthwhile to mark do_el1_undef() with NOKPROBE_SYMBOL(). The same applies to do_el1_bti() and do_el1_fpac(), so their NOKPROBE_SYMBOL() annotations are also removed. Aside from the new instrumentability, there should be no functional change as a result of this patch. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: James Morse <james.morse@arm.com> Cc: Joey Gouly <joey.gouly@arm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Will Deacon <will@kernel.org> Link: https://lore.kernel.org/r/20221019144123.612388-3-mark.rutland@arm.com Signed-off-by: Will Deacon <will@kernel.org>
2022-10-19 14:41:16 +00:00
do_el1_undef(regs, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
arm64: rework BTI exception handling If a BTI exception is taken from EL1, the entry code will treat this as an unhandled exception and will panic() the kernel. This is inconsistent with the way we handle FPAC exceptions, which have a dedicated handler and only necessarily kill the thread from which the exception was taken from, and we don't log all the information that could be relevant to debug the issue. The code in do_bti() has: BUG_ON(!user_mode(regs)); ... and it seems like the intent was to call this for EL1 BTI exceptions, as with FPAC, but this was omitted due to an oversight. This patch adds separate EL0 and EL1 BTI exception handlers, with the latter calling die() directly to report the original context the BTI exception was taken from. This matches our handling of FPAC exceptions. Prior to this patch, a BTI failure is reported as: | Unhandled 64-bit el1h sync exception on CPU0, ESR 0x0000000034000002 -- BTI | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00131-g7d937ff0221d-dirty #9 | Hardware name: linux,dummy-virt (DT) | pstate: 20400809 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=-c) | pc : test_bti_callee+0x4/0x10 | lr : test_bti_caller+0x1c/0x28 | sp : ffff80000800bdf0 | x29: ffff80000800bdf0 x28: 0000000000000000 x27: 0000000000000000 | x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000 | x23: ffff80000a2b8000 x22: 0000000000000000 x21: 0000000000000000 | x20: ffff8000099fa5b0 x19: ffff800009ff7000 x18: fffffbfffda37000 | x17: 3120676e696d7573 x16: 7361202c6e6f6974 x15: 0000000041a90000 | x14: 0040000000000041 x13: 0040000000000001 x12: ffff000001a90000 | x11: fffffbfffda37480 x10: 0068000000000703 x9 : 0001000040000000 | x8 : 0000000000090000 x7 : 0068000000000f03 x6 : 0060000000000f83 | x5 : ffff80000a2b6000 x4 : ffff0000028d0000 x3 : ffff800009f78378 | x2 : 0000000000000000 x1 : 0000000040210000 x0 : ffff8000080257e4 | Kernel panic - not syncing: Unhandled exception | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00131-g7d937ff0221d-dirty #9 | Hardware name: linux,dummy-virt (DT) | Call trace: | dump_backtrace.part.0+0xcc/0xe0 | show_stack+0x18/0x5c | dump_stack_lvl+0x64/0x80 | dump_stack+0x18/0x34 | panic+0x170/0x360 | arm64_exit_nmi.isra.0+0x0/0x80 | el1h_64_sync_handler+0x64/0xd0 | el1h_64_sync+0x64/0x68 | test_bti_callee+0x4/0x10 | smp_cpus_done+0xb0/0xbc | smp_init+0x7c/0x8c | kernel_init_freeable+0x128/0x28c | kernel_init+0x28/0x13c | ret_from_fork+0x10/0x20 With this patch applied, a BTI failure is reported as: | Internal error: Oops - BTI: 0000000034000002 [#1] PREEMPT SMP | Modules linked in: | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00132-g0ad98265d582-dirty #8 | Hardware name: linux,dummy-virt (DT) | pstate: 20400809 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=-c) | pc : test_bti_callee+0x4/0x10 | lr : test_bti_caller+0x1c/0x28 | sp : ffff80000800bdf0 | x29: ffff80000800bdf0 x28: 0000000000000000 x27: 0000000000000000 | x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000 | x23: ffff80000a2b8000 x22: 0000000000000000 x21: 0000000000000000 | x20: ffff8000099fa5b0 x19: ffff800009ff7000 x18: fffffbfffda37000 | x17: 3120676e696d7573 x16: 7361202c6e6f6974 x15: 0000000041a90000 | x14: 0040000000000041 x13: 0040000000000001 x12: ffff000001a90000 | x11: fffffbfffda37480 x10: 0068000000000703 x9 : 0001000040000000 | x8 : 0000000000090000 x7 : 0068000000000f03 x6 : 0060000000000f83 | x5 : ffff80000a2b6000 x4 : ffff0000028d0000 x3 : ffff800009f78378 | x2 : 0000000000000000 x1 : 0000000040210000 x0 : ffff800008025804 
| Call trace: | test_bti_callee+0x4/0x10 | smp_cpus_done+0xb0/0xbc | smp_init+0x7c/0x8c | kernel_init_freeable+0x128/0x28c | kernel_init+0x28/0x13c | ret_from_fork+0x10/0x20 | Code: d50323bf d53cd040 d65f03c0 d503233f (d50323bf) Signed-off-by: Mark Rutland <mark.rutland@arm.com> Reviewed-by: Mark Brown <broonie@kernel.org> Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com> Cc: Alexandru Elisei <alexandru.elisei@arm.com> Cc: Amit Daniel Kachhap <amit.kachhap@arm.com> Cc: James Morse <james.morse@arm.com> Cc: Will Deacon <will@kernel.org> Link: https://lore.kernel.org/r/20220913101732.3925290-6-mark.rutland@arm.com Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2022-09-13 10:17:32 +00:00
static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_el1_bti(regs, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
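/*
 * Debug exceptions and BRK taken from EL1 use the dedicated
 * arm64_enter/exit_el1_dbg() sequence, and are first offered to the
 * Cortex-A76 erratum 1463225 workaround.
 */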
static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
arm64_enter_el1_dbg(regs);
if (!cortex_a76_erratum_1463225_debug_handler(regs))
do_debug_exception(far, esr, regs);
arm64_exit_el1_dbg(regs);
}
static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
arm64: rework FPAC exception handling If an FPAC exception is taken from EL1, the entry code will call do_ptrauth_fault(), where due to: BUG_ON(!user_mode(regs)) ... the kernel will report a problem within do_ptrauth_fault() rather than reporting the original context the FPAC exception was taken from. The pt_regs and ESR value reported will be from within do_ptrauth_fault() and the code dump will be for the BRK in BUG_ON(), which isn't sufficient to debug the cause of the original exception. This patch makes the reporting better by having separate EL0 and EL1 FPAC exception handlers, with the latter calling die() directly to report the original context the FPAC exception was taken from. Note that we only need to prevent kprobes of the EL1 FPAC handler, since the EL0 FPAC handler cannot be called recursively. For consistency with do_el0_svc*(), I've named the split functions do_el{0,1}_fpac() rather than do_el{0,1}_ptrauth_fault(). I've also clarified the comment to not imply there are casues other than FPAC exceptions. Prior to this patch FPAC exceptions are reported as: | kernel BUG at arch/arm64/kernel/traps.c:517! | Internal error: Oops - BUG: 00000000f2000800 [#1] PREEMPT SMP | Modules linked in: | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00130-g9c8a180a1cdf-dirty #12 | Hardware name: FVP Base RevC (DT) | pstate: 00400009 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : do_ptrauth_fault+0x3c/0x40 | lr : el1_fpac+0x34/0x54 | sp : ffff80000a3bbc80 | x29: ffff80000a3bbc80 x28: ffff0008001d8000 x27: 0000000000000000 | x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000 | x23: 0000000020400009 x22: ffff800008f70fa4 x21: ffff80000a3bbe00 | x20: 0000000072000000 x19: ffff80000a3bbcb0 x18: fffffbfffda37000 | x17: 3120676e696d7573 x16: 7361202c6e6f6974 x15: 0000000081a90000 | x14: 0040000000000041 x13: 0040000000000001 x12: ffff000001a90000 | x11: fffffbfffda37480 x10: 0068000000000703 x9 : 0001000080000000 | x8 : 0000000000090000 x7 : 0068000000000f03 x6 : 0060000000000783 | x5 : ffff80000a3bbcb0 x4 : ffff0008001d8000 x3 : 0000000072000000 | x2 : 0000000000000000 x1 : 0000000020400009 x0 : ffff80000a3bbcb0 | Call trace: | do_ptrauth_fault+0x3c/0x40 | el1h_64_sync_handler+0xc4/0xd0 | el1h_64_sync+0x64/0x68 | test_pac+0x8/0x10 | smp_init+0x7c/0x8c | kernel_init_freeable+0x128/0x28c | kernel_init+0x28/0x13c | ret_from_fork+0x10/0x20 | Code: 97fffe5e a8c17bfd d50323bf d65f03c0 (d4210000) With this patch applied FPAC exceptions are reported as: | Internal error: Oops - FPAC: 0000000072000000 [#1] PREEMPT SMP | Modules linked in: | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc3-00132-g78846e1c4757-dirty #11 | Hardware name: FVP Base RevC (DT) | pstate: 20400009 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : test_pac+0x8/0x10 | lr : 0x0 | sp : ffff80000a3bbe00 | x29: ffff80000a3bbe00 x28: 0000000000000000 x27: 0000000000000000 | x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000 | x23: ffff80000a2c8000 x22: 0000000000000000 x21: 0000000000000000 | x20: ffff8000099fa5b0 x19: ffff80000a007000 x18: fffffbfffda37000 | x17: 3120676e696d7573 x16: 7361202c6e6f6974 x15: 0000000081a90000 | x14: 0040000000000041 x13: 0040000000000001 x12: ffff000001a90000 | x11: fffffbfffda37480 x10: 0068000000000703 x9 : 0001000080000000 | x8 : 0000000000090000 x7 : 0068000000000f03 x6 : 0060000000000783 | x5 : ffff80000a2c6000 x4 : ffff0008001d8000 x3 : ffff800009f88378 | x2 : 0000000000000000 x1 : 0000000080210000 x0 : ffff000001a90000 | Call trace: | 
test_pac+0x8/0x10 | smp_init+0x7c/0x8c | kernel_init_freeable+0x128/0x28c | kernel_init+0x28/0x13c | ret_from_fork+0x10/0x20 | Code: d50323bf d65f03c0 d503233f aa1f03fe (d50323bf) Signed-off-by: Mark Rutland <mark.rutland@arm.com> Reviewed-by: Mark Brown <broonie@kernel.org> Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com> Cc: Alexandru Elisei <alexandru.elisei@arm.com> Cc: Amit Daniel Kachhap <amit.kachhap@arm.com> Cc: James Morse <james.morse@arm.com> Cc: Will Deacon <will@kernel.org> Link: https://lore.kernel.org/r/20220913101732.3925290-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2022-09-13 10:17:31 +00:00
do_el1_fpac(regs, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
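/*
 * Dispatch synchronous exceptions taken from EL1 (using SP_EL1) on the
 * exception class in ESR_EL1; unexpected classes panic.
 */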
asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_DABT_CUR:
case ESR_ELx_EC_IABT_CUR:
el1_abort(regs, esr);
break;
/*
* We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
* recursive exception when trying to push the initial pt_regs.
*/
case ESR_ELx_EC_PC_ALIGN:
el1_pc(regs, esr);
break;
case ESR_ELx_EC_SYS64:
case ESR_ELx_EC_UNKNOWN:
el1_undef(regs, esr);
break;
case ESR_ELx_EC_BTI:
el1_bti(regs, esr);
break;
case ESR_ELx_EC_BREAKPT_CUR:
case ESR_ELx_EC_SOFTSTP_CUR:
case ESR_ELx_EC_WATCHPT_CUR:
case ESR_ELx_EC_BRK64:
el1_dbg(regs, esr);
break;
case ESR_ELx_EC_FPAC:
el1_fpac(regs, esr);
break;
default:
__panic_unhandled(regs, "64-bit el1h sync", esr);
}
}
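/*
 * Pseudo-NMI taken from EL1: account as NMI entry/exit; the preemption
 * logic is deliberately not invoked on this path.
 */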
static __always_inline void __el1_pnmi(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
{
arm64_enter_nmi(regs);
do_interrupt_handler(regs, handler);
arm64_exit_nmi(regs);
}
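/*
 * Regular IRQ taken from EL1: perform irqentry accounting around the
 * handler and check whether to preempt on the way out.
 */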
static __always_inline void __el1_irq(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
{
enter_from_kernel_mode(regs);
irq_enter_rcu();
do_interrupt_handler(regs, handler);
irq_exit_rcu();
arm64_preempt_schedule_irq();
exit_to_kernel_mode(regs);
}
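/*
 * Mask IRQ and FIQ via DAIF, then route the interrupt down the pseudo-NMI
 * path when pseudo-NMIs are enabled and the interrupt was taken from a
 * context with interrupts masked.
 */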
static void noinstr el1_interrupt(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
{
write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
__el1_pnmi(regs, handler);
else
__el1_irq(regs, handler);
}
asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
el1_interrupt(regs, handle_arch_irq);
}
asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
el1_interrupt(regs, handle_arch_fiq);
}
asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
local_daif_restore(DAIF_ERRCTX);
arm64_enter_nmi(regs);
do_serror(regs, esr);
arm64_exit_nmi(regs);
}
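/* Data abort taken from EL0, with FAR_EL1 sampled first. */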
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
arm64: entry: call exit_to_user_mode() from C When handling an exception from EL0, we perform the entry work in that exception's C handler, and once the C handler has finished, we return back to the entry assembly. Subsequently in the common `ret_to_user` assembly we perform the exit work that balances with the entry work. This can be somewhat difficult to follow, and makes it hard to rework the return paths (e.g. to pass additional context to the exit code, or to have exception return logic for specific exceptions). This patch reworks the entry code such that each EL0 C exception handler is responsible for both the entry and exit work. This clearly balances the two (and will permit additional variation in future), and avoids an unnecessary bounce between assembly and C in the common case, leaving `ret_from_fork` as the only place assembly has to call the exit code. This means that the exit work is now inlined into the C handler, which is already the case for the entry work, and allows the compiler to generate better code (e.g. by immediately returning when there is no exit work to perform). To align with other exception entry/exit helpers, enter_from_user_mode() is updated to take the EL0 pt_regs as a parameter, though this is currently unused. There should be no functional change as a result of this patch. However, this should lead to slightly better backtraces when an error is encountered within do_notify_resume(), as the C handler should appear in the backtrace, indicating the specific exception that the kernel was entered with. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: James Morse <james.morse@arm.com> Cc: Joey Gouly <joey.gouly@arm.com> Cc: Marc Zyngier <maz@kernel.org> Cc: Will Deacon <will@kernel.org> Reviewed-by: Joey Gouly <joey.gouly@arm.com> Link: https://lore.kernel.org/r/20210802140733.52716-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2021-08-02 14:07:33 +00:00
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
do_mem_abort(far, esr, regs);
exit_to_user_mode(regs);
}
static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
/*
* We've taken an instruction abort from userspace and not yet
* re-enabled IRQs. If the address is a kernel address, apply
* BP hardening prior to enabling IRQs and pre-emption.
*/
if (!is_ttbr0_addr(far))
arm64_apply_bp_hardening();
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
do_mem_abort(far, esr, regs);
exit_to_user_mode(regs);
}
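/* Access traps for FP/SIMD, SVE and SME taken from EL0. */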
static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
do_fpsimd_acc(esr, regs);
exit_to_user_mode(regs);
}
static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
do_sve_acc(esr, regs);
arm64: entry: call exit_to_user_mode() from C When handling an exception from EL0, we perform the entry work in that exception's C handler, and once the C handler has finished, we return back to the entry assembly. Subsequently in the common `ret_to_user` assembly we perform the exit work that balances with the entry work. This can be somewhat difficult to follow, and makes it hard to rework the return paths (e.g. to pass additional context to the exit code, or to have exception return logic for specific exceptions). This patch reworks the entry code such that each EL0 C exception handler is responsible for both the entry and exit work. This clearly balances the two (and will permit additional variation in future), and avoids an unnecessary bounce between assembly and C in the common case, leaving `ret_from_fork` as the only place assembly has to call the exit code. This means that the exit work is now inlined into the C handler, which is already the case for the entry work, and allows the compiler to generate better code (e.g. by immediately returning when there is no exit work to perform). To align with other exception entry/exit helpers, enter_from_user_mode() is updated to take the EL0 pt_regs as a parameter, though this is currently unused. There should be no functional change as a result of this patch. However, this should lead to slightly better backtraces when an error is encountered within do_notify_resume(), as the C handler should appear in the backtrace, indicating the specific exception that the kernel was entered with. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: James Morse <james.morse@arm.com> Cc: Joey Gouly <joey.gouly@arm.com> Cc: Marc Zyngier <maz@kernel.org> Cc: Will Deacon <will@kernel.org> Reviewed-by: Joey Gouly <joey.gouly@arm.com> Link: https://lore.kernel.org/r/20210802140733.52716-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2021-08-02 14:07:33 +00:00
exit_to_user_mode(regs);
}
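
/*
 * SME access trap from EL0: taken when a task's use of SME is trapped,
 * so that do_sme_acc() can set up and enable the task's SME state.
 */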
static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	exit_to_user_mode(regs);
}
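
/*
 * FP/SIMD exception from EL0 (e.g. an enabled IEEE floating-point trap);
 * do_fpsimd_exc() delivers the appropriate signal.
 */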
static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	exit_to_user_mode(regs);
}
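
/*
 * Trapped system register access (MRS/MSR) or system instruction from
 * EL0; do_el0_sys() emulates the access where possible.
 */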
static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_sys(esr, regs);
	exit_to_user_mode(regs);
}
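
/*
 * PC alignment fault from EL0. Branch predictor hardening is applied
 * when userspace tried to execute from a kernel (non-TTBR0) address.
 */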
static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	exit_to_user_mode(regs);
}
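
/* SP alignment fault from EL0; the misaligned SP itself is the fault address. */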
static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	exit_to_user_mode(regs);
}
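
/*
 * UNDEFINED instruction from EL0; do_el0_undef() tries any instruction
 * emulation hooks before delivering SIGILL.
 */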
static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_undef(regs, esr);
	exit_to_user_mode(regs);
}
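
/* Branch Target Identification (BTI) failure from EL0. */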
static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	exit_to_user_mode(regs);
}
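
/* Memory copy/set (FEAT_MOPS) exception from EL0. */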
static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_mops(regs, esr);
	exit_to_user_mode(regs);
}
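
/*
 * Unexpected synchronous exception from EL0; bad_el0_sync() reports it
 * and delivers SIGILL.
 */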
static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	exit_to_user_mode(regs);
}
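
/* Debug exception from EL0 (hardware breakpoint/watchpoint, single-step or BRK). */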
static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	do_debug_exception(far, esr, regs);
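	/*
	 * Debug handling must run with debug exceptions (and hence
	 * SError/IRQ/FIQ) masked, so DAIF is only restored once
	 * do_debug_exception() has returned.
	 */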
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}
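
/* SVC (system call) from EL0. */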
static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
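	/*
	 * fp_user_discard() operates on the live FP/SVE/SME register state
	 * and so must run before DAIF.IF is cleared, as interrupts could
	 * lead to preemption which would make that state foreign.
	 */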
	fp_user_discard();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc(regs);
	exit_to_user_mode(regs);
}
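
/* Pointer Authentication failure (FPAC) from EL0. */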
static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
arm64: entry: call exit_to_user_mode() from C When handling an exception from EL0, we perform the entry work in that exception's C handler, and once the C handler has finished, we return back to the entry assembly. Subsequently in the common `ret_to_user` assembly we perform the exit work that balances with the entry work. This can be somewhat difficult to follow, and makes it hard to rework the return paths (e.g. to pass additional context to the exit code, or to have exception return logic for specific exceptions). This patch reworks the entry code such that each EL0 C exception handler is responsible for both the entry and exit work. This clearly balances the two (and will permit additional variation in future), and avoids an unnecessary bounce between assembly and C in the common case, leaving `ret_from_fork` as the only place assembly has to call the exit code. This means that the exit work is now inlined into the C handler, which is already the case for the entry work, and allows the compiler to generate better code (e.g. by immediately returning when there is no exit work to perform). To align with other exception entry/exit helpers, enter_from_user_mode() is updated to take the EL0 pt_regs as a parameter, though this is currently unused. There should be no functional change as a result of this patch. However, this should lead to slightly better backtraces when an error is encountered within do_notify_resume(), as the C handler should appear in the backtrace, indicating the specific exception that the kernel was entered with. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: James Morse <james.morse@arm.com> Cc: Joey Gouly <joey.gouly@arm.com> Cc: Marc Zyngier <maz@kernel.org> Cc: Will Deacon <will@kernel.org> Reviewed-by: Joey Gouly <joey.gouly@arm.com> Link: https://lore.kernel.org/r/20210802140733.52716-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2021-08-02 14:07:33 +00:00
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
do_el0_fpac(regs, esr);
exit_to_user_mode(regs);
}

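/*
 * Top-level handler for synchronous exceptions taken from 64-bit EL0.
 * DAIF exceptions are still masked at this point, so ESR_EL1 still
 * describes the exception we were entered with: decode its exception
 * class and hand off to the matching el0_*() helper, each of which does
 * its own enter_from_user_mode()/exit_to_user_mode() accounting.
 */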
asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);

switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_SVC64:
el0_svc(regs);
break;
case ESR_ELx_EC_DABT_LOW:
el0_da(regs, esr);
break;
case ESR_ELx_EC_IABT_LOW:
el0_ia(regs, esr);
break;
case ESR_ELx_EC_FP_ASIMD:
el0_fpsimd_acc(regs, esr);
break;
case ESR_ELx_EC_SVE:
el0_sve_acc(regs, esr);
break;
case ESR_ELx_EC_SME:
el0_sme_acc(regs, esr);
break;
case ESR_ELx_EC_FP_EXC64:
el0_fpsimd_exc(regs, esr);
break;
case ESR_ELx_EC_SYS64:
case ESR_ELx_EC_WFx:
el0_sys(regs, esr);
break;
case ESR_ELx_EC_SP_ALIGN:
el0_sp(regs, esr);
break;
case ESR_ELx_EC_PC_ALIGN:
el0_pc(regs, esr);
break;
case ESR_ELx_EC_UNKNOWN:
el0_undef(regs, esr);
break;
case ESR_ELx_EC_BTI:
el0_bti(regs);
break;
case ESR_ELx_EC_MOPS:
el0_mops(regs, esr);
break;
case ESR_ELx_EC_BREAKPT_LOW:
case ESR_ELx_EC_SOFTSTP_LOW:
case ESR_ELx_EC_WATCHPT_LOW:
case ESR_ELx_EC_BRK64:
el0_dbg(regs, esr);
break;
case ESR_ELx_EC_FPAC:
el0_fpac(regs, esr);
break;
default:
el0_inv(regs, esr);
}
}

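/*
 * Common body for IRQs and FIQs taken from 64-bit or 32-bit EL0; the
 * root handler (handle_arch_irq or handle_arch_fiq) is supplied by the
 * thin wrappers below.
 */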
static void noinstr el0_interrupt(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
{
enter_from_user_mode(regs);
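
/*
 * Unmask debug exceptions and SError, but keep IRQ and FIQ masked while
 * the interrupt is handled.
 */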
write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
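
/*
 * If the interrupted PC is in the TTBR1 (kernel) half of the address
 * space, userspace may have been attempting to train the branch
 * predictor on kernel addresses; apply branch predictor hardening
 * before proceeding.
 */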
if (regs->pc & BIT(55))
arm64_apply_bp_hardening();

irq_enter_rcu();
do_interrupt_handler(regs, handler);
irq_exit_rcu();

exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);

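/*
 * SErrors are handled as NMI-like events: DAIF_ERRCTX keeps SError and
 * interrupts masked, and do_serror() runs within the NMI entry/exit
 * accounting.
 */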
enter_from_user_mode(regs);
local_daif_restore(DAIF_ERRCTX);
arm64_enter_nmi(regs);
do_serror(regs, esr);
arm64_exit_nmi(regs);
local_daif_restore(DAIF_PROCCTX);
exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
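/*
 * Handle trapped AArch32 CP15 accesses, e.g. the barrier instructions
 * and counter reads that the kernel emulates for compat tasks.
 */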
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
do_el0_cp15(esr, regs);
exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
enter_from_user_mode(regs);

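/*
 * Cortex-A76 erratum 1463225: single-stepping an SVC can result in a
 * spurious debug exception at EL1. Record that we are inside an SVC so
 * the debug exception handler can recognise and fix this up.
 */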
cortex_a76_erratum_1463225_svc_handler();
local_daif_restore(DAIF_PROCCTX);
do_el0_svc_compat(regs);
exit_to_user_mode(regs);
}

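/*
 * Top-level handler for synchronous exceptions taken from 32-bit EL0,
 * covering the AArch32-specific exception classes (CP14/CP15 traps,
 * BKPT32) alongside those shared with 64-bit tasks.
 */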
asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);

switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_SVC32:
el0_svc_compat(regs);
break;
case ESR_ELx_EC_DABT_LOW:
el0_da(regs, esr);
break;
case ESR_ELx_EC_IABT_LOW:
el0_ia(regs, esr);
break;
case ESR_ELx_EC_FP_ASIMD:
el0_fpsimd_acc(regs, esr);
break;
case ESR_ELx_EC_FP_EXC32:
el0_fpsimd_exc(regs, esr);
break;
case ESR_ELx_EC_PC_ALIGN:
el0_pc(regs, esr);
break;
case ESR_ELx_EC_UNKNOWN:
case ESR_ELx_EC_CP14_MR:
case ESR_ELx_EC_CP14_LS:
case ESR_ELx_EC_CP14_64:
el0_undef(regs, esr);
break;
case ESR_ELx_EC_CP15_32:
case ESR_ELx_EC_CP15_64:
el0_cp15(regs, esr);
break;
case ESR_ELx_EC_BREAKPT_LOW:
case ESR_ELx_EC_SOFTSTP_LOW:
case ESR_ELx_EC_WATCHPT_LOW:
case ESR_ELx_EC_BKPT32:
el0_dbg(regs, esr);
break;
default:
el0_inv(regs, esr);
}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
__el0_error_handler_common(regs);
}

#else /* CONFIG_COMPAT */
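/*
 * Without CONFIG_COMPAT the kernel never expects exceptions from 32-bit
 * EL0, so UNHANDLED() generates stub handlers that panic and report the
 * offending vector.
 */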
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
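/*
 * We arrive here from the entry assembly, on the overflow stack, once a
 * kernel stack overflow has been detected. Gather ESR/FAR for reporting
 * and panic; there is no way to safely return.
 */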
unsigned long esr = read_sysreg(esr_el1);
unsigned long far = read_sysreg(far_el1);

arm64_enter_nmi(regs);
panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
unsigned long ret;

/*
* We didn't take an exception to get here, so the HW hasn't
* set/cleared bits in PSTATE that we may rely on.
*
* The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
* whether PSTATE bits are inherited unchanged or generated from
* scratch, and the TF-A implementation always clears PAN and always
* clears UAO. There are no other known implementations.
*
* Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
* PSTATE is modified upon architectural exceptions, and so PAN is
* either inherited or set per SCTLR_ELx.SPAN, and UAO is always
* cleared.
*
* We must explicitly reset PAN to the expected state, including
* clearing it when the host isn't using it, in case a VM had it set.
*/
if (system_uses_hw_pan())
set_pstate_pan(1);
else if (cpu_has_pan())
set_pstate_pan(0);

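/* An SDEI event is effectively an NMI, so account for it as one. */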
arm64_enter_nmi(regs);
ret = do_sdei_event(regs, arg);
arm64_exit_nmi(regs);

return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */