linux-stable/arch/arm64/kernel/stacktrace.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static inline void unwind_init_from_regs(struct unwind_state *state,
					 struct pt_regs *regs)
{
	unwind_init_common(state, current);
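
	/* On arm64, x29 is the frame pointer register. */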
	state->fp = regs->regs[29];
	state->pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void unwind_init_from_caller(struct unwind_state *state)
{
	unwind_init_common(state, current);
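
	/*
	 * Note: __builtin_frame_address() with a non-zero argument is not
	 * safe in portable C, but is known to behave correctly on arm64
	 * with the compilers the kernel supports.
	 */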
	state->fp = (unsigned long)__builtin_frame_address(1);
	state->pc = (unsigned long)__builtin_return_address(0);
}

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static inline void unwind_init_from_task(struct unwind_state *state,
					 struct task_struct *task)
{
	unwind_init_common(state, task);

	state->fp = thread_saved_fp(task);
	state->pc = thread_saved_pc(task);
}

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static bool on_accessible_stack(const struct task_struct *tsk,
				unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	if (on_task_stack(tsk, sp, size, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, size, info))
		return true;
	if (on_overflow_stack(sp, size, info))
		return true;
	if (on_sdei_stack(sp, size, info))
		return true;

	return false;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static int notrace unwind_next(struct unwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->fp;
	struct stack_info info;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_common(state, &info, on_accessible_stack, NULL);
	if (err)
		return err;

	state->pc = ptrauth_strip_insn_pac(state->pc);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
	    (state->pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		/*
		 * This is a case where the function graph tracer has
		 * modified a return address (LR) in a stack frame
		 * to hook a function return.
		 * So replace it with the original value.
		 */
		orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
						(void *)state->fp);
		if (WARN_ON_ONCE(state->pc == orig_pc))
			return -EINVAL;
		state->pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
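	/*
	 * Similarly, kretprobes replaces a probed function's return address
	 * with its trampoline; recover the original return address using
	 * the frame pointer as the lookup key.
	 */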
	if (is_kretprobe_trampoline(state->pc))
		state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif

	return 0;
}
NOKPROBE_SYMBOL(unwind_next);
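
/*
 * Walk the stack, reporting each unwound PC to the consumer until it asks to
 * stop or unwinding terminates.
 */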
static void notrace unwind(struct unwind_state *state,
			   stack_trace_consume_fn consume_entry, void *cookie)
{
	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;
		ret = unwind_next(state);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(unwind);
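
/* Print a single backtrace entry at the requested log level. */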
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;
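
	/* Pin the task's stack so it cannot be freed while we walk it. */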
	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
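	/*
	 * The barrier() presumably keeps the compiler from tail-calling
	 * dump_backtrace(); the original code carries no explanation.
	 */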
	barrier();
}

noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
				      void *cookie, struct task_struct *task,
				      struct pt_regs *regs)
{
	struct unwind_state state;
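
	/*
	 * Pick where the unwind starts: the given regs (current task only),
	 * the caller of arch_stack_walk() for the current task, or the
	 * blocked task's saved context.
	 */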
	if (regs) {
		if (task != current)
			return;
		unwind_init_from_regs(&state, regs);
	} else if (task == current) {
		unwind_init_from_caller(&state);
	} else {
		unwind_init_from_task(&state, task);
	}

	unwind(&state, consume_entry, cookie);
}
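
/*
 * Illustrative sketch (not part of the kernel source): a minimal consumer
 * for arch_stack_walk(). The callback receives each unwound return address
 * in turn and returns false to stop the walk early. The names below
 * (show_entry, depth) are hypothetical.
 *
 *	static bool show_entry(void *cookie, unsigned long pc)
 *	{
 *		unsigned int *depth = cookie;
 *
 *		pr_info("%2u: %pS\n", *depth, (void *)pc);
 *		return ++(*depth) < 16;		(stop after 16 entries)
 *	}
 *
 *	unsigned int depth = 0;
 *
 *	arch_stack_walk(show_entry, &depth, current, NULL);
 */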