Merge branch 'for-next/stacktrace' into for-next/core

* for-next/stacktrace:
  arm64: Copy the task argument to unwind_state
  arm64: Split unwind_init()
  arm64: stacktrace: use non-atomic __set_bit
  arm64: kasan: do not instrument stacktrace.c
commit cb20311e5e
Author: Will Deacon
Date:   2022-07-25 10:57:26 +01:00
2 changed files with 79 additions and 23 deletions

diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile

@@ -14,6 +14,11 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_syscall.o	 = -fstack-protector -fstack-protector-strong
 CFLAGS_syscall.o	+= -fno-stack-protector
 
+# When KASAN is enabled, a stack trace is recorded for every alloc/free, which
+# can significantly impact performance. Avoid instrumenting the stack trace
+# collection code to minimize this impact.
+KASAN_SANITIZE_stacktrace.o := n
+
 # It's not safe to invoke KCOV when portions of the kernel environment aren't
 # available or are out-of-sync with HW state. Since `noinstr` doesn't always
 # inhibit KCOV instrumentation, disable it for the entire compilation unit.
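
For context, this is the standard kbuild mechanism for opting individual objects out of an instrumentation facility: each facility exposes a per-object variable that a Makefile sets to n. A minimal sketch using a hypothetical foo.o (the variables shown are real kbuild controls; the object name is illustrative):

    # Disable KASAN for foo.o only; other objects in this Makefile
    # remain instrumented.
    KASAN_SANITIZE_foo.o := n

    # The same per-object pattern covers other facilities, e.g.:
    KCOV_INSTRUMENT_foo.o := n
    UBSAN_SANITIZE_foo.o := n

    # Omitting the object suffix disables the facility for every object
    # built by this Makefile:
    KASAN_SANITIZE := n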

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c

@@ -38,6 +38,8 @@
  * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
  *               associated with the most recently encountered replacement lr
  *               value.
+ *
+ * @task:        The task being unwound.
  */
 struct unwind_state {
 	unsigned long fp;
@@ -48,13 +50,13 @@ struct unwind_state {
 #ifdef CONFIG_KRETPROBES
 	struct llist_node *kr_cur;
 #endif
+	struct task_struct *task;
 };
 
-static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
-				unsigned long pc)
+static void unwind_init_common(struct unwind_state *state,
+			       struct task_struct *task)
 {
-	state->fp = fp;
-	state->pc = pc;
+	state->task = task;
 #ifdef CONFIG_KRETPROBES
 	state->kr_cur = NULL;
 #endif
@@ -72,7 +74,57 @@ static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
 	state->prev_fp = 0;
 	state->prev_type = STACK_TYPE_UNKNOWN;
 }
-NOKPROBE_SYMBOL(unwind_init);
+
+/*
+ * Start an unwind from a pt_regs.
+ *
+ * The unwind will begin at the PC within the regs.
+ *
+ * The regs must be on a stack currently owned by the calling task.
+ */
+static inline void unwind_init_from_regs(struct unwind_state *state,
+					 struct pt_regs *regs)
+{
+	unwind_init_common(state, current);
+
+	state->fp = regs->regs[29];
+	state->pc = regs->pc;
+}
+
+/*
+ * Start an unwind from a caller.
+ *
+ * The unwind will begin at the caller of whichever function this is inlined
+ * into.
+ *
+ * The function which invokes this must be noinline.
+ */
+static __always_inline void unwind_init_from_caller(struct unwind_state *state)
+{
+	unwind_init_common(state, current);
+
+	state->fp = (unsigned long)__builtin_frame_address(1);
+	state->pc = (unsigned long)__builtin_return_address(0);
+}
+
+/*
+ * Start an unwind from a blocked task.
+ *
+ * The unwind will begin at the blocked task's saved PC (i.e. the caller of
+ * cpu_switch_to()).
+ *
+ * The caller should ensure the task is blocked in cpu_switch_to() for the
+ * duration of the unwind, or the unwind will be bogus. It is never valid to
+ * call this for the current task.
+ */
+static inline void unwind_init_from_task(struct unwind_state *state,
+					 struct task_struct *task)
+{
+	unwind_init_common(state, task);
+
+	state->fp = thread_saved_fp(task);
+	state->pc = thread_saved_pc(task);
+}
 
 /*
  * Unwind from one frame record (A) to the next frame record (B).
@@ -81,9 +133,9 @@ NOKPROBE_SYMBOL(unwind_init);
  * records (e.g. a cycle), determined based on the location and fp value of A
  * and the location (but not the fp value) of B.
  */
-static int notrace unwind_next(struct task_struct *tsk,
-			       struct unwind_state *state)
+static int notrace unwind_next(struct unwind_state *state)
 {
+	struct task_struct *tsk = state->task;
 	unsigned long fp = state->fp;
 	struct stack_info info;
 
@@ -117,15 +169,15 @@ static int notrace unwind_next(struct task_struct *tsk,
 		if (fp <= state->prev_fp)
 			return -EINVAL;
 	} else {
-		set_bit(state->prev_type, state->stacks_done);
+		__set_bit(state->prev_type, state->stacks_done);
 	}
 
 	/*
 	 * Record this frame record's values and location. The prev_fp and
 	 * prev_type are only meaningful to the next unwind_next() invocation.
 	 */
-	state->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
-	state->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+	state->fp = READ_ONCE(*(unsigned long *)(fp));
+	state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
 	state->prev_fp = fp;
 	state->prev_type = info.type;
 
@@ -157,8 +209,7 @@ static int notrace unwind_next(struct task_struct *tsk,
 }
 NOKPROBE_SYMBOL(unwind_next);
 
-static void notrace unwind(struct task_struct *tsk,
-			   struct unwind_state *state,
+static void notrace unwind(struct unwind_state *state,
 			   stack_trace_consume_fn consume_entry, void *cookie)
 {
 	while (1) {
@@ -166,7 +217,7 @@ static void notrace unwind(struct task_struct *tsk,
 		if (!consume_entry(cookie, state->pc))
 			break;
 
-		ret = unwind_next(tsk, state);
+		ret = unwind_next(state);
 		if (ret < 0)
 			break;
 	}
@@ -212,15 +263,15 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
 {
 	struct unwind_state state;
 
-	if (regs)
-		unwind_init(&state, regs->regs[29], regs->pc);
-	else if (task == current)
-		unwind_init(&state,
-			    (unsigned long)__builtin_frame_address(1),
-			    (unsigned long)__builtin_return_address(0));
-	else
-		unwind_init(&state, thread_saved_fp(task),
-			    thread_saved_pc(task));
+	if (regs) {
+		if (task != current)
+			return;
+		unwind_init_from_regs(&state, regs);
+	} else if (task == current) {
+		unwind_init_from_caller(&state);
+	} else {
+		unwind_init_from_task(&state, task);
+	}
 
-	unwind(task, &state, consume_entry, cookie);
+	unwind(&state, consume_entry, cookie);
 }
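
As a usage illustration, and not part of this merge: callers drive the reworked unwinder through arch_stack_walk(), whose callback contract is the generic stack_trace_consume_fn from <linux/stacktrace.h>. In this minimal sketch the function names, the printk format, and the 16-frame cap are all illustrative:

#include <linux/stacktrace.h>
#include <linux/printk.h>
#include <linux/sched.h>

/* Print one entry per frame; returning false ends the walk early. */
static bool dump_entry(void *cookie, unsigned long pc)
{
	unsigned int *depth = cookie;

	printk("%2u: %pS\n", *depth, (void *)pc);
	return ++(*depth) < 16;
}

static noinline void dump_current_stack(void)
{
	unsigned int depth = 0;

	/*
	 * regs == NULL and task == current selects the
	 * unwind_init_from_caller() path added above, so the first
	 * entry reported is this function, the caller of
	 * arch_stack_walk().
	 */
	arch_stack_walk(dump_entry, &depth, current, NULL);
}

Note that with the reworked entry points, passing a non-NULL regs for a task other than current now returns without reporting any entries, rather than starting an unwind from regs that cannot belong to that task's stack.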