linux-stable/arch/sparc/kernel/ftrace.c
Josh Poimboeuf 9a7c348ba6 ftrace: Add return address pointer to ftrace_ret_stack
Storing this value will help prevent unwinders from getting out of sync
with the function graph tracer ret_stack.  Now instead of needing a
stateful iterator, they can compare the return address pointer to find
the right ret_stack entry.

Note that an array of 50 ftrace_ret_stack structs is allocated for every
task.  So when an arch implements this, it will add either 200 or 400
bytes of memory usage per task: 50 entries times a 4-byte or 8-byte
pointer, depending on whether it's a 32-bit or 64-bit platform.

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/a95cfcc39e8f26b89a430c56926af0bb217bc0a1.1471607358.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-08-24 12:15:14 +02:00
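
To make the mechanism described in the commit message concrete, here is a
minimal sketch of the lookup an unwinder can perform once an arch records
the return address pointer.  It is illustrative only: the function name is
hypothetical, and it assumes struct ftrace_ret_stack has gained the retp
field that this commit introduces for arches that opt in.

/*
 * Illustrative sketch, not part of this file: recover the real return
 * address for a stack slot that the graph tracer has redirected to
 * return_to_handler.  Instead of a stateful iterator, match the slot's
 * address against the retp recorded in each ret_stack entry.
 */
static unsigned long sketch_graph_ret_addr(struct task_struct *task,
					   unsigned long addr,
					   unsigned long *retp)
{
	int i;

	if (addr != (unsigned long)return_to_handler)
		return addr;		/* slot was not hooked */

	for (i = 0; i <= task->curr_ret_stack; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return addr;			/* no matching entry found */
}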

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>
#include <trace/syscall.h>

#include <asm/ftrace.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/* The canonical sparc nop: sethi %hi(0), %g0. */
static const u32 ftrace_nop = 0x01000000;

/*
 * Build a "call <addr>" instruction for the patch site at ip: op field
 * 01 in the top two bits (0x40000000) plus a 30-bit signed word
 * displacement, hence the byte offset shifted right by 2.
 */
static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	u32 call;
	s32 off;

	off = ((s32)addr - (s32)ip);
	call = 0x40000000 | ((u32)off >> 2);

	return call;
}
/*
 * Atomically patch one instruction word with compare-and-swap and flush
 * the I-cache line.  A fault while writing kernel text is caught by the
 * exception table entry below and reported via 'faulted'.
 */
static int ftrace_modify_code(unsigned long ip, u32 old, u32 new)
{
	u32 replaced;
	int faulted;

	__asm__ __volatile__(
	"1:	cas	[%[ip]], %[old], %[new]\n"
	"	flush	%[ip]\n"
	"	mov	0, %[faulted]\n"
	"2:\n"
	"	.section	.fixup,#alloc,#execinstr\n"
	"	.align	4\n"
	"3:	sethi	%%hi(2b), %[faulted]\n"
	"	jmpl	%[faulted] + %%lo(2b), %%g0\n"
	"	 mov	1, %[faulted]\n"
	"	.previous\n"
	"	.section	__ex_table,\"a\"\n"
	"	.align	4\n"
	"	.word	1b, 3b\n"
	"	.previous\n"
	: "=r" (replaced), [faulted] "=r" (faulted)
	: [new] "0" (new), [old] "r" (old), [ip] "r" (ip)
	: "memory");

	/* The word in memory was neither the expected old nor the new insn. */
	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	u32 old, new;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop;
	return ftrace_modify_code(ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	u32 old, new;

	old = ftrace_nop;
	new = ftrace_call_replace(ip, addr);
	return ftrace_modify_code(ip, old, new);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	u32 old, new;

	old = *(u32 *) &ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func);
	return ftrace_modify_code(ip, old, new);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	u32 old, new;

	old = *(u32 *) &ftrace_graph_call;
	new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
	return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	u32 old, new;

	old = *(u32 *) &ftrace_graph_call;
	new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
	return ftrace_modify_code(ip, old, new);
}
#endif /* !CONFIG_DYNAMIC_FTRACE */
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long parent,
				    unsigned long self_addr,
				    unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;

	/*
	 * On sparc the saved return address is the call instruction
	 * itself, so the real return point (call + delay slot) is
	 * parent + 8.
	 */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return parent + 8UL;

	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
				     frame_pointer, NULL) == -EBUSY)
		return parent + 8UL;

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		return parent + 8UL;
	}

	return return_hooker;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
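
A note on the NULL passed as the last argument to ftrace_push_return_trace()
above: this commit extends that function with a return address pointer
parameter, and sparc does not record one, so it passes NULL.  As a hedged
illustration (not sparc code), an arch that selects
HAVE_FUNCTION_GRAPH_RET_ADDR_PTR would instead pass a pointer to the slot
being redirected, roughly:

	/*
	 * Illustrative only: 'retp' is a hypothetical unsigned long *
	 * naming the stack (or register-save) slot that will be made to
	 * return to return_to_handler, so unwinders can later match it
	 * against the value recorded in the ret_stack entry.
	 */
	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
				     frame_pointer, retp) == -EBUSY)
		return parent + 8UL;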