sparc64: add ftrace support.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
David Miller 2008-05-13 22:06:59 -07:00 committed by Thomas Gleixner
parent aa5e5ceaf5
commit d05f5f9906
5 changed files with 156 additions and 5 deletions

View File

@@ -11,6 +11,7 @@ config SPARC
config SPARC64
bool
default y
select HAVE_FTRACE
select HAVE_IDE
select HAVE_LMB
select HAVE_ARCH_KGDB

View File

@@ -33,7 +33,7 @@ config DEBUG_PAGEALLOC
config MCOUNT
bool
depends on STACK_DEBUG
depends on STACK_DEBUG || FTRACE
default y
config FRAME_POINTER

View File

@@ -14,6 +14,7 @@ obj-y := process.o setup.o cpu.o idprom.o \
power.o sbus.o sparc64_ksyms.o chmc.o \
visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_PCI) += ebus.o pci_common.o \
pci_psycho.o pci_sabre.o pci_schizo.o \

View File

@@ -0,0 +1,99 @@
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>
/* Encoding of the sparc "nop" instruction ("sethi 0, %g0"). */
static const u32 ftrace_nop = 0x01000000;
/*
 * Return non-zero if the instruction at @ip has already been
 * converted into the ftrace nop (i.e. the call site is disabled).
 */
notrace int ftrace_ip_converted(unsigned long ip)
{
	u32 current_insn = *(u32 *) ip;

	return current_insn == ftrace_nop;
}
/*
 * Return a pointer to the nop instruction used to disable a call
 * site.  Points at the static const ftrace_nop word; callers must
 * treat the 4 bytes as read-only.
 */
notrace unsigned char *ftrace_nop_replace(void)
{
	/*
	 * Cast to "unsigned char *" to match the declared return type;
	 * the original "(char *)" cast returned an incompatible pointer
	 * type (-Wpointer-sign / constraint violation).
	 */
	return (unsigned char *) &ftrace_nop;
}
notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
static u32 call;
s32 off;
off = ((s32)addr - (s32)ip);
call = 0x40000000 | ((u32)off >> 2);
return (unsigned char *) &call;
}
/*
 * Atomically replace the instruction word at @ip: swap in @new_code
 * only if memory still holds @old_code, using the sparc64 "cas"
 * (compare-and-swap) instruction, then flush the I-cache for @ip.
 *
 * Returns 0 on success, 1 if the cas faulted (caught via the fixup /
 * __ex_table entries below), and 2 if the word at @ip matched neither
 * the old nor the new instruction.
 */
notrace int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	u32 old = *(u32 *)old_code;
	u32 new = *(u32 *)new_code;
	u32 replaced;	/* prior memory contents, written by "cas" */
	int faulted;

	/*
	 * "cas [ip], old, new" compares *ip with %[old] and swaps *ip
	 * with the %[new] register.  %[new] shares a register with
	 * "replaced" (the "0" matching constraint), so after the cas,
	 * "replaced" holds whatever was in memory beforehand.  On a
	 * fault, the .fixup stub sets faulted = 1 (in the jmpl delay
	 * slot) and branches back to label 2.
	 */
	__asm__ __volatile__(
	"1:	cas	[%[ip]], %[old], %[new]\n"
	"	flush	%[ip]\n"
	"	mov	0, %[faulted]\n"
	"2:\n"
	"	.section	.fixup,#alloc,#execinstr\n"
	"	.align	4\n"
	"3:	sethi	%%hi(2b), %[faulted]\n"
	"	jmpl	%[faulted] + %%lo(2b), %%g0\n"
	"	 mov	1, %[faulted]\n"
	"	.previous\n"
	"	.section	__ex_table,\"a\"\n"
	"	.align	4\n"
	"	.word	1b, 3b\n"
	"	.previous\n"
	: "=r" (replaced), [faulted] "=r" (faulted)
	: [new] "0" (new), [old] "r" (old), [ip] "r" (ip)
	: "memory");

	/* cas succeeded only if memory held the old (or already new) insn. */
	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}
/*
 * Repoint the patchable call instruction at the ftrace_call site so
 * that it invokes @func.  Returns the ftrace_modify_code() result
 * (0 on success, non-zero on failure).
 */
notrace int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long call_site = (unsigned long) &ftrace_call;
	unsigned char prev[4];
	unsigned char *repl;

	/* Snapshot the current instruction so the swap can be atomic. */
	memcpy(prev, &ftrace_call, 4);

	repl = ftrace_call_replace(call_site, (unsigned long) func);
	return ftrace_modify_code(call_site, prev, repl);
}
/*
 * Replace the mcount stub with a call to the ip-recorder function
 * whose address is passed in via *@data.  The ftrace_modify_code()
 * result is reported back to the caller through *@data.
 */
notrace int ftrace_mcount_set(unsigned long *data)
{
	unsigned long stub_ip = (long) &mcount_call;
	unsigned char saved[4];
	unsigned char *patched;

	/* Snapshot the current stub instruction for the atomic swap. */
	memcpy(saved, &mcount_call, 4);

	patched = ftrace_call_replace(stub_ip, *data);
	*data = ftrace_modify_code(stub_ip, saved, patched);

	return 0;
}
/*
 * One-time dynamic-ftrace arch initialization: patch the mcount stub
 * via ftrace_mcount_set().  @data carries the recorder address in and
 * the patch result out (see ftrace_mcount_set()).  Always returns 0.
 */
int __init ftrace_dyn_arch_init(void *data)
{
	ftrace_mcount_set(data);
	return 0;
}

View File

@@ -28,10 +28,13 @@ ovstack:
.skip OVSTACKSIZE
#endif
.text
.align 32
.globl mcount, _mcount
mcount:
.align 32
.globl _mcount
.type _mcount,#function
.globl mcount
.type mcount,#function
_mcount:
mcount:
#ifdef CONFIG_STACK_DEBUG
/*
* Check whether %sp is dangerously low.
@@ -55,6 +58,53 @@ _mcount:
or %g3, %lo(panicstring), %o0
call prom_halt
nop
1:
#endif
1: retl
#ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE
mov %o7, %o0
.globl mcount_call
mcount_call:
call ftrace_stub
mov %o0, %o7
#else
sethi %hi(ftrace_trace_function), %g1
sethi %hi(ftrace_stub), %g2
ldx [%g1 + %lo(ftrace_trace_function)], %g1
or %g2, %lo(ftrace_stub), %g2
cmp %g1, %g2
be,pn %icc, 1f
mov %i7, %o1
jmpl %g1, %g0
mov %o7, %o0
/* not reached */
1:
#endif
#endif
retl
nop
.size _mcount,.-_mcount
.size mcount,.-mcount
#ifdef CONFIG_FTRACE
/* ftrace_stub: the no-op tracer callback; simply returns to the caller. */
.globl ftrace_stub
.type ftrace_stub,#function
ftrace_stub:
retl
nop
.size ftrace_stub,.-ftrace_stub
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * ftrace_caller: entry point for dynamic ftrace.  Passes the traced
 * function's caller (%i7) and our own return address (%o7, the call
 * site) as arguments.  The "call ftrace_stub" at the ftrace_call
 * label is rewritten at runtime (see ftrace_update_ftrace_func in
 * ftrace.c) to invoke the active tracer.
 */
.globl ftrace_caller
.type ftrace_caller,#function
ftrace_caller:
mov %i7, %o1	/* arg1: address the traced function was called from */
mov %o7, %o0	/* arg0: this call site's return address */
.globl ftrace_call
ftrace_call:
call ftrace_stub
mov %o0, %o7	/* delay slot: restore original %o7 for the callee's return */
retl
nop
.size ftrace_caller,.-ftrace_caller
#endif
#endif