[S390] ftrace: add function graph tracer support

Function graph tracer support for s390.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
Heiko Carstens 2009-06-12 10:26:46 +02:00 committed by Martin Schwidefsky
parent 8b4488f85d
commit 88dbd20372
9 changed files with 166 additions and 12 deletions

View file

@ -85,6 +85,7 @@ config S390
select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_DEFAULT_NO_SPIN_MUTEXES select HAVE_DEFAULT_NO_SPIN_MUTEXES
select HAVE_OPROFILE select HAVE_OPROFILE
select HAVE_KPROBES select HAVE_KPROBES

View file

@ -11,11 +11,13 @@ struct dyn_arch_ftrace { };
#define MCOUNT_ADDR ((long)_mcount) #define MCOUNT_ADDR ((long)_mcount)
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
#define MCOUNT_INSN_SIZE 24 #define MCOUNT_OFFSET_RET 18
#define MCOUNT_OFFSET 14 #define MCOUNT_INSN_SIZE 24
#define MCOUNT_OFFSET 14
#else #else
#define MCOUNT_INSN_SIZE 30 #define MCOUNT_OFFSET_RET 26
#define MCOUNT_OFFSET 8 #define MCOUNT_INSN_SIZE 30
#define MCOUNT_OFFSET 8
#endif #endif
static inline unsigned long ftrace_call_adjust(unsigned long addr) static inline unsigned long ftrace_call_adjust(unsigned long addr)

View file

@ -3,11 +3,8 @@
# #
ifdef CONFIG_FUNCTION_TRACER ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code # Don't trace early setup code and tracing code
CFLAGS_REMOVE_early.o = -pg CFLAGS_REMOVE_early.o = -pg
endif
ifdef CONFIG_DYNAMIC_FTRACE
CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_ftrace.o = -pg
endif endif
@ -46,6 +43,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
# Kexec part # Kexec part
S390_KEXEC_OBJS := machine_kexec.o crash.o S390_KEXEC_OBJS := machine_kexec.o crash.o

View file

@ -7,13 +7,17 @@
* *
*/ */
#include <linux/hardirq.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/ftrace.h> #include <linux/ftrace.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/types.h> #include <linux/types.h>
#include <asm/lowcore.h> #include <asm/lowcore.h>
#ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_disable_code(void); void ftrace_disable_code(void);
void ftrace_disable_return(void);
void ftrace_call_code(void); void ftrace_call_code(void);
void ftrace_nop_code(void); void ftrace_nop_code(void);
@ -28,6 +32,7 @@ asm(
" .word 0x0024\n" " .word 0x0024\n"
" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n" " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
" basr %r14,%r1\n" " basr %r14,%r1\n"
"ftrace_disable_return:\n"
" lg %r14,8(15)\n" " lg %r14,8(15)\n"
" lgr %r0,%r0\n" " lgr %r0,%r0\n"
"0:\n"); "0:\n");
@ -50,6 +55,7 @@ asm(
" j 0f\n" " j 0f\n"
" l %r1,"__stringify(__LC_FTRACE_FUNC)"\n" " l %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
" basr %r14,%r1\n" " basr %r14,%r1\n"
"ftrace_disable_return:\n"
" l %r14,4(%r15)\n" " l %r14,4(%r15)\n"
" j 0f\n" " j 0f\n"
" bcr 0,%r7\n" " bcr 0,%r7\n"
@ -130,3 +136,69 @@ int __init ftrace_dyn_arch_init(void *data)
*(unsigned long *)data = 0; *(unsigned long *)data = 0;
return 0; return 0;
} }
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
/*
* Patch the kernel code at ftrace_graph_caller location:
* The instruction there is branch relative on condition. The condition mask
* is either all ones (always branch aka disable ftrace_graph_caller) or all
* zeroes (nop aka enable ftrace_graph_caller).
* Instruction format for brc is a7m4xxxx where m is the condition mask.
*/
/*
 * Enable the call into the graph tracer: rewrite the branch-relative-on-
 * condition instruction at ftrace_graph_caller so its condition mask is
 * all zeroes ("brc 0,..."), i.e. a never-taken branch (nop).  Execution
 * then falls through into the ftrace_graph_caller code.
 *
 * Returns 0 on success, or the error from probe_kernel_write().
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	/* 0xa704 = brc 0,<disp>: condition mask 0 -> branch never taken. */
	unsigned short opcode = 0xa704;
	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}
/*
 * Disable the call into the graph tracer: rewrite the branch-relative-on-
 * condition instruction at ftrace_graph_caller so its condition mask is
 * all ones ("brc 15,..."), i.e. an unconditional branch that jumps over
 * the ftrace_graph_caller code.
 *
 * Returns 0 on success, or the error from probe_kernel_write().
 */
int ftrace_disable_ftrace_graph_caller(void)
{
	/* 0xa7f4 = brc 15,<disp>: condition mask 15 -> branch always taken. */
	unsigned short opcode = 0xa7f4;
	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}
/*
 * Convert the return address seen inside the mcount code back to the
 * address of the mcount call site.  With DYNAMIC_FTRACE the offset is
 * computed from the asm labels in ftrace_disable_code: the distance
 * between ftrace_disable_code and ftrace_disable_return is exactly the
 * number of bytes between the call site and the saved return address.
 * (Label-difference arithmetic on the declared "functions" is a GCC
 * extension; the symbols are asm labels, not real C functions.)
 */
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - (ftrace_disable_return - ftrace_disable_code);
}
#else /* CONFIG_DYNAMIC_FTRACE */
/*
 * Convert the return address seen inside the mcount code back to the
 * address of the mcount call site.  Without DYNAMIC_FTRACE the offset is
 * the constant MCOUNT_OFFSET_RET (18 on 64-bit, 26 on 31-bit, as defined
 * in asm/ftrace.h by this same change).
 */
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - MCOUNT_OFFSET_RET;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
* Hook the return address and push it in the stack of return addresses
* in current thread info.
*/
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 *
 * Called from the mcount/ftrace_caller assembler code with @ip being the
 * address inside the traced function (adjusted below back to the call
 * site) and @parent the saved return address.  Returns the address the
 * assembler code should store back as return address: either the original
 * @parent (tracing declined) or return_to_handler (trace recorded).
 */
unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
{
	struct ftrace_graph_ent trace;

	/* Nmi's are currently unsupported. */
	if (unlikely(in_nmi()))
		goto out;
	/* Graph tracing paused for this task (e.g. inside the tracer). */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	/* Ret-stack full: don't redirect, keep the original return path. */
	if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
		goto out;
	/* Map the in-mcount address back to the call site, mask PSW bits. */
	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		/* Entry rejected: undo the push done above. */
		current->curr_ret_stack--;
		goto out;
	}
	/* Redirect the return through the graph tracer's exit handler. */
	parent = (unsigned long)return_to_handler;
out:
	return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

View file

@ -34,6 +34,18 @@ ftrace_caller:
larl %r14,ftrace_dyn_func larl %r14,ftrace_dyn_func
lg %r14,0(%r14) lg %r14,0(%r14)
basr %r14,%r14 basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_caller
ftrace_graph_caller:
# This unconditional branch gets runtime patched. Change only if
# you know what you are doing. See ftrace_enable_ftrace_graph_caller().
j 0f
lg %r2,272(%r15)
lg %r3,168(%r15)
brasl %r14,prepare_ftrace_return
stg %r2,168(%r15)
0:
#endif
aghi %r15,160 aghi %r15,160
lmg %r2,%r5,32(%r15) lmg %r2,%r5,32(%r15)
lg %r14,112(%r15) lg %r14,112(%r15)
@ -62,6 +74,12 @@ _mcount:
larl %r14,ftrace_trace_function larl %r14,ftrace_trace_function
lg %r14,0(%r14) lg %r14,0(%r14)
basr %r14,%r14 basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
lg %r2,272(%r15)
lg %r3,168(%r15)
brasl %r14,prepare_ftrace_return
stg %r2,168(%r15)
#endif
aghi %r15,160 aghi %r15,160
lmg %r2,%r5,32(%r15) lmg %r2,%r5,32(%r15)
lg %r14,112(%r15) lg %r14,112(%r15)
@ -69,6 +87,22 @@ _mcount:
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl return_to_handler
return_to_handler:
stmg %r2,%r5,32(%r15)
lgr %r1,%r15
aghi %r15,-160
stg %r1,__SF_BACKCHAIN(%r15)
brasl %r14,ftrace_return_to_handler
aghi %r15,160
lgr %r14,%r2
lmg %r2,%r5,32(%r15)
br %r14
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#else /* CONFIG_64BIT */ #else /* CONFIG_64BIT */
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
@ -96,6 +130,21 @@ ftrace_caller:
l %r14,0b-0b(%r1) l %r14,0b-0b(%r1)
l %r14,0(%r14) l %r14,0(%r14)
basr %r14,%r14 basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_caller
ftrace_graph_caller:
# This unconditional branch gets runtime patched. Change only if
# you know what you are doing. See ftrace_enable_ftrace_graph_caller().
j 1f
bras %r1,0f
.long prepare_ftrace_return
0: l %r2,152(%r15)
l %r4,0(%r1)
l %r3,100(%r15)
basr %r14,%r4
st %r2,100(%r15)
1:
#endif
ahi %r15,96 ahi %r15,96
l %r14,56(%r15) l %r14,56(%r15)
3: lm %r2,%r5,16(%r15) 3: lm %r2,%r5,16(%r15)
@ -128,10 +177,40 @@ _mcount:
l %r14,0b-0b(%r1) l %r14,0b-0b(%r1)
l %r14,0(%r14) l %r14,0(%r14)
basr %r14,%r14 basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
bras %r1,0f
.long prepare_ftrace_return
0: l %r2,152(%r15)
l %r4,0(%r1)
l %r3,100(%r15)
basr %r14,%r4
st %r2,100(%r15)
#endif
ahi %r15,96 ahi %r15,96
l %r14,56(%r15) l %r14,56(%r15)
3: lm %r2,%r5,16(%r15) 3: lm %r2,%r5,16(%r15)
br %r14 br %r14
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl return_to_handler
return_to_handler:
stm %r2,%r5,16(%r15)
st %r14,56(%r15)
lr %r0,%r15
ahi %r15,-96
st %r0,__SF_BACKCHAIN(%r15)
bras %r1,0f
.long ftrace_return_to_handler
0: l %r2,0b-0b(%r1)
basr %r14,%r2
lr %r14,%r2
ahi %r15,96
lm %r2,%r5,16(%r15)
br %r14
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */

View file

@ -10,6 +10,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
@ -112,7 +113,7 @@ int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
return 0; return 0;
} }
void do_extint(struct pt_regs *regs, unsigned short code) void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
{ {
ext_int_info_t *p; ext_int_info_t *p;
int index; int index;

View file

@ -70,7 +70,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
/* /*
* Scheduler clock - returns current time in nanosec units. * Scheduler clock - returns current time in nanosec units.
*/ */
unsigned long long sched_clock(void) unsigned long long notrace sched_clock(void)
{ {
return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9; return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9;
} }

View file

@ -34,6 +34,7 @@ SECTIONS
SCHED_TEXT SCHED_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
IRQENTRY_TEXT
*(.fixup) *(.fixup)
*(.gnu.warning) *(.gnu.warning)
} :text = 0x0700 } :text = 0x0700

View file

@ -12,6 +12,7 @@
#define KMSG_COMPONENT "cio" #define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/ftrace.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/slab.h> #include <linux/slab.h>
@ -626,8 +627,7 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
* handlers). * handlers).
* *
*/ */
void void __irq_entry do_IRQ(struct pt_regs *regs)
do_IRQ (struct pt_regs *regs)
{ {
struct tpi_info *tpi_info; struct tpi_info *tpi_info;
struct subchannel *sch; struct subchannel *sch;