#ifndef _LINUX_FTRACE_EVENT_H
#define _LINUX_FTRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

struct trace_array;
struct tracer;
struct dentry;

struct trace_print_flags {
	unsigned long		mask;
	const char		*name;
};

const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
				   unsigned long flags,
				   const struct trace_print_flags *flag_array);

const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				     const struct trace_print_flags *symbol_array);

const char *ftrace_print_hex_seq(struct trace_seq *p,
				 const unsigned char *buf, int len);
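
/*
 * These helpers back the __print_flags() and __print_symbolic() macros
 * used in TP_printk(). A rough sketch of what __print_flags() expands
 * to (the flag values shown are only illustrative; the real macro lives
 * in include/trace/ftrace.h):
 *
 *	static const struct trace_print_flags __flags[] = {
 *		{ GFP_KERNEL,	"GFP_KERNEL" },
 *		{ GFP_ATOMIC,	"GFP_ATOMIC" },
 *		{ -1, NULL }
 *	};
 *	ftrace_print_flags_seq(p, "|", flags, __flags);
 *
 * The { -1, NULL } entry terminates the array.
 */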

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

#define FTRACE_MAX_EVENT						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)

/*
 * Trace iterator - used by printout routines that present trace
 * results to users; such routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	cpumask_var_t		started;
};

struct trace_event;

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags,
					      struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node		node;
	struct list_head		list;
	int				type;
	struct trace_event_functions	*funcs;
};

extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
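
/*
 * A minimal sketch of wiring up an output formatter (all "my_*" names
 * are hypothetical, not part of this header): fill in a
 * trace_event_functions, point a trace_event at it, and register it.
 * register_ftrace_event() returns the assigned type, or 0 on failure.
 *
 *	static enum print_line_t my_trace_output(struct trace_iterator *iter,
 *						 int flags,
 *						 struct trace_event *event)
 *	{
 *		if (!trace_seq_printf(&iter->seq, "my event\n"))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 *
 *	static struct trace_event_functions my_funcs = {
 *		.trace	= my_trace_output,
 *	};
 *
 *	static struct trace_event my_event = {
 *		.funcs	= &my_funcs,
 *	};
 *
 *	ret = register_ftrace_event(&my_event);	// 0 means failure
 */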

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
				  int type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
				       struct ring_buffer_event *event,
				       unsigned long flags, int pc);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event);

void tracing_record_cmdline(struct task_struct *tsk);
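
/*
 * The usual write path pairs a reserve with a commit (or a filter
 * discard). A sketch, modeled on the generated TRACE_EVENT probes
 * (struct my_entry and MY_TYPE are illustrative):
 *
 *	struct ring_buffer *buffer;
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = trace_current_buffer_lock_reserve(&buffer, MY_TYPE,
 *						  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->field = value;
 *	if (!filter_current_check_discard(buffer, call, entry, event))
 *		trace_current_buffer_unlock_commit(buffer, event, flags, pc);
 */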

struct event_filter;

enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
};

struct ftrace_event_call;

struct ftrace_event_class {
	char			*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct ftrace_event_call *event,
				       enum trace_reg type);
	int			(*define_fields)(struct ftrace_event_call *);
	struct list_head	*(*get_fields)(struct ftrace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct ftrace_event_call *);
};

extern int ftrace_event_reg(struct ftrace_event_call *event,
			    enum trace_reg type);
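
/*
 * ftrace_event_reg() serves as the common ->reg() implementation; a
 * class may also supply its own. A sketch of the usual shape (the
 * my_attach()/my_detach() helpers are hypothetical):
 *
 *	static int my_event_reg(struct ftrace_event_call *event,
 *				enum trace_reg type)
 *	{
 *		switch (type) {
 *		case TRACE_REG_REGISTER:
 *			return my_attach(event);
 *		case TRACE_REG_UNREGISTER:
 *			my_detach(event);
 *			return 0;
 *		case TRACE_REG_PERF_REGISTER:
 *		case TRACE_REG_PERF_UNREGISTER:
 *			return 0;
 *		}
 *		return 0;
 *	}
 */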

enum {
	TRACE_EVENT_FL_ENABLED_BIT,
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_RECORDED_CMD_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
};

enum {
	TRACE_EVENT_FL_ENABLED		= (1 << TRACE_EVENT_FL_ENABLED_BIT),
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_RECORDED_CMD	= (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
};

struct ftrace_event_call {
	struct list_head	list;
	struct ftrace_event_class *class;
	char			*name;
	struct dentry		*dir;
	struct trace_event	event;
	const char		*print_fmt;
	struct event_filter	*filter;
	void			*mod;
	void			*data;

	/*
	 * 32 bit flags:
	 *   bit 0:		enabled
	 *   bit 1:		filter_active
	 *   bit 2:		enabled cmd record
	 *
	 * Changes to flags must hold the event_mutex.
	 *
	 * Note: Reads of flags do not hold the event_mutex since
	 * they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such.
	 */
	unsigned int		flags;

#ifdef CONFIG_PERF_EVENTS
	int			perf_refcount;
	struct hlist_head __percpu *perf_events;
#endif
};

#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags = value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);
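
/*
 * Typically reached through the TRACE_EVENT_FLAGS() wrapper, e.g. to
 * let any task use an event from perf regardless of privileges (the
 * event name here is illustrative):
 *
 *	TRACE_EVENT_FLAGS(my_event, TRACE_EVENT_FL_CAP_ANY)
 *
 * which defines an early initcall that sets event_my_event.flags.
 */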

#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */

extern void destroy_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct event_filter *filter, void *rec);
extern int filter_current_check_discard(struct ring_buffer *buffer,
					struct ftrace_event_call *call,
					void *rec,
					struct ring_buffer_event *event);

enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
};

#define EVENT_STORAGE_SIZE 128
extern struct mutex event_storage_mutex;
extern char event_storage[EVENT_STORAGE_SIZE];

extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
extern void trace_remove_event_call(struct ftrace_event_call *call);

#define is_signed_type(type)	(((type)(-1)) < 0)

int trace_set_clr_event(const char *system, const char *event, int set);
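
/*
 * A sketch of a ->define_fields() callback (struct my_entry and its
 * field are illustrative): each call describes one field of the binary
 * record so the filter code can locate and compare it.
 *
 *	static int my_define_fields(struct ftrace_event_call *call)
 *	{
 *		return trace_define_field(call, "pid_t", "pid",
 *					  offsetof(struct my_entry, pid),
 *					  sizeof(pid_t), 1, FILTER_OTHER);
 *	}
 */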

/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to initialize the static variable trace_printk_fmt from fmt
 * when fmt is not a constant, even though the outer if statement would
 * optimize that branch away.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
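
/*
 * Used like trace_printk(), with an explicit instruction pointer to
 * attribute the message to, e.g. (illustrative):
 *
 *	event_trace_printk(_THIS_IP_, "count=%d\n", count);
 */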

#ifdef CONFIG_PERF_EVENTS

struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
				    struct pt_regs *regs, int *rctxp);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		      u64 count, struct pt_regs *regs, void *head)
{
	perf_tp_event(addr, count, raw_data, size, regs, head, rctx);
}
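
/*
 * A hedged sketch of a perf probe body; in real generated probes the
 * entry layout, size, and per-cpu head come from the event's class
 * (struct my_entry is illustrative):
 *
 *	int rctx;
 *	struct hlist_head *head;
 *	struct my_entry *entry;
 *
 *	entry = perf_trace_buf_prepare(size, event_call->event.type,
 *				       regs, &rctx);
 *	if (!entry)
 *		return;
 *	entry->field = value;
 *	head = this_cpu_ptr(event_call->perf_events);
 *	perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head);
 */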
#endif /* CONFIG_PERF_EVENTS */

#endif /* _LINUX_FTRACE_EVENT_H */