2018-08-16 15:23:53 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2009-08-13 20:35:11 +00:00
|
|
|
/*
|
2009-11-04 00:12:47 +00:00
|
|
|
* Kprobes-based tracing events
|
2009-08-13 20:35:11 +00:00
|
|
|
*
|
|
|
|
* Created by Masami Hiramatsu <mhiramat@redhat.com>
|
|
|
|
*
|
|
|
|
*/
|
2017-02-07 11:21:28 +00:00
|
|
|
#define pr_fmt(fmt) "trace_kprobe: " fmt
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2019-10-11 21:22:50 +00:00
|
|
|
#include <linux/security.h>
|
2009-08-13 20:35:11 +00:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/uaccess.h>
|
2017-02-04 00:27:20 +00:00
|
|
|
#include <linux/rculist.h>
|
2018-01-12 17:55:03 +00:00
|
|
|
#include <linux/error-injection.h>
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2019-05-22 08:32:35 +00:00
|
|
|
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
|
|
|
|
|
2018-11-05 09:02:36 +00:00
|
|
|
#include "trace_dynevent.h"
|
2018-07-30 10:20:42 +00:00
|
|
|
#include "trace_kprobe_selftest.h"
|
2012-04-09 09:11:44 +00:00
|
|
|
#include "trace_probe.h"
|
2018-04-25 12:18:03 +00:00
|
|
|
#include "trace_probe_tmpl.h"
|
2011-02-04 12:52:05 +00:00
|
|
|
|
2012-04-09 09:11:44 +00:00
|
|
|
#define KPROBE_EVENT_SYSTEM "kprobes"
|
2017-04-03 10:36:22 +00:00
|
|
|
#define KRETPROBE_MAXACTIVE_MAX 4096
|
2019-05-22 08:32:35 +00:00
|
|
|
|
|
|
|
/* Kprobe early definition from command line */
static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;

/*
 * Parse the "kprobe_event=" boot parameter: stash the raw command string
 * for later processing and disable the tracing selftest, since boot-time
 * kprobe events can interfere with it.
 * NOTE(review): strlcpy is deprecated in recent kernels in favor of
 * strscpy — worth switching when the tree supports it; confirm.
 */
static int __init set_kprobe_boot_events(char *str)
{
	strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
	disable_tracing_selftest("running kprobe events");

	/* Always succeed: a malformed string is diagnosed when parsed later */
	return 0;
}
__setup("kprobe_event=", set_kprobe_boot_events);
|
2010-07-05 18:54:45 +00:00
|
|
|
|
2021-02-01 19:48:11 +00:00
|
|
|
static int trace_kprobe_create(const char *raw_command);
|
2018-11-05 09:02:36 +00:00
|
|
|
static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
|
|
|
|
static int trace_kprobe_release(struct dyn_event *ev);
|
|
|
|
static bool trace_kprobe_is_busy(struct dyn_event *ev);
|
|
|
|
static bool trace_kprobe_match(const char *system, const char *event,
|
2019-06-19 15:07:39 +00:00
|
|
|
int argc, const char **argv, struct dyn_event *ev);
|
2018-11-05 09:02:36 +00:00
|
|
|
|
|
|
|
/* dyn_event operations for kprobe events; also used to identify our events */
static struct dyn_event_operations trace_kprobe_ops = {
	.create = trace_kprobe_create,
	.show = trace_kprobe_show,
	.is_busy = trace_kprobe_is_busy,
	.free = trace_kprobe_release,
	.match = trace_kprobe_match,
};
|
|
|
|
|
2019-03-12 08:58:32 +00:00
|
|
|
/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct dyn_event	devent;		/* dynamic event list linkage */
	struct kretprobe	rp;		/* Use rp.kp for kprobe use */
	unsigned long __percpu	*nhit;		/* per-cpu hit counter */
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;		/* common probe data (args, event) */
};
|
|
|
|
|
2018-11-05 09:02:36 +00:00
|
|
|
/* Return true if the dyn_event was created by this file's operations */
static bool is_trace_kprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_kprobe_ops;
}
|
|
|
|
|
|
|
|
/* Convert a dyn_event back to its enclosing trace_kprobe */
static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_kprobe, devent);
}
|
|
|
|
|
|
|
|
/**
 * for_each_trace_kprobe - iterate over the trace_kprobe list
 * @pos: the struct trace_kprobe * for each entry
 * @dpos: the struct dyn_event * to use as a loop cursor
 *
 * Walks all dynamic events but only visits entries owned by
 * trace_kprobe_ops (others are skipped by the is_trace_kprobe() check).
 */
#define for_each_trace_kprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
|
|
|
|
|
2013-07-03 04:50:51 +00:00
|
|
|
/* Allocation size for a trace_kprobe with n trailing probe_arg entries */
#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
|
2009-08-13 20:35:18 +00:00
|
|
|
|
2014-04-17 08:18:28 +00:00
|
|
|
/* A kretprobe-type event has a return handler installed; plain kprobes don't */
static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}
|
|
|
|
|
2014-04-17 08:18:28 +00:00
|
|
|
/* Probed symbol name, or "unknown" for address-based probes (symbol == NULL) */
static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}
|
|
|
|
|
2014-04-17 08:18:28 +00:00
|
|
|
/* Offset from the probed symbol at which the kprobe is placed */
static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}
|
|
|
|
|
2014-04-17 08:18:28 +00:00
|
|
|
/* True if the underlying kprobe target has gone away (e.g. module unloaded) */
static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}
|
|
|
|
|
2014-04-17 08:18:28 +00:00
|
|
|
/*
 * True if this probe targets a symbol inside @mod, i.e. its symbol string
 * is "MODNAME:SYMBOL" with MODNAME matching the module's name.
 */
static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						       struct module *mod)
{
	int len = strlen(module_name(mod));
	const char *name = trace_kprobe_symbol(tk);

	/* Match "MODNAME:" prefix exactly (':' must follow the module name) */
	return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
}
|
|
|
|
|
2018-08-28 16:18:15 +00:00
|
|
|
/*
 * For a "MODNAME:SYMBOL" probe, check whether MODNAME is currently loaded.
 * Probes without a module prefix (or without a symbol at all) are treated
 * as "exists" / "doesn't matter" respectively.
 */
static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
{
	char *p;
	bool ret;

	if (!tk->symbol)
		return false;
	p = strchr(tk->symbol, ':');
	if (!p)
		return true;
	/*
	 * Temporarily terminate the string at ':' so find_module() sees just
	 * the module name; the ':' is restored below.
	 */
	*p = '\0';
	/* find_module() requires RCU-sched read-side protection */
	rcu_read_lock_sched();
	ret = !!find_module(tk->symbol);
	rcu_read_unlock_sched();
	*p = ':';

	return ret;
}
|
|
|
|
|
2018-11-05 09:02:36 +00:00
|
|
|
/* dyn_event callback: an enabled probe is "busy" and must not be removed */
static bool trace_kprobe_is_busy(struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);

	return trace_probe_is_enabled(&tk->tp);
}
|
|
|
|
|
2019-06-19 15:08:08 +00:00
|
|
|
/*
 * Match the probe-location part of a removal command against this probe.
 * argv[0] must equal the probe's location spec (address, SYM+offs, or SYM);
 * remaining args are matched against the probe's fetch args.
 * An empty argv matches anything.
 */
static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];

	if (!argc)
		return true;

	/* Rebuild the location string in the same form the user would write */
	if (!tk->symbol)
		snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		snprintf(buf, sizeof(buf), "%s+%u",
			 trace_kprobe_symbol(tk), tk->rp.kp.offset);
	else
		snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
	if (strcmp(buf, argv[0]))
		return false;
	argc--; argv++;

	return trace_probe_match_command_args(&tk->tp, argc, argv);
}
|
|
|
|
|
2018-11-05 09:02:36 +00:00
|
|
|
/*
 * dyn_event callback: match by event name, optional group name, and the
 * optional probe-location/args command tail.
 */
static bool trace_kprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);

	return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
	    (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
	    trace_kprobe_match_command_head(tk, argc, argv);
}
|
|
|
|
|
2016-12-09 14:19:37 +00:00
|
|
|
/* Sum the per-cpu hit counters into a single total */
static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}
|
|
|
|
|
2019-05-31 15:18:07 +00:00
|
|
|
/*
 * A registered k*probe is linked on either the kprobe list or hash; both
 * are re-initialized (emptied) on unregister, so "both empty" == unregistered.
 */
static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
{
	return !(list_empty(&tk->rp.kp.list) &&
		 hlist_unhashed(&tk->rp.kp.hlist));
}
|
|
|
|
|
2018-08-02 07:50:48 +00:00
|
|
|
/* Return 0 if it fails to find the symbol address */
static nokprobe_inline
unsigned long trace_kprobe_address(struct trace_kprobe *tk)
{
	unsigned long addr;

	if (tk->symbol) {
		/* Symbol-based probe: resolve via kallsyms, then add offset */
		addr = (unsigned long)
			kallsyms_lookup_name(trace_kprobe_symbol(tk));
		/* Only add the offset when lookup succeeded; else report 0 */
		if (addr)
			addr += tk->rp.kp.offset;
	} else {
		/* Address-based probe: the address is already absolute */
		addr = (unsigned long)tk->rp.kp.addr;
	}
	return addr;
}
|
|
|
|
|
2019-06-19 15:07:20 +00:00
|
|
|
/*
 * Get the primary trace_kprobe backing an event call, or NULL (with a
 * one-shot warning) if the call has no probe attached.
 */
static nokprobe_inline struct trace_kprobe *
trace_kprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_kprobe, tp);
}
|
|
|
|
|
2018-01-12 17:54:04 +00:00
|
|
|
/*
 * True if the event's probe sits exactly on a function entry.
 * Passes either the raw address or the symbol+offset pair to
 * kprobe_on_func_entry(), whichever form the probe was created with.
 */
bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);

	return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
}
|
|
|
|
|
2018-01-12 17:54:04 +00:00
|
|
|
/* True if the probed address is on the kernel's error-injection whitelist */
bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);

	return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
	       false;
}
|
|
|
|
|
2013-07-03 04:50:51 +00:00
|
|
|
static int register_kprobe_event(struct trace_kprobe *tk);
|
|
|
|
static int unregister_kprobe_event(struct trace_kprobe *tk);
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2009-09-14 20:49:20 +00:00
|
|
|
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
|
|
|
|
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
|
|
|
|
struct pt_regs *regs);
|
|
|
|
|
2019-05-31 15:17:06 +00:00
|
|
|
/*
 * Release a trace_kprobe and everything it owns (probe data, symbol copy,
 * per-cpu counters). Safe to call with NULL or a partially built probe,
 * which is why alloc_trace_kprobe() uses it on its error path.
 */
static void free_trace_kprobe(struct trace_kprobe *tk)
{
	if (tk) {
		trace_probe_cleanup(&tk->tp);
		kfree(tk->symbol);
		free_percpu(tk->nhit);
		kfree(tk);
	}
}
|
|
|
|
|
2009-09-11 03:31:21 +00:00
|
|
|
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 *
 * Exactly one of @addr / @symbol identifies the probe location; @offs is
 * only used with @symbol. @is_return selects kretprobe vs kprobe dispatch,
 * and @maxactive bounds concurrent kretprobe instances.
 * Returns the new probe or ERR_PTR on failure (never NULL).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int maxactive,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	/* Trailing space for nargs probe_arg entries (see SIZEOF_TRACE_KPROBE) */
	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		/* Own a private copy; kp.symbol_name just aliases it */
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;
	/*
	 * Start with both list heads empty so trace_kprobe_is_registered()
	 * correctly reports "not registered".
	 */
	INIT_HLIST_NODE(&tk->rp.kp.hlist);
	INIT_LIST_HEAD(&tk->rp.kp.list);

	ret = trace_probe_init(&tk->tp, event, group, false);
	if (ret < 0)
		goto error;

	dyn_event_init(&tk->devent, &trace_kprobe_ops);
	return tk;
error:
	/* free_trace_kprobe() tolerates the partially initialized probe */
	free_trace_kprobe(tk);
	return ERR_PTR(ret);
}
|
|
|
|
|
2013-07-03 04:50:51 +00:00
|
|
|
/* Look up a probe by exact event and group name; NULL if not found */
static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct dyn_event *pos;
	struct trace_kprobe *tk;

	for_each_trace_kprobe(tk, pos)
		if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tk->tp), group) == 0)
			return tk;
	return NULL;
}
|
|
|
|
|
2018-07-26 16:07:32 +00:00
|
|
|
/*
 * Arm the underlying k*probe for one trace_kprobe.
 * Probes that are unregistered or whose target has gone are silently
 * skipped (returns 0), so callers can loop over sibling probes.
 */
static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
{
	int ret = 0;

	if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}

	return ret;
}
|
|
|
|
|
2019-06-19 15:07:20 +00:00
|
|
|
/*
 * Disarm every registered k*probe attached to this probe event
 * (an event may carry multiple sibling probes via trace_probe_append()).
 */
static void __disable_trace_kprobe(struct trace_probe *tp)
{
	struct trace_probe *pos;
	struct trace_kprobe *tk;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tk = container_of(pos, struct trace_kprobe, tp);
		if (!trace_kprobe_is_registered(tk))
			continue;
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
	}
}
|
|
|
|
|
2013-05-09 05:44:49 +00:00
|
|
|
/*
 * Enable trace_probe
 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 *
 * The enabled state is sampled *before* attaching the file/flag so that a
 * probe which was already enabled by another consumer is not armed twice.
 * On failure, everything done here (including partially armed siblings)
 * is rolled back.
 */
static int enable_trace_kprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *pos, *tp;
	struct trace_kprobe *tk;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	if (enabled)
		return 0;

	/* Arm every sibling probe on this event */
	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tk = container_of(pos, struct trace_kprobe, tp);
		if (trace_kprobe_has_gone(tk))
			continue;
		ret = __enable_trace_kprobe(tk);
		if (ret)
			break;
		enabled = true;
	}

	if (ret) {
		/* Failed to enable one of them. Roll back all */
		if (enabled)
			__disable_trace_kprobe(tp);
		if (file)
			trace_probe_remove_file(tp, file);
		else
			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
	}

	return ret;
}
|
|
|
|
|
2013-05-09 05:44:49 +00:00
|
|
|
/*
 * Disable trace_probe
 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
 *
 * The k*probes themselves are only disarmed when no consumer (trace file
 * or perf) remains enabled on the event.
 */
static int disable_trace_kprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		/* Other files still use this event; just detach ours below */
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	if (!trace_probe_is_enabled(tp))
		__disable_trace_kprobe(tp);

 out:
	if (file)
		/*
		 * Synchronization is done in below function. For perf event,
		 * file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to ensure synchronize
		 * event. We don't need to care about it.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}
|
|
|
|
|
2021-01-08 04:19:38 +00:00
|
|
|
#if defined(CONFIG_DYNAMIC_FTRACE) && \
	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
/*
 * True if @addr falls inside a function that ftrace does NOT instrument
 * (a "notrace" function): the function's whole range has no ftrace
 * location. Returns false when the address can't be resolved at all.
 */
static bool __within_notrace_func(unsigned long addr)
{
	unsigned long offset, size;

	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
		return false;

	/* Get the entry address of the target function */
	addr -= offset;

	/*
	 * Since ftrace_location_range() does inclusive range check, we need
	 * to subtract 1 byte from the end address.
	 */
	return !ftrace_location_range(addr, addr + size - 1);
}

/*
 * Check whether this probe targets a notrace function. Compiler-generated
 * suffixed symbols (e.g. "func.isra.0") are re-checked against the base
 * symbol, since the suffixed clone may lack an ftrace site even though the
 * base function is traceable.
 */
static bool within_notrace_func(struct trace_kprobe *tk)
{
	unsigned long addr = trace_kprobe_address(tk);
	char symname[KSYM_NAME_LEN], *p;

	if (!__within_notrace_func(addr))
		return false;

	/* Check if the address is on a suffixed-symbol */
	if (!lookup_symbol_name(addr, symname)) {
		p = strchr(symname, '.');
		if (!p)
			return true;
		*p = '\0';
		addr = (unsigned long)kprobe_lookup_name(symname, 0);
		if (addr)
			return __within_notrace_func(addr);
	}

	return true;
}
#else
/* Probing notrace functions is allowed in this configuration */
#define within_notrace_func(tk)	(false)
#endif
|
|
|
|
|
2011-06-27 07:26:56 +00:00
|
|
|
/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	/* Kernel lockdown may forbid installing kprobes entirely */
	ret = security_locked_down(LOCKDOWN_KPROBES);
	if (ret)
		return ret;

	if (trace_kprobe_is_registered(tk))
		return -EINVAL;

	if (within_notrace_func(tk)) {
		pr_warn("Could not probe notrace function %s\n",
			trace_kprobe_symbol(tk));
		return -EINVAL;
	}

	/* Resolve/refresh every fetch argument before arming the probe */
	for (i = 0; i < tk->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tk->tp.args[i]);
		if (ret)
			return ret;
	}

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	return ret;
}
|
|
|
|
|
|
|
|
/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_kprobe_is_registered(tk)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		/* Cleanup kprobe for reuse and mark it unregistered */
		INIT_HLIST_NODE(&tk->rp.kp.hlist);
		INIT_LIST_HEAD(&tk->rp.kp.list);
		/* Symbol-based probes must re-resolve the address next time */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}
|
|
|
|
|
2018-11-05 09:02:36 +00:00
|
|
|
/* Unregister a trace_probe and probe_event */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* If other probes are on the event, just unregister kprobe */
	if (trace_probe_has_sibling(&tk->tp))
		goto unreg;

	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

unreg:
	__unregister_trace_kprobe(tk);
	dyn_event_remove(&tk->devent);
	trace_probe_unlink(&tk->tp);

	return 0;
}
|
|
|
|
|
2019-09-18 08:55:46 +00:00
|
|
|
/*
 * True if any probe already on @orig's event hits the same symbol+offset
 * as @comp with the same fetch-arg source expressions — i.e. appending
 * @comp would create an exact duplicate.
 */
static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
					 struct trace_kprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct trace_probe *pos;
	int i;

	list_for_each_entry(pos, &tpe->probes, list) {
		orig = container_of(pos, struct trace_kprobe, tp);
		if (strcmp(trace_kprobe_symbol(orig),
			   trace_kprobe_symbol(comp)) ||
		    trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		/* All args matched too: duplicate found */
		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}
|
|
|
|
|
2019-06-19 15:07:49 +00:00
|
|
|
/*
 * Append @tk as a sibling probe on @to's existing event.
 * Rejects argument-type mismatches and exact duplicates (-EEXIST with a
 * probe-log error), then links the probe and registers the k*probe.
 * A missing target module is tolerated: the probe stays linked and will
 * be armed by the module notifier when the module loads.
 */
static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
	if (ret) {
		/* Note that argument starts index = 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_kprobe_has_same_kprobe(to, tk)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tk->tp, &to->tp);
	if (ret)
		return ret;

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
		ret = 0;
	}

	if (ret)
		trace_probe_unlink(&tk->tp);
	else
		dyn_event_add(&tk->devent);

	return ret;
}
|
|
|
|
|
2009-08-13 20:35:11 +00:00
|
|
|
/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	/* event_mutex serializes against other event creation/removal */
	mutex_lock(&event_mutex);

	/* Same name+group already exists: append (same type) or reject */
	old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
				   trace_probe_group_name(&tk->tp));
	if (old_tk) {
		if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_kprobe(tk, old_tk);
		}
		goto end;
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		/* Deferred: the module notifier will register it on load */
		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
		ret = 0;
	}

	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		dyn_event_add(&tk->devent);

end:
	mutex_unlock(&event_mutex);
	return ret;
}
|
|
|
|
|
2011-06-27 07:26:56 +00:00
|
|
|
/* Module notifier call back, checking event on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct dyn_event *pos;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&event_mutex);
	for_each_trace_kprobe(tk, pos) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_probe_name(&tk->tp),
					module_name(mod), ret);
		}
	}
	mutex_unlock(&event_mutex);

	return NOTIFY_DONE;
}
|
|
|
|
|
2013-07-03 04:50:51 +00:00
|
|
|
/* Re-registers module-scoped probes when their target module loads */
static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};
|
|
|
|
|
2017-07-07 18:57:30 +00:00
|
|
|
/*
 * Convert certain expected symbols into '_' when generating event names.
 * Replaces ':' and '.' with '_' from the second character onward; the
 * first character is intentionally left untouched (matches the original
 * scan, which begins checking after the initial advance).
 */
static inline void sanitize_event_name(char *name)
{
	char *p;

	if (*name == '\0')
		return;
	for (p = name + 1; *p != '\0'; p++) {
		if (*p == ':' || *p == '.')
			*p = '_';
	}
}
|
|
|
|
|
2021-02-01 19:48:11 +00:00
|
|
|
/*
 * Parse one kprobe/kretprobe definition and register the resulting event.
 * Returns 0 on success, -ECANCELED when the command is not for this
 * subsystem (so other dyn_event parsers may try), or a negative errno.
 */
static int __trace_kprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 *    Or
	 *      p:[GRP/]EVENT] [MOD:]KSYM[+0]%return [FETCHARGS]
	 *
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm       : fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_kprobe *tk = NULL;
	int i, len, ret = 0;
	bool is_return = false;
	char *symbol = NULL, *tmp = NULL;
	const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
	int maxactive = 0;
	long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];
	unsigned int flags = TPARG_FL_KERNEL;

	/* First char selects probe vs. return probe; anything else is
	 * not our command. */
	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}
	if (argc < 2)
		return -ECANCELED;

	trace_probe_log_init("trace_kprobe", argc, argv);

	event = strchr(&argv[0][1], ':');
	if (event)
		event++;

	/* Optional MAXACTIVE digits directly after 'r' (kretprobe only) */
	if (isdigit(argv[0][1])) {
		if (!is_return) {
			trace_probe_log_err(1, MAXACT_NO_KPROBE);
			goto parse_error;
		}
		if (event)
			len = event - &argv[0][1] - 1;
		else
			len = strlen(&argv[0][1]);
		if (len > MAX_EVENT_NAME_LEN - 1) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		memcpy(buf, &argv[0][1], len);
		buf[len] = '\0';
		ret = kstrtouint(buf, 0, &maxactive);
		if (ret || !maxactive) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		/* kretprobes instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			trace_probe_log_err(1, MAXACT_TOO_BIG);
			goto parse_error;
		}
	}

	/* try to parse an address. if that fails, try to read the
	 * input as a symbol. */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		trace_probe_log_set_index(1);
		/* Check whether uprobe event specified */
		if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
			ret = -ECANCELED;
			goto error;
		}
		/* a symbol specified */
		symbol = kstrdup(argv[1], GFP_KERNEL);
		if (!symbol)
			/* NOTE(review): returns without trace_probe_log_clear();
			 * looks intentional upstream but verify no log state leaks. */
			return -ENOMEM;

		/* "SYM%return" is an alternate spelling of a kretprobe */
		tmp = strchr(symbol, '%');
		if (tmp) {
			if (!strcmp(tmp, "%return")) {
				*tmp = '\0';
				is_return = true;
			} else {
				trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
				goto parse_error;
			}
		}

		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret || offset < 0 || offset > UINT_MAX) {
			trace_probe_log_err(0, BAD_PROBE_ADDR);
			goto parse_error;
		}
		if (is_return)
			flags |= TPARG_FL_RETURN;
		ret = kprobe_on_func_entry(NULL, symbol, offset);
		if (ret == 0)
			flags |= TPARG_FL_FENTRY;
		/* Defer the ENOENT case until register kprobe */
		if (ret == -EINVAL && is_return) {
			trace_probe_log_err(0, BAD_RETPROBE);
			goto parse_error;
		}
	}

	trace_probe_log_set_index(0);
	if (event) {
		/* User supplied [GRP/]EVENT explicitly */
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	} else {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}

	/* setup a probe */
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
			       argc - 2, is_return);
	if (IS_ERR(tk)) {
		ret = PTR_ERR(tk);
		/* This must return -ENOMEM, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto out;	/* We know tk is not allocated */
	}
	argc -= 2; argv += 2;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
		kfree(tmp);
		if (ret)
			goto error;	/* This can be -ENOMEM */
	}

	ret = traceprobe_set_print_fmt(&tk->tp, is_return);
	if (ret < 0)
		goto error;

	ret = register_trace_kprobe(tk);
	if (ret) {
		trace_probe_log_set_index(1);
		if (ret == -EILSEQ)
			trace_probe_log_err(0, BAD_INSN_BNDRY);
		else if (ret == -ENOENT)
			trace_probe_log_err(0, BAD_PROBE_ADDR);
		else if (ret != -ENOMEM && ret != -EEXIST)
			trace_probe_log_err(0, FAIL_REG_PROBE);
		goto error;
	}

out:
	trace_probe_log_clear();
	kfree(symbol);
	return ret;

parse_error:
	ret = -EINVAL;
error:
	/* free_trace_kprobe() tolerates tk == NULL */
	free_trace_kprobe(tk);
	goto out;
}
|
|
|
|
|
2021-02-01 19:48:11 +00:00
|
|
|
static int trace_kprobe_create(const char *raw_command)
|
|
|
|
{
|
|
|
|
return trace_probe_create(raw_command, __trace_kprobe_create);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int create_or_delete_trace_kprobe(const char *raw_command)
|
2009-08-13 20:35:11 +00:00
|
|
|
{
|
2018-11-05 09:02:36 +00:00
|
|
|
int ret;
|
2011-10-04 10:44:38 +00:00
|
|
|
|
2021-02-01 19:48:11 +00:00
|
|
|
if (raw_command[0] == '-')
|
|
|
|
return dyn_event_release(raw_command, &trace_kprobe_ops);
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2021-02-01 19:48:11 +00:00
|
|
|
ret = trace_kprobe_create(raw_command);
|
2018-11-05 09:02:36 +00:00
|
|
|
return ret == -ECANCELED ? -EINVAL : ret;
|
2009-08-13 20:35:11 +00:00
|
|
|
}
|
|
|
|
|
2020-01-29 18:59:30 +00:00
|
|
|
static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
|
2020-01-29 18:59:29 +00:00
|
|
|
{
|
2021-02-01 19:48:11 +00:00
|
|
|
return create_or_delete_trace_kprobe(cmd->seq.buffer);
|
2020-01-29 18:59:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * kprobe_event_cmd_init - Initialize a kprobe event command object
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @buf: A pointer to the buffer used to build the command
 * @maxlen: The length of the buffer passed in @buf
 *
 * Initialize a kprobe event command object.  Use this before
 * calling any of the other kprobe_event functions.
 */
void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
			  trace_kprobe_run_command);
}
EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
|
|
|
|
|
|
|
|
/**
 * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @kretprobe: Is this a return probe?
 * @name: The name of the kprobe event
 * @loc: The location of the kprobe event
 * @args: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
 * adds a NULL to the end of the arg list.  If this function is used
 * directly, make sure the last arg in the variable arg list is NULL.
 *
 * Generate a kprobe event command to be executed by
 * kprobe_event_gen_cmd_end().  This function can be used to generate the
 * complete command or only the first part of it; in the latter case,
 * kprobe_event_add_fields() can be used to add more fields following this.
 *
 * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
 * function returns -EINVAL if @loc == NULL.
 *
 * Return: 0 if successful, error otherwise.
 */
int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
				 const char *name, const char *loc, ...)
{
	char buf[MAX_EVENT_NAME_LEN];
	struct dynevent_arg arg;
	va_list args;
	int ret;

	if (cmd->type != DYNEVENT_TYPE_KPROBE)
		return -EINVAL;

	if (!loc)
		return -EINVAL;

	/* Build the "p:kprobes/NAME" or "r:kprobes/NAME" prefix */
	if (kretprobe)
		snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
	else
		snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);

	ret = dynevent_str_add(cmd, buf);
	if (ret)
		return ret;

	/* Probe location comes right after the prefix */
	dynevent_arg_init(&arg, 0);
	arg.str = loc;
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	/* Append each field arg until the NULL sentinel */
	va_start(args, loc);
	for (;;) {
		const char *field;

		field = va_arg(args, const char *);
		if (!field)
			break;

		if (++cmd->n_fields > MAX_TRACE_ARGS) {
			ret = -EINVAL;
			break;
		}

		arg.str = field;
		ret = dynevent_arg_add(cmd, &arg, NULL);
		if (ret)
			break;
	}
	va_end(args);

	return ret;
}
EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
|
|
|
|
* @cmd: A pointer to the dynevent_cmd struct representing the new event
|
|
|
|
* @args: Variable number of arg (pairs), one pair for each field
|
|
|
|
*
|
|
|
|
* NOTE: Users normally won't want to call this function directly, but
|
|
|
|
* rather use the kprobe_event_add_fields() wrapper, which
|
|
|
|
* automatically adds a NULL to the end of the arg list. If this
|
|
|
|
* function is used directly, make sure the last arg in the variable
|
|
|
|
* arg list is NULL.
|
|
|
|
*
|
|
|
|
* Add probe fields to an existing kprobe command using a variable
|
|
|
|
* list of args. Fields are added in the same order they're listed.
|
|
|
|
*
|
|
|
|
* Return: 0 if successful, error otherwise.
|
|
|
|
*/
|
|
|
|
int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
|
|
|
|
{
|
|
|
|
struct dynevent_arg arg;
|
|
|
|
va_list args;
|
2020-02-05 22:34:04 +00:00
|
|
|
int ret = 0;
|
2020-01-29 18:59:29 +00:00
|
|
|
|
|
|
|
if (cmd->type != DYNEVENT_TYPE_KPROBE)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2020-01-31 21:55:32 +00:00
|
|
|
dynevent_arg_init(&arg, 0);
|
2020-01-29 18:59:29 +00:00
|
|
|
|
|
|
|
va_start(args, cmd);
|
|
|
|
for (;;) {
|
|
|
|
const char *field;
|
|
|
|
|
|
|
|
field = va_arg(args, const char *);
|
|
|
|
if (!field)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (++cmd->n_fields > MAX_TRACE_ARGS) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
arg.str = field;
|
2020-01-31 21:55:32 +00:00
|
|
|
ret = dynevent_arg_add(cmd, &arg, NULL);
|
2020-01-29 18:59:29 +00:00
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
va_end(args);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
|
|
|
|
|
|
|
|
/**
 * kprobe_event_delete - Delete a kprobe event
 * @name: The name of the kprobe event to delete
 *
 * Delete a kprobe event with the given @name from kernel code rather
 * than directly from the command line.
 *
 * Return: 0 if successful, error otherwise.
 */
int kprobe_event_delete(const char *name)
{
	char buf[MAX_EVENT_NAME_LEN];

	/* "-:NAME" is the command-line syntax for deletion */
	snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);

	return create_or_delete_trace_kprobe(buf);
}
EXPORT_SYMBOL_GPL(kprobe_event_delete);
|
|
|
|
|
2018-11-05 09:02:36 +00:00
|
|
|
/*
 * dyn_event release op: unregister the probe and, on success, free it.
 * A non-zero return (e.g. event still enabled) leaves the probe alive.
 */
static int trace_kprobe_release(struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);
	int ret;

	ret = unregister_trace_kprobe(tk);
	if (ret == 0)
		free_trace_kprobe(tk);

	return ret;
}
|
|
|
|
|
2018-11-05 09:02:36 +00:00
|
|
|
/*
 * Print one probe definition in the same syntax accepted by
 * kprobe_events, e.g. "r16:grp/event symbol+8 arg1=%ax".
 */
static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	/* maxactive is only meaningful (and printed) for kretprobes */
	if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
		seq_printf(m, "%d", tk->rp.maxactive);
	seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
				trace_probe_name(&tk->tp));

	/* Location: raw address, symbol+offset, or bare symbol */
	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	/* Fetch args as originally specified (args[i].comm) */
	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}
|
|
|
|
|
2018-11-05 09:02:36 +00:00
|
|
|
/*
 * seq_file show op for kprobe_events: the dyn_event list mixes event
 * types, so skip entries that are not kprobe events.
 */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	return is_trace_kprobe(ev) ? trace_kprobe_show(m, ev) : 0;
}
|
|
|
|
|
2009-08-13 20:35:11 +00:00
|
|
|
/* Iterate the shared dyn_event list; probes_seq_show filters to kprobes */
static const struct seq_operations probes_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_seq_show
};
|
|
|
|
|
|
|
|
static int probes_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2011-10-04 10:44:38 +00:00
|
|
|
int ret;
|
|
|
|
|
2019-10-11 21:22:50 +00:00
|
|
|
ret = security_locked_down(LOCKDOWN_TRACEFS);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2011-10-04 10:44:38 +00:00
|
|
|
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
|
2018-11-05 09:02:36 +00:00
|
|
|
ret = dyn_events_release_all(&trace_kprobe_ops);
|
2011-10-04 10:44:38 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
2009-08-13 20:35:11 +00:00
|
|
|
|
|
|
|
return seq_open(file, &probes_seq_op);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Write handler for kprobe_events: run each written line through
 * create_or_delete_trace_kprobe().
 */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_trace_kprobe);
}
|
|
|
|
|
|
|
|
/* tracefs file ops for <tracefs>/kprobe_events */
static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};
|
|
|
|
|
2009-08-13 20:35:42 +00:00
|
|
|
/* Probes profiling interfaces */
/* Show one line of hit/missed statistics per kprobe event */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_kprobe *tk;

	/* The dyn_event list mixes event types; only report kprobes */
	if (!is_trace_kprobe(ev))
		return 0;

	tk = to_trace_kprobe(ev);
	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_probe_name(&tk->tp),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}
|
|
|
|
|
|
|
|
/* Iterate the shared dyn_event list for kprobe_profile output */
static const struct seq_operations profile_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_profile_seq_show
};
|
|
|
|
|
|
|
|
static int profile_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2019-10-11 21:22:50 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = security_locked_down(LOCKDOWN_TRACEFS);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2009-08-13 20:35:42 +00:00
|
|
|
return seq_open(file, &profile_seq_op);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* tracefs file ops for <tracefs>/kprobe_profile (read-only statistics) */
static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
|
|
|
|
|
2018-04-25 12:18:03 +00:00
|
|
|
/* Kprobe specific fetch functions */
|
|
|
|
|
2020-06-09 04:34:44 +00:00
|
|
|
/* Return the length of string -- including null terminal byte */
|
|
|
|
static nokprobe_inline int
|
|
|
|
fetch_store_strlen_user(unsigned long addr)
|
|
|
|
{
|
|
|
|
const void __user *uaddr = (__force const void __user *)addr;
|
|
|
|
|
|
|
|
return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
|
|
|
|
}
|
|
|
|
|
2018-04-25 12:18:03 +00:00
|
|
|
/* Return the length of string -- including null terminal byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int ret, len = 0;
	u8 c;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	/* Kernel and user share the address range: low addresses are user */
	if (addr < TASK_SIZE)
		return fetch_store_strlen_user(addr);
#endif

	/* Walk one byte at a time until NUL, a fault, or the size cap.
	 * NOTE(review): 'c' is read in the condition even when the first
	 * copy fails (ret != 0); the return value is unaffected since a
	 * negative ret is returned as-is.
	 */
	do {
		ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	return (ret < 0) ? ret : len;
}
|
|
|
|
|
|
|
|
/*
 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
 * with max length and relative data location.
 */
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	const void __user *uaddr = (__force const void __user *)addr;
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;
	long ret;

	if (unlikely(!maxlen))
		return -ENOMEM;

	/* Resolve the relative data location to the actual buffer slot */
	__dest = get_loc_data(dest, base);

	ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
	if (ret >= 0)
		/* Record actual length and offset for the trace entry */
		*(u32 *)dest = make_data_loc(ret, __dest - base);

	return ret;
}
|
2018-04-25 12:18:03 +00:00
|
|
|
|
2019-05-15 05:38:30 +00:00
|
|
|
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;
	long ret;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	/* Low addresses belong to userspace on such architectures */
	if ((unsigned long)addr < TASK_SIZE)
		return fetch_store_string_user(addr, dest, base);
#endif

	if (unlikely(!maxlen))
		return -ENOMEM;

	/* Resolve the relative data location to the actual buffer slot */
	__dest = get_loc_data(dest, base);

	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
	if (ret >= 0)
		/* Record actual length and offset for the trace entry */
		*(u32 *)dest = make_data_loc(ret, __dest - base);

	return ret;
}
|
|
|
|
|
2019-05-15 05:38:42 +00:00
|
|
|
static nokprobe_inline int
|
|
|
|
probe_mem_read_user(void *dest, void *src, size_t size)
|
|
|
|
{
|
2019-05-22 08:27:52 +00:00
|
|
|
const void __user *uaddr = (__force const void __user *)src;
|
|
|
|
|
2020-06-17 07:37:54 +00:00
|
|
|
return copy_from_user_nofault(dest, uaddr, size);
|
2019-05-15 05:38:42 +00:00
|
|
|
}
|
|
|
|
|
2020-06-09 04:34:44 +00:00
|
|
|
/*
 * Non-faulting memory read that routes low (user) addresses to the
 * user accessor on architectures with a shared address space.
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if ((unsigned long)src < TASK_SIZE)
		return probe_mem_read_user(dest, src, size);
#endif
	return copy_from_kernel_nofault(dest, src, size);
}
|
|
|
|
|
2018-04-25 12:18:03 +00:00
|
|
|
/* Note that we don't verify it, since the code does not come from user space */
/*
 * Execute the first stage of one fetch "instruction": derive the raw
 * value from the probe context (registers/stack/immediates), then hand
 * off to process_fetch_insn_bottom() for dereference/store stages.
 */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

retry:
	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = regs_get_kernel_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = kernel_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = (unsigned long)current->comm;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	case FETCH_OP_ARG:
		val = regs_get_kernel_argument(regs, code->param);
		break;
#endif
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
|
|
|
|
|
2009-08-13 20:35:11 +00:00
|
|
|
/* Kprobe handler */
/*
 * Record one kprobe hit into @trace_file's ring buffer: reserve an
 * event of the right size, fill in the probe address and fetched args,
 * then commit (which also runs triggers/filters).
 */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	fbuffer.trace_ctx = tracing_gen_ctx();
	fbuffer.trace_file = trace_file;

	/* Dynamic-data size must be known before reserving the event */
	dsize = __get_data_size(&tk->tp, regs);

	fbuffer.event =
		trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
					call->event.type,
					sizeof(*entry) + tk->tp.size + dsize,
					fbuffer.trace_ctx);
	if (!fbuffer.event)
		return;

	fbuffer.regs = regs;
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	/* Fetched args live immediately after the entry header */
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}
|
|
|
|
|
2014-04-17 08:18:28 +00:00
|
|
|
static void
|
2013-07-03 04:50:51 +00:00
|
|
|
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
|
2013-05-09 05:44:49 +00:00
|
|
|
{
|
2013-06-20 17:38:14 +00:00
|
|
|
struct event_file_link *link;
|
2013-05-09 05:44:49 +00:00
|
|
|
|
2019-05-31 15:17:26 +00:00
|
|
|
trace_probe_for_each_link_rcu(link, &tk->tp)
|
2013-07-03 04:50:51 +00:00
|
|
|
__kprobe_trace_func(tk, regs, link->file);
|
2013-05-09 05:44:49 +00:00
|
|
|
}
|
2014-04-17 08:18:28 +00:00
|
|
|
NOKPROBE_SYMBOL(kprobe_trace_func);
|
2013-05-09 05:44:49 +00:00
|
|
|
|
2009-08-13 20:35:11 +00:00
|
|
|
/* Kretprobe handler */
|
2014-04-17 08:18:28 +00:00
|
|
|
/*
 * __kretprobe_trace_func - record one kretprobe (function-return) hit
 * @tk:		the kretprobe event that fired
 * @ri:		kretprobe instance carrying the saved return address
 * @regs:	register state at function return
 * @trace_file:	per-instance event file to write the event into
 *
 * Same flow as __kprobe_trace_func(), but the fixed header records both
 * the probed function address and the return address taken from @ri.
 */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	int dsize;

	WARN_ON(call != trace_file->event_call);

	/* The file may be soft-disabled (e.g. only used to fire triggers). */
	if (trace_trigger_soft_disabled(trace_file))
		return;

	fbuffer.trace_ctx = tracing_gen_ctx();
	fbuffer.trace_file = trace_file;

	/* Bytes of dynamic (string-like) argument data for this hit. */
	dsize = __get_data_size(&tk->tp, regs);
	fbuffer.event =
		trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
					call->event.type,
					sizeof(*entry) + tk->tp.size + dsize,
					fbuffer.trace_ctx);
	if (!fbuffer.event)
		return;

	fbuffer.regs = regs;
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	/* Fetch arguments are laid out immediately after the fixed header. */
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}
|
|
|
|
|
2014-04-17 08:18:28 +00:00
|
|
|
static void
|
2013-07-03 04:50:51 +00:00
|
|
|
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
|
2013-05-09 05:44:49 +00:00
|
|
|
struct pt_regs *regs)
|
|
|
|
{
|
2013-06-20 17:38:14 +00:00
|
|
|
struct event_file_link *link;
|
2013-05-09 05:44:49 +00:00
|
|
|
|
2019-05-31 15:17:26 +00:00
|
|
|
trace_probe_for_each_link_rcu(link, &tk->tp)
|
2013-07-03 04:50:51 +00:00
|
|
|
__kretprobe_trace_func(tk, ri, regs, link->file);
|
2013-05-09 05:44:49 +00:00
|
|
|
}
|
2014-04-17 08:18:28 +00:00
|
|
|
NOKPROBE_SYMBOL(kretprobe_trace_func);
|
2013-05-09 05:44:49 +00:00
|
|
|
|
2009-08-13 20:35:11 +00:00
|
|
|
/* Event entry printers */
|
2013-05-13 11:58:39 +00:00
|
|
|
static enum print_line_t
|
2010-04-22 22:46:14 +00:00
|
|
|
print_kprobe_event(struct trace_iterator *iter, int flags,
|
|
|
|
struct trace_event *event)
|
2009-08-13 20:35:11 +00:00
|
|
|
{
|
tracing/kprobes: Support basic types on dynamic events
Support basic types of integer (u8, u16, u32, u64, s8, s16, s32, s64) in
kprobe tracer. With this patch, users can specify above basic types on
each arguments after ':'. If omitted, the argument type is set as
unsigned long (u32 or u64, arch-dependent).
e.g.
echo 'p account_system_time+0 hardirq_offset=%si:s32' > kprobe_events
adds a probe recording hardirq_offset in signed-32bits value on the
entry of account_system_time.
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20100412171708.3790.18599.stgit@localhost6.localdomain6>
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2010-04-12 17:17:08 +00:00
|
|
|
struct kprobe_trace_entry_head *field;
|
2009-08-13 20:35:11 +00:00
|
|
|
struct trace_seq *s = &iter->seq;
|
2009-09-10 23:53:38 +00:00
|
|
|
struct trace_probe *tp;
|
2009-08-13 20:35:11 +00:00
|
|
|
|
tracing/kprobes: Support basic types on dynamic events
Support basic types of integer (u8, u16, u32, u64, s8, s16, s32, s64) in
kprobe tracer. With this patch, users can specify above basic types on
each arguments after ':'. If omitted, the argument type is set as
unsigned long (u32 or u64, arch-dependent).
e.g.
echo 'p account_system_time+0 hardirq_offset=%si:s32' > kprobe_events
adds a probe recording hardirq_offset in signed-32bits value on the
entry of account_system_time.
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20100412171708.3790.18599.stgit@localhost6.localdomain6>
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2010-04-12 17:17:08 +00:00
|
|
|
field = (struct kprobe_trace_entry_head *)iter->ent;
|
2019-06-19 15:07:20 +00:00
|
|
|
tp = trace_probe_primary_from_call(
|
|
|
|
container_of(event, struct trace_event_call, event));
|
|
|
|
if (WARN_ON_ONCE(!tp))
|
|
|
|
goto out;
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2019-05-31 15:17:47 +00:00
|
|
|
trace_seq_printf(s, "%s: (", trace_probe_name(tp));
|
2009-09-10 23:53:45 +00:00
|
|
|
|
2009-08-13 20:35:11 +00:00
|
|
|
if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
|
2014-11-12 20:18:16 +00:00
|
|
|
goto out;
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2014-11-12 20:18:16 +00:00
|
|
|
trace_seq_putc(s, ')');
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2018-04-25 12:16:36 +00:00
|
|
|
if (print_probe_args(s, tp->args, tp->nr_args,
|
|
|
|
(u8 *)&field[1], field) < 0)
|
|
|
|
goto out;
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2014-11-12 20:18:16 +00:00
|
|
|
trace_seq_putc(s, '\n');
|
|
|
|
out:
|
|
|
|
return trace_handle_return(s);
|
2009-08-13 20:35:11 +00:00
|
|
|
}
|
|
|
|
|
2013-05-13 11:58:39 +00:00
|
|
|
static enum print_line_t
|
2010-04-22 22:46:14 +00:00
|
|
|
print_kretprobe_event(struct trace_iterator *iter, int flags,
|
|
|
|
struct trace_event *event)
|
2009-08-13 20:35:11 +00:00
|
|
|
{
|
tracing/kprobes: Support basic types on dynamic events
Support basic types of integer (u8, u16, u32, u64, s8, s16, s32, s64) in
kprobe tracer. With this patch, users can specify above basic types on
each arguments after ':'. If omitted, the argument type is set as
unsigned long (u32 or u64, arch-dependent).
e.g.
echo 'p account_system_time+0 hardirq_offset=%si:s32' > kprobe_events
adds a probe recording hardirq_offset in signed-32bits value on the
entry of account_system_time.
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20100412171708.3790.18599.stgit@localhost6.localdomain6>
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2010-04-12 17:17:08 +00:00
|
|
|
struct kretprobe_trace_entry_head *field;
|
2009-08-13 20:35:11 +00:00
|
|
|
struct trace_seq *s = &iter->seq;
|
2009-09-10 23:53:38 +00:00
|
|
|
struct trace_probe *tp;
|
2009-08-13 20:35:11 +00:00
|
|
|
|
tracing/kprobes: Support basic types on dynamic events
Support basic types of integer (u8, u16, u32, u64, s8, s16, s32, s64) in
kprobe tracer. With this patch, users can specify above basic types on
each arguments after ':'. If omitted, the argument type is set as
unsigned long (u32 or u64, arch-dependent).
e.g.
echo 'p account_system_time+0 hardirq_offset=%si:s32' > kprobe_events
adds a probe recording hardirq_offset in signed-32bits value on the
entry of account_system_time.
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20100412171708.3790.18599.stgit@localhost6.localdomain6>
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2010-04-12 17:17:08 +00:00
|
|
|
field = (struct kretprobe_trace_entry_head *)iter->ent;
|
2019-06-19 15:07:20 +00:00
|
|
|
tp = trace_probe_primary_from_call(
|
|
|
|
container_of(event, struct trace_event_call, event));
|
|
|
|
if (WARN_ON_ONCE(!tp))
|
|
|
|
goto out;
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2019-05-31 15:17:47 +00:00
|
|
|
trace_seq_printf(s, "%s: (", trace_probe_name(tp));
|
2009-09-10 23:53:45 +00:00
|
|
|
|
2009-08-13 20:35:11 +00:00
|
|
|
if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
|
2014-11-12 20:18:16 +00:00
|
|
|
goto out;
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2014-11-12 20:18:16 +00:00
|
|
|
trace_seq_puts(s, " <- ");
|
2009-08-13 20:35:11 +00:00
|
|
|
|
|
|
|
if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
|
2014-11-12 20:18:16 +00:00
|
|
|
goto out;
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2014-11-12 20:18:16 +00:00
|
|
|
trace_seq_putc(s, ')');
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2018-04-25 12:16:36 +00:00
|
|
|
if (print_probe_args(s, tp->args, tp->nr_args,
|
|
|
|
(u8 *)&field[1], field) < 0)
|
|
|
|
goto out;
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2014-11-12 20:18:16 +00:00
|
|
|
trace_seq_putc(s, '\n');
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2014-11-12 20:18:16 +00:00
|
|
|
out:
|
|
|
|
return trace_handle_return(s);
|
2009-08-13 20:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-05-05 15:45:27 +00:00
|
|
|
/*
 * Register the fixed fields of a kprobe event with the event system,
 * then the per-probe argument fields.
 *
 * NOTE: DEFINE_FIELD() (trace_probe.h) expands to code that references
 * the local 'ret' and 'field' variables and the 'event_call' parameter
 * by name, so they look unused here but must keep these exact names.
 */
static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}
|
|
|
|
|
2015-05-05 15:45:27 +00:00
|
|
|
/*
 * Register the fixed fields of a kretprobe event (probed function and
 * return address) with the event system, then the per-probe argument
 * fields.
 *
 * NOTE: DEFINE_FIELD() (trace_probe.h) expands to code that references
 * the local 'ret' and 'field' variables and the 'event_call' parameter
 * by name, so they look unused here but must keep these exact names.
 */
static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}
|
|
|
|
|
2009-12-21 06:27:35 +00:00
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
2009-09-10 23:53:30 +00:00
|
|
|
|
|
|
|
/* Kprobe profile handler */
|
2017-12-11 16:36:48 +00:00
|
|
|
/*
 * kprobe_perf_func - perf (and BPF) handler for a kprobe hit
 * @tk:	  the kprobe event that fired
 * @regs: register state at the probed address
 *
 * Runs any attached BPF programs first, then submits a perf sample if
 * perf events are attached on this CPU.
 *
 * Return: 1 if a BPF program changed regs->ip (caller must then skip
 * single-stepping and resume at the new pc), 0 otherwise.
 */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call)) {
		unsigned long orig_ip = instruction_pointer(regs);
		int ret;

		ret = trace_call_bpf(call, regs);

		/*
		 * We need to check and see if we modified the pc of the
		 * pt_regs, and if so return 1 so that we don't do the
		 * single stepping.
		 */
		if (orig_ip != instruction_pointer(regs))
			return 1;
		/* BPF returned 0: drop the event, don't record a sample. */
		if (!ret)
			return 0;
	}

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	/* Fixed header + fetched args + dynamic data, padded to u64 minus
	 * the u32 size word perf_trace_buf prepends. */
	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return 0;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	/* Pre-zero so bytes store_trace_args() doesn't write (e.g. a failed
	 * dynamic fetch) don't leak stale per-cpu buffer contents. */
	memset(&entry[1], 0, dsize);
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);
|
2009-09-10 23:53:30 +00:00
|
|
|
|
|
|
|
/* Kretprobe profile handler */
|
2014-04-17 08:18:28 +00:00
|
|
|
static void
|
2013-07-03 04:50:51 +00:00
|
|
|
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
|
2013-05-09 05:44:41 +00:00
|
|
|
struct pt_regs *regs)
|
2009-09-10 23:53:30 +00:00
|
|
|
{
|
2019-05-31 15:17:57 +00:00
|
|
|
struct trace_event_call *call = trace_probe_event_call(&tk->tp);
|
tracing/kprobes: Support basic types on dynamic events
Support basic types of integer (u8, u16, u32, u64, s8, s16, s32, s64) in
kprobe tracer. With this patch, users can specify above basic types on
each arguments after ':'. If omitted, the argument type is set as
unsigned long (u32 or u64, arch-dependent).
e.g.
echo 'p account_system_time+0 hardirq_offset=%si:s32' > kprobe_events
adds a probe recording hardirq_offset in signed-32bits value on the
entry of account_system_time.
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20100412171708.3790.18599.stgit@localhost6.localdomain6>
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2010-04-12 17:17:08 +00:00
|
|
|
struct kretprobe_trace_entry_head *entry;
|
2010-05-19 12:02:22 +00:00
|
|
|
struct hlist_head *head;
|
2010-07-05 18:54:45 +00:00
|
|
|
int size, __size, dsize;
|
2009-11-23 10:37:29 +00:00
|
|
|
int rctx;
|
2009-09-10 23:53:30 +00:00
|
|
|
|
2017-10-24 06:53:08 +00:00
|
|
|
if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
|
tracing, perf: Implement BPF programs attached to kprobes
BPF programs, attached to kprobes, provide a safe way to execute
user-defined BPF byte-code programs without being able to crash or
hang the kernel in any way. The BPF engine makes sure that such
programs have a finite execution time and that they cannot break
out of their sandbox.
The user interface is to attach to a kprobe via the perf syscall:
struct perf_event_attr attr = {
.type = PERF_TYPE_TRACEPOINT,
.config = event_id,
...
};
event_fd = perf_event_open(&attr,...);
ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
'prog_fd' is a file descriptor associated with BPF program
previously loaded.
'event_id' is an ID of the kprobe created.
Closing 'event_fd':
close(event_fd);
... automatically detaches BPF program from it.
BPF programs can call in-kernel helper functions to:
- lookup/update/delete elements in maps
- probe_read - wraper of probe_kernel_read() used to access any
kernel data structures
BPF programs receive 'struct pt_regs *' as an input ('struct pt_regs' is
architecture dependent) and return 0 to ignore the event and 1 to store
kprobe event into the ring buffer.
Note, kprobes are a fundamentally _not_ a stable kernel ABI,
so BPF programs attached to kprobes must be recompiled for
every kernel version and user must supply correct LINUX_VERSION_CODE
in attr.kern_version during bpf_prog_load() call.
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1427312966-8434-4-git-send-email-ast@plumgrid.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-03-25 19:49:20 +00:00
|
|
|
return;
|
|
|
|
|
2013-06-20 17:38:06 +00:00
|
|
|
head = this_cpu_ptr(call->perf_events);
|
|
|
|
if (hlist_empty(head))
|
|
|
|
return;
|
|
|
|
|
2013-07-03 04:50:51 +00:00
|
|
|
dsize = __get_data_size(&tk->tp, regs);
|
|
|
|
__size = sizeof(*entry) + tk->tp.size + dsize;
|
2009-09-14 20:49:28 +00:00
|
|
|
size = ALIGN(__size + sizeof(u32), sizeof(u64));
|
|
|
|
size -= sizeof(u32);
|
tracing, perf_events: Protect the buffer from recursion in perf
While tracing using events with perf, if one enables the
lockdep:lock_acquire event, it will infect every other perf
trace events.
Basically, you can enable whatever set of trace events through
perf but if this event is part of the set, the only result we
can get is a long list of lock_acquire events of rcu read lock,
and only that.
This is because of a recursion inside perf.
1) When a trace event is triggered, it will fill a per cpu
buffer and submit it to perf.
2) Perf will commit this event but will also protect some data
using rcu_read_lock
3) A recursion appears: rcu_read_lock triggers a lock_acquire
event that will fill the per cpu event and then submit the
buffer to perf.
4) Perf detects a recursion and ignores it
5) Perf continues its work on the previous event, but its buffer
has been overwritten by the lock_acquire event, it has then
been turned into a lock_acquire event of rcu read lock
Such scenario also happens with lock_release with
rcu_read_unlock().
We could turn the rcu_read_lock() into __rcu_read_lock() to drop
the lock debugging from perf fast path, but that would make us
lose the rcu debugging and that doesn't prevent from other
possible kind of recursion from perf in the future.
This patch adds a recursion protection based on a counter on the
perf trace per cpu buffers to solve the problem.
-v2: Fixed lost whitespace, added reviewed-by tag
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Jason Baron <jbaron@redhat.com>
LKML-Reference: <1257477185-7838-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-11-06 03:13:05 +00:00
|
|
|
|
2016-04-07 01:43:24 +00:00
|
|
|
entry = perf_trace_buf_alloc(size, NULL, &rctx);
|
2010-01-28 01:32:29 +00:00
|
|
|
if (!entry)
|
2010-01-28 01:34:27 +00:00
|
|
|
return;
|
2009-09-10 23:53:30 +00:00
|
|
|
|
2013-07-03 04:50:51 +00:00
|
|
|
entry->func = (unsigned long)tk->rp.kp.addr;
|
2009-09-25 18:20:12 +00:00
|
|
|
entry->ret_ip = (unsigned long)ri->ret_addr;
|
2018-04-25 12:19:01 +00:00
|
|
|
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
|
2016-04-07 01:43:24 +00:00
|
|
|
perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
|
2017-10-11 07:45:29 +00:00
|
|
|
head, NULL);
|
2009-09-10 23:53:30 +00:00
|
|
|
}
|
2014-04-17 08:18:28 +00:00
|
|
|
NOKPROBE_SYMBOL(kretprobe_perf_func);
|
bpf: introduce bpf subcommand BPF_TASK_FD_QUERY
Currently, suppose a userspace application has loaded a bpf program
and attached it to a tracepoint/kprobe/uprobe, and a bpf
introspection tool, e.g., bpftool, wants to show which bpf program
is attached to which tracepoint/kprobe/uprobe. Such attachment
information will be really useful to understand the overall bpf
deployment in the system.
There is a name field (16 bytes) for each program, which could
be used to encode the attachment point. There are some drawbacks
for this approaches. First, bpftool user (e.g., an admin) may not
really understand the association between the name and the
attachment point. Second, if one program is attached to multiple
places, encoding a proper name which can imply all these
attachments becomes difficult.
This patch introduces a new bpf subcommand BPF_TASK_FD_QUERY.
Given a pid and fd, if the <pid, fd> is associated with a
tracepoint/kprobe/uprobe perf event, BPF_TASK_FD_QUERY will return
. prog_id
. tracepoint name, or
. k[ret]probe funcname + offset or kernel addr, or
. u[ret]probe filename + offset
to the userspace.
The user can use "bpftool prog" to find more information about
bpf program itself with prog_id.
Acked-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2018-05-24 18:21:09 +00:00
|
|
|
|
|
|
|
/*
 * Report BPF_TASK_FD_QUERY attachment details for a kprobe-based perf
 * event: whether it is a k[ret]probe, plus either the symbol + offset
 * it was placed on or the raw kernel address.
 */
int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **symbol, u64 *probe_offset,
			u64 *probe_addr, bool perf_type_tracepoint)
{
	const char *ev_name = trace_event_name(event->tp_event);
	const char *ev_group = event->tp_event->class->system;
	struct trace_kprobe *tk;

	/* Legacy perf types are looked up by name, others carry the call. */
	if (perf_type_tracepoint)
		tk = find_trace_kprobe(ev_name, ev_group);
	else
		tk = trace_kprobe_primary_from_call(event->tp_event);
	if (!tk)
		return -EINVAL;

	if (trace_kprobe_is_return(tk))
		*fd_type = BPF_FD_TYPE_KRETPROBE;
	else
		*fd_type = BPF_FD_TYPE_KPROBE;

	if (!tk->symbol) {
		/* Probe was placed on a raw address; no symbol to report. */
		*symbol = NULL;
		*probe_offset = 0;
		*probe_addr = (unsigned long)tk->rp.kp.addr;
	} else {
		*symbol = tk->symbol;
		*probe_offset = tk->rp.kp.offset;
		*probe_addr = 0;
	}
	return 0;
}
|
2009-12-21 06:27:35 +00:00
|
|
|
#endif /* CONFIG_PERF_EVENTS */
|
2009-09-14 20:49:20 +00:00
|
|
|
|
2013-06-20 17:38:09 +00:00
|
|
|
/*
|
|
|
|
* called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
|
|
|
|
*
|
|
|
|
* kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
|
|
|
|
* lockless, but we can't race with this __init function.
|
|
|
|
*/
|
2015-05-05 15:45:27 +00:00
|
|
|
static int kprobe_register(struct trace_event_call *event,
|
2014-04-17 08:18:00 +00:00
|
|
|
enum trace_reg type, void *data)
|
2010-04-21 16:27:06 +00:00
|
|
|
{
|
2015-05-05 14:09:53 +00:00
|
|
|
struct trace_event_file *file = data;
|
2011-06-27 07:26:44 +00:00
|
|
|
|
2010-04-21 16:27:06 +00:00
|
|
|
switch (type) {
|
|
|
|
case TRACE_REG_REGISTER:
|
2019-06-19 15:07:20 +00:00
|
|
|
return enable_trace_kprobe(event, file);
|
2010-04-21 16:27:06 +00:00
|
|
|
case TRACE_REG_UNREGISTER:
|
2019-06-19 15:07:20 +00:00
|
|
|
return disable_trace_kprobe(event, file);
|
2010-04-21 16:27:06 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
|
|
|
case TRACE_REG_PERF_REGISTER:
|
2019-06-19 15:07:20 +00:00
|
|
|
return enable_trace_kprobe(event, NULL);
|
2010-04-21 16:27:06 +00:00
|
|
|
case TRACE_REG_PERF_UNREGISTER:
|
2019-06-19 15:07:20 +00:00
|
|
|
return disable_trace_kprobe(event, NULL);
|
2012-02-15 14:51:49 +00:00
|
|
|
case TRACE_REG_PERF_OPEN:
|
|
|
|
case TRACE_REG_PERF_CLOSE:
|
2012-02-15 14:51:50 +00:00
|
|
|
case TRACE_REG_PERF_ADD:
|
|
|
|
case TRACE_REG_PERF_DEL:
|
2012-02-15 14:51:49 +00:00
|
|
|
return 0;
|
2010-04-21 16:27:06 +00:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2009-09-14 20:49:20 +00:00
|
|
|
|
2014-04-17 08:18:28 +00:00
|
|
|
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
|
2009-09-14 20:49:20 +00:00
|
|
|
{
|
2013-07-03 04:50:51 +00:00
|
|
|
struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
|
2017-12-11 16:36:48 +00:00
|
|
|
int ret = 0;
|
2009-09-10 23:53:30 +00:00
|
|
|
|
2016-02-03 20:28:28 +00:00
|
|
|
raw_cpu_inc(*tk->nhit);
|
2013-05-09 05:44:36 +00:00
|
|
|
|
2019-05-31 15:17:37 +00:00
|
|
|
if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
|
2013-07-03 04:50:51 +00:00
|
|
|
kprobe_trace_func(tk, regs);
|
2009-12-21 06:27:35 +00:00
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
2019-05-31 15:17:37 +00:00
|
|
|
if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
|
2017-12-11 16:36:48 +00:00
|
|
|
ret = kprobe_perf_func(tk, regs);
|
2009-12-21 06:27:35 +00:00
|
|
|
#endif
|
2017-12-11 16:36:48 +00:00
|
|
|
return ret;
|
2009-09-14 20:49:20 +00:00
|
|
|
}
|
2014-04-17 08:18:28 +00:00
|
|
|
NOKPROBE_SYMBOL(kprobe_dispatcher);
|
2009-09-14 20:49:20 +00:00
|
|
|
|
2014-04-17 08:18:28 +00:00
|
|
|
static int
|
|
|
|
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
|
2009-09-14 20:49:20 +00:00
|
|
|
{
|
2020-08-29 13:03:24 +00:00
|
|
|
struct kretprobe *rp = get_kretprobe(ri);
|
|
|
|
struct trace_kprobe *tk = container_of(rp, struct trace_kprobe, rp);
|
2009-09-14 20:49:20 +00:00
|
|
|
|
2016-02-03 20:28:28 +00:00
|
|
|
raw_cpu_inc(*tk->nhit);
|
2013-05-09 05:44:36 +00:00
|
|
|
|
2019-05-31 15:17:37 +00:00
|
|
|
if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
|
2013-07-03 04:50:51 +00:00
|
|
|
kretprobe_trace_func(tk, ri, regs);
|
2009-12-21 06:27:35 +00:00
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
2019-05-31 15:17:37 +00:00
|
|
|
if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
|
2013-07-03 04:50:51 +00:00
|
|
|
kretprobe_perf_func(tk, ri, regs);
|
2009-12-21 06:27:35 +00:00
|
|
|
#endif
|
2009-09-14 20:49:20 +00:00
|
|
|
return 0; /* We don't tweek kernel, so just return 0 */
|
|
|
|
}
|
2014-04-17 08:18:28 +00:00
|
|
|
NOKPROBE_SYMBOL(kretprobe_dispatcher);
|
2009-09-10 23:53:30 +00:00
|
|
|
|
2010-04-22 22:46:14 +00:00
|
|
|
static struct trace_event_functions kretprobe_funcs = {
|
|
|
|
.trace = print_kretprobe_event
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct trace_event_functions kprobe_funcs = {
|
|
|
|
.trace = print_kprobe_event
|
|
|
|
};
|
|
|
|
|
2019-10-24 20:26:59 +00:00
|
|
|
static struct trace_event_fields kretprobe_fields_array[] = {
|
|
|
|
{ .type = TRACE_FUNCTION_TYPE,
|
|
|
|
.define_fields = kretprobe_event_define_fields },
|
|
|
|
{}
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct trace_event_fields kprobe_fields_array[] = {
|
|
|
|
{ .type = TRACE_FUNCTION_TYPE,
|
|
|
|
.define_fields = kprobe_event_define_fields },
|
|
|
|
{}
|
|
|
|
};
|
|
|
|
|
2019-05-31 15:17:57 +00:00
|
|
|
static inline void init_trace_event_call(struct trace_kprobe *tk)
|
2009-08-13 20:35:11 +00:00
|
|
|
{
|
2019-05-31 15:17:57 +00:00
|
|
|
struct trace_event_call *call = trace_probe_event_call(&tk->tp);
|
|
|
|
|
2013-07-03 04:50:51 +00:00
|
|
|
if (trace_kprobe_is_return(tk)) {
|
2010-04-23 14:00:22 +00:00
|
|
|
call->event.funcs = &kretprobe_funcs;
|
2019-10-24 20:26:59 +00:00
|
|
|
call->class->fields_array = kretprobe_fields_array;
|
2009-08-13 20:35:11 +00:00
|
|
|
} else {
|
2010-04-23 14:00:22 +00:00
|
|
|
call->event.funcs = &kprobe_funcs;
|
2019-10-24 20:26:59 +00:00
|
|
|
call->class->fields_array = kprobe_fields_array;
|
2009-08-13 20:35:11 +00:00
|
|
|
}
|
2017-12-06 22:45:15 +00:00
|
|
|
|
|
|
|
call->flags = TRACE_EVENT_FL_KPROBE;
|
|
|
|
call->class->reg = kprobe_register;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int register_kprobe_event(struct trace_kprobe *tk)
|
|
|
|
{
|
2019-05-31 15:17:57 +00:00
|
|
|
init_trace_event_call(tk);
|
2019-05-31 15:16:46 +00:00
|
|
|
|
2019-05-31 15:17:16 +00:00
|
|
|
return trace_probe_register_event_call(&tk->tp);
|
2009-08-13 20:35:11 +00:00
|
|
|
}
|
|
|
|
|
2013-07-03 04:50:51 +00:00
|
|
|
static int unregister_kprobe_event(struct trace_kprobe *tk)
|
2009-08-13 20:35:11 +00:00
|
|
|
{
|
2019-05-31 15:17:16 +00:00
|
|
|
return trace_probe_unregister_event_call(&tk->tp);
|
2009-08-13 20:35:11 +00:00
|
|
|
}
|
|
|
|
|
2017-12-06 22:45:15 +00:00
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
|
|
|
/* create a trace_kprobe, but don't add it to global lists */
|
|
|
|
struct trace_event_call *
|
|
|
|
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
|
|
|
|
bool is_return)
|
|
|
|
{
|
|
|
|
struct trace_kprobe *tk;
|
|
|
|
int ret;
|
|
|
|
char *event;
|
|
|
|
|
|
|
|
/*
|
2018-11-05 09:02:36 +00:00
|
|
|
* local trace_kprobes are not added to dyn_event, so they are never
|
2017-12-06 22:45:15 +00:00
|
|
|
* searched in find_trace_kprobe(). Therefore, there is no concern of
|
|
|
|
* duplicated name here.
|
|
|
|
*/
|
|
|
|
event = func ? func : "DUMMY_EVENT";
|
|
|
|
|
|
|
|
tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
|
|
|
|
offs, 0 /* maxactive */, 0 /* nargs */,
|
|
|
|
is_return);
|
|
|
|
|
|
|
|
if (IS_ERR(tk)) {
|
|
|
|
pr_info("Failed to allocate trace_probe.(%d)\n",
|
|
|
|
(int)PTR_ERR(tk));
|
|
|
|
return ERR_CAST(tk);
|
|
|
|
}
|
|
|
|
|
2019-05-31 15:17:57 +00:00
|
|
|
init_trace_event_call(tk);
|
2017-12-06 22:45:15 +00:00
|
|
|
|
2018-04-25 12:19:30 +00:00
|
|
|
if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
|
2017-12-06 22:45:15 +00:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = __register_trace_kprobe(tk);
|
2019-05-31 15:16:46 +00:00
|
|
|
if (ret < 0)
|
2017-12-06 22:45:15 +00:00
|
|
|
goto error;
|
|
|
|
|
2019-05-31 15:17:57 +00:00
|
|
|
return trace_probe_event_call(&tk->tp);
|
2017-12-06 22:45:15 +00:00
|
|
|
error:
|
|
|
|
free_trace_kprobe(tk);
|
|
|
|
return ERR_PTR(ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
void destroy_local_trace_kprobe(struct trace_event_call *event_call)
|
|
|
|
{
|
|
|
|
struct trace_kprobe *tk;
|
|
|
|
|
2019-06-19 15:07:20 +00:00
|
|
|
tk = trace_kprobe_primary_from_call(event_call);
|
|
|
|
if (unlikely(!tk))
|
|
|
|
return;
|
2017-12-06 22:45:15 +00:00
|
|
|
|
|
|
|
if (trace_probe_is_enabled(&tk->tp)) {
|
|
|
|
WARN_ON(1);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
__unregister_trace_kprobe(tk);
|
2018-07-09 14:19:06 +00:00
|
|
|
|
2017-12-06 22:45:15 +00:00
|
|
|
free_trace_kprobe(tk);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_PERF_EVENTS */
|
|
|
|
|
2019-05-22 08:32:35 +00:00
|
|
|
static __init void enable_boot_kprobe_events(void)
|
|
|
|
{
|
|
|
|
struct trace_array *tr = top_trace_array();
|
|
|
|
struct trace_event_file *file;
|
|
|
|
struct trace_kprobe *tk;
|
|
|
|
struct dyn_event *pos;
|
|
|
|
|
|
|
|
mutex_lock(&event_mutex);
|
|
|
|
for_each_trace_kprobe(tk, pos) {
|
|
|
|
list_for_each_entry(file, &tr->events, list)
|
2019-05-31 15:17:57 +00:00
|
|
|
if (file->event_call == trace_probe_event_call(&tk->tp))
|
2019-05-22 08:32:35 +00:00
|
|
|
trace_event_enable_disable(file, 1, 0);
|
|
|
|
}
|
|
|
|
mutex_unlock(&event_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
static __init void setup_boot_kprobe_events(void)
|
|
|
|
{
|
|
|
|
char *p, *cmd = kprobe_boot_events_buf;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
strreplace(kprobe_boot_events_buf, ',', ' ');
|
|
|
|
|
|
|
|
while (cmd && *cmd != '\0') {
|
|
|
|
p = strchr(cmd, ';');
|
|
|
|
if (p)
|
|
|
|
*p++ = '\0';
|
|
|
|
|
2021-02-01 19:48:11 +00:00
|
|
|
ret = create_or_delete_trace_kprobe(cmd);
|
2019-05-22 08:32:35 +00:00
|
|
|
if (ret)
|
|
|
|
pr_warn("Failed to add event(%d): %s\n", ret, cmd);
|
|
|
|
|
|
|
|
cmd = p;
|
|
|
|
}
|
|
|
|
|
|
|
|
enable_boot_kprobe_events();
|
|
|
|
}
|
|
|
|
|
2020-01-10 16:05:42 +00:00
|
|
|
/*
|
2020-09-10 12:39:17 +00:00
|
|
|
* Register dynevent at core_initcall. This allows kernel to setup kprobe
|
|
|
|
* events in postcore_initcall without tracefs.
|
2020-01-10 16:05:42 +00:00
|
|
|
*/
|
|
|
|
static __init int init_kprobe_trace_early(void)
|
2009-08-13 20:35:11 +00:00
|
|
|
{
|
2018-11-05 09:02:36 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = dyn_event_register(&trace_kprobe_ops);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2013-07-03 04:50:51 +00:00
|
|
|
if (register_module_notifier(&trace_kprobe_module_nb))
|
2011-06-27 07:26:56 +00:00
|
|
|
return -EINVAL;
|
|
|
|
|
2020-01-10 16:05:42 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2020-09-10 12:39:17 +00:00
|
|
|
core_initcall(init_kprobe_trace_early);
|
2020-01-10 16:05:42 +00:00
|
|
|
|
|
|
|
/* Make a tracefs interface for controlling probe points */
|
|
|
|
static __init int init_kprobe_trace(void)
|
|
|
|
{
|
2020-07-12 01:10:36 +00:00
|
|
|
int ret;
|
2020-01-10 16:05:42 +00:00
|
|
|
struct dentry *entry;
|
|
|
|
|
2020-07-12 01:10:36 +00:00
|
|
|
ret = tracing_init_dentry();
|
|
|
|
if (ret)
|
2009-08-13 20:35:11 +00:00
|
|
|
return 0;
|
|
|
|
|
2020-07-12 01:10:36 +00:00
|
|
|
entry = tracefs_create_file("kprobe_events", 0644, NULL,
|
2009-08-13 20:35:11 +00:00
|
|
|
NULL, &kprobe_events_ops);
|
|
|
|
|
2009-08-13 20:35:42 +00:00
|
|
|
/* Event list interface */
|
2009-08-13 20:35:11 +00:00
|
|
|
if (!entry)
|
2016-03-22 21:28:09 +00:00
|
|
|
pr_warn("Could not create tracefs 'kprobe_events' entry\n");
|
2009-08-13 20:35:42 +00:00
|
|
|
|
|
|
|
/* Profile interface */
|
2020-07-12 01:10:36 +00:00
|
|
|
entry = tracefs_create_file("kprobe_profile", 0444, NULL,
|
2009-08-13 20:35:42 +00:00
|
|
|
NULL, &kprobe_profile_ops);
|
|
|
|
|
|
|
|
if (!entry)
|
2016-03-22 21:28:09 +00:00
|
|
|
pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
|
2019-05-22 08:32:35 +00:00
|
|
|
|
|
|
|
setup_boot_kprobe_events();
|
|
|
|
|
2009-08-13 20:35:11 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
fs_initcall(init_kprobe_trace);
|
|
|
|
|
|
|
|
|
|
|
|
#ifdef CONFIG_FTRACE_STARTUP_TEST
|
2017-02-01 16:57:56 +00:00
|
|
|
static __init struct trace_event_file *
|
2013-07-03 04:50:51 +00:00
|
|
|
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
|
2013-05-09 05:44:49 +00:00
|
|
|
{
|
2015-05-05 14:09:53 +00:00
|
|
|
struct trace_event_file *file;
|
2013-05-09 05:44:49 +00:00
|
|
|
|
|
|
|
list_for_each_entry(file, &tr->events, list)
|
2019-05-31 15:17:57 +00:00
|
|
|
if (file->event_call == trace_probe_event_call(&tk->tp))
|
2013-05-09 05:44:49 +00:00
|
|
|
return file;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2013-06-20 17:38:09 +00:00
|
|
|
/*
|
2013-07-03 04:50:51 +00:00
|
|
|
* Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
|
2013-06-20 17:38:09 +00:00
|
|
|
* stage, we can do this lockless.
|
|
|
|
*/
|
2009-08-13 20:35:11 +00:00
|
|
|
static __init int kprobe_trace_self_tests_init(void)
|
|
|
|
{
|
2010-01-14 05:12:12 +00:00
|
|
|
int ret, warn = 0;
|
2009-08-13 20:35:11 +00:00
|
|
|
int (*target)(int, int, int, int, int, int);
|
2013-07-03 04:50:51 +00:00
|
|
|
struct trace_kprobe *tk;
|
2015-05-05 14:09:53 +00:00
|
|
|
struct trace_event_file *file;
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2014-06-05 22:35:20 +00:00
|
|
|
if (tracing_is_disabled())
|
|
|
|
return -ENODEV;
|
|
|
|
|
2020-12-08 08:54:09 +00:00
|
|
|
if (tracing_selftest_disabled)
|
2019-05-23 23:50:34 +00:00
|
|
|
return 0;
|
|
|
|
|
2009-08-13 20:35:11 +00:00
|
|
|
target = kprobe_trace_selftest_target;
|
|
|
|
|
|
|
|
pr_info("Testing kprobe tracing: ");
|
|
|
|
|
2021-02-01 19:48:11 +00:00
|
|
|
ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
|
2010-01-14 05:12:12 +00:00
|
|
|
if (WARN_ON_ONCE(ret)) {
|
2013-05-09 05:44:49 +00:00
|
|
|
pr_warn("error on probing function entry.\n");
|
2010-01-14 05:12:12 +00:00
|
|
|
warn++;
|
|
|
|
} else {
|
|
|
|
/* Enable trace point */
|
2013-07-03 04:50:51 +00:00
|
|
|
tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
|
|
|
|
if (WARN_ON_ONCE(tk == NULL)) {
|
2013-05-09 05:44:49 +00:00
|
|
|
pr_warn("error on getting new probe.\n");
|
2010-01-14 05:12:12 +00:00
|
|
|
warn++;
|
2013-05-09 05:44:49 +00:00
|
|
|
} else {
|
2013-07-03 04:50:51 +00:00
|
|
|
file = find_trace_probe_file(tk, top_trace_array());
|
2013-05-09 05:44:49 +00:00
|
|
|
if (WARN_ON_ONCE(file == NULL)) {
|
|
|
|
pr_warn("error on getting probe file.\n");
|
|
|
|
warn++;
|
|
|
|
} else
|
2019-06-19 15:07:20 +00:00
|
|
|
enable_trace_kprobe(
|
|
|
|
trace_probe_event_call(&tk->tp), file);
|
2013-05-09 05:44:49 +00:00
|
|
|
}
|
2010-01-14 05:12:12 +00:00
|
|
|
}
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2021-02-01 19:48:11 +00:00
|
|
|
ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
|
2010-01-14 05:12:12 +00:00
|
|
|
if (WARN_ON_ONCE(ret)) {
|
2013-05-09 05:44:49 +00:00
|
|
|
pr_warn("error on probing function return.\n");
|
2010-01-14 05:12:12 +00:00
|
|
|
warn++;
|
|
|
|
} else {
|
|
|
|
/* Enable trace point */
|
2013-07-03 04:50:51 +00:00
|
|
|
tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
|
|
|
|
if (WARN_ON_ONCE(tk == NULL)) {
|
2013-05-09 05:44:49 +00:00
|
|
|
pr_warn("error on getting 2nd new probe.\n");
|
2010-01-14 05:12:12 +00:00
|
|
|
warn++;
|
2013-05-09 05:44:49 +00:00
|
|
|
} else {
|
2013-07-03 04:50:51 +00:00
|
|
|
file = find_trace_probe_file(tk, top_trace_array());
|
2013-05-09 05:44:49 +00:00
|
|
|
if (WARN_ON_ONCE(file == NULL)) {
|
|
|
|
pr_warn("error on getting probe file.\n");
|
|
|
|
warn++;
|
|
|
|
} else
|
2019-06-19 15:07:20 +00:00
|
|
|
enable_trace_kprobe(
|
|
|
|
trace_probe_event_call(&tk->tp), file);
|
2013-05-09 05:44:49 +00:00
|
|
|
}
|
2010-01-14 05:12:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (warn)
|
|
|
|
goto end;
|
2009-08-13 20:35:11 +00:00
|
|
|
|
|
|
|
ret = target(1, 2, 3, 4, 5, 6);
|
|
|
|
|
2016-12-09 14:19:38 +00:00
|
|
|
/*
|
|
|
|
* Not expecting an error here, the check is only to prevent the
|
|
|
|
* optimizer from removing the call to target() as otherwise there
|
|
|
|
* are no side-effects and the call is never performed.
|
|
|
|
*/
|
|
|
|
if (ret != 21)
|
|
|
|
warn++;
|
|
|
|
|
2011-10-04 10:44:38 +00:00
|
|
|
/* Disable trace points before removing it */
|
2013-07-03 04:50:51 +00:00
|
|
|
tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
|
|
|
|
if (WARN_ON_ONCE(tk == NULL)) {
|
2013-05-09 05:44:49 +00:00
|
|
|
pr_warn("error on getting test probe.\n");
|
2011-10-04 10:44:38 +00:00
|
|
|
warn++;
|
2013-05-09 05:44:49 +00:00
|
|
|
} else {
|
2016-12-09 14:19:38 +00:00
|
|
|
if (trace_kprobe_nhit(tk) != 1) {
|
|
|
|
pr_warn("incorrect number of testprobe hits\n");
|
|
|
|
warn++;
|
|
|
|
}
|
|
|
|
|
2013-07-03 04:50:51 +00:00
|
|
|
file = find_trace_probe_file(tk, top_trace_array());
|
2013-05-09 05:44:49 +00:00
|
|
|
if (WARN_ON_ONCE(file == NULL)) {
|
|
|
|
pr_warn("error on getting probe file.\n");
|
|
|
|
warn++;
|
|
|
|
} else
|
2019-06-19 15:07:20 +00:00
|
|
|
disable_trace_kprobe(
|
|
|
|
trace_probe_event_call(&tk->tp), file);
|
2013-05-09 05:44:49 +00:00
|
|
|
}
|
2011-10-04 10:44:38 +00:00
|
|
|
|
2013-07-03 04:50:51 +00:00
|
|
|
tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
|
|
|
|
if (WARN_ON_ONCE(tk == NULL)) {
|
2013-05-09 05:44:49 +00:00
|
|
|
pr_warn("error on getting 2nd test probe.\n");
|
2011-10-04 10:44:38 +00:00
|
|
|
warn++;
|
2013-05-09 05:44:49 +00:00
|
|
|
} else {
|
2016-12-09 14:19:38 +00:00
|
|
|
if (trace_kprobe_nhit(tk) != 1) {
|
|
|
|
pr_warn("incorrect number of testprobe2 hits\n");
|
|
|
|
warn++;
|
|
|
|
}
|
|
|
|
|
2013-07-03 04:50:51 +00:00
|
|
|
file = find_trace_probe_file(tk, top_trace_array());
|
2013-05-09 05:44:49 +00:00
|
|
|
if (WARN_ON_ONCE(file == NULL)) {
|
|
|
|
pr_warn("error on getting probe file.\n");
|
|
|
|
warn++;
|
|
|
|
} else
|
2019-06-19 15:07:20 +00:00
|
|
|
disable_trace_kprobe(
|
|
|
|
trace_probe_event_call(&tk->tp), file);
|
2013-05-09 05:44:49 +00:00
|
|
|
}
|
2011-10-04 10:44:38 +00:00
|
|
|
|
2021-02-01 19:48:11 +00:00
|
|
|
ret = create_or_delete_trace_kprobe("-:testprobe");
|
2010-01-14 05:12:12 +00:00
|
|
|
if (WARN_ON_ONCE(ret)) {
|
2013-05-09 05:44:49 +00:00
|
|
|
pr_warn("error on deleting a probe.\n");
|
2010-01-14 05:12:12 +00:00
|
|
|
warn++;
|
|
|
|
}
|
|
|
|
|
2021-02-01 19:48:11 +00:00
|
|
|
ret = create_or_delete_trace_kprobe("-:testprobe2");
|
2010-01-14 05:12:12 +00:00
|
|
|
if (WARN_ON_ONCE(ret)) {
|
2013-05-09 05:44:49 +00:00
|
|
|
pr_warn("error on deleting a probe.\n");
|
2010-01-14 05:12:12 +00:00
|
|
|
warn++;
|
|
|
|
}
|
2009-08-13 20:35:11 +00:00
|
|
|
|
2010-01-14 05:12:12 +00:00
|
|
|
end:
|
2018-11-05 09:02:36 +00:00
|
|
|
ret = dyn_events_release_all(&trace_kprobe_ops);
|
|
|
|
if (WARN_ON_ONCE(ret)) {
|
|
|
|
pr_warn("error on cleaning up probes.\n");
|
|
|
|
warn++;
|
|
|
|
}
|
2017-05-17 08:19:49 +00:00
|
|
|
/*
|
|
|
|
* Wait for the optimizer work to finish. Otherwise it might fiddle
|
|
|
|
* with probes in already freed __init text.
|
|
|
|
*/
|
|
|
|
wait_for_kprobe_optimizer();
|
2010-01-14 05:12:12 +00:00
|
|
|
if (warn)
|
|
|
|
pr_cont("NG: Some tests are failed. Please check them.\n");
|
|
|
|
else
|
|
|
|
pr_cont("OK\n");
|
2009-08-13 20:35:11 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
late_initcall(kprobe_trace_self_tests_init);
|
|
|
|
|
|
|
|
#endif
|