Merge tag 'trace-v4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "Nothing new in development for this release. These are mostly fixes
  that were found during development of changes for the next merge
  window and fixes that were sent to me late in the last cycle"

* tag 'trace-v4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Apply trace_clock changes to instance max buffer
  tracing: Fix clear of RECORDED_TGID flag when disabling trace event
  tracing: Add barrier to trace_printk() buffer nesting modification
  ftrace: Fix memleak when unregistering dynamic ops when tracing disabled
  ftrace: Fix selftest goto location on error
  ftrace: Zero out ftrace hashes when a module is removed
  tracing: Only have rmmod clear buffers that its events were active in
  ftrace: Fix debug preempt config name in stack_tracer_{en,dis}able
Commit: 42c8e86c9c
Author: Linus Torvalds
Date:   2017-09-08 15:08:14 -07:00

7 changed files with 87 additions and 26 deletions

include/linux/ftrace.h

@@ -307,7 +307,7 @@ DECLARE_PER_CPU(int, disable_stack_tracer);
 static inline void stack_tracer_disable(void)
 {
 	/* Preemption or interupts must be disabled */
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
 		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
 	this_cpu_inc(disable_stack_tracer);
 }
@@ -320,7 +320,7 @@ static inline void stack_tracer_disable(void)
  */
 static inline void stack_tracer_enable(void)
 {
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
 		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
 	this_cpu_dec(disable_stack_tracer);
 }
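
The bug fixed here is easy to miss because IS_ENABLED() never complains about
unknown option names: a misspelled config such as CONFIG_PREEMPT_DEBUG simply
evaluates to 0, so the WARN_ON_ONCE() was silently compiled out. A minimal
userspace sketch of the macro trick, simplified from include/linux/kconfig.h
(names and structure here are illustrative, not the kernel's exact
definitions):

    #include <stdio.h>

    /*
     * Simplified re-implementation of IS_ENABLED() for illustration.
     * A set Kconfig option is defined to 1 by autoconf.h; an unset or
     * misspelled one is an undefined macro, which this trick maps to 0
     * instead of a compile error.
     */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define IS_ENABLED(option) ___is_defined(option)

    #define CONFIG_DEBUG_PREEMPT 1	/* pretend this option is set */

    int main(void)
    {
    	/* prints 1: the option is defined */
    	printf("DEBUG_PREEMPT: %d\n", IS_ENABLED(CONFIG_DEBUG_PREEMPT));
    	/* prints 0: the misspelling compiles cleanly and is never true */
    	printf("PREEMPT_DEBUG: %d\n", IS_ENABLED(CONFIG_PREEMPT_DEBUG));
    	return 0;
    }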

include/linux/trace_events.h

@@ -217,7 +217,6 @@ enum {
 	TRACE_EVENT_FL_CAP_ANY_BIT,
 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
 	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
-	TRACE_EVENT_FL_WAS_ENABLED_BIT,
 	TRACE_EVENT_FL_TRACEPOINT_BIT,
 	TRACE_EVENT_FL_KPROBE_BIT,
 	TRACE_EVENT_FL_UPROBE_BIT,
@@ -229,9 +228,6 @@ enum {
 *  CAP_ANY	 - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
- *  WAS_ENABLED	 - Set and stays set when an event was ever enabled
- *		     (used for module unloading, if a module event is enabled,
- *		      it is best to clear the buffers that used it).
 *  TRACEPOINT	 - Event is a tracepoint
 *  KPROBE	 - Event is a kprobe
 *  UPROBE	 - Event is a uprobe
@@ -241,7 +237,6 @@ enum {
 	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
 	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
 	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
-	TRACE_EVENT_FL_WAS_ENABLED	= (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
 	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
 	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
 	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
@@ -306,6 +301,7 @@ enum {
 	EVENT_FILE_FL_TRIGGER_MODE_BIT,
 	EVENT_FILE_FL_TRIGGER_COND_BIT,
 	EVENT_FILE_FL_PID_FILTER_BIT,
+	EVENT_FILE_FL_WAS_ENABLED_BIT,
 };
 
 /*
@@ -321,6 +317,7 @@ enum {
 *  TRIGGER_MODE	 - When set, invoke the triggers associated with the event
 *  TRIGGER_COND	 - When set, one or more triggers has an associated filter
 *  PID_FILTER	 - When set, the event is filtered based on pid
+ *  WAS_ENABLED	 - Set when enabled to know to clear trace on module removal
 */
 enum {
 	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
@@ -333,6 +330,7 @@ enum {
 	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
 	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
 	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
+	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
 };
 
 struct trace_event_file {

kernel/trace/ftrace.c

@@ -2828,13 +2828,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
 	if (!command || !ftrace_enabled) {
 		/*
-		 * If these are per_cpu ops, they still need their
-		 * per_cpu field freed. Since, function tracing is
+		 * If these are dynamic or per_cpu ops, they still
+		 * need their data freed. Since, function tracing is
 		 * not currently active, we can just free them
 		 * without synchronizing all CPUs.
 		 */
-		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
-			per_cpu_ops_free(ops);
+		if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
+			goto free_ops;
+
 		return 0;
 	}
@@ -2900,6 +2901,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		if (IS_ENABLED(CONFIG_PREEMPT))
 			synchronize_rcu_tasks();
 
+ free_ops:
 		arch_ftrace_trampoline_free(ops);
 
 		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
@@ -5690,10 +5692,51 @@ static int referenced_filters(struct dyn_ftrace *rec)
 	return cnt;
 }
 
+static void
+clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
+{
+	struct ftrace_func_entry *entry;
+	struct dyn_ftrace *rec;
+	int i;
+
+	if (ftrace_hash_empty(hash))
+		return;
+
+	for (i = 0; i < pg->index; i++) {
+		rec = &pg->records[i];
+		entry = __ftrace_lookup_ip(hash, rec->ip);
+		/*
+		 * Do not allow this rec to match again.
+		 * Yeah, it may waste some memory, but will be removed
+		 * if/when the hash is modified again.
+		 */
+		if (entry)
+			entry->ip = 0;
+	}
+}
+
+/* Clear any records from hashs */
+static void clear_mod_from_hashes(struct ftrace_page *pg)
+{
+	struct trace_array *tr;
+
+	mutex_lock(&trace_types_lock);
+	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+		if (!tr->ops || !tr->ops->func_hash)
+			continue;
+		mutex_lock(&tr->ops->func_hash->regex_lock);
+		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
+		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
+		mutex_unlock(&tr->ops->func_hash->regex_lock);
+	}
+	mutex_unlock(&trace_types_lock);
+}
+
 void ftrace_release_mod(struct module *mod)
 {
 	struct dyn_ftrace *rec;
 	struct ftrace_page **last_pg;
+	struct ftrace_page *tmp_page = NULL;
 	struct ftrace_page *pg;
 	int order;
@@ -5723,14 +5766,25 @@ void ftrace_release_mod(struct module *mod)
 			ftrace_update_tot_cnt -= pg->index;
 			*last_pg = pg->next;
-			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-			free_pages((unsigned long)pg->records, order);
-			kfree(pg);
+
+			pg->next = tmp_page;
+			tmp_page = pg;
 		} else
 			last_pg = &pg->next;
 	}
  out_unlock:
 	mutex_unlock(&ftrace_lock);
 
+	for (pg = tmp_page; pg; pg = tmp_page) {
+
+		/* Needs to be called outside of ftrace_lock */
+		clear_mod_from_hashes(pg);
+
+		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+		free_pages((unsigned long)pg->records, order);
+		tmp_page = pg->next;
+		kfree(pg);
+	}
 }
 
 void ftrace_module_enable(struct module *mod)
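
The ftrace_release_mod() change is the classic unlink-under-lock,
free-outside-lock pattern: pages are moved to a private tmp_page list while
ftrace_lock is held, and the heavier work of scrubbing hashes and freeing
happens after the unlock, because clear_mod_from_hashes() takes
trace_types_lock and must not nest under ftrace_lock. A standalone sketch of
the same pattern (pthread-based; all names here are hypothetical stand-ins,
not the kernel's):

    #include <pthread.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for ftrace_page and ftrace_lock */
    struct page_node {
    	struct page_node *next;
    	int owner_mod;			/* which "module" owns this page */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct page_node *pages;

    static void scrub_hashes(struct page_node *pg)
    {
    	/* stands in for clear_mod_from_hashes(); may take other locks,
    	 * which is exactly why it must run after list_lock is dropped */
    	(void)pg;
    }

    static void release_module_pages(int mod)
    {
    	struct page_node **last = &pages, *pg, *tmp = NULL;

    	pthread_mutex_lock(&list_lock);
    	while ((pg = *last)) {
    		if (pg->owner_mod == mod) {
    			*last = pg->next;	/* unlink under the lock... */
    			pg->next = tmp;		/* ...onto a private list */
    			tmp = pg;
    		} else {
    			last = &pg->next;
    		}
    	}
    	pthread_mutex_unlock(&list_lock);

    	for (pg = tmp; pg; pg = tmp) {		/* heavy work, lock dropped */
    		scrub_hashes(pg);
    		tmp = pg->next;
    		free(pg);
    	}
    }

    int main(void)
    {
    	for (int i = 0; i < 4; i++) {		/* two pages per "module" */
    		struct page_node *pg = calloc(1, sizeof(*pg));
    		pg->owner_mod = i & 1;
    		pg->next = pages;
    		pages = pg;
    	}
    	release_module_pages(1);
    	release_module_pages(0);
    	return 0;
    }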

kernel/trace/trace.c

@@ -1702,6 +1702,9 @@ void tracing_reset_all_online_cpus(void)
 	struct trace_array *tr;
 
 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+		if (!tr->clear_trace)
+			continue;
+		tr->clear_trace = false;
 		tracing_reset_online_cpus(&tr->trace_buffer);
 #ifdef CONFIG_TRACER_MAX_TRACE
 		tracing_reset_online_cpus(&tr->max_buffer);
@@ -2799,11 +2802,17 @@ static char *get_trace_buf(void)
 	if (!buffer || buffer->nesting >= 4)
 		return NULL;
 
-	return &buffer->buffer[buffer->nesting++][0];
+	buffer->nesting++;
+
+	/* Interrupts must see nesting incremented before we use the buffer */
+	barrier();
+	return &buffer->buffer[buffer->nesting][0];
 }
 
 static void put_trace_buf(void)
 {
+	/* Don't let the decrement of nesting leak before this */
+	barrier();
 	this_cpu_dec(trace_percpu_buffer->nesting);
 }
@@ -6220,7 +6229,7 @@ static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
 	tracing_reset_online_cpus(&tr->trace_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
+	if (tr->max_buffer.buffer)
 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
 	tracing_reset_online_cpus(&tr->max_buffer);
 #endif
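
In the get_trace_buf()/put_trace_buf() change above, the barrier pairing is
the point: the nesting increment must be published before any store into the
returned buffer, and all buffer accesses must complete before the decrement,
or an interrupt landing in the window would be handed a slot still in use. A
minimal single-CPU userspace sketch of that idiom (hypothetical names, and
deliberately simplified; the kernel's barrier() is the same compiler barrier):

    /* Compiler barrier, as defined in the kernel's <linux/compiler.h> */
    #define barrier() __asm__ __volatile__("" ::: "memory")

    #define TRACE_BUF_SIZE 256

    /* One CPU's buffer stack: a slot per nesting level (task, softirq,
     * irq, NMI), standing in for the per-CPU trace_buffer_struct */
    static struct {
    	int  nesting;
    	char buffer[4][TRACE_BUF_SIZE];
    } buf;

    static char *get_buf(void)
    {
    	if (buf.nesting >= 4)
    		return NULL;

    	int level = buf.nesting++;
    	/*
    	 * Without this, the compiler may sink the store to ->nesting
    	 * below the caller's writes into the buffer; an interrupt
    	 * arriving in that window would see the old count and be
    	 * handed the same slot we are still filling.
    	 */
    	barrier();
    	return &buf.buffer[level][0];
    }

    static void put_buf(void)
    {
    	/* Keep buffer accesses above from drifting below the decrement */
    	barrier();
    	buf.nesting--;
    }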

kernel/trace/trace.h

@@ -245,6 +245,7 @@ struct trace_array {
 	int			stop_count;
 	int			clock_id;
 	int			nr_topts;
+	bool			clear_trace;
 	struct tracer		*current_trace;
 	unsigned int		trace_flags;
 	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];

kernel/trace/trace_events.c

@@ -406,7 +406,7 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
 
 			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
 				tracing_stop_tgid_record();
-				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
+				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
 			}
 
 			call->class->reg(call, TRACE_REG_UNREGISTER, file);
@@ -466,7 +466,7 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
 			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
 
 			/* WAS_ENABLED gets set but never cleared. */
-			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
+			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
 		}
 		break;
 	}
@@ -2058,6 +2058,10 @@ static void event_remove(struct trace_event_call *call)
 	do_for_each_event_file(tr, file) {
 		if (file->event_call != call)
 			continue;
+
+		if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
+			tr->clear_trace = true;
+
 		ftrace_event_enable_disable(file, 0);
 		/*
 		 * The do_for_each_event_file() is
@@ -2396,16 +2400,12 @@ static void trace_module_add_events(struct module *mod)
 static void trace_module_remove_events(struct module *mod)
 {
 	struct trace_event_call *call, *p;
-	bool clear_trace = false;
 
 	down_write(&trace_event_sem);
 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
-		if (call->mod == mod) {
-			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
-				clear_trace = true;
+		if (call->mod == mod)
 			__trace_remove_event_call(call);
-		}
 	}
 	up_write(&trace_event_sem);
 
 	/*
@@ -2416,7 +2416,6 @@ static void trace_module_remove_events(struct module *mod)
 	 * over from this module may be passed to the new module events and
 	 * unexpected results may occur.
 	 */
-	if (clear_trace)
-		tracing_reset_all_online_cpus();
+	tracing_reset_all_online_cpus();
 }

kernel/trace/trace_selftest.c

@@ -273,7 +273,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt)
 		goto out_free;
 	if (cnt > 1) {
 		if (trace_selftest_test_global_cnt == 0)
-			goto out;
+			goto out_free;
 	}
 	if (trace_selftest_test_dyn_cnt == 0)
 		goto out_free;
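
The selftest fix is a plain error-path labeling bug: jumping to out instead of
out_free skipped releasing the dynamically registered ops. A generic sketch of
the goto-cleanup idiom involved (hypothetical names, not the selftest's actual
code):

    #include <stdlib.h>

    struct dyn_ops { int registered; };

    static struct dyn_ops *register_dyn_ops(void)
    {
    	return calloc(1, sizeof(struct dyn_ops));
    }

    static int run_selftest(int cnt)
    {
    	int ret = -1;
    	struct dyn_ops *ops = register_dyn_ops();

    	if (!ops)
    		goto out;		/* nothing allocated yet */

    	if (cnt > 1)
    		goto out_free;		/* every later failure must unwind */

    	ret = 0;
     out_free:
    	free(ops);			/* jumping to "out" instead would leak */
     out:
    	return ret;
    }

    int main(void)
    {
    	return run_selftest(2);
    }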