tracing: Consolidate buffer allocation code
There's a bit of duplicate code in creating the trace buffers for the normal trace buffer and the max trace buffer among the instances and the main global_trace. This code can be consolidated and cleaned up a bit, making the code cleaner and more readable while reducing duplication.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 45ad21ca55
commit 737223fbca
1 changed file with 63 additions and 67 deletions
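The consolidation follows a common kernel idiom: allocate each piece in order, jump to a single unwind label on the first failure, and make the unwind safe to run no matter how far allocation got. Below is a minimal user-space sketch of that idiom; all names are illustrative stand-ins (malloc/calloc/free in place of ring_buffer_alloc(), alloc_percpu() and their frees), not the kernel APIs the patch uses.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-ins for the kernel's trace_buffer/trace_array. */
struct demo_buffer {
	void *buffer;	/* stands in for the ring buffer */
	void *data;	/* stands in for the per-cpu data */
};

struct demo_array {
	struct demo_buffer trace_buffer;
	struct demo_buffer max_buffer;
};

/*
 * One helper allocates everything; on the first failure it jumps to a
 * single unwind label that frees whatever was already allocated.
 * Zeroing up front makes the unwind safe at any point, since free(NULL)
 * is a no-op (the patch instead guards ring_buffer_free() with a NULL
 * check, while free_percpu() tolerates NULL).
 */
static int demo_allocate_buffers(struct demo_array *tr, size_t size)
{
	memset(tr, 0, sizeof(*tr));

	tr->trace_buffer.buffer = malloc(size);
	if (!tr->trace_buffer.buffer)
		goto out_free;

	tr->trace_buffer.data = calloc(1, size);
	if (!tr->trace_buffer.data)
		goto out_free;

	/* Like the patch's max buffer: start tiny, resize on demand. */
	tr->max_buffer.buffer = malloc(1);
	if (!tr->max_buffer.buffer)
		goto out_free;

	tr->max_buffer.data = calloc(1, size);
	if (!tr->max_buffer.data)
		goto out_free;

	return 0;

out_free:
	free(tr->trace_buffer.buffer);
	free(tr->trace_buffer.data);
	free(tr->max_buffer.buffer);
	free(tr->max_buffer.data);
	return -1;	/* the patch returns -ENOMEM */
}

int main(void)
{
	struct demo_array tr;

	if (demo_allocate_buffers(&tr, 4096) < 0) {
		fprintf(stderr, "demo: allocation failed\n");
		return 1;
	}
	puts("demo: all buffers allocated through one helper");
	return 0;
}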
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3171,6 +3171,7 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
 {
 	int cpu;
+
 	for_each_tracing_cpu(cpu)
 		per_cpu_ptr(buf->data, cpu)->entries = val;
 }
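For context: set_buffer_entries() is the pre-existing per-cpu sizing helper; the only change in this hunk is whitespace, and the new allocate_trace_buffers() below reuses it to size both the normal and the max buffer.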
@@ -5267,12 +5268,70 @@ struct dentry *trace_instance_dir;
 static void
 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
 
-static int new_instance_create(const char *name)
+static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
+{
+	int cpu;
+
+	for_each_tracing_cpu(cpu) {
+		memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
+		per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
+		per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
+	}
+}
+
+static int allocate_trace_buffers(struct trace_array *tr, int size)
 {
 	enum ring_buffer_flags rb_flags;
+
+	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
+
+	tr->trace_buffer.buffer = ring_buffer_alloc(size, rb_flags);
+	if (!tr->trace_buffer.buffer)
+		goto out_free;
+
+	tr->trace_buffer.data = alloc_percpu(struct trace_array_cpu);
+	if (!tr->trace_buffer.data)
+		goto out_free;
+
+	init_trace_buffers(tr, &tr->trace_buffer);
+
+	/* Allocate the first page for all buffers */
+	set_buffer_entries(&tr->trace_buffer,
+			   ring_buffer_size(tr->trace_buffer.buffer, 0));
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+
+	tr->max_buffer.buffer = ring_buffer_alloc(1, rb_flags);
+	if (!tr->max_buffer.buffer)
+		goto out_free;
+
+	tr->max_buffer.data = alloc_percpu(struct trace_array_cpu);
+	if (!tr->max_buffer.data)
+		goto out_free;
+
+	init_trace_buffers(tr, &tr->max_buffer);
+
+	set_buffer_entries(&tr->max_buffer, 1);
+#endif
+	return 0;
+
+ out_free:
+	if (tr->trace_buffer.buffer)
+		ring_buffer_free(tr->trace_buffer.buffer);
+	free_percpu(tr->trace_buffer.data);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+	if (tr->max_buffer.buffer)
+		ring_buffer_free(tr->max_buffer.buffer);
+	free_percpu(tr->max_buffer.data);
+#endif
+	return -ENOMEM;
+}
+
+static int new_instance_create(const char *name)
+{
 	struct trace_array *tr;
 	int ret;
-	int i;
 
 	mutex_lock(&trace_types_lock);
 
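With allocate_trace_buffers() in place, each call site shrinks to one guarded call and keeps only its own cleanup label; the helper frees its partial allocations itself before returning -ENOMEM. As the next hunk shows, instance creation becomes simply:

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;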
@@ -5298,22 +5357,9 @@ static int new_instance_create(const char *name)
 	INIT_LIST_HEAD(&tr->systems);
 	INIT_LIST_HEAD(&tr->events);
 
-	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
-
-	tr->trace_buffer.buffer = ring_buffer_alloc(trace_buf_size, rb_flags);
-	if (!tr->trace_buffer.buffer)
+	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
 		goto out_free_tr;
 
-	tr->trace_buffer.data = alloc_percpu(struct trace_array_cpu);
-	if (!tr->trace_buffer.data)
-		goto out_free_tr;
-
-	for_each_tracing_cpu(i) {
-		memset(per_cpu_ptr(tr->trace_buffer.data, i), 0, sizeof(struct trace_array_cpu));
-		per_cpu_ptr(tr->trace_buffer.data, i)->trace_cpu.cpu = i;
-		per_cpu_ptr(tr->trace_buffer.data, i)->trace_cpu.tr = tr;
-	}
-
 	/* Holder for file callbacks */
 	tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
 	tr->trace_cpu.tr = tr;
@@ -5736,8 +5782,6 @@ EXPORT_SYMBOL_GPL(ftrace_dump);
 __init static int tracer_alloc_buffers(void)
 {
 	int ring_buf_size;
-	enum ring_buffer_flags rb_flags;
-	int i;
 	int ret = -ENOMEM;
 
 
@@ -5758,69 +5802,21 @@ __init static int tracer_alloc_buffers(void)
 	else
 		ring_buf_size = 1;
 
-	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
-
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 	cpumask_copy(tracing_cpumask, cpu_all_mask);
 
 	raw_spin_lock_init(&global_trace.start_lock);
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
-	global_trace.trace_buffer.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
-	if (!global_trace.trace_buffer.buffer) {
+	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
 		WARN_ON(1);
 		goto out_free_cpumask;
 	}
 
-	global_trace.trace_buffer.data = alloc_percpu(struct trace_array_cpu);
-
-	if (!global_trace.trace_buffer.data) {
-		printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
-		WARN_ON(1);
-		goto out_free_cpumask;
-	}
-
-	for_each_tracing_cpu(i) {
-		memset(per_cpu_ptr(global_trace.trace_buffer.data, i), 0,
-		       sizeof(struct trace_array_cpu));
-		per_cpu_ptr(global_trace.trace_buffer.data, i)->trace_cpu.cpu = i;
-		per_cpu_ptr(global_trace.trace_buffer.data, i)->trace_cpu.tr = &global_trace;
-	}
-
 	if (global_trace.buffer_disabled)
 		tracing_off();
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	global_trace.max_buffer.data = alloc_percpu(struct trace_array_cpu);
-	if (!global_trace.max_buffer.data) {
-		printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
-		WARN_ON(1);
-		goto out_free_cpumask;
-	}
-	global_trace.max_buffer.buffer = ring_buffer_alloc(1, rb_flags);
-	if (!global_trace.max_buffer.buffer) {
-		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
-		WARN_ON(1);
-		ring_buffer_free(global_trace.trace_buffer.buffer);
-		goto out_free_cpumask;
-	}
-
-	for_each_tracing_cpu(i) {
-		memset(per_cpu_ptr(global_trace.max_buffer.data, i), 0,
-		       sizeof(struct trace_array_cpu));
-		per_cpu_ptr(global_trace.max_buffer.data, i)->trace_cpu.cpu = i;
-		per_cpu_ptr(global_trace.max_buffer.data, i)->trace_cpu.tr = &global_trace;
-	}
-#endif
-
-	/* Allocate the first page for all buffers */
-	set_buffer_entries(&global_trace.trace_buffer,
-			   ring_buffer_size(global_trace.trace_buffer.buffer, 0));
-#ifdef CONFIG_TRACER_MAX_TRACE
-	set_buffer_entries(&global_trace.max_buffer, 1);
-#endif
-
 	trace_init_cmdlines();
 
 	register_tracer(&nop_trace);