tracing: Add snapshot refcount

When a ring-buffer is memory mapped by user-space, no trace or
ring-buffer swap is possible. This means the snapshot feature is
mutually exclusive with the memory mapping. Keeping a refcount of
snapshot users makes it possible to tell whether a mapping is allowed.

Instead of relying on the global trace_types_lock, a new spinlock is
introduced to serialize accesses to trace_array->snapshot. This allows
that variable to be accessed in a context where the mmap lock is
already held.
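
For illustration only (not part of the patch): a snapshot user would bracket
its use of the snapshot buffer with the new helpers roughly as below. The
caller names are hypothetical; tracing_arm_snapshot()/tracing_disarm_snapshot()
and their semantics are the ones introduced by this change.

/* Hypothetical caller; assumes kernel/trace/trace.h is included. */
static int example_snapshot_user_enable(struct trace_array *tr)
{
	int ret;

	/*
	 * Take a snapshot reference: allocates the snapshot buffer if
	 * needed and increments tr->snapshot under snapshot_trigger_lock.
	 * Fails with -EBUSY once the refcount saturates (UINT_MAX).
	 */
	ret = tracing_arm_snapshot(tr);
	if (ret)
		return ret;

	/* ... arm whatever consumes the snapshot buffer ... */

	return 0;
}

static void example_snapshot_user_disable(struct trace_array *tr)
{
	/* Drop the reference; WARNs if the count is already zero. */
	tracing_disarm_snapshot(tr);
}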

Link: https://lore.kernel.org/linux-trace-kernel/20240220202310.2489614-4-vdonnefort@google.com

Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
commit 180e4e3909, parent b70f293824
3 changed files with 129 additions and 36 deletions

kernel/trace/trace.c

@@ -1300,6 +1300,50 @@ static void free_snapshot(struct trace_array *tr)
tr->allocated_snapshot = false;
}
static int tracing_arm_snapshot_locked(struct trace_array *tr)
{
int ret;
lockdep_assert_held(&trace_types_lock);
spin_lock(&tr->snapshot_trigger_lock);
if (tr->snapshot == UINT_MAX) {
spin_unlock(&tr->snapshot_trigger_lock);
return -EBUSY;
}
tr->snapshot++;
spin_unlock(&tr->snapshot_trigger_lock);
ret = tracing_alloc_snapshot_instance(tr);
if (ret) {
spin_lock(&tr->snapshot_trigger_lock);
tr->snapshot--;
spin_unlock(&tr->snapshot_trigger_lock);
}
return ret;
}
int tracing_arm_snapshot(struct trace_array *tr)
{
int ret;
mutex_lock(&trace_types_lock);
ret = tracing_arm_snapshot_locked(tr);
mutex_unlock(&trace_types_lock);
return ret;
}
void tracing_disarm_snapshot(struct trace_array *tr)
{
spin_lock(&tr->snapshot_trigger_lock);
if (!WARN_ON(!tr->snapshot))
tr->snapshot--;
spin_unlock(&tr->snapshot_trigger_lock);
}
/**
* tracing_alloc_snapshot - allocate snapshot buffer.
*
@@ -1373,10 +1417,6 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
mutex_lock(&trace_types_lock);
ret = tracing_alloc_snapshot_instance(tr);
if (ret)
goto fail_unlock;
if (tr->current_trace->use_max_tr) {
ret = -EBUSY;
goto fail_unlock;
@@ -1395,6 +1435,10 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
goto fail_unlock;
}
ret = tracing_arm_snapshot_locked(tr);
if (ret)
goto fail_unlock;
local_irq_disable();
arch_spin_lock(&tr->max_lock);
tr->cond_snapshot = cond_snapshot;
@@ -1439,6 +1483,8 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
arch_spin_unlock(&tr->max_lock);
local_irq_enable();
tracing_disarm_snapshot(tr);
return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
@@ -1481,6 +1527,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#define free_snapshot(tr) do { } while (0)
#define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
#endif /* CONFIG_TRACER_SNAPSHOT */
void tracer_tracing_off(struct trace_array *tr)
@@ -6112,11 +6159,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
*/
synchronize_rcu();
free_snapshot(tr);
tracing_disarm_snapshot(tr);
}
if (t->use_max_tr && !tr->allocated_snapshot) {
ret = tracing_alloc_snapshot_instance(tr);
if (ret < 0)
if (t->use_max_tr) {
ret = tracing_arm_snapshot_locked(tr);
if (ret)
goto out;
}
#else
@@ -6125,8 +6173,13 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (t->init) {
ret = tracer_init(t, tr);
if (ret)
if (ret) {
#ifdef CONFIG_TRACER_MAX_TRACE
if (t->use_max_tr)
tracing_disarm_snapshot(tr);
#endif
goto out;
}
}
tr->current_trace = t;
@@ -7228,10 +7281,11 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (tr->allocated_snapshot)
ret = resize_buffer_duplicate_size(&tr->max_buffer,
&tr->array_buffer, iter->cpu_file);
else
ret = tracing_alloc_snapshot_instance(tr);
if (ret < 0)
ret = tracing_arm_snapshot_locked(tr);
if (ret)
break;
/* Now, we're going to swap */
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
local_irq_disable();
@@ -7241,6 +7295,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
(void *)tr, 1);
}
tracing_disarm_snapshot(tr);
break;
default:
if (tr->allocated_snapshot) {
@@ -8372,8 +8427,13 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
if (glob[0] == '!')
return unregister_ftrace_function_probe_func(glob+1, tr, ops);
if (glob[0] == '!') {
ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
if (!ret)
tracing_disarm_snapshot(tr);
return ret;
}
if (!param)
goto out_reg;
@@ -8392,12 +8452,13 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
return ret;
out_reg:
ret = tracing_alloc_snapshot_instance(tr);
ret = tracing_arm_snapshot(tr);
if (ret < 0)
goto out;
ret = register_ftrace_function_probe(glob, tr, ops, count);
if (ret < 0)
tracing_disarm_snapshot(tr);
out:
return ret < 0 ? ret : 0;
}
@@ -9204,7 +9265,9 @@ trace_array_create_systems(const char *name, const char *systems)
raw_spin_lock_init(&tr->start_lock);
tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_TRACER_MAX_TRACE
spin_lock_init(&tr->snapshot_trigger_lock);
#endif
tr->current_trace = &nop_trace;
INIT_LIST_HEAD(&tr->systems);
@@ -10174,7 +10237,9 @@ __init static int tracer_alloc_buffers(void)
global_trace.current_trace = &nop_trace;
global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_TRACER_MAX_TRACE
spin_lock_init(&global_trace.snapshot_trigger_lock);
#endif
ftrace_init_global_array_ops(&global_trace);
init_trace_flags_index(&global_trace);

kernel/trace/trace.h

@@ -334,8 +334,8 @@ struct trace_array {
*/
struct array_buffer max_buffer;
bool allocated_snapshot;
#endif
#ifdef CONFIG_TRACER_MAX_TRACE
spinlock_t snapshot_trigger_lock;
unsigned int snapshot;
unsigned long max_latency;
#ifdef CONFIG_FSNOTIFY
struct dentry *d_max_latency;
@@ -1983,12 +1983,16 @@ static inline void trace_event_eval_update(struct trace_eval_map **map, int len)
#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr);
int tracing_alloc_snapshot_instance(struct trace_array *tr);
int tracing_arm_snapshot(struct trace_array *tr);
void tracing_disarm_snapshot(struct trace_array *tr);
#else
static inline void tracing_snapshot_instance(struct trace_array *tr) { }
static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
return 0;
}
static inline int tracing_arm_snapshot(struct trace_array *tr) { return 0; }
static inline void tracing_disarm_snapshot(struct trace_array *tr) { }
#endif
#ifdef CONFIG_PREEMPT_TRACER

kernel/trace/trace_events_trigger.c

@@ -597,20 +597,12 @@ out:
return ret;
}
/**
* unregister_trigger - Generic event_command @unreg implementation
* @glob: The raw string used to register the trigger
* @test: Trigger-specific data used to find the trigger to remove
* @file: The trace_event_file associated with the event
*
* Common implementation for event trigger unregistration.
*
* Usually used directly as the @unreg method in event command
* implementations.
/*
* True if the trigger was found and unregistered, else false.
*/
static void unregister_trigger(char *glob,
struct event_trigger_data *test,
struct trace_event_file *file)
static bool try_unregister_trigger(char *glob,
struct event_trigger_data *test,
struct trace_event_file *file)
{
struct event_trigger_data *data = NULL, *iter;
@@ -626,8 +618,32 @@ static void unregister_trigger(char *glob,
}
}
if (data && data->ops->free)
data->ops->free(data);
if (data) {
if (data->ops->free)
data->ops->free(data);
return true;
}
return false;
}
/**
* unregister_trigger - Generic event_command @unreg implementation
* @glob: The raw string used to register the trigger
* @test: Trigger-specific data used to find the trigger to remove
* @file: The trace_event_file associated with the event
*
* Common implementation for event trigger unregistration.
*
* Usually used directly as the @unreg method in event command
* implementations.
*/
static void unregister_trigger(char *glob,
struct event_trigger_data *test,
struct trace_event_file *file)
{
try_unregister_trigger(glob, test, file);
}
/*
@@ -1470,7 +1486,7 @@ register_snapshot_trigger(char *glob,
struct event_trigger_data *data,
struct trace_event_file *file)
{
int ret = tracing_alloc_snapshot_instance(file->tr);
int ret = tracing_arm_snapshot(file->tr);
if (ret < 0)
return ret;
@@ -1478,6 +1494,14 @@ register_snapshot_trigger(char *glob,
return register_trigger(glob, data, file);
}
static void unregister_snapshot_trigger(char *glob,
struct event_trigger_data *data,
struct trace_event_file *file)
{
if (try_unregister_trigger(glob, data, file))
tracing_disarm_snapshot(file->tr);
}
static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_data *data)
{
@@ -1510,7 +1534,7 @@ static struct event_command trigger_snapshot_cmd = {
.trigger_type = ETT_SNAPSHOT,
.parse = event_trigger_parse,
.reg = register_snapshot_trigger,
.unreg = unregister_trigger,
.unreg = unregister_snapshot_trigger,
.get_trigger_ops = snapshot_get_trigger_ops,
.set_filter = set_trigger_filter,
};