linux-stable/tools/perf/util/ordered-events.h

#ifndef __ORDERED_EVENTS_H
#define __ORDERED_EVENTS_H

#include <linux/types.h>

struct perf_sample;

struct ordered_event {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

enum oe_flush {
	OE_FLUSH__NONE,
	OE_FLUSH__FINAL,	/* flush all queued events */
	OE_FLUSH__ROUND,	/* flush events up to the last finished round */
	OE_FLUSH__HALF,		/* flush the older half of the queue */
};

struct ordered_events;

/* deliver callback: called for each event as it is flushed, in timestamp order */
typedef int (*ordered_events__deliver_t)(struct ordered_events *oe,
					 struct ordered_event *event);

struct ordered_events {
	u64			last_flush;
	u64			next_flush;
	u64			max_timestamp;
	u64			max_alloc_size;
	u64			cur_alloc_size;
	struct list_head	events;
	struct list_head	cache;
	struct list_head	to_free;
	struct ordered_event	*buffer;
	struct ordered_event	*last;
	ordered_events__deliver_t deliver;
	int			buffer_idx;
	unsigned int		nr_events;
	enum oe_flush		last_flush_type;
	u32			nr_unordered_events;
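	/* copy events when queueing so live sessions do not reference overwritten mmap data */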
	bool			copy_on_queue;
};

int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  struct perf_sample *sample, u64 file_offset);
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how);
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver);
void ordered_events__free(struct ordered_events *oe);
void ordered_events__reinit(struct ordered_events *oe);
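
/*
 * Rough usage sketch for a consumer of this API (illustrative only;
 * deliver_cb, process_one() and read_next_event() are hypothetical
 * names, not part of perf):
 *
 *	static int deliver_cb(struct ordered_events *oe,
 *			      struct ordered_event *event)
 *	{
 *		return process_one(event->event, event->timestamp);
 *	}
 *
 *	struct ordered_events oe;
 *	union perf_event *event;
 *	struct perf_sample sample;
 *	u64 offset;
 *
 *	ordered_events__init(&oe, deliver_cb);
 *	ordered_events__set_copy_on_queue(&oe, true);	(live sessions only)
 *	while (read_next_event(&event, &sample, &offset) == 0)
 *		ordered_events__queue(&oe, event, &sample, offset);
 *	ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *	ordered_events__free(&oe);
 *
 * A long-running session would also call
 * ordered_events__flush(&oe, OE_FLUSH__ROUND) periodically instead of
 * queueing everything before the final flush.
 */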

static inline
void ordered_events__set_alloc_size(struct ordered_events *oe, u64 size)
{
	oe->max_alloc_size = size;
}

static inline
void ordered_events__set_copy_on_queue(struct ordered_events *oe, bool copy)
{
	oe->copy_on_queue = copy;
}
#endif /* __ORDERED_EVENTS_H */