libperf: Adopt perf_mmap__read_event() from tools/perf

Move perf_mmap__read_event() from tools/perf to libperf and export it in
the perf/mmap.h header.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20191007125344.14268-13-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Jiri Olsa 2019-10-07 14:53:20 +02:00 committed by Arnaldo Carvalho de Melo
parent 32fdc2ca7e
commit 151ed5d70d
21 changed files with 98 additions and 95 deletions
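
With perf_mmap__read_event() exported from perf/mmap.h, a libperf user can drive the whole read cycle through the public API. A minimal sketch of such a consumer loop (illustration only; the drain() helper below is hypothetical and not part of this commit):

#include <perf/mmap.h>
#include <perf/event.h>

/* Drain one ring buffer using only the exported libperf mmap API. */
static void drain(struct perf_mmap *map)
{
	union perf_event *event;

	if (perf_mmap__read_init(map) < 0)
		return;		/* no data or error */

	while ((event = perf_mmap__read_event(map)) != NULL) {
		/* process the event here, then mark it consumed */
		perf_mmap__consume(map);
	}

	perf_mmap__read_done(map);
}

This mirrors the Usage comment carried over with the moved code.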


@@ -121,7 +121,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
 if (perf_mmap__read_init(&md->core) < 0)
 continue;
-while ((event = perf_mmap__read_event(md)) != NULL) {
+while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 struct perf_sample sample;
 if (event->header.type != PERF_RECORD_COMM ||


@@ -764,7 +764,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
 if (err < 0)
 return (err == -EAGAIN) ? 0 : -1;
-while ((event = perf_mmap__read_event(md)) != NULL) {
+while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 err = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
 if (err) {
 perf_mmap__consume(&md->core);


@@ -873,7 +873,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 if (perf_mmap__read_init(&md->core) < 0)
 return;
-while ((event = perf_mmap__read_event(md)) != NULL) {
+while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 int ret;
 ret = perf_evlist__parse_sample_timestamp(evlist, event, &last_timestamp);


@@ -3804,7 +3804,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 if (perf_mmap__read_init(&md->core) < 0)
 continue;
-while ((event = perf_mmap__read_event(md)) != NULL) {
+while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 ++trace->nr_events;
 err = trace__deliver_event(trace, event);


@@ -5,9 +5,11 @@
 #include <perf/core.h>
 struct perf_mmap;
+union perf_event;
 LIBPERF_API void perf_mmap__consume(struct perf_mmap *map);
 LIBPERF_API int perf_mmap__read_init(struct perf_mmap *map);
 LIBPERF_API void perf_mmap__read_done(struct perf_mmap *map);
+LIBPERF_API union perf_event *perf_mmap__read_event(struct perf_mmap *map);
 #endif /* __LIBPERF_MMAP_H */


@@ -43,6 +43,7 @@ LIBPERF_0.0.1 {
 perf_mmap__consume;
 perf_mmap__read_init;
 perf_mmap__read_done;
+perf_mmap__read_event;
 local:
 *;
 };


@@ -3,9 +3,11 @@
 #include <inttypes.h>
 #include <asm/bug.h>
 #include <errno.h>
+#include <string.h>
 #include <linux/ring_buffer.h>
 #include <linux/perf_event.h>
 #include <perf/mmap.h>
+#include <perf/event.h>
 #include <internal/mmap.h>
 #include <internal/lib.h>
 #include <linux/kernel.h>
@@ -192,3 +194,80 @@ void perf_mmap__read_done(struct perf_mmap *map)
 map->prev = perf_mmap__read_head(map);
 }
+/* When check_messup is true, 'end' must points to a good entry */
+static union perf_event *perf_mmap__read(struct perf_mmap *map,
+u64 *startp, u64 end)
+{
+unsigned char *data = map->base + page_size;
+union perf_event *event = NULL;
+int diff = end - *startp;
+if (diff >= (int)sizeof(event->header)) {
+size_t size;
+event = (union perf_event *)&data[*startp & map->mask];
+size = event->header.size;
+if (size < sizeof(event->header) || diff < (int)size)
+return NULL;
+/*
+ * Event straddles the mmap boundary -- header should always
+ * be inside due to u64 alignment of output.
+ */
+if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
+unsigned int offset = *startp;
+unsigned int len = min(sizeof(*event), size), cpy;
+void *dst = map->event_copy;
+do {
+cpy = min(map->mask + 1 - (offset & map->mask), len);
+memcpy(dst, &data[offset & map->mask], cpy);
+offset += cpy;
+dst += cpy;
+len -= cpy;
+} while (len);
+event = (union perf_event *)map->event_copy;
+}
+*startp += size;
+}
+return event;
+}
+/*
+ * Read event from ring buffer one by one.
+ * Return one event for each call.
+ *
+ * Usage:
+ * perf_mmap__read_init()
+ * while(event = perf_mmap__read_event()) {
+ * //process the event
+ * perf_mmap__consume()
+ * }
+ * perf_mmap__read_done()
+ */
+union perf_event *perf_mmap__read_event(struct perf_mmap *map)
+{
+union perf_event *event;
+/*
+ * Check if event was unmapped due to a POLLHUP/POLLERR.
+ */
+if (!refcount_read(&map->refcnt))
+return NULL;
+/* non-overwirte doesn't pause the ringbuffer */
+if (!map->overwrite)
+map->end = perf_mmap__read_head(map);
+event = perf_mmap__read(map, &map->start, map->end);
+if (!map->overwrite)
+map->prev = map->start;
+return event;
+}


@@ -39,7 +39,7 @@ static int count_samples(struct evlist *evlist, int *sample_count,
 union perf_event *event;
 perf_mmap__read_init(&map->core);
-while ((event = perf_mmap__read_event(map)) != NULL) {
+while ((event = perf_mmap__read_event(&map->core)) != NULL) {
 const u32 type = event->header.type;
 switch (type) {


@@ -188,7 +188,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
 if (perf_mmap__read_init(&md->core) < 0)
 continue;
-while ((event = perf_mmap__read_event(md)) != NULL) {
+while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 const u32 type = event->header.type;
 if (type == PERF_RECORD_SAMPLE)


@@ -429,7 +429,7 @@ static int process_events(struct machine *machine, struct evlist *evlist,
 if (perf_mmap__read_init(&md->core) < 0)
 continue;
-while ((event = perf_mmap__read_event(md)) != NULL) {
+while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 ret = process_event(machine, evlist, event, state);
 perf_mmap__consume(&md->core);
 if (ret < 0)


@@ -41,7 +41,7 @@ static int find_comm(struct evlist *evlist, const char *comm)
 md = &evlist->mmap[i];
 if (perf_mmap__read_init(&md->core) < 0)
 continue;
-while ((event = perf_mmap__read_event(md)) != NULL) {
+while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 if (event->header.type == PERF_RECORD_COMM &&
 (pid_t)event->comm.pid == getpid() &&
 (pid_t)event->comm.tid == getpid() &&


@@ -117,7 +117,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
 if (perf_mmap__read_init(&md->core) < 0)
 goto out_init;
-while ((event = perf_mmap__read_event(md)) != NULL) {
+while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 struct perf_sample sample;
 if (event->header.type != PERF_RECORD_SAMPLE) {


@@ -96,7 +96,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
 if (perf_mmap__read_init(&md->core) < 0)
 continue;
-while ((event = perf_mmap__read_event(md)) != NULL) {
+while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 const u32 type = event->header.type;
 int tp_flags;
 struct perf_sample sample;


@@ -174,7 +174,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
 if (perf_mmap__read_init(&md->core) < 0)
 continue;
-while ((event = perf_mmap__read_event(md)) != NULL) {
+while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 const u32 type = event->header.type;
 const char *name = perf_event__name(type);


@@ -103,7 +103,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 if (perf_mmap__read_init(&md->core) < 0)
 goto out_init;
-while ((event = perf_mmap__read_event(md)) != NULL) {
+while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 struct perf_sample sample;
 if (event->header.type != PERF_RECORD_SAMPLE)


@@ -273,7 +273,7 @@ static int process_events(struct evlist *evlist,
 if (perf_mmap__read_init(&md->core) < 0)
 continue;
-while ((event = perf_mmap__read_event(md)) != NULL) {
+while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 cnt += 1;
 ret = add_event(evlist, &events, event);
 perf_mmap__consume(&md->core);


@@ -121,7 +121,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
 if (perf_mmap__read_init(&md->core) < 0)
 goto out_init;
-while ((event = perf_mmap__read_event(md)) != NULL) {
+while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 if (event->header.type == PERF_RECORD_EXIT)
 nr_exit++;


@@ -1811,7 +1811,7 @@ static void *perf_evlist__poll_thread(void *arg)
 if (perf_mmap__read_init(&map->core))
 continue;
-while ((event = perf_mmap__read_event(map)) != NULL) {
+while ((event = perf_mmap__read_event(&map->core)) != NULL) {
 struct evsel *evsel = perf_evlist__event2evsel(evlist, event);
 if (evsel && evsel->side_band.cb)


@@ -29,83 +29,6 @@ size_t mmap__mmap_len(struct mmap *map)
 return perf_mmap__mmap_len(&map->core);
 }
-/* When check_messup is true, 'end' must points to a good entry */
-static union perf_event *perf_mmap__read(struct mmap *map,
-u64 *startp, u64 end)
-{
-unsigned char *data = map->core.base + page_size;
-union perf_event *event = NULL;
-int diff = end - *startp;
-if (diff >= (int)sizeof(event->header)) {
-size_t size;
-event = (union perf_event *)&data[*startp & map->core.mask];
-size = event->header.size;
-if (size < sizeof(event->header) || diff < (int)size)
-return NULL;
-/*
- * Event straddles the mmap boundary -- header should always
- * be inside due to u64 alignment of output.
- */
-if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) {
-unsigned int offset = *startp;
-unsigned int len = min(sizeof(*event), size), cpy;
-void *dst = map->core.event_copy;
-do {
-cpy = min(map->core.mask + 1 - (offset & map->core.mask), len);
-memcpy(dst, &data[offset & map->core.mask], cpy);
-offset += cpy;
-dst += cpy;
-len -= cpy;
-} while (len);
-event = (union perf_event *)map->core.event_copy;
-}
-*startp += size;
-}
-return event;
-}
-/*
- * Read event from ring buffer one by one.
- * Return one event for each call.
- *
- * Usage:
- * perf_mmap__read_init()
- * while(event = perf_mmap__read_event()) {
- * //process the event
- * perf_mmap__consume()
- * }
- * perf_mmap__read_done()
- */
-union perf_event *perf_mmap__read_event(struct mmap *map)
-{
-union perf_event *event;
-/*
- * Check if event was unmapped due to a POLLHUP/POLLERR.
- */
-if (!refcount_read(&map->core.refcnt))
-return NULL;
-/* non-overwirte doesn't pause the ringbuffer */
-if (!map->core.overwrite)
-map->core.end = perf_mmap__read_head(&map->core);
-event = perf_mmap__read(map, &map->core.start, map->core.end);
-if (!map->core.overwrite)
-map->core.prev = map->core.start;
-return event;
-}
 int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
 struct auxtrace_mmap_params *mp __maybe_unused,
 void *userpg __maybe_unused,


@@ -47,8 +47,6 @@ void mmap__munmap(struct mmap *map);
 union perf_event *perf_mmap__read_forward(struct mmap *map);
-union perf_event *perf_mmap__read_event(struct mmap *map);
 int perf_mmap__push(struct mmap *md, void *to,
 int push(struct mmap *map, void *to, void *buf, size_t size));


@@ -1026,7 +1026,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
 if (perf_mmap__read_init(&md->core) < 0)
 goto end;
-event = perf_mmap__read_event(md);
+event = perf_mmap__read_event(&md->core);
 if (event != NULL) {
 PyObject *pyevent = pyrf_event__new(event);
 struct pyrf_event *pevent = (struct pyrf_event *)pyevent;