Add libperf's version of the perf_evlist__mmap()/perf_evlist__munmap()
functions and export them in the perf/evlist.h header.

This mmap code is the backbone of what we have in the perf code. The
following changes add the needed callbacks, and then we finally switch
the perf code over to libperf's version.

Also add 'mmap' and 'mmap_ovw' arrays of 'struct perf_mmap' pointers to
the evlist to hold the normal and overwrite maps for libperf.

Link: http://lkml.kernel.org/n/tip-smr1w2e6j37ncbmsd1eet...@git.kernel.org
Signed-off-by: Jiri Olsa <jo...@kernel.org>
---
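For context, a minimal usage sketch (not part of the patch) of how a
libperf user could drive the new interface. It leans on the already
exported calls (perf_evlist__new/add/set_maps/open/close, perf_evsel__new,
perf_thread_map__new_dummy/set_pid) and the perf/evlist.h, perf/evsel.h,
perf/threadmap.h and linux/perf_event.h headers; the software event and
the 4-page ring buffer size are only illustrative, and reading events is
left out since the maps are not exposed through the public API yet:

    struct perf_event_attr attr = {
            .type   = PERF_TYPE_SOFTWARE,
            .config = PERF_COUNT_SW_CPU_CLOCK,
    };
    struct perf_thread_map *threads = perf_thread_map__new_dummy();
    struct perf_evlist *evlist = perf_evlist__new();
    struct perf_evsel *evsel = perf_evsel__new(&attr);

    perf_thread_map__set_pid(threads, 0, 0);   /* measure this process */
    perf_evlist__add(evlist, evsel);
    perf_evlist__set_maps(evlist, NULL, threads);

    if (!perf_evlist__open(evlist) &&
        !perf_evlist__mmap(evlist, 4)) {       /* 4 data pages per map */
            /* ... read events from the ring buffers ... */
            perf_evlist__munmap(evlist);
    }

    perf_evlist__close(evlist);
    perf_evlist__delete(evlist);
    perf_thread_map__put(threads);
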
 tools/perf/lib/evlist.c                  | 215 +++++++++++++++++++++++
 tools/perf/lib/include/internal/evlist.h |   2 +
 tools/perf/lib/include/perf/evlist.h     |   3 +
 tools/perf/lib/libperf.map               |   2 +
 4 files changed, 222 insertions(+)

diff --git a/tools/perf/lib/evlist.c b/tools/perf/lib/evlist.c
index 160393cb9bed..168bc5d0a1c8 100644
--- a/tools/perf/lib/evlist.c
+++ b/tools/perf/lib/evlist.c
@@ -7,13 +7,20 @@
 #include <internal/evlist.h>
 #include <internal/evsel.h>
 #include <internal/xyarray.h>
+#include <internal/mmap.h>
+#include <internal/cpumap.h>
+#include <internal/threadmap.h>
+#include <internal/xyarray.h>
+#include <internal/lib.h>
 #include <linux/zalloc.h>
+#include <sys/ioctl.h>
 #include <stdlib.h>
 #include <errno.h>
 #include <unistd.h>
 #include <fcntl.h>
 #include <signal.h>
 #include <poll.h>
+#include <sys/mman.h>
 #include <perf/cpumap.h>
 #include <perf/threadmap.h>
 
@@ -101,6 +108,8 @@ perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
 
 void perf_evlist__delete(struct perf_evlist *evlist)
 {
+       free(evlist->mmap);
+       free(evlist->mmap_ovw);
        free(evlist);
 }
 
@@ -279,3 +288,209 @@ int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
 {
        return fdarray__poll(&evlist->pollfd, timeout);
 }
+
+static int perf_evlist__alloc_maps(struct perf_evlist *evlist)
+{
+       evlist->nr_mmaps = perf_cpu_map__nr(evlist->cpus);
+       if (perf_cpu_map__empty(evlist->cpus))
+               evlist->nr_mmaps = perf_thread_map__nr(evlist->threads);
+
+       evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap *));
+       if (!evlist->mmap)
+               return -ENOMEM;
+
+       evlist->mmap_ovw = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap *));
+       if (!evlist->mmap_ovw) {
+               zfree(&evlist->mmap);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
+                                    struct perf_evsel *evsel, int idx, int cpu,
+                                    int thread)
+{
+       struct perf_sample_id *sid = SID(evsel, cpu, thread);
+
+       sid->idx = idx;
+       if (evlist->cpus && cpu >= 0)
+               sid->cpu = evlist->cpus->map[cpu];
+       else
+               sid->cpu = -1;
+       if (!evsel->system_wide && evlist->threads && thread >= 0)
+               sid->tid = perf_thread_map__pid(evlist->threads, thread);
+       else
+               sid->tid = -1;
+}
+
+#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+
+static int
+mmap_per_evsel(struct perf_evlist *evlist, int idx,
+              struct perf_mmap_param *mp, int cpu_idx,
+              int thread, int *_output, int *_output_overwrite)
+{
+       int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
+       struct perf_evsel *evsel;
+       int revent;
+
+       perf_evlist__for_each_entry(evlist, evsel) {
+               bool overwrite = evsel->attr.write_backward;
+               struct perf_mmap *map;
+               int *output, fd, cpu;
+
+               if (evsel->system_wide && thread)
+                       continue;
+
+               cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
+               if (cpu == -1)
+                       continue;
+
+               map = perf_mmap__new(overwrite, NULL);
+               if (map == NULL)
+                       return -ENOMEM;
+
+               if (overwrite) {
+                       mp->prot = PROT_READ;
+                       output   = _output_overwrite;
+                       evlist->mmap_ovw[idx] = map;
+               } else {
+                       mp->prot = PROT_READ | PROT_WRITE;
+                       output   = _output;
+                       evlist->mmap[idx] = map;
+               }
+
+               fd = FD(evsel, cpu, thread);
+
+               if (*output == -1) {
+                       *output = fd;
+
+                       if (perf_mmap__mmap(map, mp, *output, evlist_cpu) < 0)
+                               return -1;
+               } else {
+                       if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
+                               return -1;
+
+                       perf_mmap__get(map);
+               }
+
+               revent = !overwrite ? POLLIN : 0;
+
+               if (!evsel->system_wide &&
+                   perf_evlist__add_pollfd(evlist, fd, map, revent) < 0) {
+                       perf_mmap__put(map);
+                       return -1;
+               }
+
+               if (evsel->attr.read_format & PERF_FORMAT_ID) {
+                       if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
+                                                  fd) < 0)
+                               return -1;
+                       perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
+                                                thread);
+               }
+       }
+
+       return 0;
+}
+
+static int
+mmap_per_thread(struct perf_evlist *evlist, struct perf_mmap_param *mp)
+{
+       int thread;
+       int nr_threads = perf_thread_map__nr(evlist->threads);
+
+       for (thread = 0; thread < nr_threads; thread++) {
+               int output = -1;
+               int output_overwrite = -1;
+
+               if (mmap_per_evsel(evlist, thread, mp, 0, thread,
+                                  &output, &output_overwrite))
+                       goto out_unmap;
+       }
+
+       return 0;
+
+out_unmap:
+       perf_evlist__munmap(evlist);
+       return -1;
+}
+
+static int
+mmap_per_cpu(struct perf_evlist *evlist, struct perf_mmap_param *mp)
+{
+       int nr_threads = perf_thread_map__nr(evlist->threads);
+       int nr_cpus    = perf_cpu_map__nr(evlist->cpus);
+       int cpu, thread;
+
+       for (cpu = 0; cpu < nr_cpus; cpu++) {
+               int output = -1;
+               int output_overwrite = -1;
+
+               for (thread = 0; thread < nr_threads; thread++) {
+                       if (mmap_per_evsel(evlist, cpu, mp, cpu,
+                                          thread, &output, &output_overwrite))
+                               goto out_unmap;
+               }
+       }
+
+       return 0;
+
+out_unmap:
+       perf_evlist__munmap(evlist);
+       return -1;
+}
+
+int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
+{
+       struct perf_evsel *evsel;
+       const struct perf_cpu_map *cpus = evlist->cpus;
+       const struct perf_thread_map *threads = evlist->threads;
+       struct perf_mmap_param mp;
+
+       if (!evlist->mmap && perf_evlist__alloc_maps(evlist))
+               return -ENOMEM;
+
+       perf_evlist__for_each_entry(evlist, evsel) {
+               if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+                   evsel->sample_id == NULL &&
+                   perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
+                       return -ENOMEM;
+       }
+
+       evlist->mmap_len = (pages + 1) * page_size;
+       mp.mask = evlist->mmap_len - page_size - 1;
+
+       if (perf_cpu_map__empty(cpus))
+               return mmap_per_thread(evlist, &mp);
+
+       return mmap_per_cpu(evlist, &mp);
+}
+
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+       int i;
+
+       if (evlist->mmap) {
+               for (i = 0; i < evlist->nr_mmaps; i++) {
+                       struct perf_mmap *map = evlist->mmap[i];
+
+                       perf_mmap__munmap(map);
+                       free(map);
+               }
+       }
+
+       if (evlist->mmap_ovw) {
+               for (i = 0; i < evlist->nr_mmaps; i++) {
+                       struct perf_mmap *map = evlist->mmap_ovw[i];
+
+                       perf_mmap__munmap(map);
+                       free(map);
+               }
+       }
+
+       zfree(&evlist->mmap);
+       zfree(&evlist->mmap_ovw);
+}
diff --git a/tools/perf/lib/include/internal/evlist.h b/tools/perf/lib/include/internal/evlist.h
index 9f440ab12b76..b136d1b4ea72 100644
--- a/tools/perf/lib/include/internal/evlist.h
+++ b/tools/perf/lib/include/internal/evlist.h
@@ -22,6 +22,8 @@ struct perf_evlist {
        size_t                   mmap_len;
        struct fdarray           pollfd;
        struct hlist_head        heads[PERF_EVLIST__HLIST_SIZE];
+       struct perf_mmap        **mmap;
+       struct perf_mmap        **mmap_ovw;
 };
 
 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
diff --git a/tools/perf/lib/include/perf/evlist.h b/tools/perf/lib/include/perf/evlist.h
index 8a2ce0757ab2..28b6a12a8a2b 100644
--- a/tools/perf/lib/include/perf/evlist.h
+++ b/tools/perf/lib/include/perf/evlist.h
@@ -33,4 +33,7 @@ LIBPERF_API void perf_evlist__set_maps(struct perf_evlist *evlist,
                                       struct perf_thread_map *threads);
 LIBPERF_API int perf_evlist__poll(struct perf_evlist *evlist, int timeout);
 
+LIBPERF_API int perf_evlist__mmap(struct perf_evlist *evlist, int pages);
+LIBPERF_API void perf_evlist__munmap(struct perf_evlist *evlist);
+
 #endif /* __LIBPERF_EVLIST_H */
diff --git a/tools/perf/lib/libperf.map b/tools/perf/lib/libperf.map
index 6e7d9be3c35f..198dcf305356 100644
--- a/tools/perf/lib/libperf.map
+++ b/tools/perf/lib/libperf.map
@@ -39,6 +39,8 @@ LIBPERF_0.0.1 {
                perf_evlist__next;
                perf_evlist__set_maps;
                perf_evlist__poll;
+               perf_evlist__mmap;
+               perf_evlist__munmap;
                perf_mmap__consume;
                perf_mmap__read_init;
                perf_mmap__read_done;
-- 
2.21.0
