Em Tue, Jan 22, 2019 at 08:48:54PM +0300, Alexey Budankov escreveu:
> 
> Allocate and bind AIO user space buffers to the memory nodes
> that mmap kernel buffers are bound to.

[root@quaco amazonlinux]# perf test -v python
18: 'import perf' in python                               :
--- start ---
test child forked, pid 526
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
ImportError: /tmp/build/perf/python/perf.so: undefined symbol: mbind
test child finished with -1
---- end ----
'import perf' in python: FAILED!
[root@quaco amazonlinux]#


Please always run 'perf test' before pushing upstream. I'll try to fix
this one, either by linking libnuma into the python binding or by moving
the routines that use it to a separate file.

Thanks,

- Arnaldo
 
> Signed-off-by: Alexey Budankov <alexey.budan...@linux.intel.com>
> ---
> Changes in v4:
> - fixed compilation issue converting pr_warn() to pr_warning()
> - implemented stop if mbind() fails
> 
> Changes in v3:
> - corrected code style issues
> - adjusted __aio_alloc,__aio_bind,__aio_free() implementation
> 
> Changes in v2:
> - implemented perf_mmap__aio_alloc, perf_mmap__aio_free, perf_mmap__aio_bind 
>   and put HAVE_LIBNUMA_SUPPORT #ifdefs in there
> ---
>  tools/perf/util/mmap.c | 77 +++++++++++++++++++++++++++++++++++++++---
>  1 file changed, 73 insertions(+), 4 deletions(-)
> 
> diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
> index e68ba754a8e2..34be9f900575 100644
> --- a/tools/perf/util/mmap.c
> +++ b/tools/perf/util/mmap.c
> @@ -10,6 +10,9 @@
>  #include <sys/mman.h>
>  #include <inttypes.h>
>  #include <asm/bug.h>
> +#ifdef HAVE_LIBNUMA_SUPPORT
> +#include <numaif.h>
> +#endif
>  #include "debug.h"
>  #include "event.h"
>  #include "mmap.h"
> @@ -154,9 +157,72 @@ void __weak auxtrace_mmap_params__set_idx(struct 
> auxtrace_mmap_params *mp __mayb
>  }
>  
>  #ifdef HAVE_AIO_SUPPORT
> +
> +#ifdef HAVE_LIBNUMA_SUPPORT
> +static int perf_mmap__aio_alloc(struct perf_mmap *map, int index)
> +{
> +     map->aio.data[index] = mmap(NULL, perf_mmap__mmap_len(map), 
> PROT_READ|PROT_WRITE,
> +                                 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
> +     if (map->aio.data[index] == MAP_FAILED) {
> +             map->aio.data[index] = NULL;
> +             return -1;
> +     }
> +
> +     return 0;
> +}
> +
> +static void perf_mmap__aio_free(struct perf_mmap *map, int index)
> +{
> +     if (map->aio.data[index]) {
> +             munmap(map->aio.data[index], perf_mmap__mmap_len(map));
> +             map->aio.data[index] = NULL;
> +     }
> +}
> +
> +static int perf_mmap__aio_bind(struct perf_mmap *map, int index, int cpu, 
> int affinity)
> +{
> +     void *data;
> +     size_t mmap_len;
> +     unsigned long node_mask;
> +
> +     if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
> +             data = map->aio.data[index];
> +             mmap_len = perf_mmap__mmap_len(map);
> +             node_mask = 1UL << cpu__get_node(cpu);
> +             if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
> +                     pr_err("Failed to bind [%p-%p] AIO buffer to node %d: 
> error %m\n",
> +                             data, data + mmap_len, cpu__get_node(cpu));
> +                     return -1;
> +             }
> +     }
> +
> +     return 0;
> +}
> +#else
> +static int perf_mmap__aio_alloc(struct perf_mmap *map, int index)
> +{
> +     map->aio.data[index] = malloc(perf_mmap__mmap_len(map));
> +     if (map->aio.data[index] == NULL)
> +             return -1;
> +
> +     return 0;
> +}
> +
> +static void perf_mmap__aio_free(struct perf_mmap *map, int index)
> +{
> +     zfree(&(map->aio.data[index]));
> +}
> +
> +static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int 
> index __maybe_unused,
> +             int cpu __maybe_unused, int affinity __maybe_unused)
> +{
> +     return 0;
> +}
> +#endif
> +
>  static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
>  {
> -     int delta_max, i, prio;
> +     int delta_max, i, prio, ret;
>  
>       map->aio.nr_cblocks = mp->nr_cblocks;
>       if (map->aio.nr_cblocks) {
> @@ -177,11 +243,14 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, 
> struct mmap_params *mp)
>               }
>               delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
>               for (i = 0; i < map->aio.nr_cblocks; ++i) {
> -                     map->aio.data[i] = malloc(perf_mmap__mmap_len(map));
> -                     if (!map->aio.data[i]) {
> +                     ret = perf_mmap__aio_alloc(map, i);
> +                     if (ret == -1) {
>                               pr_debug2("failed to allocate data buffer area, 
> error %m");
>                               return -1;
>                       }
> +                     ret = perf_mmap__aio_bind(map, i, map->cpu, 
> mp->affinity);
> +                     if (ret == -1)
> +                             return -1;
>                       /*
>                        * Use cblock.aio_fildes value different from -1
>                        * to denote started aio write operation on the
> @@ -210,7 +279,7 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
>       int i;
>  
>       for (i = 0; i < map->aio.nr_cblocks; ++i)
> -             zfree(&map->aio.data[i]);
> +             perf_mmap__aio_free(map, i);
>       if (map->aio.data)
>               zfree(&map->aio.data);
>       zfree(&map->aio.cblocks);

-- 

- Arnaldo

Reply via email to