Re: [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers

2019-01-09 Thread Alexey Budankov
Hi,

On 02.01.2019 0:39, Jiri Olsa wrote:
> On Mon, Dec 24, 2018 at 03:27:17PM +0300, Alexey Budankov wrote:
>>
>> Build node cpu masks for mmap data buffers. Apply node cpu
>> masks to tool thread every time it references data buffers
>> cross node or cross cpu.
>>
>> Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
>> ---
>> Changes in v2:
>> - separated AIO buffers binding to patch 2/4
>> ---
>>  tools/perf/builtin-record.c |  9 +
>>  tools/perf/util/evlist.c|  6 +-
>>  tools/perf/util/mmap.c  | 12 
>>  tools/perf/util/mmap.h  |  1 +
>>  4 files changed, 27 insertions(+), 1 deletion(-)
>>
>> diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
>> index b26febb54d01..eea96794ee45 100644
>> --- a/tools/perf/builtin-record.c
>> +++ b/tools/perf/builtin-record.c
>> @@ -536,6 +536,9 @@ static int record__mmap_evlist(struct record *rec,
>>  struct record_opts *opts = &rec->opts;
>>  char msg[512];
>>  
>> +if (opts->affinity != PERF_AFFINITY_SYS)
>> +cpu__setup_cpunode_map();
>> +
>>  if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
>>   opts->auxtrace_mmap_pages,
>>   opts->auxtrace_snapshot_mode,
>> @@ -755,6 +758,12 @@ static int record__mmap_read_evlist(struct record *rec, 
>> struct perf_evlist *evli
>>  struct perf_mmap *map = &maps[i];
>>  
>>  if (map->base) {
>> +if (rec->opts.affinity != PERF_AFFINITY_SYS &&
>> +!CPU_EQUAL(&rec->affinity_mask, 
>> &map->affinity_mask)) {
>> +CPU_ZERO(&rec->affinity_mask);
>> +CPU_OR(&rec->affinity_mask, 
>> &rec->affinity_mask, &map->affinity_mask);
>> +sched_setaffinity(0, 
>> sizeof(rec->affinity_mask), &rec->affinity_mask);
> 
> all this code depends on aio and LIBNUMA, let's keep it there then

Please note that thread migration improves performance for serial case too:

   BENCH REPORT BASED   ELAPSED TIME BASED
  v4.20.0-rc5 
  (tip perf/core):

(current) SERIAL-SYS  / BASE : 1.27x (14.37/11.31), 1.29x (15.19/11.69)
  SERIAL-NODE / BASE : 1.15x (13.04/11.31), 1.17x (13.79/11.69)
  SERIAL-CPU  / BASE : 1.00x (11.32/11.31), 1.01x (11.89/11.69)

mbind() for AIO buffers is the only related adjustment.

> 
> also please add this and the affinity_mask setup code below to a function

Separated the code into record__adjust_affinity() and 
perf_mmap__setup_affinity_mask() in v3.

Thanks,
Alexey

> 
> thanks,
> jirka
> 
>> +}
>>  if (!record__aio_enabled(rec)) {
>>  if (perf_mmap__push(map, rec, record__pushfn) 
>> != 0) {
>>  rc = -1;
>> diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
>> index 60e825be944a..5ca5bb5ea0db 100644
>> --- a/tools/perf/util/evlist.c
>> +++ b/tools/perf/util/evlist.c
>> @@ -1028,7 +1028,11 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, 
>> unsigned int pages,
>>   * Its value is decided by evsel's write_backward.
>>   * So &mp should not be passed through const pointer.
>>   */
>> -struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = 
>> affinity };
>> +struct mmap_params mp = {
>> +.nr_cblocks = nr_cblocks,
>> +.affinity   = affinity,
>> +.cpu_map= cpus
>> +};
>>  
>>  if (!evlist->mmap)
>>  evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
>> diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
>> index 742fa9a8e498..a2095e4eda4b 100644
>> --- a/tools/perf/util/mmap.c
>> +++ b/tools/perf/util/mmap.c
>> @@ -361,6 +361,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
>>  
>>  int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, 
>> int cpu)
>>  {
>> +int c, nr_cpus, node;
>>  /*
>>   * The last one will be done at perf_mmap__consume(), so that we
>>   * make sure we don't prevent tools from consuming every last event in
>> @@ -389,6 +390,17 @@ int perf_mmap__mmap(struct perf_mmap *map, struct 
>> mmap_params *mp, int fd, int c
>>  map->cpu = cpu;
>>  
>>  CPU_ZERO(&map->affinity_mask);
>> +if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
>> +nr_cpus = cpu_map__nr(mp->cpu_map);
>> +node = cpu__get_node(map->cpu);
>> +for (c = 0; c < nr_cpus; c++) {
>> +if (cpu__get_node(c) == node) {
>> +CPU_SET(c, >affinity_mask);
>> +}
>> +}
>> +} else if (mp->affinity == PERF_AFFINITY_CPU) {
>> +CPU_SET(map->cpu, >affinity_mask);
>> +}
>>  
>>  if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
>>  &mp->auxtrace_mp, map->base, fd))
>> diff --git a/tools/perf/util/mmap.h 

Re: [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers

2019-01-09 Thread Alexey Budankov
Hi,

On 02.01.2019 0:39, Jiri Olsa wrote:
> On Mon, Dec 24, 2018 at 03:27:17PM +0300, Alexey Budankov wrote:
> 
> SNIP
> 
>> diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
>> index 742fa9a8e498..a2095e4eda4b 100644
>> --- a/tools/perf/util/mmap.c
>> +++ b/tools/perf/util/mmap.c
>> @@ -361,6 +361,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
>>  
>>  int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, 
>> int cpu)
>>  {
>> +int c, nr_cpus, node;
>>  /*
>>   * The last one will be done at perf_mmap__consume(), so that we
>>   * make sure we don't prevent tools from consuming every last event in
>> @@ -389,6 +390,17 @@ int perf_mmap__mmap(struct perf_mmap *map, struct 
>> mmap_params *mp, int fd, int c
>>  map->cpu = cpu;
>>  
>>  CPU_ZERO(&map->affinity_mask);
>> +if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
>> +nr_cpus = cpu_map__nr(mp->cpu_map);
>> +node = cpu__get_node(map->cpu);
>> +for (c = 0; c < nr_cpus; c++) {
>> +if (cpu__get_node(c) == node) {
> 
> the 'c' is just an index here, I think you need to
> use the real cpu value from the mp->cpu_map->map[c]

Well, yes, mapping c index to online cpu index is more generic. 
Corrected in v3. Thanks!

Alexey

> 
> jirka
> 
>> +CPU_SET(c, >affinity_mask);
>> +}
>> +}
>> +} else if (mp->affinity == PERF_AFFINITY_CPU) {
>> +CPU_SET(map->cpu, >affinity_mask);
>> +}
>>  
>>  if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
>>  &mp->auxtrace_mp, map->base, fd))
>> diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
>> index e566c19b242b..b3f724fad22e 100644
>> --- a/tools/perf/util/mmap.h
>> +++ b/tools/perf/util/mmap.h
>> @@ -72,6 +72,7 @@ enum bkw_mmap_state {
>>  struct mmap_params {
>>  int prot, mask, nr_cblocks, affinity;
>>  struct auxtrace_mmap_params auxtrace_mp;
>> +const struct cpu_map*cpu_map;
>>  };
>>  
>>  int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, 
>> int cpu);
> 


Re: [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers

2019-01-01 Thread Jiri Olsa
On Mon, Dec 24, 2018 at 03:27:17PM +0300, Alexey Budankov wrote:
> 
> Build node cpu masks for mmap data buffers. Apply node cpu
> masks to tool thread every time it references data buffers
> cross node or cross cpu.
> 
> Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
> ---
> Changes in v2:
> - separated AIO buffers binding to patch 2/4
> ---
>  tools/perf/builtin-record.c |  9 +
>  tools/perf/util/evlist.c|  6 +-
>  tools/perf/util/mmap.c  | 12 
>  tools/perf/util/mmap.h  |  1 +
>  4 files changed, 27 insertions(+), 1 deletion(-)
> 
> diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
> index b26febb54d01..eea96794ee45 100644
> --- a/tools/perf/builtin-record.c
> +++ b/tools/perf/builtin-record.c
> @@ -536,6 +536,9 @@ static int record__mmap_evlist(struct record *rec,
>   struct record_opts *opts = &rec->opts;
>   char msg[512];
>  
> + if (opts->affinity != PERF_AFFINITY_SYS)
> + cpu__setup_cpunode_map();
> +
>   if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
>opts->auxtrace_mmap_pages,
>opts->auxtrace_snapshot_mode,
> @@ -755,6 +758,12 @@ static int record__mmap_read_evlist(struct record *rec, 
> struct perf_evlist *evli
>   struct perf_mmap *map = &maps[i];
>  
>   if (map->base) {
> + if (rec->opts.affinity != PERF_AFFINITY_SYS &&
> + !CPU_EQUAL(&rec->affinity_mask, 
> &map->affinity_mask)) {
> + CPU_ZERO(&rec->affinity_mask);
> + CPU_OR(&rec->affinity_mask, 
> &rec->affinity_mask, &map->affinity_mask);
> + sched_setaffinity(0, 
> sizeof(rec->affinity_mask), &rec->affinity_mask);

all this code depends on aio and LIBNUMA, let's keep it there then

also please add this and the affinity_mask setup code below to a function

thanks,
jirka

> + }
>   if (!record__aio_enabled(rec)) {
>   if (perf_mmap__push(map, rec, record__pushfn) 
> != 0) {
>   rc = -1;
> diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
> index 60e825be944a..5ca5bb5ea0db 100644
> --- a/tools/perf/util/evlist.c
> +++ b/tools/perf/util/evlist.c
> @@ -1028,7 +1028,11 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, 
> unsigned int pages,
>* Its value is decided by evsel's write_backward.
>* So &mp should not be passed through const pointer.
>*/
> - struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = 
> affinity };
> + struct mmap_params mp = {
> + .nr_cblocks = nr_cblocks,
> + .affinity   = affinity,
> + .cpu_map= cpus
> + };
>  
>   if (!evlist->mmap)
>   evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
> diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
> index 742fa9a8e498..a2095e4eda4b 100644
> --- a/tools/perf/util/mmap.c
> +++ b/tools/perf/util/mmap.c
> @@ -361,6 +361,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
>  
>  int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, 
> int cpu)
>  {
> + int c, nr_cpus, node;
>   /*
>* The last one will be done at perf_mmap__consume(), so that we
>* make sure we don't prevent tools from consuming every last event in
> @@ -389,6 +390,17 @@ int perf_mmap__mmap(struct perf_mmap *map, struct 
> mmap_params *mp, int fd, int c
>   map->cpu = cpu;
>  
>   CPU_ZERO(&map->affinity_mask);
> + if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
> + nr_cpus = cpu_map__nr(mp->cpu_map);
> + node = cpu__get_node(map->cpu);
> + for (c = 0; c < nr_cpus; c++) {
> + if (cpu__get_node(c) == node) {
> + CPU_SET(c, >affinity_mask);
> + }
> + }
> + } else if (mp->affinity == PERF_AFFINITY_CPU) {
> + CPU_SET(map->cpu, >affinity_mask);
> + }
>  
>   if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
>   &mp->auxtrace_mp, map->base, fd))
> diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
> index e566c19b242b..b3f724fad22e 100644
> --- a/tools/perf/util/mmap.h
> +++ b/tools/perf/util/mmap.h
> @@ -72,6 +72,7 @@ enum bkw_mmap_state {
>  struct mmap_params {
>   int prot, mask, nr_cblocks, affinity;
>   struct auxtrace_mmap_params auxtrace_mp;
> + const struct cpu_map*cpu_map;
>  };
>  
>  int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, 
> int cpu);


Re: [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers

2019-01-01 Thread Jiri Olsa
On Mon, Dec 24, 2018 at 03:27:17PM +0300, Alexey Budankov wrote:

SNIP

> diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
> index 742fa9a8e498..a2095e4eda4b 100644
> --- a/tools/perf/util/mmap.c
> +++ b/tools/perf/util/mmap.c
> @@ -361,6 +361,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
>  
>  int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, 
> int cpu)
>  {
> + int c, nr_cpus, node;
>   /*
>* The last one will be done at perf_mmap__consume(), so that we
>* make sure we don't prevent tools from consuming every last event in
> @@ -389,6 +390,17 @@ int perf_mmap__mmap(struct perf_mmap *map, struct 
> mmap_params *mp, int fd, int c
>   map->cpu = cpu;
>  
>   CPU_ZERO(&map->affinity_mask);
> + if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
> + nr_cpus = cpu_map__nr(mp->cpu_map);
> + node = cpu__get_node(map->cpu);
> + for (c = 0; c < nr_cpus; c++) {
> + if (cpu__get_node(c) == node) {

the 'c' is just an index here, I think you need to
use the real cpu value from the mp->cpu_map->map[c]

jirka

> + CPU_SET(c, >affinity_mask);
> + }
> + }
> + } else if (mp->affinity == PERF_AFFINITY_CPU) {
> + CPU_SET(map->cpu, >affinity_mask);
> + }
>  
>   if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
>   &mp->auxtrace_mp, map->base, fd))
> diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
> index e566c19b242b..b3f724fad22e 100644
> --- a/tools/perf/util/mmap.h
> +++ b/tools/perf/util/mmap.h
> @@ -72,6 +72,7 @@ enum bkw_mmap_state {
>  struct mmap_params {
>   int prot, mask, nr_cblocks, affinity;
>   struct auxtrace_mmap_params auxtrace_mp;
> + const struct cpu_map*cpu_map;
>  };
>  
>  int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, 
> int cpu);


[PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers

2018-12-24 Thread Alexey Budankov


Build node cpu masks for mmap data buffers. Apply node cpu
masks to tool thread every time it references data buffers
cross node or cross cpu.

Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
---
Changes in v2:
- separated AIO buffers binding to patch 2/4
---
 tools/perf/builtin-record.c |  9 +
 tools/perf/util/evlist.c|  6 +-
 tools/perf/util/mmap.c  | 12 
 tools/perf/util/mmap.h  |  1 +
 4 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index b26febb54d01..eea96794ee45 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -536,6 +536,9 @@ static int record__mmap_evlist(struct record *rec,
struct record_opts *opts = &rec->opts;
char msg[512];
 
+   if (opts->affinity != PERF_AFFINITY_SYS)
+   cpu__setup_cpunode_map();
+
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
 opts->auxtrace_mmap_pages,
 opts->auxtrace_snapshot_mode,
@@ -755,6 +758,12 @@ static int record__mmap_read_evlist(struct record *rec, 
struct perf_evlist *evli
struct perf_mmap *map = &maps[i];
 
if (map->base) {
+   if (rec->opts.affinity != PERF_AFFINITY_SYS &&
+   if (rec->opts.affinity != PERF_AFFINITY_SYS &&
+   !CPU_EQUAL(&rec->affinity_mask, 
&map->affinity_mask)) {
+   CPU_ZERO(&rec->affinity_mask);
+   CPU_OR(&rec->affinity_mask, 
&rec->affinity_mask, &map->affinity_mask);
+   sched_setaffinity(0, 
sizeof(rec->affinity_mask), &rec->affinity_mask);
+   }
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) 
!= 0) {
rc = -1;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 60e825be944a..5ca5bb5ea0db 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1028,7 +1028,11 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, 
unsigned int pages,
 * Its value is decided by evsel's write_backward.
 * So &mp should not be passed through const pointer.
 */
-   struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = 
affinity };
+   struct mmap_params mp = {
+   .nr_cblocks = nr_cblocks,
+   .affinity   = affinity,
+   .cpu_map= cpus
+   };
 
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 742fa9a8e498..a2095e4eda4b 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -361,6 +361,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
 
 int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int 
cpu)
 {
+   int c, nr_cpus, node;
/*
 * The last one will be done at perf_mmap__consume(), so that we
 * make sure we don't prevent tools from consuming every last event in
@@ -389,6 +390,17 @@ int perf_mmap__mmap(struct perf_mmap *map, struct 
mmap_params *mp, int fd, int c
map->cpu = cpu;
 
CPU_ZERO(&map->affinity_mask);
+   if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
+   nr_cpus = cpu_map__nr(mp->cpu_map);
+   node = cpu__get_node(map->cpu);
+   for (c = 0; c < nr_cpus; c++) {
+   if (cpu__get_node(c) == node) {
+   CPU_SET(c, >affinity_mask);
+   }
+   }
+   } else if (mp->affinity == PERF_AFFINITY_CPU) {
+   CPU_SET(map->cpu, >affinity_mask);
+   }
 
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index e566c19b242b..b3f724fad22e 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -72,6 +72,7 @@ enum bkw_mmap_state {
 struct mmap_params {
int prot, mask, nr_cblocks, affinity;
struct auxtrace_mmap_params auxtrace_mp;
+   const struct cpu_map*cpu_map;
 };
 
 int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int 
cpu);


[PATCH v2 3/4] perf record: apply affinity masks when reading mmap, buffers

2018-12-12 Thread Alexey Budankov


Build node cpu masks for mmap data buffers. Apply node cpu
masks to tool thread every time it references data buffers
cross node or cross cpu.

Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
---
Changes in v2:
- separated AIO buffers binding to patch 2/4
---
 tools/perf/builtin-record.c |  9 +
 tools/perf/util/evlist.c|  6 +-
 tools/perf/util/mmap.c  | 12 
 tools/perf/util/mmap.h  |  1 +
 4 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index b26febb54d01..eea96794ee45 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -536,6 +536,9 @@ static int record__mmap_evlist(struct record *rec,
struct record_opts *opts = &rec->opts;
char msg[512];
 
+   if (opts->affinity != PERF_AFFINITY_SYS)
+   cpu__setup_cpunode_map();
+
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
 opts->auxtrace_mmap_pages,
 opts->auxtrace_snapshot_mode,
@@ -755,6 +758,12 @@ static int record__mmap_read_evlist(struct record *rec, 
struct perf_evlist *evli
struct perf_mmap *map = &maps[i];
 
if (map->base) {
+   if (rec->opts.affinity != PERF_AFFINITY_SYS &&
+   !CPU_EQUAL(&rec->affinity_mask, 
&map->affinity_mask)) {
+   CPU_ZERO(&rec->affinity_mask);
+   CPU_OR(&rec->affinity_mask, 
&rec->affinity_mask, &map->affinity_mask);
+   sched_setaffinity(0, 
sizeof(rec->affinity_mask), &rec->affinity_mask);
+   }
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) 
!= 0) {
rc = -1;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 60e825be944a..5ca5bb5ea0db 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1028,7 +1028,11 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, 
unsigned int pages,
 * Its value is decided by evsel's write_backward.
 * So &mp should not be passed through const pointer.
 */
-   struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = 
affinity };
+   struct mmap_params mp = {
+   .nr_cblocks = nr_cblocks,
+   .affinity   = affinity,
+   .cpu_map= cpus
+   };
 
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 742fa9a8e498..a2095e4eda4b 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -361,6 +361,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
 
 int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int 
cpu)
 {
+   int c, nr_cpus, node;
/*
 * The last one will be done at perf_mmap__consume(), so that we
 * make sure we don't prevent tools from consuming every last event in
@@ -389,6 +390,17 @@ int perf_mmap__mmap(struct perf_mmap *map, struct 
mmap_params *mp, int fd, int c
map->cpu = cpu;
 
CPU_ZERO(&map->affinity_mask);
+   if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
+   nr_cpus = cpu_map__nr(mp->cpu_map);
+   node = cpu__get_node(map->cpu);
+   for (c = 0; c < nr_cpus; c++) {
+   if (cpu__get_node(c) == node) {
+   CPU_SET(c, >affinity_mask);
+   }
+   }
+   } else if (mp->affinity == PERF_AFFINITY_CPU) {
+   CPU_SET(map->cpu, >affinity_mask);
+   }
 
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index e566c19b242b..b3f724fad22e 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -72,6 +72,7 @@ enum bkw_mmap_state {
 struct mmap_params {
int prot, mask, nr_cblocks, affinity;
struct auxtrace_mmap_params auxtrace_mp;
+   const struct cpu_map*cpu_map;
 };
 
 int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int 
cpu);