On Wed, Dec 12, 2018 at 10:40:22AM +0300, Alexey Budankov wrote:

SNIP

>  int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
>  {
> +     int c, nr_cpus, node;
>       /*
>        * The last one will be done at perf_mmap__consume(), so that we
>        * make sure we don't prevent tools from consuming every last event in
> @@ -344,6 +369,17 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
>       map->cpu = cpu;
>  
>       CPU_ZERO(&map->affinity_mask);
> +     if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
> +             nr_cpus = cpu_map__nr(mp->cpu_map);
> +             node = cpu__get_node(map->cpu);
> +             for (c = 0; c < nr_cpus; c++) {
> +                     if (cpu__get_node(c) == node) {
> +                             CPU_SET(c, &map->affinity_mask);
> +                     }
> +             }
> +     } else if (mp->affinity == PERF_AFFINITY_CPU) {
> +             CPU_SET(map->cpu, &map->affinity_mask);
> +     }

won't both of these end up in the same mask?
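
To make the comparison concrete, here is a minimal standalone sketch (not the perf code itself: node_of[] is a made-up topology standing in for cpu__get_node(), and map_cpu stands in for map->cpu) that builds the mask each branch of the patch would produce and compares them:

/*
 * Standalone sketch, not perf code: compare the mask the NODE branch
 * builds with the mask the CPU branch builds. node_of[] is a made-up
 * topology (CPUs 0-1 on node 0, CPUs 2-3 on node 1) standing in for
 * cpu__get_node(); map_cpu stands in for map->cpu.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static const int node_of[] = { 0, 0, 1, 1 };
#define NR_CPUS ((int)(sizeof(node_of) / sizeof(node_of[0])))

int main(void)
{
        cpu_set_t node_mask, cpu_mask;
        int map_cpu = 1, c;

        CPU_ZERO(&node_mask);
        CPU_ZERO(&cpu_mask);

        /* PERF_AFFINITY_NODE branch: every CPU on map_cpu's node */
        for (c = 0; c < NR_CPUS; c++)
                if (node_of[c] == node_of[map_cpu])
                        CPU_SET(c, &node_mask);

        /* PERF_AFFINITY_CPU branch: only the map's own CPU */
        CPU_SET(map_cpu, &cpu_mask);

        printf("masks %s\n",
               CPU_EQUAL(&node_mask, &cpu_mask) ? "equal" : "differ");
        return 0;
}

With this toy topology the masks differ (the NODE mask holds CPUs 0 and 1, the CPU mask only CPU 1); they would coincide only on a machine with a single CPU per node.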

jirka
