Em Tue, Jul 22, 2014 at 04:17:25PM +0300, Adrian Hunter escreveu:
> Add an array to struct machine to store
> the current tid running on each cpu.
> Add machine functions to get / set
> the tid for a cpu.
> 
> This will be used to determine the tid
> when decoding a per-cpu Instruction Trace.

Most machines don't have MAX_NR_CPUS cpus, but then this per-machine
struct cost will be suffered only by tools that use this new facility,
so it wouldn't be fair at this point to ask you to use a growing array.

See tools/perf/builtin-trace.c trace__set_fd_pathname() to see how it
is done for a per-thread fd->pathname table, learned from either reading
/proc/PID/fd/ or using 'perf probe'-made vfs_getname
wannabe-tracepoints.

But in general, try to avoid using these FOO_MAX_BAR things :-\

- Arnaldo

> Signed-off-by: Adrian Hunter <[email protected]>
> ---
>  tools/perf/util/machine.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++
>  tools/perf/util/machine.h |  5 +++++
>  2 files changed, 51 insertions(+)
> 
> diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
> index 04c17a7..34dd63f 100644
> --- a/tools/perf/util/machine.c
> +++ b/tools/perf/util/machine.c
> @@ -47,6 +47,8 @@ int machine__init(struct machine *machine, const char 
> *root_dir, pid_t pid)
>               thread__set_comm(thread, comm, 0);
>       }
>  
> +     machine->current_tid = NULL;
> +
>       return 0;
>  }
>  
> @@ -106,6 +108,7 @@ void machine__exit(struct machine *machine)
>       dsos__delete(&machine->user_dsos);
>       dsos__delete(&machine->kernel_dsos);
>       zfree(&machine->root_dir);
> +     zfree(&machine->current_tid);
>  }
>  
>  void machine__delete(struct machine *machine)
> @@ -1532,3 +1535,46 @@ int machine__get_kernel_start(struct machine *machine)
>       }
>       return err;
>  }
> +
> +pid_t machine__get_current_tid(struct machine *machine, int cpu)
> +{
> +     if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
> +             return -1;
> +
> +     return machine->current_tid[cpu];
> +}
> +
> +int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
> +                          pid_t tid)
> +{
> +     struct thread *thread;
> +
> +     if (cpu < 0)
> +             return -EINVAL;
> +
> +     if (!machine->current_tid) {
> +             int i;
> +
> +             machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
> +             if (!machine->current_tid)
> +                     return -ENOMEM;
> +             for (i = 0; i < MAX_NR_CPUS; i++)
> +                     machine->current_tid[i] = -1;
> +     }
> +
> +     if (cpu >= MAX_NR_CPUS) {
> +             pr_err("Requested CPU %d too large. ", cpu);
> +             pr_err("Consider raising MAX_NR_CPUS\n");
> +             return -EINVAL;
> +     }
> +
> +     machine->current_tid[cpu] = tid;
> +
> +     thread = machine__findnew_thread(machine, pid, tid);
> +     if (!thread)
> +             return -ENOMEM;
> +
> +     thread->cpu = cpu;
> +
> +     return 0;
> +}
> diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
> index be97021..6442d65 100644
> --- a/tools/perf/util/machine.h
> +++ b/tools/perf/util/machine.h
> @@ -35,6 +35,7 @@ struct machine {
>       struct map        *vmlinux_maps[MAP__NR_TYPES];
>       u64               kernel_start;
>       symbol_filter_t   symbol_filter;
> +     pid_t             *current_tid;
>  };
>  
>  static inline
> @@ -212,4 +213,8 @@ int machine__synthesize_threads(struct machine *machine, 
> struct target *target,
>                                            perf_event__process, data_mmap);
>  }
>  
> +pid_t machine__get_current_tid(struct machine *machine, int cpu);
> +int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
> +                          pid_t tid);
> +
>  #endif /* __PERF_MACHINE_H */
> -- 
> 1.8.3.2
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to