From: Kan Liang <[email protected]> Iterating all events which need to receive side-band events also bring some overhead.
The side-band event overhead, PERF_CORE_SB_OVERHEAD, is a common
overhead type.

Signed-off-by: Kan Liang <[email protected]>
---
 include/uapi/linux/perf_event.h |  1 +
 kernel/events/core.c            | 17 ++++++++++++++++-
 2 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index c488336..7ba6d30 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -1002,6 +1002,7 @@ struct perf_branch_entry {
 enum perf_record_overhead_type {
 	/* common overhead */
 	PERF_CORE_MUX_OVERHEAD	= 0,
+	PERF_CORE_SB_OVERHEAD,
 	/* PMU specific */
 	PERF_OVERHEAD_MAX,
 };
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 28468ae..335b1e2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6134,9 +6134,13 @@ static void
 perf_iterate_sb(perf_iterate_f output, void *data,
 	       struct perf_event_context *task_ctx)
 {
+	struct perf_event_context *overhead_ctx = task_ctx;
+	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
+	u64 start_clock, end_clock;
 	int ctxn;
 
+	start_clock = perf_clock();
 	rcu_read_lock();
 	preempt_disable();
 
@@ -6154,12 +6158,23 @@ perf_iterate_sb(perf_iterate_f output, void *data,
 	for_each_task_context_nr(ctxn) {
 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
-		if (ctx)
+		if (ctx) {
 			perf_iterate_ctx(ctx, output, data, false);
+			if (!overhead_ctx)
+				overhead_ctx = ctx;
+		}
 	}
 done:
 	preempt_enable();
 	rcu_read_unlock();
+
+	/* calculate side-band event overhead */
+	end_clock = perf_clock();
+	if (overhead_ctx && overhead_ctx->pmu && overhead_ctx->pmu->stat) {
+		cpuctx = this_cpu_ptr(overhead_ctx->pmu->pmu_cpu_context);
+		cpuctx->overhead[PERF_CORE_SB_OVERHEAD].nr++;
+		cpuctx->overhead[PERF_CORE_SB_OVERHEAD].time += end_clock - start_clock;
+	}
 }

-- 
2.4.3

