Latencies of missed timer offsets. Generate a histogram of missed
timer offsets in microseconds. This will be used as a base, along with
the irq and preemption latencies, to calculate the effective process
wakeup latencies.

The following filter(s) may be used

'hist:key=common_pid.execname'
'hist:key=common_pid.execname,cpu:val=toffset,hitcount'

Signed-off-by: Binoy Jayan <binoy.ja...@linaro.org>
---
 include/linux/hrtimer.h        |  3 +++
 include/trace/events/latency.h | 31 +++++++++++++++++++++++++++++++
 kernel/time/hrtimer.c          | 39 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 73 insertions(+)

diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 5e00f80..e09de14 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -104,6 +104,9 @@ struct hrtimer {
        struct hrtimer_clock_base       *base;
        u8                              state;
        u8                              is_rel;
+#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_IRQSOFF_TRACER)
+       ktime_t                         praecox;
+#endif
 #ifdef CONFIG_TIMER_STATS
        int                             start_pid;
        void                            *start_site;
diff --git a/include/trace/events/latency.h b/include/trace/events/latency.h
index 77896c7..24cf009 100644
--- a/include/trace/events/latency.h
+++ b/include/trace/events/latency.h
@@ -37,6 +37,37 @@ DEFINE_EVENT(latency_template, latency_critical_timings,
            TP_PROTO(int cpu, cycles_t latency),
            TP_ARGS(cpu, latency));
 
+TRACE_EVENT(latency_hrtimer_interrupt,
+
+       TP_PROTO(int cpu, long long toffset, struct task_struct *curr,
+               struct task_struct *task),
+
+       TP_ARGS(cpu, toffset, curr, task),
+
+       TP_STRUCT__entry(
+               __field(int,            cpu)
+               __field(long long,      toffset)
+               __array(char,           ccomm,  TASK_COMM_LEN)
+               __field(int,            cprio)
+               __array(char,           tcomm,  TASK_COMM_LEN)
+               __field(int,            tprio)
+       ),
+
+       TP_fast_assign(
+               __entry->cpu     = cpu;
+               __entry->toffset = toffset;
+               memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
+               __entry->cprio  = curr->prio;
+               memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>",
+                       task != NULL ? TASK_COMM_LEN : 7);
+               __entry->tprio  = task != NULL ? task->prio : -1;
+       ),
+
+       TP_printk("cpu=%d toffset=%lld curr=%s[%d] thread=%s[%d]",
+               __entry->cpu, __entry->toffset, __entry->ccomm,
+               __entry->cprio, __entry->tcomm, __entry->tprio)
+);
+
 #endif /* _TRACE_HIST_H */
 
 /* This part must be outside protection */
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 9ba7c82..1a96e34 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -53,9 +53,12 @@
 #include <asm/uaccess.h>
 
 #include <trace/events/timer.h>
+#include <trace/events/latency.h>
 
 #include "tick-internal.h"
 
+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
+
 /*
  * The timer bases:
  *
@@ -960,6 +963,38 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
        return tim;
 }
 
+static inline void trace_latency_mark_ts(struct hrtimer *timer,
+                                        struct hrtimer_clock_base *new_base,
+                                        ktime_t tim)
+{
+#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_IRQSOFF_TRACER)
+       if (trace_latency_hrtimer_interrupt_enabled()) {
+               ktime_t now = new_base->get_time();
+
+               if (ktime_to_ns(tim) < ktime_to_ns(now))
+                       timer->praecox = now;
+               else
+                       timer->praecox = ktime_set(0, 0);
+       }
+#endif
+}
+
+static inline void trace_missed_hrtimer(struct hrtimer *timer, ktime_t basenow)
+{
+#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_IRQSOFF_TRACER)
+       if (trace_latency_hrtimer_interrupt_enabled())
+               trace_latency_hrtimer_interrupt(raw_smp_processor_id(),
+                       ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
+                               timer->praecox : hrtimer_get_expires(timer),
+                               basenow)),
+                       current,
+                       timer->function == hrtimer_wakeup ?
+                               container_of(timer, struct hrtimer_sleeper,
+                                       timer)->task : NULL);
+#endif
+
+}
+
 /**
  * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
  * @timer:     the timer to be added
@@ -992,6 +1027,8 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 
        timer_stats_hrtimer_set_start_info(timer);
 
+       trace_latency_mark_ts(timer, new_base, tim);
+
        leftmost = enqueue_hrtimer(timer, new_base);
        if (!leftmost)
                goto unlock;
@@ -1284,6 +1321,8 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 
                        timer = container_of(node, struct hrtimer, node);
 
+                       trace_missed_hrtimer(timer, basenow);
+
                        /*
                         * The immediate goal for using the softexpires is
                         * minimizing wakeups, not running timers at the
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project

Reply via email to