exec_clock already provides a per-group cpu usage metric, and can be
reused by cpuacct when the cpu and cpuacct controllers are comounted.

However, it is currently only accounted for tasks in the fair class.
Doing the same for rt is easy, and can be done in an already existing
hierarchy loop. This is an improvement over the independent hierarchy
walk executed by cpuacct.
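
As a sketch of the intended reuse, a comounted cpuacct could then read
a group's total cpu time straight from the schedulers instead of doing
its own hierarchy walk. tg_exec_clock() below is a hypothetical helper,
not part of this patch, and assumes CONFIG_FAIR_GROUP_SCHED and
CONFIG_RT_GROUP_SCHED:

	static u64 tg_exec_clock(struct task_group *tg, int cpu)
	{
		/* fair class time plus the rt time added by this patch */
		return tg->cfs_rq[cpu]->exec_clock +
		       tg->rt_rq[cpu]->exec_clock;
	}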

Signed-off-by: Glauber Costa <glom...@openvz.org>
CC: Dave Jones <da...@redhat.com>
CC: Ben Hutchings <b...@decadent.org.uk>
CC: Peter Zijlstra <a.p.zijls...@chello.nl>
CC: Paul Turner <p...@google.com>
CC: Lennart Poettering <lenn...@poettering.net>
CC: Kay Sievers <kay.siev...@vrfy.org>
CC: Tejun Heo <t...@kernel.org>
---
 kernel/sched/rt.c    | 1 +
 kernel/sched/sched.h | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e9f8dcd..4a21045 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -907,6 +907,7 @@ static void update_curr_rt(struct rq *rq)
 
        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
+               schedstat_add(rt_rq, exec_clock, delta_exec);
 
                if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
                        raw_spin_lock(&rt_rq->rt_runtime_lock);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 765c687..b05dd84 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -254,6 +254,7 @@ struct cfs_rq {
        unsigned int nr_running, h_nr_running;
 
        u64 exec_clock;
+       u64 prev_exec_clock;
        u64 min_vruntime;
 #ifndef CONFIG_64BIT
        u64 min_vruntime_copy;
@@ -356,6 +357,8 @@ struct rt_rq {
        struct plist_head pushable_tasks;
 #endif
        int rt_throttled;
+       u64 exec_clock;
+       u64 prev_exec_clock;
        u64 rt_time;
        u64 rt_runtime;
        /* Nests inside the rq lock: */
-- 
1.8.1.4
