We protect the sum_exec_runtime field against inconsistent reads in
thread_group_cputime(), but we also read it in other places that are
not called with task_rq_lock() taken.

This patch changes those places to use read_sum_exec_runtime(), with
the exception of kernel/sched/debug.c, where we also read some other
u64 variables (vruntime, exec_start) without protection. But since that
code is for debugging purposes only and an inconsistency there is very
improbable, we can ignore it.
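
For reference, read_sum_exec_runtime() is introduced by an earlier
patch in this series; below is a minimal sketch of how such a helper
can look (exact locking details may differ from the actual helper):

  #ifdef CONFIG_64BIT
  static inline u64 read_sum_exec_runtime(struct task_struct *t)
  {
          /* A u64 load is atomic on 64-bit, no locking needed. */
          return t->se.sum_exec_runtime;
  }
  #else
  static u64 read_sum_exec_runtime(struct task_struct *t)
  {
          u64 ns;
          struct rq_flags rf;
          struct rq *rq;

          /* Take the runqueue lock to read a consistent 64-bit value. */
          rq = task_rq_lock(t, &rf);
          ns = t->se.sum_exec_runtime;
          task_rq_unlock(rq, t, &rf);

          return ns;
  }
  #endif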

Signed-off-by: Stanislaw Gruszka <[email protected]>
---
 fs/proc/base.c                 | 2 +-
 kernel/delayacct.c             | 2 +-
 kernel/exit.c                  | 2 +-
 kernel/sched/cputime.c         | 2 +-
 kernel/time/posix-cpu-timers.c | 4 ++--
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/fs/proc/base.c b/fs/proc/base.c
index e9ff186..8118d97 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -505,7 +505,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
                seq_printf(m, "0 0 0\n");
        else
                seq_printf(m, "%llu %llu %lu\n",
-                  (unsigned long long)task->se.sum_exec_runtime,
+                  (unsigned long long)read_sum_exec_runtime(task),
                   (unsigned long long)task->sched_info.run_delay,
                   task->sched_info.pcount);
 
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 435c14a..023b2ca 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -104,7 +104,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
         */
        t1 = tsk->sched_info.pcount;
        t2 = tsk->sched_info.run_delay;
-       t3 = tsk->se.sum_exec_runtime;
+       t3 = read_sum_exec_runtime(tsk);
 
        d->cpu_count += t1;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 2f974ae..a46f96f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -134,7 +134,7 @@ static void __exit_signal(struct task_struct *tsk)
        sig->inblock += task_io_get_inblock(tsk);
        sig->oublock += task_io_get_oublock(tsk);
        task_io_accounting_add(&sig->ioac, &tsk->ioac);
-       sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
+       sig->sum_sched_runtime += read_sum_exec_runtime(tsk);
        sig->nr_threads--;
        __unhash_process(tsk, group_dead);
        write_sequnlock(&sig->stats_lock);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 026c1c4..cd1dd43 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -682,7 +682,7 @@ out:
 void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
        struct task_cputime cputime = {
-               .sum_exec_runtime = p->se.sum_exec_runtime,
+               .sum_exec_runtime = read_sum_exec_runtime(p),
        };
 
        task_cputime(p, &cputime.utime, &cputime.stime);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 4d8466b..b1e7275 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -849,7 +849,7 @@ static void check_thread_timers(struct task_struct *tsk,
        tsk_expires->virt_exp = expires_to_cputime(expires);
 
        tsk_expires->sched_exp = check_timers_list(++timers, firing,
-                                                  tsk->se.sum_exec_runtime);
+                                                  read_sum_exec_runtime(tsk));
 
        /*
         * Check for the special case thread timers.
@@ -1116,7 +1116,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
                struct task_cputime task_sample;
 
                task_cputime(tsk, &task_sample.utime, &task_sample.stime);
-               task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
+               task_sample.sum_exec_runtime = read_sum_exec_runtime(tsk);
                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                        return 1;
        }
-- 
1.8.3.1
