A quote from Documentation/memory-barriers.txt:

    For example, because 'jiffies' is marked volatile, it is never
    necessary to say READ_ONCE(jiffies). The reason for this is that
    READ_ONCE() and WRITE_ONCE() are implemented as volatile casts,
    which has no effect when its argument is already marked volatile.
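As an illustration (not part of this patch), here is a minimal sketch of
the volatile-cast idiom the documentation describes. MY_READ_ONCE() and
read_jiffies_twice() are made-up names for illustration only; the
kernel's real READ_ONCE() in <linux/compiler.h> is more elaborate, but
it is built on the same idiom:

/*
 * Minimal model of a volatile cast. Not the kernel's actual
 * READ_ONCE() implementation, just the idiom underneath it.
 */
#define MY_READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

extern volatile unsigned long jiffies;

static unsigned long read_jiffies_twice(void)
{
	/*
	 * 'jiffies' is already declared volatile, so the cast below
	 * adds nothing: both statements compile to the same fresh load.
	 */
	unsigned long a = MY_READ_ONCE(jiffies);
	unsigned long b = jiffies;

	return a + b;
}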
Hence change all 'READ_ONCE(jiffies)' occurrences into 'jiffies'.

Signed-off-by: Bart Van Assche <bart.vanass...@sandisk.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Oleg Nesterov <o...@redhat.com>
---
 kernel/sched/core.c    | 2 +-
 kernel/sched/cputime.c | 4 ++--
 kernel/sched/fair.c    | 8 ++++----
 kernel/sched/wait.c    | 6 ++++--
 4 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 44817c6..edb811e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3100,7 +3100,7 @@ void scheduler_tick(void)
 u64 scheduler_tick_max_deferment(void)
 {
 	struct rq *rq = this_rq();
-	unsigned long next, now = READ_ONCE(jiffies);
+	unsigned long next, now = jiffies;
 
 	next = rq->last_sched_tick + HZ;
 
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a846cf8..9aecaad 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -691,7 +691,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 static cputime_t vtime_delta(struct task_struct *tsk)
 {
-	unsigned long now = READ_ONCE(jiffies);
+	unsigned long now = jiffies;
 
 	if (time_before(now, (unsigned long)tsk->vtime_snap))
 		return 0;
@@ -701,7 +701,7 @@ static cputime_t vtime_delta(struct task_struct *tsk)
 
 static cputime_t get_vtime_delta(struct task_struct *tsk)
 {
-	unsigned long now = READ_ONCE(jiffies);
+	unsigned long now = jiffies;
 	cputime_t delta, other;
 
 	/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 039de34..db5ae54 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4802,7 +4802,7 @@ static void cpu_load_update_idle(struct rq *this_rq)
 	if (weighted_cpuload(cpu_of(this_rq)))
 		return;
 
-	cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
+	cpu_load_update_nohz(this_rq, jiffies, 0);
 }
 
 /*
@@ -4828,7 +4828,7 @@ void cpu_load_update_nohz_start(void)
  */
 void cpu_load_update_nohz_stop(void)
 {
-	unsigned long curr_jiffies = READ_ONCE(jiffies);
+	unsigned long curr_jiffies = jiffies;
 	struct rq *this_rq = this_rq();
 	unsigned long load;
 
@@ -4851,7 +4851,7 @@ static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
 {
 #ifdef CONFIG_NO_HZ_COMMON
 	/* See the mess around cpu_load_update_nohz(). */
-	this_rq->last_load_update_tick = READ_ONCE(jiffies);
+	this_rq->last_load_update_tick = jiffies;
 #endif
 	cpu_load_update(this_rq, load, 1);
 }
@@ -4864,7 +4864,7 @@ void cpu_load_update_active(struct rq *this_rq)
 	unsigned long load = weighted_cpuload(cpu_of(this_rq));
 
 	if (tick_nohz_tick_stopped())
-		cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
+		cpu_load_update_nohz(this_rq, jiffies, load);
 	else
 		cpu_load_update_periodic(this_rq, load);
 }
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index f15d6b6..af4959d 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -601,7 +601,8 @@ EXPORT_SYMBOL(bit_wait_io);
 
 __sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
 {
-	unsigned long now = READ_ONCE(jiffies);
+	unsigned long now = jiffies;
+
 	if (time_after_eq(now, word->timeout))
 		return -EAGAIN;
 	schedule_timeout(word->timeout - now);
@@ -613,7 +614,8 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
 
 __sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
 {
-	unsigned long now = READ_ONCE(jiffies);
+	unsigned long now = jiffies;
+
 	if (time_after_eq(now, word->timeout))
 		return -EAGAIN;
 	io_schedule_timeout(word->timeout - now);
-- 
2.10.0
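Postscript for context, not part of the patch: READ_ONCE() remains
necessary for variables that are *not* declared volatile, where the
cast is what forces a fresh load on every access. A hedged sketch with
made-up names ('done' and wait_for_done() are illustrative only):

#include <linux/compiler.h>

static int done;	/* deliberately not volatile */

static void wait_for_done(void)
{
	/*
	 * Without READ_ONCE() the compiler could hoist the load of
	 * 'done' out of the loop and spin on a stale value forever;
	 * the volatile cast forces a re-read on each iteration.
	 */
	while (!READ_ONCE(done))
		cpu_relax();
}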