The following commit has been merged into the sched/urgent branch of tip:

Commit-ID:     763a9ec06c409dcde2a761aac4bb83ff3938e0b3
Gitweb:        https://git.kernel.org/tip/763a9ec06c409dcde2a761aac4bb83ff3938e0b3
Author:        Qian Cai <[email protected]>
AuthorDate:    Tue, 20 Aug 2019 14:40:55 -04:00
Committer:     Ingo Molnar <[email protected]>
CommitterDate: Wed, 25 Sep 2019 17:42:31 +02:00

sched/fair: Fix -Wunused-but-set-variable warnings

Commit:

   de53fd7aedb1 ("sched/fair: Fix low cpu usage with high throttling by 
removing expiration of cpu-local slices")

introduced a few compilation warnings:

  kernel/sched/fair.c: In function '__refill_cfs_bandwidth_runtime':
  kernel/sched/fair.c:4365:6: warning: variable 'now' set but not used [-Wunused-but-set-variable]
  kernel/sched/fair.c: In function 'start_cfs_bandwidth':
  kernel/sched/fair.c:4992:6: warning: variable 'overrun' set but not used [-Wunused-but-set-variable]

Also, __refill_cfs_bandwidth_runtime() no longer updates the
expiration time, so fix the comments accordingly.

Signed-off-by: Qian Cai <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Ben Segall <[email protected]>
Reviewed-by: Dave Chiluk <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Fixes: de53fd7aedb1 ("sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices")
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
 kernel/sched/fair.c | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5bc2399..dfdac90 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4353,21 +4353,16 @@ static inline u64 sched_cfs_bandwidth_slice(void)
 }
 
 /*
- * Replenish runtime according to assigned quota and update expiration time.
- * We use sched_clock_cpu directly instead of rq->clock to avoid adding
- * additional synchronization around rq->lock.
+ * Replenish runtime according to assigned quota. We use sched_clock_cpu
+ * directly instead of rq->clock to avoid adding additional synchronization
+ * around rq->lock.
  *
  * requires cfs_b->lock
  */
 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
 {
-       u64 now;
-
-       if (cfs_b->quota == RUNTIME_INF)
-               return;
-
-       now = sched_clock_cpu(smp_processor_id());
-       cfs_b->runtime = cfs_b->quota;
+       if (cfs_b->quota != RUNTIME_INF)
+               cfs_b->runtime = cfs_b->quota;
 }
 
 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
@@ -4983,15 +4978,13 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
-       u64 overrun;
-
        lockdep_assert_held(&cfs_b->lock);
 
        if (cfs_b->period_active)
                return;
 
        cfs_b->period_active = 1;
-       overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
+       hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
        hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
 }
 

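For reference, a minimal standalone reproducer of the warning pattern this
patch removes (file and function names below are illustrative only, not from
the kernel tree); building it with GCC emits the same diagnostic:

  /* warn.c - minimal -Wunused-but-set-variable reproducer.
   * Build with: gcc -Wunused-but-set-variable -c warn.c
   *
   * 'overrun' is assigned but its value is never read afterwards,
   * mirroring the pattern removed from start_cfs_bandwidth() above.
   */
  static unsigned long long forward_timer(void)
  {
          return 1;       /* hypothetical stand-in for hrtimer_forward_now() */
  }

  void start_bandwidth(void)
  {
          unsigned long long overrun;

          /* warning: variable 'overrun' set but not used */
          overrun = forward_timer();
  }

The fix is the same as in the patch: call the function for its side effect and
discard the return value, dropping the dead local variable.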