Qian Cai <c...@lca.pw> writes:

> The linux-next commit "sched/fair: Fix low cpu usage with high
> throttling by removing expiration of cpu-local slices" [1] introduced a
> few compilation warnings:
>
> kernel/sched/fair.c: In function '__refill_cfs_bandwidth_runtime':
> kernel/sched/fair.c:4365:6: warning: variable 'now' set but not used
> [-Wunused-but-set-variable]
> kernel/sched/fair.c: In function 'start_cfs_bandwidth':
> kernel/sched/fair.c:4992:6: warning: variable 'overrun' set but not used
> [-Wunused-but-set-variable]
>
> Also, __refill_cfs_bandwidth_runtime() no longer updates the
> expiration time, so fix the comments accordingly.
>
> [1] 
> https://lore.kernel.org/lkml/1558121424-2914-1-git-send-email-chiluk+li...@indeed.com/
>
> Signed-off-by: Qian Cai <c...@lca.pw>
> ---
>  kernel/sched/fair.c | 18 +++++-------------
>  1 file changed, 5 insertions(+), 13 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 84959d3285d1..43d6eb033ca8 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -4354,21 +4354,16 @@ static inline u64 sched_cfs_bandwidth_slice(void)
>  }
>  
>  /*
> - * Replenish runtime according to assigned quota and update expiration time.
> - * We use sched_clock_cpu directly instead of rq->clock to avoid adding
> - * additional synchronization around rq->lock.
> + * Replenish runtime according to assigned quota. We use sched_clock_cpu
> + * directly instead of rq->clock to avoid adding additional synchronization
> + * around rq->lock.
>   *
>   * requires cfs_b->lock
>   */
>  void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
>  {
> -     u64 now;
> -
> -     if (cfs_b->quota == RUNTIME_INF)
> -             return;
> -
> -     now = sched_clock_cpu(smp_processor_id());
> -     cfs_b->runtime = cfs_b->quota;
> +     if (cfs_b->quota != RUNTIME_INF)
> +             cfs_b->runtime = cfs_b->quota;
>  }

This is fine.

>  
>  static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
> @@ -4989,15 +4984,12 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
>  
>  void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
>  {
> -     u64 overrun;
> -
>       lockdep_assert_held(&cfs_b->lock);
>  
>       if (cfs_b->period_active)
>               return;
>  
>       cfs_b->period_active = 1;
> -     overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);

This is not - hrtimer_forward_now() has a side effect we still need (it advances the timer's expiry past now); only the returned overrun count is unused.
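
Keeping the call and dropping only the assignment should be enough to silence the warning; something like this (untested sketch):

void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
	lockdep_assert_held(&cfs_b->lock);

	if (cfs_b->period_active)
		return;

	cfs_b->period_active = 1;

	/*
	 * Keep hrtimer_forward_now() for its side effect of pushing the
	 * timer's expiry forward; only the returned overrun count goes
	 * unused.
	 */
	hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
	hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
}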

>       hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
>  }
