FWIW, good to see progress, still waiting for you guys to agree :-)

On Mon, Jul 01, 2019 at 01:15:44PM -0700, [email protected] wrote:

> - Taking up to every rq->lock is bad and expensive, and 5ms may be
>   too short a delay for this. I haven't tried microbenchmarks on the
>   cost of this vs. min_cfs_rq_runtime = 0 vs. baseline.

Yes, that's tricky; SGI/HPE have definite ideas about that.

> @@ -4781,12 +4790,41 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
>   */
>  static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
>  {
> -     u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
> +     u64 runtime = 0;
>       unsigned long flags;
>       u64 expires;
> +     struct cfs_rq *cfs_rq, *temp;
> +     LIST_HEAD(temp_head);
> +
> +     local_irq_save(flags);
> +
> +     raw_spin_lock(&cfs_b->lock);
> +     cfs_b->slack_started = false;
> +     list_splice_init(&cfs_b->slack_cfs_rq, &temp_head);
> +     raw_spin_unlock(&cfs_b->lock);
> +
> +     /* Gather all leftover runtime from all rqs */
> +     list_for_each_entry_safe(cfs_rq, temp, &temp_head, slack_list) {
> +             struct rq *rq = rq_of(cfs_rq);
> +             struct rq_flags rf;
> +
> +             rq_lock(rq, &rf);
> +
> +             raw_spin_lock(&cfs_b->lock);
> +             list_del_init(&cfs_rq->slack_list);
> +             if (!cfs_rq->nr_running && cfs_rq->runtime_remaining > 0 &&
> +                 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
> +                     cfs_b->runtime += cfs_rq->runtime_remaining;
> +                     cfs_rq->runtime_remaining = 0;
> +             }
> +             raw_spin_unlock(&cfs_b->lock);
> +
> +             rq_unlock(rq, &rf);
> +     }

But worse still, with IRQs disabled by that local_irq_save() above,
you then take potentially every rq->lock in the system without ever
re-enabling IRQs in between.
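
One way out (an untested sketch on my side, using the existing
rq_lock_irqsave()/rq_unlock_irqrestore() helpers from
kernel/sched/sched.h) would be to drop the outer local_irq_save() and
disable IRQs per-rq, so interrupts get re-enabled between iterations:

	/*
	 * Sketch only: give each rq->lock its own IRQ-off section
	 * instead of one local_irq_save() around the whole walk, so
	 * IRQs are re-enabled between rqs.
	 */
	list_for_each_entry_safe(cfs_rq, temp, &temp_head, slack_list) {
		struct rq *rq = rq_of(cfs_rq);
		struct rq_flags rf;

		rq_lock_irqsave(rq, &rf);

		/* Same rq->lock -> cfs_b->lock order as the patch. */
		raw_spin_lock(&cfs_b->lock);
		list_del_init(&cfs_rq->slack_list);
		if (!cfs_rq->nr_running && cfs_rq->runtime_remaining > 0 &&
		    cfs_rq->runtime_expires == cfs_b->runtime_expires) {
			cfs_b->runtime += cfs_rq->runtime_remaining;
			cfs_rq->runtime_remaining = 0;
		}
		raw_spin_unlock(&cfs_b->lock);

		rq_unlock_irqrestore(rq, &rf);
	}

The refresh-boundary check below would then go back to
raw_spin_lock_irqsave(&cfs_b->lock, flags), which also keeps the
lock/unlock flags pairing in one place.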

>  
>       /* confirm we're still not at a refresh boundary */
> -     raw_spin_lock_irqsave(&cfs_b->lock, flags);
> +     raw_spin_lock(&cfs_b->lock);
>       cfs_b->slack_started = false;
>       if (cfs_b->distribute_running) {
>               raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
