On Mon, Jul 22, 2019 at 01:33:43PM -0400, Rik van Riel wrote:
> Refactor enqueue_entity, dequeue_entity, and update_load_avg, in order
> to split out the things we still want to happen at every level in the
> cgroup hierarchy with a flat runqueue from the things we only need to
> happen once.
> 
> No functional changes.
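(Noting for context, since the call sites aren't in the quoted hunks: as I
read the series, callers are expected to pair the new helpers, keeping the
per-cgroup-level bookkeeping separate from the enqueue proper, roughly like
this sketch, not the actual call site:)

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		/* PELT + group weight updates, per hierarchy level */
		enqueue_entity_groups(cfs_rq, se, flags);
		/* the part a flat runqueue would only need once */
		enqueue_entity(cfs_rq, se, flags);
	}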

> @@ -3500,7 +3500,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
>  #define DO_ATTACH    0x4
>  
>  /* Update task and its cfs_rq load average */
> -static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> +static inline bool update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
>  {
>       u64 now = cfs_rq_clock_pelt(cfs_rq);
>       int decayed;
> @@ -3529,6 +3529,8 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
>  
>       } else if (decayed && (flags & UPDATE_TG))
>               update_tg_load_avg(cfs_rq, 0);
> +
> +     return decayed;
>  }
>  
>  #ifndef CONFIG_64BIT
> @@ -3745,9 +3747,10 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
>  #define SKIP_AGE_LOAD        0x0
>  #define DO_ATTACH    0x0
>  
> -static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
> +static inline bool update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
>  {
>       cfs_rq_util_change(cfs_rq, 0);
> +     return false;
>  }
>  
>  static inline void remove_entity_load_avg(struct sched_entity *se) {}
> @@ -3870,6 +3873,24 @@ static inline void check_schedstat_required(void)
>   * CPU and an up-to-date min_vruntime on the destination CPU.
>   */
>  
> +static bool
> +enqueue_entity_groups(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> +{
> +     /*
> +      * When enqueuing a sched_entity, we must:
> +      *   - Update loads to have both entity and cfs_rq synced with now.
> +      *   - Add its load to cfs_rq->runnable_avg
> +      *   - For group_entity, update its weight to reflect the new share of
> +      *     its group cfs_rq
> +      *   - Add its new weight to cfs_rq->load.weight
> +      */
> +     if (!update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH))
> +             return false;
> +
> +     update_cfs_group(se);
> +     return true;
> +}
> +
>  static void
>  enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
>  {
> @@ -3894,16 +3915,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
>       if (renorm && !curr)
>               se->vruntime += cfs_rq->min_vruntime;
>  
> -     /*
> -      * When enqueuing a sched_entity, we must:
> -      *   - Update loads to have both entity and cfs_rq synced with now.
> -      *   - Add its load to cfs_rq->runnable_avg
> -      *   - For group_entity, update its weight to reflect the new share of
> -      *     its group cfs_rq
> -      *   - Add its new weight to cfs_rq->load.weight
> -      */
> -     update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
> -     update_cfs_group(se);
>       enqueue_runnable_load_avg(cfs_rq, se);
>       account_entity_enqueue(cfs_rq, se);
>  

No functional changes, you say, but you did make update_cfs_group()
conditional on update_load_avg() seeing the averages decay. Now, that looks
OK, but maybe you can do that part in a separate patch with a little
justification of its own.
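To spell out the change (restating the quoted hunks, not proposing anything
new): enqueue_entity() used to do, unconditionally:

	update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
	update_cfs_group(se);

whereas enqueue_entity_groups() now skips the group reweight whenever
update_load_avg() reports that nothing decayed:

	if (!update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH))
		return false;

	update_cfs_group(se);
	return true;

That skip is the behavioural change that wants its own changelog.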
