Re: [RFC PATCH 3/3] sched/fair: Change @running of __update_load_avg() to @update_util

2016-06-03 Thread Juri Lelli
On 02/06/16 18:27, Dietmar Eggemann wrote:
> On 02/06/16 10:25, Juri Lelli wrote:
> 
> [...]
> 
> >> @@ -2757,7 +2754,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
> >>weight * scaled_delta_w;
> >>}
> >>}
> >> -  if (update_util && running)
> >> +  if (update_util == 0x3)
> > 
> > How about a define for these masks?
> 
> Something like this?
> 
> +#define UTIL_RUNNING   1
> +#define UTIL_UPDATE    2

Make these 0x01 and 0x02, I'd say.

> +
>  /*
>   * We can represent the historical contribution to runnable average as the
>   * coefficients of a geometric series.  To do this we sub-divide our runnable
> @@ -2724,7 +2727,7 @@ static u32 __compute_runnable_contrib(u64 n)
>   */
>  static __always_inline int
>  __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
> - unsigned long weight, int update_util, struct cfs_rq *cfs_rq)
> + unsigned long weight, int util_flags, struct cfs_rq *cfs_rq)
>  {
> u64 delta, scaled_delta, periods;
> u32 contrib;
> @@ -2775,7 +2778,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
> weight * scaled_delta_w;
> }
> }
> -   if (update_util == 0x3)
> +   if (util_flags == (UTIL_UPDATE | UTIL_RUNNING))

Looks more readable to me. :-)

Best,

- Juri
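
A minimal standalone sketch (plain userspace C, not the kernel code) of how the proposed UTIL_RUNNING/UTIL_UPDATE bits would combine and be tested, assuming the 0x01/0x02 values suggested above:

#include <stdio.h>

#define UTIL_RUNNING	0x01	/* se/cfs_rq is currently running */
#define UTIL_UPDATE	0x02	/* utilization should be tracked for this se/cfs_rq */

int main(void)
{
	unsigned int util_flags;

	/* Walk all four combinations and show which updates they enable. */
	for (util_flags = 0; util_flags <= (UTIL_UPDATE | UTIL_RUNNING); util_flags++)
		printf("util_flags=0x%x: accrue util_sum=%d, recompute util_avg=%d\n",
		       util_flags,
		       util_flags == (UTIL_UPDATE | UTIL_RUNNING),
		       !!(util_flags & UTIL_UPDATE));

	return 0;
}

Only the 0x3 combination accrues util_sum; bit 1 alone is enough to recompute util_avg, which mirrors the "== 0x3" and "& 0x2" checks in the patch.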


Re: [RFC PATCH 3/3] sched/fair: Change @running of __update_load_avg() to @update_util

2016-06-02 Thread Dietmar Eggemann
On 02/06/16 10:25, Juri Lelli wrote:

[...]

>> @@ -2757,7 +2754,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
>>  weight * scaled_delta_w;
>>  }
>>  }
>> -if (update_util && running)
>> +if (update_util == 0x3)
> 
> How about a define for these masks?

Something like this?

+#define UTIL_RUNNING   1
+#define UTIL_UPDATE    2
+
 /*
  * We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series.  To do this we sub-divide our runnable
@@ -2724,7 +2727,7 @@ static u32 __compute_runnable_contrib(u64 n)
  */
 static __always_inline int
 __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
- unsigned long weight, int update_util, struct cfs_rq *cfs_rq)
+ unsigned long weight, int util_flags, struct cfs_rq *cfs_rq)
 {
u64 delta, scaled_delta, periods;
u32 contrib;
@@ -2775,7 +2778,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
weight * scaled_delta_w;
}
}
-   if (update_util == 0x3)
+   if (util_flags == (UTIL_UPDATE | UTIL_RUNNING))
sa->util_sum += scaled_delta_w * scale_cpu;
...


Re: [RFC PATCH 3/3] sched/fair: Change @running of __update_load_avg() to @update_util

2016-06-02 Thread Dietmar Eggemann
On 01/06/16 21:11, Peter Zijlstra wrote:
> On Wed, Jun 01, 2016 at 08:39:22PM +0100, Dietmar Eggemann wrote:
>> The information whether a se/cfs_rq should get its load and
>> utilization (se representing a task and root cfs_rq) or only its load
>> (se representing a task group and cfs_rq owned by this se) updated can
>> be passed into __update_load_avg() to avoid the additional if/else
>> condition to set update_util.
>>
>> @running is changed to @update_util, which now carries the information
>> whether the utilization of the se/cfs_rq should be updated and whether the
>> se/cfs_rq is running.
>>
>> Signed-off-by: Dietmar Eggemann 
>> ---
>>  kernel/sched/fair.c | 42 +-
>>  1 file changed, 21 insertions(+), 21 deletions(-)
>>
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index 3ae8e79fb687..a1c13975cf56 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -2669,6 +2669,10 @@ static u32 __compute_runnable_contrib(u64 n)
>>  
>>  #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
>>  
>> +#define upd_util_se(se, rng) ((entity_is_task(se) << 1) | (rng))
> 
> Just saying that on first reading that went: Random Number Generator, uh
> what?!
> 
> So maybe pick better names?

Yeah, can do. What about?

#define update_util_se(se, running) ((entity_is_task(se) << 1) | (running))
#define update_util_rq(cfs_rq) ((rq_is_root(cfs_rq) << 1) | !!(cfs_rq)->curr)
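
As an illustration of what these two helpers would encode, a hedged userspace sketch; entity_is_task(), rq_is_root() and cfs_rq->curr are kernel-side, so they are reduced to plain ints here. Bit 1 means "update utilization", bit 0 means "currently running":

#include <stdio.h>

/* Stand-in for the proposed helpers; the kernel predicates become ints. */
#define update_util(util_tracked, running) \
	(((util_tracked) << 1) | !!(running))

int main(void)
{
	/* Task se that is currently running: 0x3 -> util_sum and util_avg updated. */
	printf("task se, running:        0x%x\n", update_util(1, 1));
	/* Task se, not running: 0x2 -> only util_avg is recomputed on decay. */
	printf("task se, not running:    0x%x\n", update_util(1, 0));
	/* Group se / non-root cfs_rq: bit 1 clear -> load only, no util update. */
	printf("group se, running:       0x%x\n", update_util(0, 1));
	return 0;
}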

 


Re: [RFC PATCH 3/3] sched/fair: Change @running of __update_load_avg() to @update_util

2016-06-02 Thread Juri Lelli
Hi,

another minor comment below. :-)

On 01/06/16 20:39, Dietmar Eggemann wrote:
> The information whether a se/cfs_rq should get its load and
> utilization (se representing a task and root cfs_rq) or only its load
> (se representing a task group and cfs_rq owned by this se) updated can
> be passed into __update_load_avg() to avoid the additional if/else
> condition to set update_util.
> 
> @running is changed to @update_util, which now carries the information
> whether the utilization of the se/cfs_rq should be updated and whether the
> se/cfs_rq is running.
> 
> Signed-off-by: Dietmar Eggemann 
> ---
>  kernel/sched/fair.c | 42 +-
>  1 file changed, 21 insertions(+), 21 deletions(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 3ae8e79fb687..a1c13975cf56 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -2669,6 +2669,10 @@ static u32 __compute_runnable_contrib(u64 n)
>  
>  #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
>  
> +#define upd_util_se(se, rng) ((entity_is_task(se) << 1) | (rng))
> +#define upd_util_cfs_rq(cfs_rq) \
> + (((&rq_of(cfs_rq)->cfs == cfs_rq) << 1) | !!cfs_rq->curr)
> +
>  /*
>   * We can represent the historical contribution to runnable average as the
>   * coefficients of a geometric series.  To do this we sub-divide our runnable
> @@ -2699,13 +2703,12 @@ static u32 __compute_runnable_contrib(u64 n)
>   */
>  static __always_inline int
>  __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
> -   unsigned long weight, int running, struct cfs_rq *cfs_rq)
> +   unsigned long weight, int update_util, struct cfs_rq *cfs_rq)
>  {
>   u64 delta, scaled_delta, periods;
>   u32 contrib;
>   unsigned int delta_w, scaled_delta_w, decayed = 0;
>   unsigned long scale_freq, scale_cpu;
> - int update_util = 0;
>  
>   delta = now - sa->last_update_time;
>   /*
> @@ -2726,12 +2729,6 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
>   return 0;
>   sa->last_update_time = now;
>  
> - if (cfs_rq) {
> - if (&rq_of(cfs_rq)->cfs == cfs_rq)
> - update_util = 1;
> - } else if (entity_is_task(container_of(sa, struct sched_entity, avg)))
> - update_util = 1;
> -
>   scale_freq = arch_scale_freq_capacity(NULL, cpu);
>   scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
>  
> @@ -2757,7 +2754,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
>   weight * scaled_delta_w;
>   }
>   }
> - if (update_util && running)
> + if (update_util == 0x3)

How about a define for these masks?

Best,

- Juri


Re: [RFC PATCH 3/3] sched/fair: Change @running of __update_load_avg() to @update_util

2016-06-01 Thread Peter Zijlstra
On Wed, Jun 01, 2016 at 08:39:22PM +0100, Dietmar Eggemann wrote:
> The information whether a se/cfs_rq should get its load and
> utilization (se representing a task and root cfs_rq) or only its load
> (se representing a task group and cfs_rq owned by this se) updated can
> be passed into __update_load_avg() to avoid the additional if/else
> condition to set update_util.
> 
> @running is changed to @update_util, which now carries the information
> whether the utilization of the se/cfs_rq should be updated and whether the
> se/cfs_rq is running.
> 
> Signed-off-by: Dietmar Eggemann 
> ---
>  kernel/sched/fair.c | 42 +-
>  1 file changed, 21 insertions(+), 21 deletions(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 3ae8e79fb687..a1c13975cf56 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -2669,6 +2669,10 @@ static u32 __compute_runnable_contrib(u64 n)
>  
>  #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
>  
> +#define upd_util_se(se, rng) ((entity_is_task(se) << 1) | (rng))

Just saying that on first reading that went: Random Number Generator, uh
what?!

So maybe pick better names?


[RFC PATCH 3/3] sched/fair: Change @running of __update_load_avg() to @update_util

2016-06-01 Thread Dietmar Eggemann
The information whether a se/cfs_rq should get its load and
utilization (se representing a task and root cfs_rq) or only its load
(se representing a task group and cfs_rq owned by this se) updated can
be passed into __update_load_avg() to avoid the additional if/else
condition to set update_util.

@running is changed to @update_util, which now carries the information
whether the utilization of the se/cfs_rq should be updated and whether the
se/cfs_rq is running.

Signed-off-by: Dietmar Eggemann 
---
 kernel/sched/fair.c | 42 +-
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3ae8e79fb687..a1c13975cf56 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2669,6 +2669,10 @@ static u32 __compute_runnable_contrib(u64 n)
 
 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
 
+#define upd_util_se(se, rng) ((entity_is_task(se) << 1) | (rng))
+#define upd_util_cfs_rq(cfs_rq) \
+   (((&rq_of(cfs_rq)->cfs == cfs_rq) << 1) | !!cfs_rq->curr)
+
 /*
  * We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series.  To do this we sub-divide our runnable
@@ -2699,13 +2703,12 @@ static u32 __compute_runnable_contrib(u64 n)
  */
 static __always_inline int
 __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
- unsigned long weight, int running, struct cfs_rq *cfs_rq)
+ unsigned long weight, int update_util, struct cfs_rq *cfs_rq)
 {
u64 delta, scaled_delta, periods;
u32 contrib;
unsigned int delta_w, scaled_delta_w, decayed = 0;
unsigned long scale_freq, scale_cpu;
-   int update_util = 0;
 
delta = now - sa->last_update_time;
/*
@@ -2726,12 +2729,6 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
return 0;
sa->last_update_time = now;
 
-   if (cfs_rq) {
-   if (&rq_of(cfs_rq)->cfs == cfs_rq)
-   update_util = 1;
-   } else if (entity_is_task(container_of(sa, struct sched_entity, avg)))
-   update_util = 1;
-
scale_freq = arch_scale_freq_capacity(NULL, cpu);
scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
 
@@ -2757,7 +2754,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
weight * scaled_delta_w;
}
}
-   if (update_util && running)
+   if (update_util == 0x3)
sa->util_sum += scaled_delta_w * scale_cpu;
 
delta -= delta_w;
@@ -2781,7 +2778,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
if (cfs_rq)
cfs_rq->runnable_load_sum += weight * contrib;
}
-   if (update_util && running)
+   if (update_util == 0x3)
sa->util_sum += contrib * scale_cpu;
}
 
@@ -2792,7 +2789,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
if (cfs_rq)
cfs_rq->runnable_load_sum += weight * scaled_delta;
}
-   if (update_util && running)
+   if (update_util == 0x3)
sa->util_sum += scaled_delta * scale_cpu;
 
sa->period_contrib += delta;
@@ -2803,7 +2800,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
cfs_rq->runnable_load_avg =
div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
}
-   if (update_util)
+   if (update_util & 0x2)
sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
}
 
@@ -2873,7 +2870,7 @@ void set_task_rq_fair(struct sched_entity *se,
n_last_update_time = next->avg.last_update_time;
 #endif
__update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
- &se->avg, 0, 0, NULL);
+ &se->avg, 0, upd_util_se(se, 0), NULL);
se->avg.last_update_time = n_last_update_time;
}
 }
@@ -2935,7 +2932,8 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
}
 
decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
-   scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
+   scale_load_down(cfs_rq->load.weight), upd_util_cfs_rq(cfs_rq),
+   cfs_rq);
 
 #ifndef CONFIG_64BIT
smp_wmb();
@@ -2962,7 +2960,7 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
 */
__update_load_avg(now, cpu, &se->avg,
  se->on_rq * scale_load_down(se->load.weight),
- cfs_rq->curr == se, NULL);
+ upd_util_se(se, cfs_rq->curr == se), NULL);
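
For reference, a standalone decoder (plain userspace C, not kernel code) of the @update_util values the changed call sites produce. The bit-1 values shown assume a task se resp. the root cfs_rq; group se's and child cfs_rq's leave that bit clear, so only their load is updated:

#include <stdio.h>

/*
 * Decode an @update_util value the way the patched checks do:
 * "== 0x3" gates util_sum accrual, "& 0x2" gates the util_avg recompute.
 */
static void decode(const char *caller, int update_util)
{
	printf("%-48s -> util_sum %-10s util_avg %s\n", caller,
	       update_util == 0x3 ? "accrued," : "unchanged,",
	       (update_util & 0x2) ? "recomputed" : "unchanged");
}

int main(void)
{
	decode("upd_util_se(task se, running=1)", 0x3);
	decode("upd_util_se(task se, running=0)", 0x2);
	decode("upd_util_se(group se, running=1)", 0x1);
	decode("upd_util_cfs_rq(root cfs_rq, curr != NULL)", 0x3);
	decode("upd_util_cfs_rq(child cfs_rq, curr != NULL)", 0x1);
	return 0;
}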
 
