On 02/06/16 10:25, Juri Lelli wrote:

[...]

>> @@ -2757,7 +2754,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
>>                                              weight * scaled_delta_w;
>>                      }
>>              }
>> -            if (update_util && running)
>> +            if (update_util == 0x3)
> 
> How about a define for these masks?

Something like this?

+#define UTIL_RUNNING   1
+#define UTIL_UPDATE    2
+
 /*
  * We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series.  To do this we sub-divide our runnable
@@ -2724,7 +2727,7 @@ static u32 __compute_runnable_contrib(u64 n)
  */
 static __always_inline int
 __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
-                 unsigned long weight, int update_util, struct cfs_rq *cfs_rq)
+                 unsigned long weight, int util_flags, struct cfs_rq *cfs_rq)
 {
        u64 delta, scaled_delta, periods;
        u32 contrib;
@@ -2775,7 +2778,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
                                                weight * scaled_delta_w;
                        }
                }
-               if (update_util == 0x3)
+               if (util_flags == (UTIL_UPDATE | UTIL_RUNNING))
                        sa->util_sum += scaled_delta_w * scale_cpu;
...
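
For illustration only, a caller could then build util_flags from the old
boolean pair instead of open-coding 0x3. This is just a sketch of a
hypothetical helper (build_util_flags is not part of the patch; the
update_util/running arguments stand in for whatever the call sites pass
today):

#define UTIL_RUNNING   1
#define UTIL_UPDATE    2

/*
 * Hypothetical call-site helper: combine the two conditions into the
 * util_flags argument of __update_load_avg(). Names are illustrative.
 */
static inline int build_util_flags(int update_util, int running)
{
        return (update_util ? UTIL_UPDATE : 0) |
               (running ? UTIL_RUNNING : 0);
}

That way the "== (UTIL_UPDATE | UTIL_RUNNING)" test above reads directly
against the same symbols the callers use, rather than a magic 0x3.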