In the sched average update, a period is about 1ms, so a 32-bit unsigned
integer can hold a maximum of approximately 49 days
(2^32/1000/3600/24), which is more than big enough; 64-bit arithmetic
is needless.
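
For reference, the bound can be double-checked with a small standalone
program; this is an illustrative sketch only, not part of the patch:

    /* Days representable by a u32 counter of ~1ms periods. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long max_periods = 1ULL << 32;   /* u32 range */
            unsigned long long ms_per_day = 1000ULL * 3600 * 24;

            printf("~%llu days\n", max_periods / ms_per_day); /* ~49 */
            return 0;
    }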

Signed-off-by: Yuyang Du <yuyang...@intel.com>
---
 kernel/sched/fair.c |   27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 74eaeab..17bc721 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2619,18 +2619,13 @@ static const u32 __accumulated_sum_N32[] = {
  * n is the number of periods past; a period is ~1ms
  * m is called half-life in exponential decay; here it is SCHED_AVG_HALFLIFE=32.
  */
-static __always_inline u64 __decay_sum(u64 val, u64 n)
+static __always_inline u64 __decay_sum(u64 val, u32 n)
 {
-       unsigned int local_n;
-
        if (!n)
                return val;
        else if (unlikely(n > SCHED_AVG_HALFLIFE * 63))
                return 0;
 
-       /* after bounds checking we can collapse to 32-bit */
-       local_n = n;
-
        /*
         * As y^PERIOD = 1/2, we can combine
         *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
@@ -2638,12 +2633,12 @@ static __always_inline u64 __decay_sum(u64 val, u64 n)
         *
         * To achieve constant time decay_load.
         */
-       if (unlikely(local_n >= SCHED_AVG_HALFLIFE)) {
-               val >>= local_n / SCHED_AVG_HALFLIFE;
-               local_n %= SCHED_AVG_HALFLIFE;
+       if (unlikely(n >= SCHED_AVG_HALFLIFE)) {
+               val >>= n / SCHED_AVG_HALFLIFE;
+               n %= SCHED_AVG_HALFLIFE;
        }
 
-       val = mul_u64_u32_shr(val, __decay_inv_multiply_N[local_n], 32);
+       val = mul_u64_u32_shr(val, __decay_inv_multiply_N[n], 32);
        return val;
 }
 
@@ -2654,7 +2649,7 @@ static __always_inline u64 __decay_sum(u64 val, u64 n)
  * We can compute this efficiently by combining:
  * y^32 = 1/2 with precomputed \Sum 1024*y^n   (where n < 32)
  */
-static u32 __accumulate_sum(u64 n)
+static u32 __accumulate_sum(u32 n)
 {
        u32 contrib = 0;
 
@@ -2708,8 +2703,8 @@ static __always_inline int
 __update_sched_avg(u64 now, int cpu, struct sched_avg *sa,
                  unsigned long weight, int running, struct cfs_rq *cfs_rq)
 {
-       u64 delta, scaled_delta, periods;
-       u32 contrib;
+       u64 delta, scaled_delta;
+       u32 contrib, periods;
        unsigned int delta_w, scaled_delta_w, decayed = 0;
        unsigned long scale_freq, scale_cpu;
 
@@ -2762,7 +2757,11 @@ __update_sched_avg(u64 now, int cpu, struct sched_avg *sa,
 
                delta -= delta_w;
 
-               /* Figure out how many additional periods this update spans */
+               /*
+                * Figure out how many additional periods this update spans.
+                * A period is 1024*1024ns (~1ms), so a 32-bit integer can
+                * hold a maximum of approximately 49 days (2^32/1000/3600/24).
+                */
                periods = delta / 1024;
                delta %= 1024;
 
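For reference, a standalone sketch (illustrative only; SCHED_AVG_HALFLIFE
and __decay_inv_multiply_N[] are the names used above) of the identity
__decay_sum() relies on: y^n = 1/2^(n/HALFLIFE) * y^(n%HALFLIFE) with
y^HALFLIFE = 1/2. The kernel turns the first factor into a right shift
and reads the second from the 32-entry __decay_inv_multiply_N[] table;
this floating-point version just demonstrates the decomposition is exact:

    #include <math.h>
    #include <stdio.h>

    #define HALFLIFE 32     /* mirrors SCHED_AVG_HALFLIFE */

    int main(void)
    {
            const double y = pow(0.5, 1.0 / HALFLIFE);  /* y^32 == 1/2 */
            unsigned int n = 100;                       /* periods (~ms) elapsed */

            double direct = pow(y, n);
            /* n/HALFLIFE halvings (a shift in the kernel); the remainder
             * would index the precomputed inverse-multiplier table.
             */
            double split = pow(0.5, (double)(n / HALFLIFE)) * pow(y, n % HALFLIFE);

            printf("%.12f %.12f\n", direct, split);     /* both ~0.114626 */
            return 0;
    }

Both printed values agree for any integer n, which is what lets
__decay_sum() run in constant time.
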
-- 
1.7.9.5
