In __decay_sum(), narrowing the 64-bit argument to a 32-bit local variable
serves no performance or correctness purpose: the preceding bounds check
already guarantees n <= SCHED_AVG_HALFLIFE * 63, so the divide, modulo and
table lookup are fine on the 64-bit value as-is.

Minor cleanup; no functional change.
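
For reference, the split __decay_sum() relies on, y^n = (1/2)^(n / HALFLIFE) *
y^(n % HALFLIFE) with y^HALFLIFE == 1/2, can be checked with a small userspace
sketch; HALFLIFE is assumed to be 32 here, matching SCHED_AVG_HALFLIFE, and the
snippet is illustration only, not part of the patch:

	#include <math.h>
	#include <stdio.h>

	#define HALFLIFE 32	/* assumed value of SCHED_AVG_HALFLIFE */

	int main(void)
	{
		/* y chosen so that y^HALFLIFE == 1/2 */
		double y = pow(0.5, 1.0 / HALFLIFE);
		unsigned long n = 100;

		/* direct evaluation of y^n */
		double direct = pow(y, n);
		/* n / HALFLIFE halvings, then the remainder in one step */
		double split = pow(0.5, (double)(n / HALFLIFE)) *
			       pow(y, (double)(n % HALFLIFE));

		printf("%.12f %.12f\n", direct, split);
		return 0;
	}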

Signed-off-by: Yuyang Du <[email protected]>
---
 kernel/sched/fair.c |   13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 283e2c2..eea3349 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2616,16 +2616,11 @@ static const u32 __accumulated_sum_N32[] = {
  */
 static __always_inline u64 __decay_sum(u64 val, u64 n)
 {
-       unsigned int local_n;
-
        if (!n)
                return val;
        else if (unlikely(n > SCHED_AVG_HALFLIFE * 63))
                return 0;
 
-       /* after bounds checking we can collapse to 32-bit */
-       local_n = n;
-
        /*
         * As y^HALFLIFE = 1/2, we can combine
         *    y^n = 1/2^(n/HALFLIFE) * y^(n%HALFLIFE)
@@ -2633,12 +2628,12 @@ static __always_inline u64 __decay_sum(u64 val, u64 n)
         *
         * To achieve constant time __decay_load.
         */
-       if (unlikely(local_n >= SCHED_AVG_HALFLIFE)) {
-               val >>= local_n / SCHED_AVG_HALFLIFE;
-               local_n %= SCHED_AVG_HALFLIFE;
+       if (unlikely(n >= SCHED_AVG_HALFLIFE)) {
+               val >>= n / SCHED_AVG_HALFLIFE;
+               n %= SCHED_AVG_HALFLIFE;
        }
 
-       val = mul_u64_u32_shr(val, __decay_inv_multiply_N[local_n], 32);
+       val = mul_u64_u32_shr(val, __decay_inv_multiply_N[n], 32);
        return val;
 }
 
-- 
1.7.9.5
