All of the sibling PELT helpers (e.g., __decay_sum()) are marked __always_inline. If code size is not a concern, __accumulate_sum() should be marked __always_inline too.
Signed-off-by: Yuyang Du <yuyang...@intel.com>
---
 kernel/sched/fair.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 17bc721..1655280 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2649,7 +2649,7 @@ static __always_inline u64 __decay_sum(u64 val, u32 n)
  * We can compute this efficiently by combining:
  *   y^32 = 1/2 with precomputed \Sum 1024*y^n (where n < 32)
  */
-static u32 __accumulate_sum(u32 n)
+static __always_inline u32 __accumulate_sum(u32 n)
 {
 	u32 contrib = 0;
 
-- 
1.7.9.5
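
For context, here is a minimal user-space sketch of the scheme the quoted
comment describes: combine the identity y^32 = 1/2 with a partial-sum table
for n < 32 to evaluate \Sum_{k=1..n} 1024*y^k for arbitrary n. All names in
this sketch (PERIOD, y_sum, build_tables, accumulate_sum) are illustrative
and are not the kernel's; the kernel uses fully precomputed integer tables,
while this sketch builds an approximate table at startup.

/*
 * Sketch only: combine y^32 = 1/2 with a precomputed partial-sum
 * table for n < 32. Helper names are hypothetical, not kernel code.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define PERIOD 32			/* y^PERIOD == 1/2 by construction */

static uint32_t y_sum[PERIOD + 1];	/* y_sum[n] = \Sum_{k=1..n} 1024*y^k */

static void build_tables(void)
{
	double y = pow(0.5, 1.0 / PERIOD);	/* y such that y^32 == 1/2 */
	double s = 0.0;
	int n;

	for (n = 1; n <= PERIOD; n++) {
		s += 1024.0 * pow(y, n);
		y_sum[n] = (uint32_t)(s + 0.5);
	}
}

/*
 * \Sum_{k=1..n} 1024*y^k for arbitrary n: take the remainder from
 * the table, then fold in whole periods via S(m + 32) = S(32) + S(m)/2,
 * which holds because y^32 = 1/2. Integer truncation makes this an
 * approximation of what the kernel's exact tables encode.
 */
static uint32_t accumulate_sum(uint32_t n)
{
	uint32_t contrib = y_sum[n % PERIOD];
	uint32_t q;

	for (q = n / PERIOD; q; q--)
		contrib = y_sum[PERIOD] + contrib / 2;

	return contrib;
}

int main(void)
{
	build_tables();
	printf("S(16)  = %u\n", accumulate_sum(16));
	printf("S(32)  = %u\n", accumulate_sum(32));
	printf("S(345) = %u\n", accumulate_sum(345));	/* near the geometric limit */
	return 0;
}

Whether a compiler inlines accumulate_sum() above is its own choice; the
patch forces that decision in the kernel with __always_inline, matching the
sibling helpers such as __decay_sum().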