On 5 December 2017 at 18:10, Patrick Bellasi <[email protected]> wrote:
> Utilization and capacity are tracked as unsigned long, however some
> functions using them return an int which is ultimately assigned back to
> unsigned long variables.
>
> Since there is no scope for using a different, signed type, this
> consolidates the signature of functions returning utilization to always
> use the native type.
> As well as improving code consistency, this is also expected to benefit
> code paths where utilizations should be clamped, by avoiding further type
> conversions or ugly type casts.
>
> Signed-off-by: Patrick Bellasi <[email protected]>
> Reviewed-by: Chris Redpath <[email protected]>
> Reviewed-by: Brendan Jackman <[email protected]>
> Reviewed-by: Dietmar Eggemann <[email protected]>
> Cc: Ingo Molnar <[email protected]>
> Cc: Peter Zijlstra <[email protected]>
> Cc: Vincent Guittot <[email protected]>
> Cc: Morten Rasmussen <[email protected]>
> Cc: Dietmar Eggemann <[email protected]>
> Cc: [email protected]

Acked-by: Vincent Guittot <[email protected]>

>
> ---
> Changes v1->v2:
>  - rebase on top of v4.15-rc2
>  - tested that overhauled PELT code does not affect the util_est
> ---
>  kernel/sched/fair.c | 10 +++++-----
>  1 file changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 4037e19bbca2..ad21550d008c 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -5721,8 +5721,8 @@ static int wake_affine(struct sched_domain *sd, struct 
> task_struct *p,
>         return affine;
>  }
>
> -static inline int task_util(struct task_struct *p);
> -static int cpu_util_wake(int cpu, struct task_struct *p);
> +static inline unsigned long task_util(struct task_struct *p);
> +static unsigned long cpu_util_wake(int cpu, struct task_struct *p);
>
>  static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
>  {
> @@ -6203,7 +6203,7 @@ static int select_idle_sibling(struct task_struct *p, 
> int prev, int target)
>   * capacity_orig) as it useful for predicting the capacity required after 
> task
>   * migrations (scheduler-driven DVFS).
>   */
> -static int cpu_util(int cpu)
> +static unsigned long cpu_util(int cpu)
>  {
>         unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
>         unsigned long capacity = capacity_orig_of(cpu);
> @@ -6211,7 +6211,7 @@ static int cpu_util(int cpu)
>         return (util >= capacity) ? capacity : util;
>  }
>
> -static inline int task_util(struct task_struct *p)
> +static inline unsigned long task_util(struct task_struct *p)
>  {
>         return p->se.avg.util_avg;
>  }
> @@ -6220,7 +6220,7 @@ static inline int task_util(struct task_struct *p)
>   * cpu_util_wake: Compute cpu utilization with any contributions from
>   * the waking task p removed.
>   */
> -static int cpu_util_wake(int cpu, struct task_struct *p)
> +static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
>  {
>         unsigned long util, capacity;
>
> --
> 2.14.1
>

Reply via email to