On Tue, Jul 14, 2020 at 10:06:16AM +0530, Srikar Dronamraju wrote:
> Just moving the powerpc_topology description above.
> This will help in using functions in this file and avoid declarations.
> 
> No other functional changes
> 
> Cc: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>
> Cc: Michael Ellerman <micha...@au1.ibm.com>
> Cc: Nick Piggin <npig...@au1.ibm.com>
> Cc: Oliver O'Halloran <olive...@au1.ibm.com>
> Cc: Nathan Lynch <nath...@linux.ibm.com>
> Cc: Michael Neuling <mi...@linux.ibm.com>
> Cc: Anton Blanchard <an...@au1.ibm.com>
> Cc: Gautham R Shenoy <e...@linux.vnet.ibm.com>
> Cc: Vaidyanathan Srinivasan <sva...@linux.ibm.com>
> Signed-off-by: Srikar Dronamraju <sri...@linux.vnet.ibm.com>

Reviewed-by: Gautham R. Shenoy <e...@linux.vnet.ibm.com>

> ---
>  arch/powerpc/kernel/smp.c | 116 +++++++++++++++++++-------------------
>  1 file changed, 58 insertions(+), 58 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
> index 069ea4b21c6d..24529f6134aa 100644
> --- a/arch/powerpc/kernel/smp.c
> +++ b/arch/powerpc/kernel/smp.c
> @@ -818,6 +818,64 @@ static int init_cpu_l1_cache_map(int cpu)
>       return err;
>  }
> 
> +static bool shared_caches;
> +
> +#ifdef CONFIG_SCHED_SMT
> +/* cpumask of CPUs with asymmetric SMT dependency */
> +static int powerpc_smt_flags(void)
> +{
> +     int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
> +
> +     if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
> +             printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
> +             flags |= SD_ASYM_PACKING;
> +     }
> +     return flags;
> +}
> +#endif
> +
> +/*
> + * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
> + * This topology makes it *much* cheaper to migrate tasks between adjacent cores
> + * since the migrated task remains cache hot. We want to take advantage of this
> + * at the scheduler level so an extra topology level is required.
> + */
> +static int powerpc_shared_cache_flags(void)
> +{
> +     return SD_SHARE_PKG_RESOURCES;
> +}
> +
> +/*
> + * We can't just pass cpu_l2_cache_mask() directly because it
> + * returns a non-const pointer and the compiler barfs on that.
> + */
> +static const struct cpumask *shared_cache_mask(int cpu)
> +{
> +     if (shared_caches)
> +             return cpu_l2_cache_mask(cpu);
> +
> +     if (has_big_cores)
> +             return cpu_smallcore_mask(cpu);
> +
> +     return cpu_smt_mask(cpu);
> +}
> +
> +#ifdef CONFIG_SCHED_SMT
> +static const struct cpumask *smallcore_smt_mask(int cpu)
> +{
> +     return cpu_smallcore_mask(cpu);
> +}
> +#endif
> +
> +static struct sched_domain_topology_level powerpc_topology[] = {
> +#ifdef CONFIG_SCHED_SMT
> +     { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
> +#endif
> +     { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
> +     { cpu_cpu_mask, SD_INIT_NAME(DIE) },
> +     { NULL, },
> +};
> +
>  static int init_big_cores(void)
>  {
>       int cpu;
> @@ -1249,8 +1307,6 @@ static void add_cpu_to_masks(int cpu)
>                       set_cpus_related(cpu, i, cpu_core_mask);
>  }
> 
> -static bool shared_caches;
> -
>  /* Activate a secondary processor. */
>  void start_secondary(void *unused)
>  {
> @@ -1314,62 +1370,6 @@ int setup_profiling_timer(unsigned int multiplier)
>       return 0;
>  }
> 
> -#ifdef CONFIG_SCHED_SMT
> -/* cpumask of CPUs with asymmetric SMT dependency */
> -static int powerpc_smt_flags(void)
> -{
> -     int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
> -
> -     if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
> -             printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
> -             flags |= SD_ASYM_PACKING;
> -     }
> -     return flags;
> -}
> -#endif
> -
> -/*
> - * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
> - * This topology makes it *much* cheaper to migrate tasks between adjacent cores
> - * since the migrated task remains cache hot. We want to take advantage of this
> - * at the scheduler level so an extra topology level is required.
> - */
> -static int powerpc_shared_cache_flags(void)
> -{
> -     return SD_SHARE_PKG_RESOURCES;
> -}
> -
> -/*
> - * We can't just pass cpu_l2_cache_mask() directly because it
> - * returns a non-const pointer and the compiler barfs on that.
> - */
> -static const struct cpumask *shared_cache_mask(int cpu)
> -{
> -     if (shared_caches)
> -             return cpu_l2_cache_mask(cpu);
> -
> -     if (has_big_cores)
> -             return cpu_smallcore_mask(cpu);
> -
> -     return cpu_smt_mask(cpu);
> -}
> -
> -#ifdef CONFIG_SCHED_SMT
> -static const struct cpumask *smallcore_smt_mask(int cpu)
> -{
> -     return cpu_smallcore_mask(cpu);
> -}
> -#endif
> -
> -static struct sched_domain_topology_level powerpc_topology[] = {
> -#ifdef CONFIG_SCHED_SMT
> -     { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
> -#endif
> -     { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
> -     { cpu_cpu_mask, SD_INIT_NAME(DIE) },
> -     { NULL, },
> -};
> -
>  void __init smp_cpus_done(unsigned int max_cpus)
>  {
>       /*
> -- 
> 2.17.1
> 

Reply via email to