Optimize init_amu_fie() in a couple of ways: exit early from paths
where we don't need to continue any further, call
topology_scale_freq_invariant() only at the points where its result is
actually needed instead of at the top of the routine, and avoid
calling it a third time.

Signed-off-by: Viresh Kumar <[email protected]>
---
V2:
- The enable/disable dance is actually required; this version just makes
  a number of other optimizations to improve readability.

 arch/arm64/kernel/topology.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index ebadc73449f9..1ebdb667f0d1 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -221,7 +221,7 @@ static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
 
 static int __init init_amu_fie(void)
 {
-       bool invariance_status = topology_scale_freq_invariant();
+       bool prev, now;
        cpumask_var_t valid_cpus;
        int ret = 0;
        int cpu;
@@ -249,18 +249,24 @@ static int __init init_amu_fie(void)
        if (cpumask_equal(valid_cpus, cpu_present_mask))
                cpumask_copy(amu_fie_cpus, cpu_present_mask);
 
-       if (!cpumask_empty(amu_fie_cpus)) {
-               pr_info("CPUs[%*pbl]: counters will be used for FIE.",
-                       cpumask_pr_args(amu_fie_cpus));
-               static_branch_enable(&amu_fie_key);
-       }
+       if (cpumask_empty(amu_fie_cpus))
+               goto free_valid_mask;
+
+       prev = topology_scale_freq_invariant();
+       static_branch_enable(&amu_fie_key);
+       now = topology_scale_freq_invariant();
 
        /*
         * If the system is not fully invariant after AMU init, disable
         * partial use of counters for frequency invariance.
         */
-       if (!topology_scale_freq_invariant())
+       if (!now) {
                static_branch_disable(&amu_fie_key);
+               goto free_valid_mask;
+       }
+
+       pr_info("CPUs[%*pbl]: counters will be used for FIE.",
+               cpumask_pr_args(amu_fie_cpus));
 
        /*
         * Task scheduler behavior depends on frequency invariance support,
@@ -268,7 +274,7 @@ static int __init init_amu_fie(void)
         * a result of counter initialisation and use, retrigger the build of
         * scheduling domains to ensure the information is propagated properly.
         */
-       if (invariance_status != topology_scale_freq_invariant())
+       if (prev != now)
                rebuild_sched_domains_energy();
 
 free_valid_mask:
-- 
2.25.0.rc1.19.g042ed3e048af

Reply via email to