[PATCH v7 REPOST 4/9] arm64: parse cpu capacity-dmips-mhz from DT

2016-10-17, Juri Lelli
With the introduction of cpu capacity-dmips-mhz bindings, CPU capacities
can now be calculated from values extracted from DT and information
coming from cpufreq. Add parsing of DT information at boot time, and
complement it with cpufreq information. Also, store such information
using per CPU variables, as we do for arm.

Caveat: the information provided by this patch will start to be used in
the future. We need to #define arch_scale_cpu_capacity to something
provided by the arch, so that the scheduler's default implementation
(which gets used if arch_scale_cpu_capacity is not defined) is overridden.
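
For context, this works through the scheduler's default-if-undefined
convention: kernel/sched only falls back to its own implementation when the
arch has not #defined arch_scale_cpu_capacity. A minimal, hypothetical
fragment of an arch topology header (not part of this patch; the actual hook
is wired up separately) would look like:

/* Hypothetical arch header fragment; signature as in the function added below. */
struct sched_domain;

unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu);
#define arch_scale_cpu_capacity arch_scale_cpu_capacity

The function itself is the one this patch adds to topology.c, returning the
per-CPU cpu_scale value.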

Cc: Catalin Marinas 
Cc: Will Deacon 
Cc: Mark Brown 
Cc: Sudeep Holla 
Signed-off-by: Juri Lelli 
Acked-by: Vincent Guittot 
---

Changes from v1:
  - normalize w.r.t. highest capacity found in DT
  - bailout conditions (all-or-nothing)

Changes from v4:
  - parsing modified to reflect change in binding (capacity-dmips-mhz)

Changes from v5:
  - allocate raw_capacity array with kcalloc()
  - pr_err() only for partial capacity information

Changes from v6:
  - use cpuinfo.max_freq instead of policy->max
  - add delayed work to unregister cpufreq notifier
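
To make the resulting numbers concrete, here is a small standalone C sketch
(user space, invented values, not kernel code) of what the DT parsing plus
the cpufreq complement amount to: each CPU's capacity-dmips-mhz is combined
with its maximum frequency (cpuinfo.max_freq, per the v6 change above) and
the results are normalized so the biggest CPU ends up at
SCHED_CAPACITY_SCALE (1024).

/*
 * Standalone illustration only: invented dmips-per-MHz and max-frequency
 * values for a hypothetical 2 big + 2 LITTLE system.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* SCHED_CAPACITY_SCALE == 1024 */

int main(void)
{
	unsigned long long dmips_mhz[] = { 1024, 1024, 578, 578 };		/* from DT */
	unsigned long long max_khz[] = { 1800000, 1800000, 1200000, 1200000 };	/* from cpufreq */
	unsigned long long raw[4], scale = 0;
	int cpu;

	/* raw capacity ~ dmips-per-MHz * max frequency in MHz */
	for (cpu = 0; cpu < 4; cpu++) {
		raw[cpu] = dmips_mhz[cpu] * (max_khz[cpu] / 1000);
		if (raw[cpu] > scale)
			scale = raw[cpu];
	}

	/* normalize so the biggest CPU ends up at SCHED_CAPACITY_SCALE */
	for (cpu = 0; cpu < 4; cpu++)
		printf("CPU%d capacity=%llu\n", cpu,
		       (raw[cpu] << SCHED_CAPACITY_SHIFT) / scale);

	return 0;
}

With these made-up numbers the big CPUs print 1024 and the LITTLE ones 385;
the patch below arrives at the same ratios via the per-CPU cpu_scale
variable.
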
---
 arch/arm64/kernel/topology.c | 159 ++-
 1 file changed, 158 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 694f6deedbab..b75b0ba2e113 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -19,10 +19,162 @@
 #include <linux/nodemask.h>
 #include <linux/of.h>
 #include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
 
 #include <asm/cputype.h>
 #include <asm/topology.h>
 
+static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+
+unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+   return per_cpu(cpu_scale, cpu);
+}
+
+static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+{
+   per_cpu(cpu_scale, cpu) = capacity;
+}
+
+static u32 capacity_scale;
+static u32 *raw_capacity;
+static bool cap_parsing_failed;
+
+static void __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+{
+   int ret;
+   u32 cpu_capacity;
+
+   if (cap_parsing_failed)
+   return;
+
+   ret = of_property_read_u32(cpu_node,
+  "capacity-dmips-mhz",
+  &cpu_capacity);
+   if (!ret) {
+   if (!raw_capacity) {
+   raw_capacity = kcalloc(num_possible_cpus(),
+  sizeof(*raw_capacity),
+  GFP_KERNEL);
+   if (!raw_capacity) {
+   pr_err("cpu_capacity: failed to allocate memory 
for raw capacities\n");
+   cap_parsing_failed = true;
+   return;
+   }
+   }
+   capacity_scale = max(cpu_capacity, capacity_scale);
+   raw_capacity[cpu] = cpu_capacity;
+   pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
+   cpu_node->full_name, raw_capacity[cpu]);
+   } else {
+   if (raw_capacity) {
+   pr_err("cpu_capacity: missing %s raw capacity\n",
+   cpu_node->full_name);
+   pr_err("cpu_capacity: partial information: fallback to 
1024 for all CPUs\n");
+   }
+   cap_parsing_failed = true;
+   kfree(raw_capacity);
+   }
+}
+
+static void normalize_cpu_capacity(void)
+{
+   u64 capacity;
+   int cpu;
+
+   if (!raw_capacity || cap_parsing_failed)
+   return;
+
+   pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
+   for_each_possible_cpu(cpu) {
+   pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
+cpu, raw_capacity[cpu]);
+   capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
+   / capacity_scale;
+   set_capacity_scale(cpu, capacity);
+   pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+   cpu, arch_scale_cpu_capacity(NULL, cpu));
+   }
+}
+
+#ifdef CONFIG_CPU_FREQ
+static cpumask_var_t cpus_to_visit;
+static bool cap_parsing_done;
+static void parsing_done_workfn(struct work_struct *work);
+static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+
+static int
+init_cpu_capacity_callback(struct notifier_block *nb,
+  unsigned long val,
+  void *data)
+{
+   struct cpufreq_policy *policy = data;
+   int cpu;
+
+   if (cap_parsing_failed || cap_parsing_done)
+   return 0;
+
+   switch (val) {
+   case CPUFREQ_NOTIFY:
+   pr_debug("cpu_capacity: init cpu capacity for CPUs 

[PATCH v7 REPOST 4/9] arm64: parse cpu capacity-dmips-mhz from DT

2016-10-17 Thread Juri Lelli
With the introduction of cpu capacity-dmips-mhz bindings, CPU capacities
can now be calculated from values extracted from DT and information
coming from cpufreq. Add parsing of DT information at boot time, and
complement it with cpufreq information. Also, store such information
using per CPU variables, as we do for arm.

Caveat: the information provided by this patch will start to be used in
the future. We need to #define arch_scale_cpu_capacity to something
provided in arch, so that scheduler's default implementation (which gets
used if arch_scale_cpu_capacity is not defined) is overwritten.

Cc: Catalin Marinas 
Cc: Will Deacon 
Cc: Mark Brown 
Cc: Sudeep Holla 
Signed-off-by: Juri Lelli 
Acked-by: Vincent Guittot 
---

Changes from v1:
  - normalize w.r.t. highest capacity found in DT
  - bailout conditions (all-or-nothing)

Changes from v4:
  - parsing modified to reflect change in binding (capacity-dmips-mhz)

Changes from v5:
  - allocate raw_capacity array with kcalloc()
  - pr_err() only for partial capacity information

Changes from v6:
  - use cpuinfo.max_freq instead of policy->max
  - add delayed work to unregister cpufreq notifier
---
 arch/arm64/kernel/topology.c | 159 ++-
 1 file changed, 158 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 694f6deedbab..b75b0ba2e113 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -19,10 +19,162 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #include 
 #include 
 
+static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+
+unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+   return per_cpu(cpu_scale, cpu);
+}
+
+static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+{
+   per_cpu(cpu_scale, cpu) = capacity;
+}
+
+static u32 capacity_scale;
+static u32 *raw_capacity;
+static bool cap_parsing_failed;
+
+static void __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+{
+   int ret;
+   u32 cpu_capacity;
+
+   if (cap_parsing_failed)
+   return;
+
+   ret = of_property_read_u32(cpu_node,
+  "capacity-dmips-mhz",
+  _capacity);
+   if (!ret) {
+   if (!raw_capacity) {
+   raw_capacity = kcalloc(num_possible_cpus(),
+  sizeof(*raw_capacity),
+  GFP_KERNEL);
+   if (!raw_capacity) {
+   pr_err("cpu_capacity: failed to allocate memory 
for raw capacities\n");
+   cap_parsing_failed = true;
+   return;
+   }
+   }
+   capacity_scale = max(cpu_capacity, capacity_scale);
+   raw_capacity[cpu] = cpu_capacity;
+   pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
+   cpu_node->full_name, raw_capacity[cpu]);
+   } else {
+   if (raw_capacity) {
+   pr_err("cpu_capacity: missing %s raw capacity\n",
+   cpu_node->full_name);
+   pr_err("cpu_capacity: partial information: fallback to 
1024 for all CPUs\n");
+   }
+   cap_parsing_failed = true;
+   kfree(raw_capacity);
+   }
+}
+
+static void normalize_cpu_capacity(void)
+{
+   u64 capacity;
+   int cpu;
+
+   if (!raw_capacity || cap_parsing_failed)
+   return;
+
+   pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
+   for_each_possible_cpu(cpu) {
+   pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
+cpu, raw_capacity[cpu]);
+   capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
+   / capacity_scale;
+   set_capacity_scale(cpu, capacity);
+   pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+   cpu, arch_scale_cpu_capacity(NULL, cpu));
+   }
+}
+
+#ifdef CONFIG_CPU_FREQ
+static cpumask_var_t cpus_to_visit;
+static bool cap_parsing_done;
+static void parsing_done_workfn(struct work_struct *work);
+static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+
+static int
+init_cpu_capacity_callback(struct notifier_block *nb,
+  unsigned long val,
+  void *data)
+{
+   struct cpufreq_policy *policy = data;
+   int cpu;
+
+   if (cap_parsing_failed || cap_parsing_done)
+   return 0;
+
+   switch (val) {
+   case CPUFREQ_NOTIFY:
+   pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] 
(to_visit=%*pbl)\n",
+   cpumask_pr_args(policy->related_cpus),
+
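
On the "add delayed work to unregister cpufreq notifier" item in the
changelog: a cpufreq policy notifier cannot safely unregister itself from
inside its own callback (the notifier chain's lock is held while callbacks
run), so the unregistration is deferred to a work item once the capacity
information has been gathered. A minimal sketch of that general pattern,
with made-up names rather than the ones used in the patch:

/* Illustrative pattern only; names and the trivial "done" condition are
 * made up, not taken from the patch. */
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static void capacity_done_workfn(struct work_struct *work);
static DECLARE_WORK(capacity_done_work, capacity_done_workfn);

static int capacity_nb_callback(struct notifier_block *nb,
				unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;

	if (val != CPUFREQ_NOTIFY)
		return 0;

	pr_debug("max_freq for CPUs %*pbl: %u kHz\n",
		 cpumask_pr_args(policy->related_cpus),
		 policy->cpuinfo.max_freq);

	/*
	 * Unregistering from inside the callback would deadlock on the
	 * notifier chain's lock, so defer it to process context.  (A real
	 * implementation would first wait until every CPU has been seen.)
	 */
	schedule_work(&capacity_done_work);
	return 0;
}

static struct notifier_block capacity_nb = {
	.notifier_call = capacity_nb_callback,
};

static void capacity_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&capacity_nb, CPUFREQ_POLICY_NOTIFIER);
}

static int __init register_capacity_notifier(void)
{
	return cpufreq_register_notifier(&capacity_nb, CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(register_capacity_notifier);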