From: Wang Qing <wangq...@vivo.com>

When ACPI is not enabled, we can get the cache topology from DT like:
*               cpu0: cpu@000 {
*                       next-level-cache = <&L2_1>;
*                       L2_1: l2-cache {
*                               compatible = "cache";
*                               next-level-cache = <&L3_1>;
*                       };
*                       L3_1: l3-cache {
*                               compatible = "cache";
*                       };
*               };
*
*               cpu1: cpu@001 {
*                       next-level-cache = <&L2_1>;
*               };
*               cpu2: cpu@002 {
*                       next-level-cache = <&L2_2>;
*                       L2_2: l2-cache {
*                               compatible = "cache";
*                               next-level-cache = <&L3_1>;
*                       };
*               };
*
*               cpu3: cpu@003 {
*                       next-level-cache = <&L2_2>;
*               };
cache_topology holds the "next-level-cache" node pointers for each CPU,
so it can describe the cache topology at every level.
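
For the example above (with cpu2 also pointing at L2_2 via "next-level-cache"),
init_cpu_cache_topology() would roughly fill the table as sketched below, where
index 0 is the first "next-level-cache" hop (L2) and index 1 the next one (L3):

	cache_topology[0][0] -> L2_1 node;   cache_topology[0][1] -> L3_1 node;
	cache_topology[1][0] -> L2_1 node;   cache_topology[1][1] -> L3_1 node;
	cache_topology[2][0] -> L2_2 node;   cache_topology[2][1] -> L3_1 node;
	cache_topology[3][0] -> L2_2 node;   cache_topology[3][1] -> L3_1 node;

So cpu0/cpu1 share L2_1, cpu2/cpu3 share L2_2, and all four CPUs share L3_1 as
the last level cache.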

Signed-off-by: Wang Qing <wangq...@vivo.com>
---
 drivers/base/arch_topology.c  | 89 ++++++++++++++++++++++++++++++++++++++++++-
 include/linux/arch_topology.h |  4 ++
 2 files changed, 92 insertions(+), 1 deletion(-)
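
As a rough usage sketch (not part of this patch), a caller could combine the
new helpers as below; the function name and the wake-up scenario are only
illustrative:

	/*
	 * Hypothetical helper: prefer a candidate CPU that shares the L2
	 * (or at least the last level cache) with the previously used CPU.
	 */
	static bool prefer_candidate(int prev_cpu, int candidate_cpu)
	{
		if (cpu_share_l2c(prev_cpu, candidate_cpu))
			return true;

		return cpu_share_llc(prev_cpu, candidate_cpu);
	}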

diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1d6636e..41e0301
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -647,6 +647,92 @@ static int __init parse_dt_topology(void)
 }
 #endif
 
+
+/*
+ * cpu cache topology table
+ */
+#define MAX_CACHE_LEVEL 7
+struct device_node *cache_topology[NR_CPUS][MAX_CACHE_LEVEL];
+
+void init_cpu_cache_topology(void)
+{
+       struct device_node *node_cpu, *node_cache;
+       int cpu;
+       int level = 0;
+
+       for_each_possible_cpu(cpu) {
+               node_cpu = of_get_cpu_node(cpu, NULL);
+               if (!node_cpu)
+                       continue;
+
+               level = 0;
+               node_cache = node_cpu;
+               while (level < MAX_CACHE_LEVEL) {
+                       node_cache = of_parse_phandle(node_cache, "next-level-cache", 0);
+                       if (!node_cache)
+                               break;
+
+                       cache_topology[cpu][level++] = node_cache;
+               }
+               of_node_put(node_cpu);
+       }
+}
+
+/*
+ * private means only shared within cpu_mask
+ * Returns -1 if not described in DT.
+ */
+int cpu_share_private_cache(const struct cpumask *cpu_mask)
+{
+       int cache_level, cpu_id;
+       struct cpumask cache_mask;
+       int cpu = cpumask_first(cpu_mask);
+
+       for (cache_level = 0; cache_level < MAX_CACHE_LEVEL; cache_level++) {
+               if (!cache_topology[cpu][cache_level])
+                       return -1;
+
+               cpumask_clear(&cache_mask);
+               for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++) {
+                       if (cache_topology[cpu][cache_level] == cache_topology[cpu_id][cache_level])
+                               cpumask_set_cpu(cpu_id, &cache_mask);
+               }
+
+               if (cpumask_equal(cpu_mask, &cache_mask))
+                       return 1;
+       }
+
+       return 0;
+}
+
+bool cpu_share_llc(int cpu1, int cpu2)
+{
+       int cache_level;
+
+       for (cache_level = MAX_CACHE_LEVEL - 1; cache_level >= 0; cache_level--) {
+               if (!cache_topology[cpu1][cache_level])
+                       continue;
+
+               if (cache_topology[cpu1][cache_level] == cache_topology[cpu2][cache_level])
+                       return true;
+
+               return false;
+       }
+
+       return false;
+}
+
+bool cpu_share_l2c(int cpu1, int cpu2)
+{
+       if (!cache_topology[cpu1][0])
+               return false;
+
+       if (cache_topology[cpu1][0] == cache_topology[cpu2][0])
+               return true;
+
+       return false;
+}
+
 /*
  * cpu topology table
  */
@@ -684,7 +770,8 @@ void update_siblings_masks(unsigned int cpuid)
        for_each_online_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];
 
-               if (cpuid_topo->llc_id == cpu_topo->llc_id) {
+               if ((cpuid_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id)
+                       || (cpuid_topo->llc_id == -1 && cpu_share_llc(cpu, cpuid))) {
                        cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
                        cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
                }
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 58cbe18..a402ff6
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -86,6 +86,10 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
 #define topology_cluster_cpumask(cpu)  (&cpu_topology[cpu].cluster_sibling)
 #define topology_llc_cpumask(cpu)      (&cpu_topology[cpu].llc_sibling)
 void init_cpu_topology(void);
+void init_cpu_cache_topology(void);
+int cpu_share_private_cache(const struct cpumask *cpu_mask);
+bool cpu_share_llc(int cpu1, int cpu2);
+bool cpu_share_l2c(int cpu1, int cpu2);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 const struct cpumask *cpu_clustergroup_mask(int cpu);
-- 
2.7.4
