From: Dietmar Eggemann <[email protected]>

Consolidate sched_init_numa() and sched_init_conv() into a single
function, sched_init_topology(), so that the sched domain topology
table is built in one place for both the NUMA and the !NUMA case.

Signed-off-by: Dietmar Eggemann <[email protected]>
---
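A rough sketch of the consolidated control flow, for review convenience
only (the NUMA distance/mask table construction and error handling are
elided; the diff below is authoritative):

	static void sched_init_topology(void)
	{
	#ifdef CONFIG_NUMA
		/* walk node_distance() to find the unique distances and
		 * build sched_domains_numa_distance[] plus
		 * sched_domains_numa_masks[][], one level per distance */
	#endif
		/* allocate tl[] with one extra zeroed entry so that
		 * for_each_sd_topology() terminates, then copy the
		 * sched_domain_topology_info[] entries into it */
		sched_domain_topology = tl;
	#ifdef CONFIG_NUMA
		/* append one topology level per NUMA distance and record
		 * sched_domains_numa_levels */
	#endif
	}
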
 kernel/sched/core.c |  164 +++++++++++++++++++++------------------------------
 1 file changed, 68 insertions(+), 96 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 798a4d2c9d7b..9edd1d511f3c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5410,13 +5410,72 @@ static bool find_numa_distance(int distance)
        return false;
 }
 
-static void sched_init_numa(void)
+static void sched_domains_numa_masks_set(int cpu)
+{
+       int i, j;
+       int node = cpu_to_node(cpu);
+
+       for (i = 0; i < sched_domains_numa_levels; i++) {
+               for (j = 0; j < nr_node_ids; j++) {
+                       if (node_distance(j, node) <= sched_domains_numa_distance[i])
+                               cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
+               }
+       }
+}
+
+static void sched_domains_numa_masks_clear(int cpu)
+{
+       int i, j;
+       for (i = 0; i < sched_domains_numa_levels; i++) {
+               for (j = 0; j < nr_node_ids; j++)
+                       cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
+       }
+}
+
+/*
+ * Update sched_domains_numa_masks[level][node] array when new cpus
+ * are onlined.
+ */
+static int sched_domains_numa_masks_update(struct notifier_block *nfb,
+                                          unsigned long action,
+                                          void *hcpu)
+{
+       int cpu = (long)hcpu;
+
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_ONLINE:
+               sched_domains_numa_masks_set(cpu);
+               break;
+
+       case CPU_DEAD:
+               sched_domains_numa_masks_clear(cpu);
+               break;
+
+       default:
+               return NOTIFY_DONE;
+       }
+
+       return NOTIFY_OK;
+}
+#else
+static int sched_domains_numa_masks_update(struct notifier_block *nfb,
+                                          unsigned long action,
+                                          void *hcpu)
+{
+       return 0;
+}
+#endif /* CONFIG_NUMA */
+
+static void sched_init_topology(void)
 {
-       int next_distance, curr_distance = node_distance(0, 0);
        struct sched_domain_topology_level *tl;
        struct sched_domain_topology_info *ti = sched_domain_topology_info;
        int level = 0;
-       int i, j, k;
+       int i;
+
+#ifdef CONFIG_NUMA
+       int next_distance, curr_distance = node_distance(0, 0);
+       int j, k;
 
        sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
        if (!sched_domains_numa_distance)
@@ -5512,6 +5571,7 @@ static void sched_init_numa(void)
                        }
                }
        }
+#endif /* CONFIG_NUMA */
 
        /*
         * An extra empty struct sched_domain_topology_level element at the end
@@ -5529,6 +5589,9 @@ static void sched_init_numa(void)
        for (i = 0; i < sched_domain_topology_info_size; i++)
                tl[i].info = ti[i];
 
+       sched_domain_topology = tl;
+
+#ifdef CONFIG_NUMA
        /*
         * .. and append 'j' levels of NUMA goodness.
         */
@@ -5541,99 +5604,9 @@ static void sched_init_numa(void)
                };
        }
 
-       sched_domain_topology = tl;
-
        sched_domains_numa_levels = level;
-}
-
-static void sched_init_conv(void)
-{
-}
-
-static void sched_domains_numa_masks_set(int cpu)
-{
-       int i, j;
-       int node = cpu_to_node(cpu);
-
-       for (i = 0; i < sched_domains_numa_levels; i++) {
-               for (j = 0; j < nr_node_ids; j++) {
-                       if (node_distance(j, node) <= sched_domains_numa_distance[i])
-                               cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
-               }
-       }
-}
-
-static void sched_domains_numa_masks_clear(int cpu)
-{
-       int i, j;
-       for (i = 0; i < sched_domains_numa_levels; i++) {
-               for (j = 0; j < nr_node_ids; j++)
-                       cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
-       }
-}
-
-/*
- * Update sched_domains_numa_masks[level][node] array when new cpus
- * are onlined.
- */
-static int sched_domains_numa_masks_update(struct notifier_block *nfb,
-                                          unsigned long action,
-                                          void *hcpu)
-{
-       int cpu = (long)hcpu;
-
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_ONLINE:
-               sched_domains_numa_masks_set(cpu);
-               break;
-
-       case CPU_DEAD:
-               sched_domains_numa_masks_clear(cpu);
-               break;
-
-       default:
-               return NOTIFY_DONE;
-       }
-
-       return NOTIFY_OK;
-}
-#else
-static inline void sched_init_numa(void)
-{
-}
-
-static void sched_init_conv(void)
-{
-       struct sched_domain_topology_level *tl;
-       struct sched_domain_topology_info *ti = sched_domain_topology_info;
-       int i;
-
-       /*
-        * An extra empty struct sched_domain_topology_level element at the end
-        * of the array is needed to let for_each_sd_topology() work correctly.
-        */
-       tl = kzalloc((sched_domain_topology_info_size + 1) *
-               sizeof(struct sched_domain_topology_level),
-               GFP_KERNEL);
-       if (!tl)
-               return;
-
-       /*
-        * Copy the topology info bits..
-        */
-       for (i = 0; i < sched_domain_topology_info_size; i++)
-               tl[i].info = ti[i];
-
-       sched_domain_topology = tl;
-}
-
-static int sched_domains_numa_masks_update(struct notifier_block *nfb,
-                                          unsigned long action,
-                                          void *hcpu)
-{
-       return 0;
-}
 #endif /* CONFIG_NUMA */
+}
 
 static struct sched_domain *
 sd_init(struct sched_domain_topology_level *tl, int cpu)
@@ -6151,8 +6124,7 @@ void __init sched_init_smp(void)
        alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
        alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
-       sched_init_conv();
-       sched_init_numa();
+       sched_init_topology();
 
        /*
         * There's no userspace yet to cause hotplug operations; hence all the
-- 
1.7.9.5

