*** Idea ***
For kexec -p, the boot cpu may not be cpu0, which causes a problem when
allocating memory for paca_ptrs[]. In theory, there is no requirement
that a cpu's logical id match its position in the device tree. However,
helpers such as cpu_first_thread_sibling() do make assumptions about the
mapping inside a core. Hence the mapping is only partially loosened:
the binding between cores and logical ids is dropped, while the mapping
inside a core is kept.
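
For context, the in-core assumption referred to above can be sketched
roughly as follows (a simplified rendering of the helper's intent; the
masking assumes threads_per_core is a power of two):

  /*
   * Sketch of the in-core mapping assumption: sibling threads of a core
   * occupy a contiguous, aligned block of logical ids, so the first
   * sibling can be found by masking off the thread bits.
   */
  static inline int cpu_first_thread_sibling(int cpu)
  {
          return cpu & ~(threads_per_core - 1);
  }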

*** Implement ***
At this early stage, there is plenty of memory to utilize. Hence, this
patch allocates interim memory to link the cpu info nodes on a list,
then reorders the cpus by changing the list head. As a result, there is
a rotating shift between the sequence number in the device tree and the
cpu logical number.
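
As a rough illustration (not the patch itself), rotating the interim
list so that the node holding the boot cpu is walked first can be
sketched as:

  #include <linux/list.h>

  /*
   * Conceptual sketch only: make @bt_node (the entry that contains the
   * boot cpu) the first node walked from @head. Re-inserting the
   * detached head just before @bt_node means list_for_each_entry()
   * starts its walk at @bt_node.
   */
  static void rotate_to_boot_core(struct list_head *head,
                                  struct list_head *bt_node)
  {
          list_del_init(head);
          list_add_tail(head, bt_node);
  }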

*** Result ***
After this patch, the boot cpu's logical id will always be mapped into
the range [0, threads_per_core).

Besides this, at this stage all threads in the boot core are forced to
be online. This restriction will be lifted in a later patch with extra
effort.
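
For illustration only (the numbers are hypothetical): with
threads_per_core = 8 and a kdump boot cpu that is thread 2 of the third
core listed in the device tree, the unrotated scheme would give it
logical id 18, whereas with the rotation its core is walked first and it
ends up as logical id 2, inside [0, threads_per_core).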

Signed-off-by: Pingfan Liu <pi...@redhat.com>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Nicholas Piggin <npig...@gmail.com>
Cc: Christophe Leroy <christophe.le...@csgroup.eu>
Cc: Mahesh Salgaonkar <mah...@linux.ibm.com>
Cc: Wen Xiong <wenxi...@us.ibm.com>
Cc: Baoquan He <b...@redhat.com>
Cc: Ming Lei <ming....@redhat.com>
Cc: kexec@lists.infradead.org
To: linuxppc-...@lists.ozlabs.org
---
 arch/powerpc/kernel/prom.c         | 25 +++++----
 arch/powerpc/kernel/setup-common.c | 87 +++++++++++++++++++++++-------
 2 files changed, 85 insertions(+), 27 deletions(-)

diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index ec82f5bda908..87272a2d8c10 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -76,7 +76,9 @@ u64 ppc64_rma_size;
 unsigned int boot_cpu_node_count __ro_after_init;
 #endif
 static phys_addr_t first_memblock_size;
+#ifdef CONFIG_SMP
 static int __initdata boot_cpu_count;
+#endif
 
 static int __init early_parse_mem(char *p)
 {
@@ -331,8 +333,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
        const __be32 *intserv;
        int i, nthreads;
        int len;
-       int found = -1;
-       int found_thread = 0;
+       bool found = false;
 
        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
@@ -355,8 +356,15 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
        for (i = 0; i < nthreads; i++) {
                if (be32_to_cpu(intserv[i]) ==
                        fdt_boot_cpuid_phys(initial_boot_params)) {
-                       found = boot_cpu_count;
-                       found_thread = i;
+                       /*
+                        * always map the boot-cpu logical id into the
+                        * range of [0, threads_per_core)
+                        */
+                       boot_cpuid = i;
+                       found = true;
+                       /* This works around the hole in paca_ptrs[]. */
+                       if (nr_cpu_ids < nthreads)
+                               set_nr_cpu_ids(nthreads);
                }
 #ifdef CONFIG_SMP
                /* logical cpu id is always 0 on UP kernels */
@@ -365,14 +373,13 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
        }
 
        /* Not the boot CPU */
-       if (found < 0)
+       if (!found)
                return 0;
 
-       DBG("boot cpu: logical %d physical %d\n", found,
-           be32_to_cpu(intserv[found_thread]));
-       boot_cpuid = found;
+       DBG("boot cpu: logical %d physical %d\n", boot_cpuid,
+           be32_to_cpu(intserv[boot_cpuid]));
 
-       boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);
+       boot_cpu_hwid = be32_to_cpu(intserv[boot_cpuid]);
 
        /*
         * PAPR defines "logical" PVR values for cpus that
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 1b19a9815672..81291e13dec0 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -36,6 +36,7 @@
 #include <linux/of_platform.h>
 #include <linux/hugetlb.h>
 #include <linux/pgtable.h>
+#include <linux/list.h>
 #include <asm/io.h>
 #include <asm/paca.h>
 #include <asm/processor.h>
@@ -425,6 +426,13 @@ static void __init cpu_init_thread_core_maps(int tpc)
 
 u32 *cpu_to_phys_id = NULL;
 
+struct interrupt_server_node {
+       struct list_head node;
+       bool    avail;
+       int     len;
+       __be32 *intserv;
+};
+
 /**
  * setup_cpu_maps - initialize the following cpu maps:
  *                  cpu_possible_mask
@@ -446,11 +454,16 @@ u32 *cpu_to_phys_id = NULL;
 void __init smp_setup_cpu_maps(void)
 {
        struct device_node *dn;
-       int cpu = 0;
-       int nthreads = 1;
+       int shift = 0, cpu = 0;
+       int j, nthreads = 1;
+       int len;
+       struct interrupt_server_node *intserv_node, *n;
+       struct list_head *bt_node, head;
+       bool avail, found_boot_cpu = false;
 
        DBG("smp_setup_cpu_maps()\n");
 
+       INIT_LIST_HEAD(&head);
        cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
                                        __alignof__(u32));
        if (!cpu_to_phys_id)
@@ -460,7 +473,6 @@ void __init smp_setup_cpu_maps(void)
        for_each_node_by_type(dn, "cpu") {
                const __be32 *intserv;
                __be32 cpu_be;
-               int j, len;
 
                DBG("  * %pOF...\n", dn);
 
@@ -480,29 +492,68 @@ void __init smp_setup_cpu_maps(void)
                        }
                }
 
-               nthreads = len / sizeof(int);
+               avail = of_device_is_available(dn);
+               if (!avail)
+                       avail = !of_property_match_string(dn,
+                                       "enable-method", "spin-table");
 
-               for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
-                       bool avail;
 
-                       DBG("    thread %d -> cpu %d (hard id %d)\n",
-                           j, cpu, be32_to_cpu(intserv[j]));
-
-                       avail = of_device_is_available(dn);
-                       if (!avail)
-                               avail = !of_property_match_string(dn,
-                                               "enable-method", "spin-table");
+               intserv_node = memblock_alloc(sizeof(struct interrupt_server_node) + len,
+                                       __alignof__(u32));
+               if (!intserv_node)
+                       panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+                               __func__,
+                               sizeof(struct interrupt_server_node) + len,
+                               __alignof__(u32));
+               intserv_node->intserv = (__be32 *)((char *)intserv_node +
+                                               sizeof(struct interrupt_server_node));
+               intserv_node->len = len;
+               memcpy(intserv_node->intserv, intserv, len);
+               intserv_node->avail = avail;
+               INIT_LIST_HEAD(&intserv_node->node);
+               list_add_tail(&intserv_node->node, &head);
+
+               if (!found_boot_cpu) {
+                       nthreads = len / sizeof(int);
+                       for (j = 0 ; j < nthreads; j++) {
+                               if (be32_to_cpu(intserv[j]) == boot_cpu_hwid) {
+                                       bt_node = &intserv_node->node;
+                                       found_boot_cpu = true;
+                                       /*
+                                        * Record the rotating shift between
+                                        * dt seq and the cpu logical number
+                                        */
+                                       shift = cpu - j;
+                                       break;
+                               }
+
+                               cpu++;
+                       }
+               }
 
+       }
+       cpu = 0;
+       list_del_init(&head);
+       /* Select the primary thread, the boot cpu's sibling, as logical cpu 0 */
+       list_add_tail(&head, bt_node);
+       pr_info("the rotating shift between dt seq and the cpu logical number: %d\n", shift);
+       list_for_each_entry(intserv_node, &head, node) {
+
+               avail = intserv_node->avail;
+               nthreads = intserv_node->len / sizeof(int);
+               for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
                        set_cpu_present(cpu, avail);
                        set_cpu_possible(cpu, true);
-                       cpu_to_phys_id[cpu] = be32_to_cpu(intserv[j]);
+                       cpu_to_phys_id[cpu] = be32_to_cpu(intserv_node->intserv[j]);
+                       DBG("    thread %d -> cpu %d (hard id %d)\n",
+                           j, cpu, be32_to_cpu(intserv_node->intserv[j]));
                        cpu++;
                }
+       }
 
-               if (cpu >= nr_cpu_ids) {
-                       of_node_put(dn);
-                       break;
-               }
+       list_for_each_entry_safe(intserv_node, n, &head, node) {
+               len = sizeof(struct interrupt_server_node) + intserv_node->len;
+               memblock_free(intserv_node, len);
        }
 
        /* If no SMT supported, nthreads is forced to 1 */
-- 
2.31.1

