If the boot_cpuid is bigger than nr_cpus, it requires extra effort to
ensure the boot_cpu is in cpu_present_mask. This can be achieved by
reserving the last quota for the boot cpu.

Note: the restriction on nr_cpus will be lifted with more effort in the
next patch

Signed-off-by: Pingfan Liu <pi...@redhat.com>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Nicholas Piggin <npig...@gmail.com>
Cc: Christophe Leroy <christophe.le...@csgroup.eu>
Cc: Mahesh Salgaonkar <mah...@linux.ibm.com>
Cc: Wen Xiong <wenxi...@us.ibm.com>
Cc: Baoquan He <b...@redhat.com>
Cc: Ming Lei <ming....@redhat.com>
Cc: ke...@lists.infradead.org
To: linuxppc-dev@lists.ozlabs.org
---
 arch/powerpc/kernel/setup-common.c | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index f6d32324b5a5..a72d00a6cff2 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -454,8 +454,8 @@ struct interrupt_server_node {
 void __init smp_setup_cpu_maps(void)
 {
        struct device_node *dn;
-       int shift = 0, cpu = 0;
-       int j, nthreads = 1;
+       int terminate, shift = 0, cpu = 0;
+       int j, bt_thread = 0, nthreads = 1;
        int len;
        struct interrupt_server_node *intserv_node, *n;
        struct list_head *bt_node, head;
@@ -518,6 +518,7 @@ void __init smp_setup_cpu_maps(void)
                        for (j = 0 ; j < nthreads; j++) {
                                if (be32_to_cpu(intserv[j]) == boot_cpu_hwid) {
                                        bt_node = &intserv_node->node;
+                                       bt_thread = j;
                                        found_boot_cpu = true;
                                        /*
                                         * Record the round-shift between dt
@@ -537,11 +538,21 @@ void __init smp_setup_cpu_maps(void)
        /* Select the primary thread, the boot cpu's slibing, as the logic 0 */
        list_add_tail(&head, bt_node);
        pr_info("the round shift between dt seq and the cpu logic number: %d\n", shift);
+       terminate = nr_cpu_ids;
        list_for_each_entry(intserv_node, &head, node) {
 
+               j = 0;
+               /* Choose a start point to cover the boot cpu */
+               if (nr_cpu_ids - 1 < bt_thread) {
+                       /*
+                        * The processor core makes assumptions about the
+                        * thread id; do not breach that assumption.
+                        */
+                       terminate = nr_cpu_ids - 1;
+               }
                avail = intserv_node->avail;
                nthreads = intserv_node->len / sizeof(int);
-               for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
+               for (; j < nthreads && cpu < terminate; j++) {
                        set_cpu_present(cpu, avail);
                        set_cpu_possible(cpu, true);
                        cpu_to_phys_id[cpu] = be32_to_cpu(intserv_node->intserv[j]);
@@ -549,6 +560,14 @@ void __init smp_setup_cpu_maps(void)
                            j, cpu, be32_to_cpu(intserv[j]));
                        cpu++;
                }
+               /* Mark the boot cpu present and possible */
+               if (nr_cpu_ids - 1 < bt_thread) {
+                       set_cpu_present(bt_thread, avail);
+                       set_cpu_possible(bt_thread, true);
+                       cpu_to_phys_id[bt_thread] = be32_to_cpu(intserv_node->intserv[bt_thread]);
+                       DBG("    thread %d -> cpu %d (hard id %d)\n",
+                           bt_thread, bt_thread, be32_to_cpu(intserv[bt_thread]));
+               }
        }
 
        list_for_each_entry_safe(intserv_node, n, &head, node) {
-- 
2.31.1

Reply via email to