Author: nwhitehorn
Date: Tue May 31 15:11:43 2011
New Revision: 222531
URL: http://svn.freebsd.org/changeset/base/222531

Log:
  On multi-core, multi-threaded PPC systems, it is important that the threads
  be brought up in the order they are enumerated in the device tree (in
  particular, that thread 0 on each core be brought up first). The SLIST
  through which we loop to start the CPUs has all of its entries added with
  SLIST_INSERT_HEAD(), which means it is in reverse order of enumeration
  and so AP startup would always fail in such situations (causing a machine
  check or RTAS failure). Fix this by changing the SLIST into an STAILQ,
  and inserting new CPUs at the end.
  
  Reviewed by:  jhb
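
To illustrate the ordering problem behind the fix, here is a minimal
userland sketch (not part of the commit). It assumes a BSD-style
<sys/queue.h> that provides both the SLIST and STAILQ macro families,
as FreeBSD's does; the "fakecpu" structure and its fields are invented
for the example. Inserting each new element at the head reverses the
enumeration order, while inserting at the tail preserves it:

#include <sys/queue.h>
#include <stdio.h>

struct fakecpu {
	int			id;	/* enumeration order */
	SLIST_ENTRY(fakecpu)	sl_link;
	STAILQ_ENTRY(fakecpu)	stq_link;
};

int
main(void)
{
	SLIST_HEAD(, fakecpu) slhead = SLIST_HEAD_INITIALIZER(slhead);
	STAILQ_HEAD(, fakecpu) stqhead = STAILQ_HEAD_INITIALIZER(stqhead);
	struct fakecpu cpus[4], *c;
	int i;

	for (i = 0; i < 4; i++) {
		cpus[i].id = i;
		/* Old scheme: each new CPU goes to the front. */
		SLIST_INSERT_HEAD(&slhead, &cpus[i], sl_link);
		/* New scheme: each new CPU goes to the back. */
		STAILQ_INSERT_TAIL(&stqhead, &cpus[i], stq_link);
	}

	printf("SLIST: ");	/* prints 3 2 1 0 (reverse of enumeration) */
	SLIST_FOREACH(c, &slhead, sl_link)
		printf("%d ", c->id);
	printf("\nSTAILQ: ");	/* prints 0 1 2 3 (enumeration order) */
	STAILQ_FOREACH(c, &stqhead, stq_link)
		printf("%d ", c->id);
	printf("\n");
	return (0);
}

With the old SLIST_INSERT_HEAD() scheme, the AP startup loops below could
reach a core's secondary threads before its thread 0, which is the failure
mode the log message describes on multi-threaded PPC; tail insertion keeps
the walk in device-tree enumeration order.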

Modified:
  head/sys/i386/pci/pci_cfgreg.c
  head/sys/ia64/ia64/machdep.c
  head/sys/ia64/ia64/mp_machdep.c
  head/sys/ia64/ia64/pmap.c
  head/sys/kern/kern_idle.c
  head/sys/kern/sched_4bsd.c
  head/sys/kern/subr_kdb.c
  head/sys/kern/subr_pcpu.c
  head/sys/mips/mips/mp_machdep.c
  head/sys/net/netisr.c
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/powerpc/mp_machdep.c
  head/sys/sparc64/sparc64/mp_machdep.c
  head/sys/sparc64/sparc64/pmap.c
  head/sys/sys/pcpu.h

Modified: head/sys/i386/pci/pci_cfgreg.c
==============================================================================
--- head/sys/i386/pci/pci_cfgreg.c      Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/i386/pci/pci_cfgreg.c      Tue May 31 15:11:43 2011        (r222531)
@@ -553,7 +553,7 @@ pcie_cfgregopen(uint64_t base, uint8_t m
                    (uintmax_t)base);
 
 #ifdef SMP
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu)
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
 #endif
        {
 

Modified: head/sys/ia64/ia64/machdep.c
==============================================================================
--- head/sys/ia64/ia64/machdep.c        Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/ia64/ia64/machdep.c        Tue May 31 15:11:43 2011        (r222531)
@@ -316,7 +316,7 @@ cpu_startup(void *dummy)
        /*
         * Create sysctl tree for per-CPU information.
         */
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                snprintf(nodename, sizeof(nodename), "%u", pc->pc_cpuid);
                sysctl_ctx_init(&pc->pc_md.sysctl_ctx);
                pc->pc_md.sysctl_tree = SYSCTL_ADD_NODE(&pc->pc_md.sysctl_ctx,

Modified: head/sys/ia64/ia64/mp_machdep.c
==============================================================================
--- head/sys/ia64/ia64/mp_machdep.c     Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/ia64/ia64/mp_machdep.c     Tue May 31 15:11:43 2011        (r222531)
@@ -357,7 +357,7 @@ cpu_mp_start()
        /* Keep 'em spinning until we unleash them... */
        ia64_ap_state.as_spin = 1;
 
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                pc->pc_md.current_pmap = kernel_pmap;
                pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask;
                /* The BSP is obviously running already. */
@@ -424,7 +424,7 @@ cpu_mp_unleash(void *dummy)
 
        cpus = 0;
        smp_cpus = 0;
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                cpus++;
                if (pc->pc_md.awake) {
                        kproc_create(ia64_store_mca_state, pc, NULL, 0, 0,
@@ -462,7 +462,7 @@ ipi_selected(cpumask_t cpus, int ipi)
 {
        struct pcpu *pc;
 
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                if (cpus & pc->pc_cpumask)
                        ipi_send(pc, ipi);
        }
@@ -486,7 +486,7 @@ ipi_all_but_self(int ipi)
 {
        struct pcpu *pc;
 
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                if (pc != pcpup)
                        ipi_send(pc, ipi);
        }

Modified: head/sys/ia64/ia64/pmap.c
==============================================================================
--- head/sys/ia64/ia64/pmap.c   Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/ia64/ia64/pmap.c   Tue May 31 15:11:43 2011        (r222531)
@@ -535,7 +535,7 @@ pmap_invalidate_page(vm_offset_t va)
        critical_enter();
        vhpt_ofs = ia64_thash(va) - PCPU_GET(md.vhpt);
        tag = ia64_ttag(va);
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                pte = (struct ia64_lpte *)(pc->pc_md.vhpt + vhpt_ofs);
                atomic_cmpset_64(&pte->tag, tag, 1UL << 63);
        }

Modified: head/sys/kern/kern_idle.c
==============================================================================
--- head/sys/kern/kern_idle.c   Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/kern/kern_idle.c   Tue May 31 15:11:43 2011        (r222531)
@@ -60,7 +60,7 @@ idle_setup(void *dummy)
 
        p = NULL; /* start with no idle process */
 #ifdef SMP
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 #endif
 #ifdef SMP
                error = kproc_kthread_add(sched_idletd, NULL, &p, &td,

Modified: head/sys/kern/sched_4bsd.c
==============================================================================
--- head/sys/kern/sched_4bsd.c  Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/kern/sched_4bsd.c  Tue May 31 15:11:43 2011        (r222531)
@@ -1081,7 +1081,7 @@ forward_wakeup(int cpunum)
        dontuse = me | stopped_cpus | hlt_cpus_mask;
        map2 = 0;
        if (forward_wakeup_use_loop) {
-               SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+               STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                        id = pc->pc_cpumask;
                        if ((id & dontuse) == 0 &&
                            pc->pc_curthread == pc->pc_idlethread) {
@@ -1112,7 +1112,7 @@ forward_wakeup(int cpunum)
        }
        if (map) {
                forward_wakeups_delivered++;
-               SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+               STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                        id = pc->pc_cpumask;
                        if ((map & id) == 0)
                                continue;

Modified: head/sys/kern/subr_kdb.c
==============================================================================
--- head/sys/kern/subr_kdb.c    Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/kern/subr_kdb.c    Tue May 31 15:11:43 2011        (r222531)
@@ -412,7 +412,7 @@ kdb_thr_ctx(struct thread *thr)
                return (&kdb_pcb);
 
 #if defined(SMP) && defined(KDB_STOPPEDPCB)
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu)  {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)  {
                if (pc->pc_curthread == thr && (stopped_cpus & pc->pc_cpumask))
                        return (KDB_STOPPEDPCB(pc));
        }

Modified: head/sys/kern/subr_pcpu.c
==============================================================================
--- head/sys/kern/subr_pcpu.c   Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/kern/subr_pcpu.c   Tue May 31 15:11:43 2011        (r222531)
@@ -74,7 +74,7 @@ static TAILQ_HEAD(, dpcpu_free) dpcpu_he
 static struct sx dpcpu_lock;
 uintptr_t dpcpu_off[MAXCPU];
 struct pcpu *cpuid_to_pcpu[MAXCPU];
-struct cpuhead cpuhead = SLIST_HEAD_INITIALIZER(cpuhead);
+struct cpuhead cpuhead = STAILQ_HEAD_INITIALIZER(cpuhead);
 
 /*
  * Initialize the MI portions of a struct pcpu.
@@ -89,7 +89,7 @@ pcpu_init(struct pcpu *pcpu, int cpuid, 
        pcpu->pc_cpuid = cpuid;
        pcpu->pc_cpumask = 1 << cpuid;
        cpuid_to_pcpu[cpuid] = pcpu;
-       SLIST_INSERT_HEAD(&cpuhead, pcpu, pc_allcpu);
+       STAILQ_INSERT_TAIL(&cpuhead, pcpu, pc_allcpu);
        cpu_pcpu_init(pcpu, cpuid, size);
        pcpu->pc_rm_queue.rmq_next = &pcpu->pc_rm_queue;
        pcpu->pc_rm_queue.rmq_prev = &pcpu->pc_rm_queue;
@@ -245,7 +245,7 @@ void
 pcpu_destroy(struct pcpu *pcpu)
 {
 
-       SLIST_REMOVE(&cpuhead, pcpu, pcpu, pc_allcpu);
+       STAILQ_REMOVE(&cpuhead, pcpu, pcpu, pc_allcpu);
        cpuid_to_pcpu[pcpu->pc_cpuid] = NULL;
        dpcpu_off[pcpu->pc_cpuid] = 0;
 }

Modified: head/sys/mips/mips/mp_machdep.c
==============================================================================
--- head/sys/mips/mips/mp_machdep.c     Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/mips/mips/mp_machdep.c     Tue May 31 15:11:43 2011        (r222531)
@@ -86,7 +86,7 @@ ipi_selected(cpumask_t cpus, int ipi)
 
        CTR3(KTR_SMP, "%s: cpus: %x, ipi: %x\n", __func__, cpus, ipi);
 
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                if ((cpus & pc->pc_cpumask) != 0)
                        ipi_send(pc, ipi);
        }

Modified: head/sys/net/netisr.c
==============================================================================
--- head/sys/net/netisr.c       Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/net/netisr.c       Tue May 31 15:11:43 2011        (r222531)
@@ -1221,7 +1221,7 @@ netisr_start(void *arg)
 {
        struct pcpu *pc;
 
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                if (nws_count >= netisr_maxthreads)
                        break;
                /* XXXRW: Is skipping absent CPUs still required here? */

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c       Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/powerpc/booke/pmap.c       Tue May 31 15:11:43 2011        (r222531)
@@ -390,7 +390,7 @@ tlb_miss_lock(void)
        if (!smp_started)
                return;
 
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                if (pc != pcpup) {
 
                        CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
@@ -416,7 +416,7 @@ tlb_miss_unlock(void)
        if (!smp_started)
                return;
 
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                if (pc != pcpup) {
                        CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
                            __func__, pc->pc_cpuid);

Modified: head/sys/powerpc/powerpc/mp_machdep.c
==============================================================================
--- head/sys/powerpc/powerpc/mp_machdep.c       Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/powerpc/powerpc/mp_machdep.c       Tue May 31 15:11:43 2011        (r222531)
@@ -212,7 +212,7 @@ cpu_mp_unleash(void *dummy)
 
        cpus = 0;
        smp_cpus = 0;
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                cpus++;
                pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask;
                if (!pc->pc_bsp) {
@@ -347,7 +347,7 @@ ipi_selected(cpumask_t cpus, int ipi)
 {
        struct pcpu *pc;
 
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                if (cpus & pc->pc_cpumask)
                        ipi_send(pc, ipi);
        }
@@ -367,7 +367,7 @@ ipi_all_but_self(int ipi)
 {
        struct pcpu *pc;
 
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                if (pc != pcpup)
                        ipi_send(pc, ipi);
        }

Modified: head/sys/sparc64/sparc64/mp_machdep.c
==============================================================================
--- head/sys/sparc64/sparc64/mp_machdep.c       Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/sparc64/sparc64/mp_machdep.c       Tue May 31 15:11:43 2011        (r222531)
@@ -383,7 +383,7 @@ cpu_mp_unleash(void *v)
        ctx_inc = (TLB_CTX_USER_MAX - 1) / mp_ncpus;
        csa = &cpu_start_args;
        csa->csa_count = mp_ncpus;
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                pc->pc_tlb_ctx = ctx_min;
                pc->pc_tlb_ctx_min = ctx_min;
                pc->pc_tlb_ctx_max = ctx_min + ctx_inc;

Modified: head/sys/sparc64/sparc64/pmap.c
==============================================================================
--- head/sys/sparc64/sparc64/pmap.c     Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/sparc64/sparc64/pmap.c     Tue May 31 15:11:43 2011        (r222531)
@@ -1278,7 +1278,7 @@ pmap_release(pmap_t pm)
         *   to a kernel thread, leaving the pmap pointer unchanged.
         */
        mtx_lock_spin(&sched_lock);
-       SLIST_FOREACH(pc, &cpuhead, pc_allcpu)
+       STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
                if (pc->pc_pmap == pm)
                        pc->pc_pmap = NULL;
        mtx_unlock_spin(&sched_lock);

Modified: head/sys/sys/pcpu.h
==============================================================================
--- head/sys/sys/pcpu.h Tue May 31 15:11:23 2011        (r222530)
+++ head/sys/sys/pcpu.h Tue May 31 15:11:43 2011        (r222531)
@@ -164,7 +164,7 @@ struct pcpu {
        u_int           pc_cpuid;               /* This cpu number */
        cpumask_t       pc_cpumask;             /* This cpu mask */
        cpumask_t       pc_other_cpus;          /* Mask of all other cpus */
-       SLIST_ENTRY(pcpu) pc_allcpu;
+       STAILQ_ENTRY(pcpu) pc_allcpu;
        struct lock_list_entry *pc_spinlocks;
 #ifdef KTR
        char            pc_name[PCPU_NAME_LEN]; /* String name for KTR */
@@ -201,7 +201,7 @@ struct pcpu {
 
 #ifdef _KERNEL
 
-SLIST_HEAD(cpuhead, pcpu);
+STAILQ_HEAD(cpuhead, pcpu);
 
 extern struct cpuhead cpuhead;
 extern struct pcpu *cpuid_to_pcpu[MAXCPU];
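
For reference, the structural cost of the switch is small. The queue(3)
macros involved expand roughly as in the sketch below (an approximation for
illustration; see sys/sys/queue.h for the real definitions, and the struct
names here are invented). Each element still carries a single next pointer;
only the list head grows by one pointer, which is what lets
STAILQ_INSERT_TAIL() run in constant time:

struct pcpu;

/* Roughly what SLIST_HEAD(cpuhead, pcpu) / SLIST_ENTRY(pcpu) gave before: */
struct cpuhead_slist {
	struct pcpu *slh_first;		/* first element */
};
struct pcpu_slist_entry {
	struct pcpu *sle_next;		/* next element */
};

/* Roughly what STAILQ_HEAD(cpuhead, pcpu) / STAILQ_ENTRY(pcpu) give now: */
struct cpuhead_stailq {
	struct pcpu *stqh_first;	/* first element */
	struct pcpu **stqh_last;	/* address of last next pointer */
};
struct pcpu_stailq_entry {
	struct pcpu *stqe_next;		/* next element */
};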