Module Name:    src
Committed By:   snj
Date:           Sat May 30 16:57:19 UTC 2009

Modified Files:
        src/sys/arch/sparc/sparc [netbsd-5]: autoconf.c cpu.c cpuvar.h
            db_interface.c intr.c pmap.c timer_sun4m.c

Log Message:
Pull up following revision(s) (requested by mrg in ticket #776):
        sys/arch/sparc/sparc/autoconf.c: revision 1.233 via patch
        sys/arch/sparc/sparc/cpu.c: revision 1.213 via patch
        sys/arch/sparc/sparc/cpuvar.h: revision 1.76 via patch
        sys/arch/sparc/sparc/db_interface.c: revision 1.83 via patch
        sys/arch/sparc/sparc/intr.c: revision 1.102 via patch
        sys/arch/sparc/sparc/pmap.c: revision 1.325 via patch
        sys/arch/sparc/sparc/timer_sun4m.c: revision 1.17 via patch
Work in progress from a collaborative effort of mrg and me (all bugs are
mine) - not quite working yet, but it improves the situation for
non-MULTIPROCESSOR kernels (makes LOCKDEBUG kernels work) and does not make
SMP kernels worse:
Rearrange cpu_info access and hide the actual implementation of the mapping
from all parts of the code that do not directly deal with it. Do the
mapping early in pmap_bootstrap, so that post-vmlocking2 kernels have
a chance to work.
The actual mapping of the cpus array for SMP kernels still has to be fixed,
but both mrg and I ran out of time and this has lain around in our trees far
too long.
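
For readers skimming the diff below, here is a minimal user-space sketch of
the two ideas this change introduces: padding each cpu_info into a fixed-size
union cpu_info_pg element (so the per-CPU structure can later be aliased to
the fixed CPUINFO_VA mapping) and walking CPUs through a CPU_INFO_FOREACH
style macro instead of indexing a pointer array and skipping NULL slots.
This is not kernel code; struct cpu_info and the field names here are
hypothetical stand-ins, and the real definitions live in
sys/arch/sparc/sparc/cpuvar.h.

/*
 * Stand-alone illustration (hypothetical stub types) of the cpus[]
 * layout and the CPU_INFO_FOREACH iteration pattern from this change.
 */
#include <stdio.h>

struct cpu_info {
	int	ci_cpuid;	/* logical CPU index */
	int	mid;		/* module ID */
	int	flags;		/* e.g. CPUFLG_HATCHED */
};

/* Pad each element so every cpu_info starts on a 32 KB boundary. */
union cpu_info_pg {
	struct cpu_info ci;
	char pad[32 * 1024];
};

#define SPARC_NCPUS	4
static union cpu_info_pg cpus[SPARC_NCPUS];

/*
 * Callers no longer index cpus[] directly or test for NULL slots;
 * the mapping is hidden behind the iterator macro.
 */
#define CPU_INFO_ITERATOR	int
#define CPU_INFO_FOREACH(cii, cp) \
	(cii) = 0; (cp) = &cpus[(cii)].ci, (cii) < SPARC_NCPUS; (cii)++

int
main(void)
{
	CPU_INFO_ITERATOR n;
	struct cpu_info *cpi;

	for (n = 0; n < SPARC_NCPUS; n++) {
		cpus[n].ci.ci_cpuid = n;
		cpus[n].ci.mid = n + 8;	/* hypothetical module IDs */
	}

	for (CPU_INFO_FOREACH(n, cpi))
		printf("cpu%d: mid %d\n", cpi->ci_cpuid, cpi->mid);

	return 0;
}

The 32 KB padding mirrors the "XXX: force 32K alignment for now" note in the
cpuvar.h hunk below; as the removed alloc_cpuinfo_global_va() comment
explains, each cpu_info must live at a virtual address cache-congruent with
the fixed CPUINFO_VA alias.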


To generate a diff of this commit:
cvs rdiff -u -r1.229 -r1.229.4.1 src/sys/arch/sparc/sparc/autoconf.c
cvs rdiff -u -r1.211 -r1.211.8.1 src/sys/arch/sparc/sparc/cpu.c
cvs rdiff -u -r1.75 -r1.75.10.1 src/sys/arch/sparc/sparc/cpuvar.h
cvs rdiff -u -r1.79 -r1.79.4.1 src/sys/arch/sparc/sparc/db_interface.c
cvs rdiff -u -r1.100 -r1.100.20.1 src/sys/arch/sparc/sparc/intr.c
cvs rdiff -u -r1.322 -r1.322.20.1 src/sys/arch/sparc/sparc/pmap.c
cvs rdiff -u -r1.16 -r1.16.56.1 src/sys/arch/sparc/sparc/timer_sun4m.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/sparc/sparc/autoconf.c
diff -u src/sys/arch/sparc/sparc/autoconf.c:1.229 src/sys/arch/sparc/sparc/autoconf.c:1.229.4.1
--- src/sys/arch/sparc/sparc/autoconf.c:1.229	Thu Jul 17 14:39:26 2008
+++ src/sys/arch/sparc/sparc/autoconf.c	Sat May 30 16:57:18 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: autoconf.c,v 1.229 2008/07/17 14:39:26 cegger Exp $ */
+/*	$NetBSD: autoconf.c,v 1.229.4.1 2009/05/30 16:57:18 snj Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -48,7 +48,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: autoconf.c,v 1.229 2008/07/17 14:39:26 cegger Exp $");
+__KERNEL_RCSID(0, "$NetBSD: autoconf.c,v 1.229.4.1 2009/05/30 16:57:18 snj Exp $");
 
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
@@ -279,6 +279,7 @@
 
 	cpuinfo.master = 1;
 	getcpuinfo(&cpuinfo, 0);
+	curlwp = &lwp0;
 
 #if defined(SUN4M) || defined(SUN4D)
 	/* Switch to sparc v8 multiply/divide functions on v8 machines */
@@ -314,18 +315,6 @@
 	initmsgbuf((void *)KERNBASE, 8192);
 #endif
 
-#if NKSYMS || defined(DDB) || defined(LKM)
-	if ((bi_sym = lookup_bootinfo(BTINFO_SYMTAB)) != NULL) {
-		if (bi_sym->ssym < KERNBASE) {
-			/* Assume low-loading boot loader */
-			bi_sym->ssym += KERNBASE;
-			bi_sym->esym += KERNBASE;
-		}
-		ksyms_init(bi_sym->nsym, (int *)bi_sym->ssym,
-		    (int *)bi_sym->esym);
-	}
-#endif
-
 #if defined(SUN4M)
 	/*
 	 * sun4m bootstrap is complex and is totally different for "normal" 4m
@@ -351,6 +340,19 @@
 		*((unsigned char *)INTRREG_VA) = 0;
 	}
 #endif /* SUN4 || SUN4C */
+
+
+#if NKSYMS || defined(DDB) || defined(LKM)
+	if ((bi_sym = lookup_bootinfo(BTINFO_SYMTAB)) != NULL) {
+		if (bi_sym->ssym < KERNBASE) {
+			/* Assume low-loading boot loader */
+			bi_sym->ssym += KERNBASE;
+			bi_sym->esym += KERNBASE;
+		}
+		ksyms_init(bi_sym->nsym, (int *)bi_sym->ssym,
+		    (int *)bi_sym->esym);
+	}
+#endif
 }
 
 #if defined(SUN4M) && !defined(MSIIEP)

Index: src/sys/arch/sparc/sparc/cpu.c
diff -u src/sys/arch/sparc/sparc/cpu.c:1.211 src/sys/arch/sparc/sparc/cpu.c:1.211.8.1
--- src/sys/arch/sparc/sparc/cpu.c:1.211	Wed Jun  4 12:41:41 2008
+++ src/sys/arch/sparc/sparc/cpu.c	Sat May 30 16:57:18 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.c,v 1.211 2008/06/04 12:41:41 ad Exp $ */
+/*	$NetBSD: cpu.c,v 1.211.8.1 2009/05/30 16:57:18 snj Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -52,7 +52,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.211 2008/06/04 12:41:41 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.211.8.1 2009/05/30 16:57:18 snj Exp $");
 
 #include "opt_multiprocessor.h"
 #include "opt_lockdebug.h"
@@ -102,10 +102,10 @@
 extern char machine_model[];
 
 int	sparc_ncpus;			/* # of CPUs detected by PROM */
-struct	cpu_info **cpus;
+#ifdef MULTIPROCESSOR
+union cpu_info_pg *cpus;
 u_int	cpu_ready_mask;			/* the set of CPUs marked as READY */
-static	int cpu_instance;		/* current # of CPUs wired by us */
-
+#endif
 
 /* The CPU configuration driver. */
 static void cpu_mainbus_attach(struct device *, struct device *, void *);
@@ -138,117 +138,39 @@
 int bootmid;		/* Module ID of boot CPU */
 #if defined(MULTIPROCESSOR)
 void cpu_spinup(struct cpu_info *);
-struct cpu_info *alloc_cpuinfo_global_va(int, vsize_t *);
-struct cpu_info	*alloc_cpuinfo(void);
+static void init_cpuinfo(struct cpu_info *, int);
 
 int go_smp_cpus = 0;	/* non-primary CPUs wait for this to go */
 
 /* lock this to send IPI's */
 struct simplelock xpmsg_lock = SIMPLELOCK_INITIALIZER;
 
-struct cpu_info *
-alloc_cpuinfo_global_va(int ismaster, vsize_t *sizep)
+static void
+init_cpuinfo(struct cpu_info *cpi, int node)
 {
-	int align;
-	vaddr_t sva, va;
-	vsize_t sz, esz;
+	vaddr_t intstack, va;
 
 	/*
-	 * Allocate aligned KVA.  `cpuinfo' resides at a fixed virtual
-	 * address. Since we need to access an other CPU's cpuinfo
-	 * structure occasionally, this must be done at a virtual address
-	 * that's cache congruent to the fixed address CPUINFO_VA.
-	 *
-	 * NOTE: we're using the cache properties of the boot CPU to
-	 * determine the alignment (XXX).
+	 * Finish initialising this cpu_info.
 	 */
-	align = PAGE_SIZE;
-	if (CACHEINFO.c_totalsize > align) {
-		/* Need a power of two */
-		while (align <= CACHEINFO.c_totalsize)
-			align <<= 1;
-		align >>= 1;
-	}
-
-	sz = sizeof(struct cpu_info);
-
-	if (ismaster == 0) {
-		/*
-		 * While we're here, allocate a per-CPU idle PCB and
-		 * interrupt stack as well (8KB + 16KB).
-		 */
-		sz += USPACE;		/* `idle' u-area for this CPU */
-		sz += INT_STACK_SIZE;	/* interrupt stack for this CPU */
-	}
-
-	sz = (sz + PAGE_SIZE - 1) & -PAGE_SIZE;
-	esz = sz + align - PAGE_SIZE;
-
-	sva = vm_map_min(kernel_map);
-	if (uvm_map(kernel_map, &sva, esz, NULL, UVM_UNKNOWN_OFFSET,
-	    0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
-	    UVM_ADV_RANDOM, UVM_FLAG_NOWAIT)))
-		panic("alloc_cpuinfo_global_va: no virtual space");
-
-	va = sva + (((CPUINFO_VA & (align - 1)) + align - sva) & (align - 1));
-
-	/* Return excess virtual memory space */
-	if (va != sva)
-		(void)uvm_unmap(kernel_map, sva, va);
-	if (va + sz != sva + esz)
-		(void)uvm_unmap(kernel_map, va + sz, sva + esz);
-
-	if (sizep != NULL)
-		*sizep = sz;
-
-	return ((struct cpu_info *)va);
-}
-
-struct cpu_info *
-alloc_cpuinfo(void)
-{
-	vaddr_t va;
-	vsize_t sz;
-	vaddr_t low, high;
-	struct vm_page *m;
-	struct pglist mlist;
-	struct cpu_info *cpi;
-
-	/* Allocate the aligned VA and determine the size. */
-	cpi = alloc_cpuinfo_global_va(0, &sz);
-	va = (vaddr_t)cpi;
-
-	/* Allocate physical pages */
-	low = vm_first_phys;
-	high = vm_first_phys + vm_num_phys - PAGE_SIZE;
-	if (uvm_pglistalloc(sz, low, high, PAGE_SIZE, 0, &mlist, 1, 0) != 0)
-		panic("alloc_cpuinfo: no pages");
-
-	/* Map the pages */
-	for (m = TAILQ_FIRST(&mlist); m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
-		paddr_t pa = VM_PAGE_TO_PHYS(m);
-		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
-		va += PAGE_SIZE;
-	}
-	pmap_update(pmap_kernel());
-
-	bzero((void *)cpi, sz);
+	getcpuinfo(cpi, node);
 
 	/*
-	 * Arrange pcb and interrupt stack in the same
-	 * way as is done for the boot CPU in locore.
+	 * Arrange pcb and interrupt stack.
 	 */
-	cpi->eintstack = (void *)((vaddr_t)cpi + sz - USPACE);
+	intstack = uvm_km_alloc(kernel_map, INT_STACK_SIZE,
+		0, UVM_KMF_WIRED);
+	if (intstack == 0)
+		panic("%s: no uspace/intstack", __func__);
+	cpi->eintstack = (void*)(intstack + INT_STACK_SIZE);
 
 	/* Allocate virtual space for pmap page_copy/page_zero */
 	va = uvm_km_alloc(kernel_map, 2*PAGE_SIZE, 0, UVM_KMF_VAONLY);
 	if (va == 0)
-		panic("alloc_cpuinfo: no virtual space");
+		panic("%s: no virtual space", __func__);
 
 	cpi->vpage[0] = (void *)(va + 0);
 	cpi->vpage[1] = (void *)(va + PAGE_SIZE);
-
-	return (cpi);
 }
 #endif /* MULTIPROCESSOR */
 
@@ -405,34 +327,19 @@
 cpu_attach(struct cpu_softc *sc, int node, int mid)
 {
 	struct cpu_info *cpi;
+	int idx;
+	static int cpu_attach_count = 0;
 
 	/*
 	 * The first CPU we're attaching must be the boot CPU.
 	 * (see autoconf.c and cpuunit.c)
 	 */
-	if (cpus == NULL) {
-		cpus = malloc(sparc_ncpus * sizeof(cpi), M_DEVBUF, M_NOWAIT);
-		bzero(cpus, sparc_ncpus * sizeof(cpi));
-
+	idx = cpu_attach_count++;
+	if (cpu_attach_count == 1) {
 		getcpuinfo(&cpuinfo, node);
 
 #if defined(MULTIPROCESSOR)
-		/*
-		 * Allocate a suitable global VA for the boot CPU's
-		 * cpu_info (which is already statically allocated),
-		 * and double map it to that global VA.  Then fixup
-		 * the self-reference to use the globalized address.
-		 */
-		cpi = sc->sc_cpuinfo = alloc_cpuinfo_global_va(1, NULL);
-		pmap_globalize_boot_cpuinfo(cpi);
-
-		cpuinfo.ci_self = cpi;
-
-		/* XXX - fixup lwp0 and idlelwp l_cpu */
-		lwp0.l_cpu = cpi;
-		cpi->ci_data.cpu_idlelwp->l_cpu = cpi;
-		cpi->ci_data.cpu_idlelwp->l_mutex =
-		    cpi->ci_schedstate.spc_lwplock;
+		cpi = sc->sc_cpuinfo = cpuinfo.ci_self;
 #else
 		/* The `local' VA is global for uniprocessor. */
 		cpi = sc->sc_cpuinfo = (struct cpu_info *)CPUINFO_VA;
@@ -453,10 +360,10 @@
 		int error;
 
 		/*
-		 * Allocate and initiize this cpu's cpu_info.
+		 * Initialise this cpu's cpu_info.
 		 */
-		cpi = sc->sc_cpuinfo = alloc_cpuinfo();
-		cpi->ci_self = cpi;
+		cpi = sc->sc_cpuinfo = &cpus[idx].ci;
+		init_cpuinfo(cpi, node);
 
 		/*
 		 * Call the MI attach which creates an idle LWP for us.
@@ -470,13 +377,12 @@
 		}
 
 		/*
-		 * Note: `eintstack' is set in alloc_cpuinfo() above.
+		 * Note: `eintstack' is set in init_cpuinfo() above.
 		 * The %wim register will be initialized in cpu_hatch().
 		 */
 		cpi->ci_curlwp = cpi->ci_data.cpu_idlelwp;
 		cpi->curpcb = (struct pcb *)cpi->ci_curlwp->l_addr;
 		cpi->curpcb->pcb_wim = 1;
-		getcpuinfo(cpi, node);
 
 #else
 		sc->sc_cpuinfo = NULL;
@@ -489,12 +395,7 @@
 	cpi->redzone = (void *)((long)cpi->eintstack + REDSIZE);
 #endif
 
-	/*
-	 * Allocate a slot in the cpus[] array such that the following
-	 * invariant holds: cpus[cpi->ci_cpuid] == cpi;
-	 */
-	cpus[cpu_instance] = cpi;
-	cpi->ci_cpuid = cpu_instance++;
+	cpi->ci_cpuid = idx;
 	cpi->mid = mid;
 	cpi->node = node;
 
@@ -530,16 +431,13 @@
 
 	cache_print(sc);
 
-	if (sparc_ncpus > 1 && cpu_instance == sparc_ncpus) {
-		int n;
+	if (sparc_ncpus > 1 && idx == sparc_ncpus-1) {
+		CPU_INFO_ITERATOR n;
 		/*
 		 * Install MP cache flush functions, unless the
 		 * single-processor versions are no-ops.
 		 */
-		for (n = 0; n < sparc_ncpus; n++) {
-			cpi = cpus[n];
-			if (cpi == NULL)
-				continue;
+		for (CPU_INFO_FOREACH(n, cpi)) {
 #define SET_CACHE_FUNC(x) \
 	if (cpi->x != __CONCAT(noop_,x)) cpi->x = __CONCAT(smp_,x)
 			SET_CACHE_FUNC(vcache_flush_page);
@@ -558,20 +456,13 @@
 void
 cpu_boot_secondary_processors(void)
 {
-	int n;
-
-	if (cpu_instance != sparc_ncpus) {
-		printf("NOTICE: only %d out of %d CPUs were configured\n",
-			cpu_instance, sparc_ncpus);
-		return;
-	}
+	CPU_INFO_ITERATOR n;
+	struct cpu_info *cpi;
 
 	printf("cpu0: booting secondary processors:");
-	for (n = 0; n < sparc_ncpus; n++) {
-		struct cpu_info *cpi = cpus[n];
-
-		if (cpi == NULL || cpuinfo.mid == cpi->mid ||
-			(cpi->flags & CPUFLG_HATCHED) == 0)
+	for (CPU_INFO_FOREACH(n, cpi)) {
+		if (cpuinfo.mid == cpi->mid ||
+		    (cpi->flags & CPUFLG_HATCHED) == 0)
 			continue;
 
 		printf(" cpu%d", cpi->ci_cpuid);
@@ -597,8 +488,7 @@
 void
 cpu_setup(void)
 {
-
-	if (cpuinfo.hotfix)
+ 	if (cpuinfo.hotfix)
 		(*cpuinfo.hotfix)(&cpuinfo);
 
 	/* Initialize FPU */
@@ -665,6 +555,7 @@
 xcall(xcall_func_t func, xcall_trap_t trap, int arg0, int arg1, int arg2,
       u_int cpuset)
 {
+	struct cpu_info *cpi;
 	int s, n, i, done, callself, mybit;
 	volatile struct xpmsg_func *p;
 	int fasttrap;
@@ -707,11 +598,7 @@
 	 * finished by the time we start looking.
 	 */
 	fasttrap = trap != NULL ? 1 : 0;
-	for (n = 0; n < sparc_ncpus; n++) {
-		struct cpu_info *cpi = cpus[n];
-
-		if (!cpi)
-			continue;
+	for (CPU_INFO_FOREACH(n, cpi)) {
 
 		/* Note: n == cpi->ci_cpuid */
 		if ((cpuset & (1 << n)) == 0)
@@ -750,10 +637,8 @@
 		}
 
 		done = 1;
-		for (n = 0; n < sparc_ncpus; n++) {
-			struct cpu_info *cpi = cpus[n];
-
-			if (!cpi || (cpuset & (1 << n)) == 0)
+		for (CPU_INFO_FOREACH(n, cpi)) {
+			if ((cpuset & (1 << n)) == 0)
 				continue;
 
 			if (cpi->msg.complete == 0) {
@@ -779,15 +664,15 @@
 void
 mp_pause_cpus(void)
 {
-	int n;
+	CPU_INFO_ITERATOR n;
+	struct cpu_info *cpi;
 
 	if (cpus == NULL)
 		return;
 
-	for (n = 0; n < sparc_ncpus; n++) {
-		struct cpu_info *cpi = cpus[n];
-
-		if (cpi == NULL || cpuinfo.mid == cpi->mid)
+	for (CPU_INFO_FOREACH(n, cpi)) {
+		if (cpuinfo.mid == cpi->mid ||
+		    (cpi->flags & CPUFLG_HATCHED) == 0)
 			continue;
 
 		/*
@@ -806,15 +691,15 @@
 void
 mp_resume_cpus(void)
 {
-	int n;
+	CPU_INFO_ITERATOR n;
+	struct cpu_info *cpi;
 
 	if (cpus == NULL)
 		return;
 
-	for (n = 0; n < sparc_ncpus; n++) {
-		struct cpu_info *cpi = cpus[n];
-
-		if (cpi == NULL || cpuinfo.mid == cpi->mid)
+	for (CPU_INFO_FOREACH(n, cpi)) {
+		if (cpuinfo.mid == cpi->mid ||
+		    (cpi->flags & CPUFLG_HATCHED) == 0)
 			continue;
 
 		/*
@@ -832,16 +717,16 @@
 void
 mp_halt_cpus(void)
 {
-	int n;
+	CPU_INFO_ITERATOR n;
+	struct cpu_info *cpi;
 
 	if (cpus == NULL)
 		return;
 
-	for (n = 0; n < sparc_ncpus; n++) {
-		struct cpu_info *cpi = cpus[n];
+	for (CPU_INFO_FOREACH(n, cpi)) {
 		int r;
 
-		if (cpi == NULL || cpuinfo.mid == cpi->mid)
+		if (cpuinfo.mid == cpi->mid)
 			continue;
 
 		/*
@@ -859,15 +744,15 @@
 void
 mp_pause_cpus_ddb(void)
 {
-	int n;
+	CPU_INFO_ITERATOR n;
+	struct cpu_info *cpi;
 
 	if (cpus == NULL)
 		return;
 
-	for (n = 0; n < sparc_ncpus; n++) {
-		struct cpu_info *cpi = cpus[n];
-
-		if (cpi == NULL || cpi->mid == cpuinfo.mid)
+	for (CPU_INFO_FOREACH(n, cpi)) {
+		if (cpi == NULL || cpi->mid == cpuinfo.mid ||
+		    (cpi->flags & CPUFLG_HATCHED) == 0)
 			continue;
 
 		cpi->msg_lev15.tag = XPMSG15_PAUSECPU;
@@ -878,15 +763,15 @@
 void
 mp_resume_cpus_ddb(void)
 {
-	int n;
+	CPU_INFO_ITERATOR n;
+	struct cpu_info *cpi;
 
 	if (cpus == NULL)
 		return;
 
-	for (n = 0; n < sparc_ncpus; n++) {
-		struct cpu_info *cpi = cpus[n];
-
-		if (cpi == NULL || cpuinfo.mid == cpi->mid)
+	for (CPU_INFO_FOREACH(n, cpi)) {
+		if (cpi == NULL || cpuinfo.mid == cpi->mid ||
+		    (cpi->flags & CPUFLG_PAUSED) == 0)
 			continue;
 
 		/* tell it to continue */
@@ -1762,15 +1647,11 @@
 viking_module_error(void)
 {
 	uint64_t v;
-	int n, fatal = 0;
+	int n = 0, fatal = 0;
+	struct cpu_info *cpi;
 
 	/* Report on MXCC error registers in each module */
-	for (n = 0; n < sparc_ncpus; n++) {
-		struct cpu_info *cpi = cpus[n];
-
-		if (cpi == NULL)
-			continue;
-
+	for (CPU_INFO_FOREACH(n, cpi)) {
 		if (cpi->ci_mxccregs == 0) {
 			printf("\tMXCC registers not mapped\n");
 			continue;

Index: src/sys/arch/sparc/sparc/cpuvar.h
diff -u src/sys/arch/sparc/sparc/cpuvar.h:1.75 src/sys/arch/sparc/sparc/cpuvar.h:1.75.10.1
--- src/sys/arch/sparc/sparc/cpuvar.h:1.75	Mon Apr 28 20:23:36 2008
+++ src/sys/arch/sparc/sparc/cpuvar.h	Sat May 30 16:57:18 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpuvar.h,v 1.75 2008/04/28 20:23:36 martin Exp $ */
+/*	$NetBSD: cpuvar.h,v 1.75.10.1 2009/05/30 16:57:18 snj Exp $ */
 
 /*
  *  Copyright (c) 1996 The NetBSD Foundation, Inc.
@@ -416,9 +416,9 @@
 
 #define CPU_INFO_ITERATOR		int
 #ifdef MULTIPROCESSOR
-#define CPU_INFO_FOREACH(cii, ci)	cii = 0; ci = cpus[cii], cii < sparc_ncpus; cii++
+#define CPU_INFO_FOREACH(cii, cp)	cii = 0; cp = &cpus[cii].ci, cii < sparc_ncpus; cii++
 #else
-#define	CPU_INFO_FOREACH(cii, ci)	(void)cii, ci = curcpu(); ci != NULL; ci = NULL
+#define	CPU_INFO_FOREACH(cii, cp)	(void)cii, cp = curcpu(); cp != NULL; cp = NULL
 #endif
 
 /*
@@ -433,7 +433,6 @@
 void getcpuinfo (struct cpu_info *sc, int node);
 void mmu_install_tables (struct cpu_info *);
 void pmap_alloc_cpu (struct cpu_info *);
-void pmap_globalize_boot_cpu (struct cpu_info *);
 
 #define	CPUSET_ALL	0xffffffffU	/* xcall to all configured CPUs */
 
@@ -473,8 +472,14 @@
 extern int bootmid;			/* Module ID of boot CPU */
 #define CPU_MID2CPUNO(mid)		((mid) != 0 ? (mid) - 8 : 0)
 
-extern struct cpu_info **cpus;
+#ifdef MULTIPROCESSOR
+union cpu_info_pg {
+	struct cpu_info ci;	/* cpu info (aliased (per cpu) to CPUINFO_VA */
+	char pad[32 * 1024];	/* XXX: force 32K alignment for now */
+};				/* SMP capable cpu types */
+extern union cpu_info_pg *cpus;
 extern u_int cpu_ready_mask;		/* the set of CPUs marked as READY */
+#endif
 
 #define cpuinfo	(*(struct cpu_info *)CPUINFO_VA)
 

Index: src/sys/arch/sparc/sparc/db_interface.c
diff -u src/sys/arch/sparc/sparc/db_interface.c:1.79 src/sys/arch/sparc/sparc/db_interface.c:1.79.4.1
--- src/sys/arch/sparc/sparc/db_interface.c:1.79	Fri Aug  8 17:09:28 2008
+++ src/sys/arch/sparc/sparc/db_interface.c	Sat May 30 16:57:18 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: db_interface.c,v 1.79 2008/08/08 17:09:28 skrll Exp $ */
+/*	$NetBSD: db_interface.c,v 1.79.4.1 2009/05/30 16:57:18 snj Exp $ */
 
 /*
  * Mach Operating System
@@ -33,7 +33,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.79 2008/08/08 17:09:28 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.79.4.1 2009/05/30 16:57:18 snj Exp $");
 
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
@@ -463,7 +463,7 @@
 		db_printf("%ld: CPU out of range\n", addr);
 		return;
 	}
-	ci = cpus[addr];
+	ci = &cpus[addr].ci;
 	if (ci == NULL) {
 		db_printf("CPU %ld not configured\n", addr);
 		return;

Index: src/sys/arch/sparc/sparc/intr.c
diff -u src/sys/arch/sparc/sparc/intr.c:1.100 src/sys/arch/sparc/sparc/intr.c:1.100.20.1
--- src/sys/arch/sparc/sparc/intr.c:1.100	Wed Jan  9 13:52:33 2008
+++ src/sys/arch/sparc/sparc/intr.c	Sat May 30 16:57:18 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: intr.c,v 1.100 2008/01/09 13:52:33 ad Exp $ */
+/*	$NetBSD: intr.c,v 1.100.20.1 2009/05/30 16:57:18 snj Exp $ */
 
 /*
  * Copyright (c) 1992, 1993
@@ -41,7 +41,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.100 2008/01/09 13:52:33 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.100.20.1 2009/05/30 16:57:18 snj Exp $");
 
 #include "opt_multiprocessor.h"
 #include "opt_sparc_arch.h"
@@ -291,7 +291,9 @@
 			/* In case there's an xcall in progress (unlikely) */
 			spl0();
 			cpuinfo.flags &= ~CPUFLG_READY;
+#ifdef MULTIPROCESSOR
 			cpu_ready_mask &= ~(1 << cpu_number());
+#endif
 			prom_cpustop(0);
 			break;
 		case OPENPROM_MBX_ABORT:

Index: src/sys/arch/sparc/sparc/pmap.c
diff -u src/sys/arch/sparc/sparc/pmap.c:1.322 src/sys/arch/sparc/sparc/pmap.c:1.322.20.1
--- src/sys/arch/sparc/sparc/pmap.c:1.322	Wed Jan  2 11:48:29 2008
+++ src/sys/arch/sparc/sparc/pmap.c	Sat May 30 16:57:18 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.322 2008/01/02 11:48:29 ad Exp $ */
+/*	$NetBSD: pmap.c,v 1.322.20.1 2009/05/30 16:57:18 snj Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -56,7 +56,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.322 2008/01/02 11:48:29 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.322.20.1 2009/05/30 16:57:18 snj Exp $");
 
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
@@ -964,6 +964,7 @@
 
 static u_long va2pa_offset;
 #define PMAP_BOOTSTRAP_VA2PA(v) ((paddr_t)((u_long)(v) - va2pa_offset))
+#define PMAP_BOOTSTRAP_PA2VA(p) ((vaddr_t)((u_long)(p) + va2pa_offset))
 
 /*
  * Grab physical memory list.
@@ -1115,7 +1116,7 @@
 			if (end < chop)
 				chop = end;
 #ifdef DEBUG
-			printf("bootstrap gap: start %lx, chop %lx, end %lx\n",
+			prom_printf("bootstrap gap: start %lx, chop %lx, end %lx\n",
 				start, chop, end);
 #endif
 			uvm_page_physload(
@@ -1389,7 +1390,7 @@
 
 		case SRMMU_TEPTE:
 #ifdef DEBUG
-			printf("mmu_setup4m_L1: "
+			prom_printf("mmu_setup4m_L1: "
 			       "converting region 0x%x from L1->L3\n", i);
 #endif
 			/*
@@ -1444,7 +1445,7 @@
 
 		case SRMMU_TEPTE:
 #ifdef DEBUG
-			printf("mmu_setup4m_L2: converting L2 entry at segment 0x%x to L3\n",i);
+			prom_printf("mmu_setup4m_L2: converting L2 entry at segment 0x%x to L3\n",i);
 #endif
 			/*
 			 * This segment entry covers 256KB of memory -- or
@@ -2056,10 +2057,11 @@
 ctx_alloc(struct pmap *pm)
 {
 	union ctxinfo *c;
-	int cnum, i, doflush;
+	int cnum, i = 0, doflush;
 	struct regmap *rp;
 	int gap_start, gap_end;
 	vaddr_t va;
+	struct cpu_info *cpi;
 
 /*XXX-GCC!*/gap_start=gap_end=0;
 #ifdef DEBUG
@@ -2205,17 +2207,7 @@
 		 * Note on multi-threaded processes: a context must remain
 		 * valid as long as any thread is still running on a CPU.
 		 */
-#if defined(MULTIPROCESSOR)
-		for (i = 0; i < sparc_ncpus; i++)
-#else
-		i = 0;
-#endif
-		{
-			struct cpu_info *cpi = cpus[i];
-#if defined(MULTIPROCESSOR)
-			if (cpi == NULL)
-				continue;
-#endif
+		for (CPU_INFO_FOREACH(i, cpi)) {
 			setpgt4m(&cpi->ctx_tbl[cnum],
 				 (pm->pm_reg_ptps_pa[i] >> SRMMU_PPNPASHIFT) |
 					SRMMU_TEPTD);
@@ -2236,6 +2228,7 @@
 ctx_free(struct pmap *pm)
 {
 	union ctxinfo *c;
+	struct cpu_info *cpi;
 	int ctx;
 
 	c = pm->pm_ctx;
@@ -2259,17 +2252,7 @@
 
 		cache_flush_context(ctx);
 		tlb_flush_context(ctx, PMAP_CPUSET(pm));
-#if defined(MULTIPROCESSOR)
-		for (i = 0; i < sparc_ncpus; i++)
-#else
-		i = 0;
-#endif
-		{
-			struct cpu_info *cpi = cpus[i];
-#if defined(MULTIPROCESSOR)
-			if (cpi == NULL)
-				continue;
-#endif
+		for (CPU_INFO_FOREACH(i, cpi)) {
 			setpgt4m(&cpi->ctx_tbl[ctx], SRMMU_TEINVALID);
 		}
 	}
@@ -3014,7 +2997,8 @@
 	}
 
 	pmap_page_upload();
-	curlwp = &lwp0;
+	mutex_init(&demap_lock, MUTEX_DEFAULT, IPL_VM);
+	mutex_init(&ctx_lock, MUTEX_DEFAULT, IPL_SCHED);
 }
 
 #if defined(SUN4) || defined(SUN4C)
@@ -3209,14 +3193,12 @@
 
 	p = i;			/* retract to first free phys */
 
-	mutex_init(&demap_lock, MUTEX_DEFAULT, IPL_VM);
 
 	/*
 	 * All contexts are free except the kernel's.
 	 *
 	 * XXX sun4c could use context 0 for users?
 	 */
-	mutex_init(&ctx_lock, MUTEX_DEFAULT, IPL_SCHED);
 	ci->c_pmap = pmap_kernel();
 	ctx_freelist = ci + 1;
 	for (i = 1; i < ncontext; i++) {
@@ -3481,6 +3463,11 @@
 	paddr_t pagetables_start_pa;
 	extern char etext[];
 	extern char kernel_text[];
+	vaddr_t va;
+#ifdef MULTIPROCESSOR
+	vsize_t off;
+	struct vm_page *pg;
+#endif
 
 	/*
 	 * Compute `va2pa_offset'.
@@ -3550,7 +3537,6 @@
 	p += ncontext * sizeof *ci;
 	bzero((void *)ci, (u_int)p - (u_int)ci);
 
-
 	/*
 	 * Set up the `constants' for the call to vm_init()
 	 * in main().  All pages beginning at p (rounded up to
@@ -3699,12 +3685,9 @@
 
 	p = q;			/* retract to first free phys */
 
-	mutex_init(&demap_lock, MUTEX_DEFAULT, IPL_VM);
-
 	/*
 	 * Set up the ctxinfo structures (freelist of contexts)
 	 */
-	mutex_init(&ctx_lock, MUTEX_DEFAULT, IPL_SCHED);
 	ci->c_pmap = pmap_kernel();
 	ctx_freelist = ci + 1;
 	for (i = 1; i < ncontext; i++) {
@@ -3775,7 +3758,7 @@
 		 */
 		int size = pagetables_end - pagetables_start;
 		if (CACHEINFO.c_vactype != VAC_NONE) {
-			vaddr_t va = (vaddr_t)pagetables_start;
+			va = (vaddr_t)pagetables_start;
 			while (size > 0) {
 				cache_flush_page(va, 0);
 				va += NBPG;
@@ -3795,6 +3778,64 @@
 	 * Now switch to kernel pagetables (finally!)
 	 */
 	mmu_install_tables(&cpuinfo);
+
+#ifdef MULTIPROCESSOR
+	/* Allocate VA for all the cpu_info structurs */
+	cpus = (union cpu_info_pg*)uvm_km_alloc(kernel_map,
+	    sizeof cpus[sparc_ncpus], 32*1024, UVM_KMF_VAONLY);
+	/*
+	 * Add an alias mapping for the CPUINFO_VA allocation created
+	 * early during bootstrap for the first CPU
+	 */
+	off = 0;
+	for (va = (vaddr_t)&cpus[0].ci;
+	     off < sizeof(struct cpu_info);
+	     va += NBPG, off += NBPG) {
+		paddr_t pa = PMAP_BOOTSTRAP_VA2PA(CPUINFO_VA + off);
+		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+	}
+	/*
+	 * Now allocate memory for all other CPUs cpu_info and map
+	 * it into the coresponding space in the cpus array. We will
+	 * later duplicate the mapping into CPUINFO_VA.
+	 */
+	for (i = 1; i < sparc_ncpus; i++) {
+		off = 0;
+		for (va = (vaddr_t)&cpus[i].ci;
+		     off < sizeof(struct cpu_info);
+		     va += NBPG, off += NBPG) {
+			pg = uvm_pagealloc(NULL, 0, NULL, 0);
+			paddr_t pa = VM_PAGE_TO_PHYS(pg);
+			pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+		}
+	}
+
+	/* clear new cpu infos */
+	prom_printf("clearing other cpus cpu info\n");
+	memset(&cpus[1].ci, 0, (sparc_ncpus-1)*sizeof(union cpu_info_pg));
+
+	/* setup self refernces, and cpu "cpuinfo" */
+	prom_printf("setting cpus self reference and mapping\n");
+	for (i = 0; i < sparc_ncpus; i++) {
+
+		prom_printf("going to set cpu%d ci_self address: %p\n", i, &cpus[i].ci);
+		cpus[i].ci.ci_self = &cpus[i].ci;
+
+		/* mapped above. */
+		if (i == 0)
+			continue;
+
+		off = 0;
+		for (va = (vaddr_t)&cpus[i].ci;
+		     off < sizeof(struct cpu_info);
+		     va += NBPG, off += NBPG) {
+			paddr_t pa = PMAP_BOOTSTRAP_VA2PA(va + off);
+			prom_printf("going to pmap_kenter_pa(va=%p, pa=%p)\n", va, pa);
+			pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+		}
+	}
+#endif
+	pmap_update(pmap_kernel());
 }
 
 static u_long prom_ctxreg;
@@ -3804,7 +3845,7 @@
 {
 
 #ifdef DEBUG
-	printf("pmap_bootstrap: installing kernel page tables...");
+	prom_printf("pmap_bootstrap: installing kernel page tables...");
 #endif
 	setcontext4m(0);	/* paranoia? %%%: Make 0x3 a define! below */
 
@@ -3821,7 +3862,7 @@
 	tlb_flush_all_real();
 
 #ifdef DEBUG
-	printf("done.\n");
+	prom_printf("done.\n");
 #endif
 }
 
@@ -3839,23 +3880,6 @@
 
 #if defined(MULTIPROCESSOR)
 /*
- * Globalize the boot CPU's cpu_info structure.
- */
-void
-pmap_globalize_boot_cpuinfo(struct cpu_info *cpi)
-{
-	vaddr_t va;
-	vsize_t off;
-
-	off = 0;
-	for (va = (vaddr_t)cpi; off < sizeof(*cpi); va += NBPG, off += NBPG) {
-		paddr_t pa = PMAP_BOOTSTRAP_VA2PA(CPUINFO_VA + off);
-		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
-	}
-	pmap_update(pmap_kernel());
-}
-
-/*
  * Allocate per-CPU page tables. One region, segment and page table
  * is needed to map CPUINFO_VA to different physical addresses on
  * each CPU. Since the kernel region and segment tables are all
@@ -4085,10 +4109,6 @@
 			n = 0;
 #endif
 			{
-#if defined(MULTIPROCESSOR)
-				if (cpus[n] == NULL)
-					continue;
-#endif
 				if (pm->pm_reg_ptps[n][vr] != SRMMU_TEINVALID)
 					printf("pmap_chk: spurious PTP in user "
 						"region %d on CPU %d\n", vr, n);
@@ -4202,10 +4222,6 @@
 		{
 			int *upt, *kpt;
 
-#if defined(MULTIPROCESSOR)
-			if (cpus[n] == NULL)
-				continue;
-#endif
 			upt = pool_get(&L1_pool, flags);
 			pm->pm_reg_ptps[n] = upt;
 			pm->pm_reg_ptps_pa[n] = VA2PA((char *)upt);
@@ -4256,10 +4272,6 @@
 		n = 0;
 #endif
 		{
-#if defined(MULTIPROCESSOR)
-			if (cpus[n] == NULL)
-				continue;
-#endif
 			int *pt = pm->pm_reg_ptps[n];
 			pm->pm_reg_ptps[n] = NULL;
 			pm->pm_reg_ptps_pa[n] = 0;
@@ -4435,7 +4447,7 @@
 #ifdef MULTIPROCESSOR
 		/* Invalidate level 1 PTP entries on all CPUs */
 		for (; n < sparc_ncpus; n++) {
-			if (cpus[n] == NULL)
+			if ((cpus[n].ci.flags & CPUFLG_HATCHED) == 0)
 				continue;
 #endif
 			setpgt4m(&pm->pm_reg_ptps[n][vr], SRMMU_TEINVALID);
@@ -6272,7 +6284,7 @@
 #endif
 		{
 #if defined(MULTIPROCESSOR)
-			if (cpus[i] == NULL)
+			if ((cpus[i].ci.flags & CPUFLG_HATCHED) == 0)
 				continue;
 #endif
 			setpgt4m(&pm->pm_reg_ptps[i][vr],

Index: src/sys/arch/sparc/sparc/timer_sun4m.c
diff -u src/sys/arch/sparc/sparc/timer_sun4m.c:1.16 src/sys/arch/sparc/sparc/timer_sun4m.c:1.16.56.1
--- src/sys/arch/sparc/sparc/timer_sun4m.c:1.16	Sun Feb 25 06:03:32 2007
+++ src/sys/arch/sparc/sparc/timer_sun4m.c	Sat May 30 16:57:18 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: timer_sun4m.c,v 1.16 2007/02/25 06:03:32 macallan Exp $	*/
+/*	$NetBSD: timer_sun4m.c,v 1.16.56.1 2009/05/30 16:57:18 snj Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -58,7 +58,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.16 2007/02/25 06:03:32 macallan Exp $");
+__KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.16.56.1 2009/05/30 16:57:18 snj Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -89,9 +89,7 @@
 	int n;
 
 	timerreg4m->t_limit = tmr_ustolim4m(tick);
-	for (n = 0; n < sparc_ncpus; n++) {
-		if ((cpi = cpus[n]) == NULL)
-			continue;
+	for (CPU_INFO_FOREACH(n, cpi)) {
 		cpi->counterreg_4m->t_limit = tmr_ustolim4m(statint);
 	}
 	icr_si_bic(SINTR_T);
@@ -201,10 +199,7 @@
 		 * Check whether the CPU corresponding to this timer
 		 * register is installed.
 		 */
-		cpi = NULL;
-		for (n = 0; n < sparc_ncpus; n++) {
-			if ((cpi = cpus[n]) == NULL)
-				continue;
+		for (CPU_INFO_FOREACH(n, cpi)) {
 			if ((i == 0 && sparc_ncpus == 1) || cpi->mid == i + 8) {
 				/* We got a corresponding MID. */
 				break;
