Module Name:    src
Committed By:   matt
Date:           Wed Dec 30 04:51:26 UTC 2009

Modified Files:
        src/sys/arch/mips/conf [matt-nb5-mips64]: files.mips
        src/sys/arch/mips/include [matt-nb5-mips64]: cpu.h locore.h pmap.h
            proc.h pte.h vmparam.h
        src/sys/arch/mips/mips [matt-nb5-mips64]: locore.S mips_machdep.c
            pmap.c trap.c
Added Files:
        src/sys/arch/mips/mips [matt-nb5-mips64]: pmap_segtab.c

Log Message:
Move segtab lookups into a separate file.
Add mips_page_physload.
Add mips_init_lwp0_uarea.
Clean up lwp0/cpu_info_store initialization.


To generate a diff of this commit:
cvs rdiff -u -r1.58.24.3 -r1.58.24.4 src/sys/arch/mips/conf/files.mips
cvs rdiff -u -r1.90.16.8 -r1.90.16.9 src/sys/arch/mips/include/cpu.h
cvs rdiff -u -r1.78.36.1.2.6 -r1.78.36.1.2.7 \
    src/sys/arch/mips/include/locore.h
cvs rdiff -u -r1.54.26.1 -r1.54.26.2 src/sys/arch/mips/include/pmap.h
cvs rdiff -u -r1.21.36.3 -r1.21.36.4 src/sys/arch/mips/include/proc.h
cvs rdiff -u -r1.19 -r1.19.18.1 src/sys/arch/mips/include/pte.h
cvs rdiff -u -r1.41.28.7 -r1.41.28.8 src/sys/arch/mips/include/vmparam.h
cvs rdiff -u -r1.167.38.3 -r1.167.38.4 src/sys/arch/mips/mips/locore.S
cvs rdiff -u -r1.205.4.1.2.1.2.17 -r1.205.4.1.2.1.2.18 \
    src/sys/arch/mips/mips/mips_machdep.c
cvs rdiff -u -r1.179.16.6 -r1.179.16.7 src/sys/arch/mips/mips/pmap.c
cvs rdiff -u -r0 -r1.1.2.1 src/sys/arch/mips/mips/pmap_segtab.c
cvs rdiff -u -r1.217.12.11 -r1.217.12.12 src/sys/arch/mips/mips/trap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/mips/conf/files.mips
diff -u src/sys/arch/mips/conf/files.mips:1.58.24.3 src/sys/arch/mips/conf/files.mips:1.58.24.4
--- src/sys/arch/mips/conf/files.mips:1.58.24.3	Sun Sep 13 03:28:51 2009
+++ src/sys/arch/mips/conf/files.mips	Wed Dec 30 04:51:25 2009
@@ -1,4 +1,4 @@
-#	$NetBSD: files.mips,v 1.58.24.3 2009/09/13 03:28:51 cliff Exp $
+#	$NetBSD: files.mips,v 1.58.24.4 2009/12/30 04:51:25 matt Exp $
 #
 
 defflag	opt_cputype.h		NOFPU
@@ -34,6 +34,7 @@
 file	arch/mips/mips/kgdb_machdep.c		kgdb
 file	arch/mips/mips/mem.c
 file	arch/mips/mips/pmap.c
+file	arch/mips/mips/pmap_segtab.c
 file	arch/mips/mips/trap.c			# trap handlers
 file	arch/mips/mips/syscall.c		# syscall entries
 file	arch/mips/mips/mips_machdep.c

Index: src/sys/arch/mips/include/cpu.h
diff -u src/sys/arch/mips/include/cpu.h:1.90.16.8 src/sys/arch/mips/include/cpu.h:1.90.16.9
--- src/sys/arch/mips/include/cpu.h:1.90.16.8	Mon Nov 23 23:48:58 2009
+++ src/sys/arch/mips/include/cpu.h	Wed Dec 30 04:51:26 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.90.16.8 2009/11/23 23:48:58 cliff Exp $	*/
+/*	$NetBSD: cpu.h,v 1.90.16.9 2009/12/30 04:51:26 matt Exp $	*/
 
 /*-
  * Copyright (c) 1992, 1993
@@ -393,7 +393,8 @@
 struct lwp;
 struct user;
 
-extern struct segtab *segbase;	/* current segtab base */
+extern struct segtab *segbase;		/* current segtab base */
+extern int mips_poolpage_vmfreelist;	/* freelist to allocate poolpages */
 
 /* copy.S */
 int8_t	ufetch_int8(void *);
@@ -477,9 +478,15 @@
 int	kdbpeek(vaddr_t);
 
 /* mips_machdep.c */
+struct mips_vmfreelist;
+struct phys_ram_seg;
 void	dumpsys(void);
 int	savectx(struct user *);
 void	mips_init_msgbuf(void);
+void	mips_init_lwp0_uarea(void);
+void	mips_page_physload(vaddr_t, vaddr_t,
+	    const struct phys_ram_seg *, size_t,
+	    const struct mips_vmfreelist *, size_t);
 void	savefpregs(struct lwp *);
 void	loadfpregs(struct lwp *);
 

Index: src/sys/arch/mips/include/locore.h
diff -u src/sys/arch/mips/include/locore.h:1.78.36.1.2.6 src/sys/arch/mips/include/locore.h:1.78.36.1.2.7
--- src/sys/arch/mips/include/locore.h:1.78.36.1.2.6	Sun Dec 13 00:25:30 2009
+++ src/sys/arch/mips/include/locore.h	Wed Dec 30 04:51:26 2009
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.h,v 1.78.36.1.2.6 2009/12/13 00:25:30 matt Exp $ */
+/* $NetBSD: locore.h,v 1.78.36.1.2.7 2009/12/30 04:51:26 matt Exp $ */
 
 /*
  * Copyright 1996 The Board of Trustees of The Leland Stanford
@@ -307,6 +307,12 @@
 	uintptr_t lsw_setfunc_trampoline;
 };
 
+struct mips_vmfreelist {
+	paddr_t fl_start;
+	paddr_t fl_end;
+	int fl_freelist;
+};
+
 /*
  * The "active" locore-fuction vector, and
  */

Index: src/sys/arch/mips/include/pmap.h
diff -u src/sys/arch/mips/include/pmap.h:1.54.26.1 src/sys/arch/mips/include/pmap.h:1.54.26.2
--- src/sys/arch/mips/include/pmap.h:1.54.26.1	Mon Sep  7 21:42:17 2009
+++ src/sys/arch/mips/include/pmap.h	Wed Dec 30 04:51:26 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.54.26.1 2009/09/07 21:42:17 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.54.26.2 2009/12/30 04:51:26 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -75,6 +75,7 @@
 #define	_MIPS_PMAP_H_
 
 #include <mips/cpuregs.h>	/* for KSEG0 below */
+#include <mips/pte.h>
 
 /*
  * The user address space is 2Gb (0x0 - 0x80000000).
@@ -100,7 +101,6 @@
 
 #define mips_trunc_seg(x)	((vaddr_t)(x) & ~SEGOFSET)
 #define mips_round_seg(x)	(((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)
-#define pmap_segmap(m, v)	((m)->pm_segtab->seg_tab[((v) >> SEGSHIFT)])
 
 #define PMAP_SEGTABSIZE		(1 << (31 - SEGSHIFT))
 
@@ -110,6 +110,17 @@
 	union pt_entry	*seg_tab[PMAP_SEGTABSIZE];
 };
 
+struct pmap;
+typedef bool (*pte_callback_t)(struct pmap *, vaddr_t, vaddr_t, pt_entry_t *,
+	uintptr_t);
+pt_entry_t *pmap_pte_lookup(struct pmap *, vaddr_t);
+pt_entry_t *pmap_pte_reserve(struct pmap *, vaddr_t, int);
+void pmap_pte_process(struct pmap *, vaddr_t, vaddr_t, pte_callback_t,
+	uintptr_t);
+void pmap_segtab_activate(struct lwp *);
+void pmap_segtab_alloc(struct pmap *);
+void pmap_segtab_free(struct pmap *);
+
 /*
  * Machine dependent pmap structure.
  */
@@ -183,8 +194,10 @@
  */
 vaddr_t mips_pmap_map_poolpage(paddr_t);
 paddr_t mips_pmap_unmap_poolpage(vaddr_t);
-#define	PMAP_MAP_POOLPAGE(pa)	mips_pmap_map_poolpage(pa)
-#define	PMAP_UNMAP_POOLPAGE(va)	mips_pmap_unmap_poolpage(va)
+struct vm_page *mips_pmap_alloc_poolpage(int);
+#define	PMAP_ALLOC_POOLPAGE(flags)	mips_pmap_alloc_poolpage(flags)
+#define	PMAP_MAP_POOLPAGE(pa)		mips_pmap_map_poolpage(pa)
+#define	PMAP_UNMAP_POOLPAGE(va)		mips_pmap_unmap_poolpage(va)
 
 /*
  * Other hooks for the pool allocator.

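For reference, the intent of the new pte_callback_t interface: instead
of each pmap operation open-coding the segment walk, pmap_pte_process()
visits only the allocated segments in [sva, eva) and hands each run of
PTEs to a callback.  A minimal sketch of a caller (the callback below
is illustrative only, not part of this commit):

	static bool
	count_valid_ptes(struct pmap *pmap, vaddr_t sva, vaddr_t eva,
	    pt_entry_t *pte, uintptr_t flags)
	{
		size_t * const countp = (size_t *)flags;

		/* Visit every PTE mapping [sva, eva). */
		for (; sva < eva; sva += NBPG, pte++) {
			if (mips_pg_v(pte->pt_entry))
				(*countp)++;
		}
		return false;
	}

	size_t count = 0;
	pmap_pte_process(pmap, sva, eva, count_valid_ptes,
	    (uintptr_t)&count);
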
Index: src/sys/arch/mips/include/proc.h
diff -u src/sys/arch/mips/include/proc.h:1.21.36.3 src/sys/arch/mips/include/proc.h:1.21.36.4
--- src/sys/arch/mips/include/proc.h:1.21.36.3	Sat Sep  5 03:16:23 2009
+++ src/sys/arch/mips/include/proc.h	Wed Dec 30 04:51:26 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: proc.h,v 1.21.36.3 2009/09/05 03:16:23 matt Exp $	*/
+/*	$NetBSD: proc.h,v 1.21.36.4 2009/12/30 04:51:26 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -76,6 +76,8 @@
 #ifdef _KERNEL
 /* kernel single-step emulation */
 int mips_singlestep(struct lwp *l);
+
+#define	LWP0_CPU_INFO	&cpu_info_store	/* statically set in lwp0 */
 #endif /* _KERNEL */
 
 #endif /* _MIPS_PROC_H_ */

Index: src/sys/arch/mips/include/pte.h
diff -u src/sys/arch/mips/include/pte.h:1.19 src/sys/arch/mips/include/pte.h:1.19.18.1
--- src/sys/arch/mips/include/pte.h:1.19	Mon Apr 28 20:23:28 2008
+++ src/sys/arch/mips/include/pte.h	Wed Dec 30 04:51:26 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: pte.h,v 1.19 2008/04/28 20:23:28 martin Exp $	*/
+/*	$NetBSD: pte.h,v 1.19.18.1 2009/12/30 04:51:26 matt Exp $	*/
 
 /*-
  * Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 #define	mips_pg_nv_bit()	(MIPS1_PG_NV)	/* same on mips1 and mips3 */
 
 
-int pmap_is_page_ro(pmap_t, vaddr_t, int);
+int pmap_is_page_ro(struct pmap *pmap, vaddr_t, int);
 
 
 /* MIPS1-only */

Index: src/sys/arch/mips/include/vmparam.h
diff -u src/sys/arch/mips/include/vmparam.h:1.41.28.7 src/sys/arch/mips/include/vmparam.h:1.41.28.8
--- src/sys/arch/mips/include/vmparam.h:1.41.28.7	Fri Dec 11 20:22:15 2009
+++ src/sys/arch/mips/include/vmparam.h	Wed Dec 30 04:51:26 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: vmparam.h,v 1.41.28.7 2009/12/11 20:22:15 matt Exp $	*/
+/*	$NetBSD: vmparam.h,v 1.41.28.8 2009/12/30 04:51:26 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -222,10 +222,7 @@
 #ifndef VM_NFREELIST
 #define	VM_NFREELIST		16	/* 16 distinct memory segments */
 #define VM_FREELIST_DEFAULT	0
-#define VM_FREELIST_FIRST16M	1	/* ISA DMA range */
-#define VM_FREELIST_FIRST512M	2	/* KSEG0/1 */
-#define VM_FREELIST_FIRST4G	3	/* 32bit addrs */
-#define VM_FREELIST_MAX		4
+#define VM_FREELIST_MAX		1
 #endif
 
 #endif /* ! _MIPS_VMPARAM_H_ */

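Note that mips_page_physload() (below) only applies the 512MB and 4GB
splits when a port defines VM_FREELIST_FIRST512M/VM_FREELIST_FIRST4G
itself.  A port with memory beyond KSEG0 reach would now carry
something like this in its own <machine/vmparam.h> (the values here
are hypothetical):

	#define	VM_NFREELIST		16
	#define	VM_FREELIST_DEFAULT	0
	#define	VM_FREELIST_FIRST512M	1	/* reachable via KSEG0/1 */
	#define	VM_FREELIST_FIRST4G	2	/* 32-bit addressable */
	#define	VM_FREELIST_MAX		3
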
Index: src/sys/arch/mips/mips/locore.S
diff -u src/sys/arch/mips/mips/locore.S:1.167.38.3 src/sys/arch/mips/mips/locore.S:1.167.38.4
--- src/sys/arch/mips/mips/locore.S:1.167.38.3	Fri Sep 11 23:51:25 2009
+++ src/sys/arch/mips/mips/locore.S	Wed Dec 30 04:51:26 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.167.38.3 2009/09/11 23:51:25 matt Exp $	*/
+/*	$NetBSD: locore.S,v 1.167.38.4 2009/12/30 04:51:26 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -138,10 +138,7 @@
 #endif
 	INT_S	t0, _C_LABEL(cpu_id)		# save PRID register
 	INT_S	t1, _C_LABEL(fpu_id)		# save FPU ID register
-	PTR_LA	MIPS_CURLWP, _C_LABEL(lwp0)	# set curlwp, curcpu
-	PTR_LA	t0, _C_LABEL(cpu_info_store)
-	PTR_S	MIPS_CURLWP, CPU_INFO_CURLWP(t0)
-	PTR_S	t0, L_CPU(MIPS_CURLWP) 
+	PTR_LA	MIPS_CURLWP, _C_LABEL(lwp0)	# set curlwp
 	jal	_C_LABEL(mach_init)		# mach_init(a0, a1, a2, a3)
 	nop
 

Index: src/sys/arch/mips/mips/mips_machdep.c
diff -u src/sys/arch/mips/mips/mips_machdep.c:1.205.4.1.2.1.2.17 src/sys/arch/mips/mips/mips_machdep.c:1.205.4.1.2.1.2.18
--- src/sys/arch/mips/mips/mips_machdep.c:1.205.4.1.2.1.2.17	Fri Nov 13 05:25:49 2009
+++ src/sys/arch/mips/mips/mips_machdep.c	Wed Dec 30 04:51:26 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: mips_machdep.c,v 1.205.4.1.2.1.2.17 2009/11/13 05:25:49 cliff Exp $	*/
+/*	$NetBSD: mips_machdep.c,v 1.205.4.1.2.1.2.18 2009/12/30 04:51:26 matt Exp $	*/
 
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -112,7 +112,7 @@
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: mips_machdep.c,v 1.205.4.1.2.1.2.17 2009/11/13 05:25:49 cliff Exp $");
+__KERNEL_RCSID(0, "$NetBSD: mips_machdep.c,v 1.205.4.1.2.1.2.18 2009/12/30 04:51:26 matt Exp $");
 
 #include "opt_cputype.h"
 #include "opt_compat_netbsd32.h"
@@ -226,6 +226,11 @@
 uint32_t mips3_tlb_pg_mask;
 #endif
 
+struct	cpu_info cpu_info_store = {
+	.ci_curlwp = &lwp0,
+	.ci_fpcurlwp = &lwp0,
+};
+
 struct	user *proc0paddr;
 struct	segtab *segbase = (void *)(MIPS_KSEG2_START + 0x1eadbeef);
 
@@ -832,6 +837,7 @@
 {
 	const struct pridtab *ct;
 
+#if 0
 	/*
 	 * XXX Set-up curlwp/curcpu again.  They may have been clobbered
 	 * beween verylocore and here.
@@ -839,7 +845,10 @@
 	lwp0.l_cpu = &cpu_info_store;
 	cpu_info_store.ci_curlwp = &lwp0;
 	cpu_info_store.ci_fpcurlwp = &lwp0;
-	curlwp = &lwp0;
+#endif
+#if 0
+	curlwp = &lwp0;		/* handled in locore.S */
+#endif
 
 	mycpu = NULL;
 	for (ct = cputab; ct->cpu_name != NULL; ct++) {
@@ -1623,6 +1632,173 @@
 }
 
 void
+mips_init_lwp0_uarea(void)
+{
+	vaddr_t v = uvm_pageboot_alloc(USPACE);
+	lwp0.l_addr = proc0paddr = (struct user *)v;
+	lwp0.l_md.md_regs = (struct frame *)(v + USPACE) - 1;
+#ifdef _LP64
+	lwp0.l_md.md_regs->f_regs[_R_SR] = MIPS_SR_KX;
+#endif
+	lwp0.l_addr->u_pcb.pcb_context.val[_L_SR] =
+#ifdef _LP64
+	    MIPS_SR_KX |
+#endif
+	    MIPS_INT_MASK | MIPS_SR_INT_IE; /* SR */
+}
+
+int mips_poolpage_vmfreelist = VM_FREELIST_DEFAULT;
+
+#define	HALFGIG		((paddr_t)512 * 1024 * 1024)
+#define	FOURGIG		((paddr_t)4 * 1024 * 1024 * 1024)
+
+void
+mips_page_physload(vaddr_t vkernstart, vaddr_t vkernend,
+	const phys_ram_seg_t *segs, size_t nseg,
+	const struct mips_vmfreelist *flp, size_t nfl)
+{
+	const paddr_t kernstart = MIPS_KSEG0_TO_PHYS(vkernstart);
+	const paddr_t kernend = MIPS_KSEG0_TO_PHYS(vkernend);
+#if defined(VM_FREELIST_FIRST4G) || defined(VM_FREELIST_FIRST512M)
+#ifdef VM_FREELIST_FIRST512M
+	bool need512m = false;
+#endif
+#ifdef VM_FREELIST_FIRST4G
+	bool need4g = false;
+#endif
+
+	/*
+	 * Do a first pass and see what ranges of memory we have to deal with.
+	 */
+	for (size_t i = 0; i < nseg; i++) {
+#ifdef VM_FREELIST_FIRST4G
+		if (segs[i].last > FOURGIG)
+			need4g = true;
+#endif
+#ifdef VM_FREELIST_FIRST512M
+		if (segs[i].last > HALFGIG) {
+			need512m = true;
+			mips_poolpage_vmfreelist = VM_FREELIST_FIRST512M;
+		}
+#endif
+	}
+#endif /* VM_FREELIST_FIRST512M || VM_FREELIST_FIRST4G */
+
+	while (nseg-- > 0) {
+		/*
+		 * Copy this segment since we may have to deal with it
+		 * piecemeal.
+		 */
+		phys_ram_seg_t tmp = *segs++;
+		printf("phys segment: %#"PRIxPADDR"@%#"PRIxPADDR"\n", tmp.size, tmp.start);
+		while (tmp.size > 0) {
+			int freelist = -1;	/* unknown freelist */
+			psize_t segsize = tmp.size;
+			for (size_t i = 0; i < nfl; i++) {
+				/*
+				 * If this segment doesn't overlap the freelist
+				 * at all, skip it.
+				 */ 
+				if (tmp.start >= flp[i].fl_end
+				    || tmp.start + tmp.size <= flp[i].fl_start)
+					continue;
+				/*
+				 * If this segment starts before the start
+				 * of the freelist, limit the piece loaded
+				 * now to the part preceding the freelist
+				 * and let it fall back to normal freelist
+				 * matching.
+				 */
+				if (tmp.start < flp[i].fl_start) {
+					segsize = flp[i].fl_start - tmp.start;
+					break;
+				}
+
+				/*
+				 * We've matched this freelist so remember it.
+				 */
+				freelist = flp[i].fl_freelist;
+
+				/*
+				 * If this segment extends past the end of this
+				 * freelist, bound the segment to the freelist.
+				 */
+				if (tmp.start + tmp.size > flp[i].fl_end)
+					segsize = flp[i].fl_end - tmp.start;
+				break;
+			}
+			/*
+			 * If we didn't match one of the port dependent
+			 * freelists, let's try the common ones.
+			 */
+			if (freelist == -1) {
+#ifdef VM_FREELIST_FIRST512M
+				if (need512m && tmp.start < HALFGIG) {
+					freelist = VM_FREELIST_FIRST512M;
+					if (tmp.start + tmp.size > HALFGIG)
+						segsize = HALFGIG - tmp.start;
+				} else
+#endif
+#ifdef VM_FREELIST_FIRST4G
+				if (need4g && tmp.start < FOURGIG) {
+					freelist = VM_FREELIST_FIRST4G;
+					if (tmp.start + tmp.size > FOURGIG)
+						segsize = FOURGIG - tmp.start;
+				} else
+#endif
+					freelist = VM_FREELIST_DEFAULT;
+			}
+
+			/*
+			 * Make sure the memory we provide to uvm doesn't
+			 * include the kernel.
+			 */
+			if (tmp.start < kernend
+			    && tmp.start + segsize > kernstart) {
+				if (tmp.start < kernstart) {
+					/*
+					 * Only add the memory before the
+					 * kernel.
+					 */
+					segsize -= kernstart - tmp.start;
+				} else if (tmp.start + segsize > kernend) {
+					/*
+					 * Only add the memory after the
+					 * kernel.
+					 */
+					segsize -= (kernend - tmp.start);
+					tmp.size -= (kernend - tmp.start);
+					tmp.start = kernend;
+				} else {
+					/*
+					 * Just skip the segment entirely since
+					 * it's inside the kernel.
+					 */
+					tmp.start += segsize;
+					tmp.size -= segsize;
+					continue;
+				}
+			}
+			
+			/*
+			 * Now we give this segment to uvm.
+			 */
+			paddr_t first = atop(tmp.start);
+			paddr_t last = first + atop(segsize);
+			printf("adding %#"PRIxPADDR"@%#"PRIxPADDR" to freelist %d\n",
+				tmp.start, tmp.start + segsize, freelist);
+			uvm_page_physload(first, last, first, last, freelist);
+
+			/*
+			 * Remove from tmp the segment we just loaded.
+			 */
+			tmp.start += segsize;
+			tmp.size -= segsize;
+		}
+	}
+}
+
+void
 savefpregs(struct lwp *l)
 {
 #ifndef NOFPU

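With these two helpers a port's mach_init() can hand its RAM layout to
common code rather than calling uvm_page_physload() and carving out
lwp0's u-area by hand.  A sketch, in which mem_segs/mem_seg_cnt, the
kernel_text/end symbols, and the ISA DMA freelist are all placeholders
for whatever the port actually has:

	static const struct mips_vmfreelist isadma_fl = {
		.fl_start = 0,
		.fl_end = 16 * 1024 * 1024,
		.fl_freelist = VM_FREELIST_ISADMA,	/* port-defined */
	};

	mips_page_physload((vaddr_t)kernel_text, (vaddr_t)end,
	    mem_segs, mem_seg_cnt, &isadma_fl, 1);
	mips_init_lwp0_uarea();
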
Index: src/sys/arch/mips/mips/pmap.c
diff -u src/sys/arch/mips/mips/pmap.c:1.179.16.6 src/sys/arch/mips/mips/pmap.c:1.179.16.7
--- src/sys/arch/mips/mips/pmap.c:1.179.16.6	Sat Dec 19 06:58:30 2009
+++ src/sys/arch/mips/mips/pmap.c	Wed Dec 30 04:51:26 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.179.16.6 2009/12/19 06:58:30 matt Exp $	*/
+/*	$NetBSD: pmap.c,v 1.179.16.7 2009/12/30 04:51:26 matt Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.179.16.6 2009/12/19 06:58:30 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.179.16.7 2009/12/30 04:51:26 matt Exp $");
 
 /*
  *	Manages physical address maps.
@@ -150,7 +150,6 @@
 CTASSERT(MIPS_KSEG0_P(MIPS_PHYS_TO_KSEG0(0)));
 CTASSERT(MIPS_KSEG1_P(MIPS_PHYS_TO_KSEG1(0)));
 
-CTASSERT(NBPG >= sizeof(struct segtab));
 #ifdef DEBUG
 struct {
 	int kernel;	/* entering kernel mapping */
@@ -203,13 +202,12 @@
 struct pv_entry	*pv_table;
 int		 pv_table_npages;
 
-struct segtab	*free_segtab;		/* free list kept locally */
 pt_entry_t	*Sysmap;		/* kernel pte table */
 unsigned int	Sysmapsize;		/* number of pte's in Sysmap */
 
-unsigned pmap_max_asid;			/* max ASID supported by the system */
-unsigned pmap_next_asid;		/* next free ASID to use */
-unsigned pmap_asid_generation;		/* current ASID generation */
+unsigned int	pmap_asid_max;		/* max ASID supported by the system */
+unsigned int	pmap_asid_next;		/* next free ASID to use */
+unsigned int	pmap_asid_generation;	/* current ASID generation */
 #define PMAP_ASID_RESERVED 0
 
 /*
@@ -379,8 +377,8 @@
 	pmap_kernel()->pm_asidgen = 0;
 	pmap_kernel()->pm_segtab = (void *)(MIPS_KSEG2_START + 0x1eadbeef);
 
-	pmap_max_asid = MIPS_TLB_NUM_PIDS;
-	pmap_next_asid = 1;
+	pmap_asid_max = MIPS_TLB_NUM_PIDS;
+	pmap_asid_next = 1;
 	pmap_asid_generation = 0;
 
 	MachSetPID(0);
@@ -584,7 +582,6 @@
 pmap_create(void)
 {
 	pmap_t pmap;
-	int i;
 
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
@@ -595,50 +592,11 @@
 	memset(pmap, 0, sizeof(*pmap));
 
 	pmap->pm_count = 1;
-	if (free_segtab) {
-		pmap->pm_segtab = free_segtab;
-		free_segtab = *(struct segtab **)free_segtab;
-		pmap->pm_segtab->seg_tab[0] = NULL;
-	} else {
-		struct segtab *stp;
-		struct vm_page *stp_pg;
-		paddr_t stp_pa;
-
-		for (;;) {
-			stp_pg = uvm_pagealloc(NULL, 0, NULL,
-			    UVM_PGA_USERESERVE|UVM_PGA_ZERO);
-			if (stp_pg != NULL)
-				break;
-			/*
-			 * XXX What else can we do?  Could we
-			 * XXX deadlock here?
-			 */
-			uvm_wait("pmap_create");
-		}
-
-		stp_pa = VM_PAGE_TO_PHYS(stp_pg);
-#ifdef _LP64
-		KASSERT(mips3_xkphys_cached);
-		stp = (struct segtab *)MIPS_PHYS_TO_XKPHYS_CACHED(stp_pa);
-#else
-		stp = (struct segtab *)MIPS_PHYS_TO_KSEG0(stp_pa);
-#endif
-		pmap->pm_segtab = stp;
-		i = NBPG / sizeof(struct segtab);
-		while (--i != 0) {
-			stp++;
-			*(struct segtab **)stp = free_segtab;
-			free_segtab = stp;
-		}
-	}
-#ifdef PARANOIADIAG
-	for (i = 0; i < PMAP_SEGTABSIZE; i++)
-		if (pmap->pm_segtab->seg_tab[i] != 0)
-			panic("pmap_create: pm_segtab != 0");
-#endif
 	pmap->pm_asid = PMAP_ASID_RESERVED;
 	pmap->pm_asidgen = pmap_asid_generation;
 
+	pmap_segtab_alloc(pmap);
+
 	return pmap;
 }
 
@@ -660,52 +618,7 @@
 	if (count > 0)
 		return;
 
-	if (pmap->pm_segtab) {
-		pt_entry_t *pte;
-		int i;
-#ifdef PARANOIADIAG
-		int j;
-#endif
-
-		for (i = 0; i < PMAP_SEGTABSIZE; i++) {
-			paddr_t pa;
-			/* get pointer to segment map */
-			pte = pmap->pm_segtab->seg_tab[i];
-			if (!pte)
-				continue;
-#ifdef PARANOIADIAG
-			for (j = 0; j < NPTEPG; j++) {
-				if ((pte + j)->pt_entry)
-					panic("pmap_destroy: segmap not empty");
-			}
-#endif
-
-#ifdef MIPS3_PLUS	/* XXX mmu XXX */
-			/*
-			 * The pica pmap.c flushed the segmap pages here.  I'm
-			 * not sure why, but I suspect it's because the page(s)
-			 * were being accessed by KSEG0 (cached) addresses and
-			 * may cause cache coherency problems when the page
-			 * is reused with KSEG2 (mapped) addresses.  This may
-			 * cause problems on machines without VCED/VCEI.
-			 */
-			if (mips_cache_virtual_alias)
-				mips_dcache_inv_range((vaddr_t)pte, PAGE_SIZE);
-#endif	/* MIPS3_PLUS */
-#ifdef _LP64
-			KASSERT(MIPS_XKPHYS_P(pte));
-			pa = MIPS_XKPHYS_TO_PHYS(pte);
-#else
-			pa = MIPS_KSEG0_TO_PHYS(pte);
-#endif
-			uvm_pagefree(PHYS_TO_VM_PAGE(pa));
-
-			pmap->pm_segtab->seg_tab[i] = NULL;
-		}
-		*(struct segtab **)pmap->pm_segtab = free_segtab;
-		free_segtab = pmap->pm_segtab;
-		pmap->pm_segtab = NULL;
-	}
+	pmap_segtab_free(pmap);
 
 	pool_put(&pmap_pmap_pool, pmap);
 }
@@ -736,7 +649,7 @@
 
 	pmap_asid_alloc(pmap);
 	if (l == curlwp) {
-		segbase = pmap->pm_segtab;
+		pmap_segtab_activate(l);
 		MachSetPID(pmap->pm_asid);
 	}
 }
@@ -757,14 +670,50 @@
  *	It is assumed that the start and end are properly
  *	rounded to the page size.
  */
+
+static bool
+pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *pte,
+	uintptr_t flags)
+{
+	const uint32_t asid = pmap->pm_asid << MIPS_TLB_PID_SHIFT;
+	const bool needflush = (pmap->pm_asidgen == pmap_asid_generation);
+
+#ifdef DEBUG
+	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) {
+		printf("%s: %p, %"PRIxVADDR", %"PRIxVADDR", %p, %"PRIxPTR"\n",
+		   __func__, pmap, sva, eva, pte, flags);
+	}
+#endif
+
+	for (; sva < eva; sva += NBPG, pte++) {
+		struct vm_page *pg;
+		uint32_t entry = pte->pt_entry;
+		if (!mips_pg_v(entry))
+			continue;
+		if (mips_pg_wired(entry))
+			pmap->pm_stats.wired_count--;
+		pmap->pm_stats.resident_count--;
+		pg = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(entry));
+		if (pg)
+			pmap_remove_pv(pmap, sva, pg);
+		pte->pt_entry = mips_pg_nv_bit();
+		/*
+		 * Flush the TLB for the given address.
+		 */
+		if (needflush) {
+			MIPS_TBIS(sva | asid);
+#ifdef DEBUG
+			remove_stats.flushes++;
+#endif
+		}
+	}
+	return false;
+}
+
 void
 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
 {
 	struct vm_page *pg;
-	vaddr_t nssva;
-	pt_entry_t *pte;
-	unsigned entry;
-	unsigned asid, needflush;
 
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
@@ -777,9 +726,9 @@
 		if (sva < VM_MIN_KERNEL_ADDRESS || eva >= virtual_end)
 			panic("pmap_remove: kva not in range");
 #endif
-		pte = kvtopte(sva);
+		pt_entry_t *pte = kvtopte(sva);
 		for (; sva < eva; sva += NBPG, pte++) {
-			entry = pte->pt_entry;
+			uint32_t entry = pte->pt_entry;
 			if (!mips_pg_v(entry))
 				continue;
 			if (mips_pg_wired(entry))
@@ -800,7 +749,6 @@
 			MIPS_TBIS(sva);
 #ifdef DEBUG
 			remove_stats.flushes++;
-
 #endif
 		}
 		return;
@@ -820,46 +768,7 @@
 		}
 	}
 #endif
-	asid = pmap->pm_asid << MIPS_TLB_PID_SHIFT;
-	needflush = (pmap->pm_asidgen == pmap_asid_generation);
-	while (sva < eva) {
-		nssva = mips_trunc_seg(sva) + NBSEG;
-		if (nssva == 0 || nssva > eva)
-			nssva = eva;
-		/*
-		 * If VA belongs to an unallocated segment,
-		 * skip to the next segment boundary.
-		 */
-		if (!(pte = pmap_segmap(pmap, sva))) {
-			sva = nssva;
-			continue;
-		}
-		/*
-		 * Invalidate every valid mapping within this segment.
-		 */
-		pte += (sva >> PGSHIFT) & (NPTEPG - 1);
-		for (; sva < nssva; sva += NBPG, pte++) {
-			entry = pte->pt_entry;
-			if (!mips_pg_v(entry))
-				continue;
-			if (mips_pg_wired(entry))
-				pmap->pm_stats.wired_count--;
-			pmap->pm_stats.resident_count--;
-			pg = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(entry));
-			if (pg)
-				pmap_remove_pv(pmap, sva, pg);
-			pte->pt_entry = mips_pg_nv_bit();
-			/*
-			 * Flush the TLB for the given address.
-			 */
-			if (needflush) {
-				MIPS_TBIS(sva | asid);
-#ifdef DEBUG
-				remove_stats.flushes++;
-#endif
-			}
-		}
-	}
+	pmap_pte_process(pmap, sva, eva, pmap_pte_remove, 0);
 }
 
 /*
@@ -912,6 +821,34 @@
 	}
 }
 
+static bool
+pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *pte,
+	uintptr_t flags)
+{
+	const uint32_t asid = pmap->pm_asid << MIPS_TLB_PID_SHIFT;
+	const bool needupdate = (pmap->pm_asidgen == pmap_asid_generation);
+	const uint32_t p = flags;
+
+	/*
+	 * Change protection on every valid mapping within this segment.
+	 */
+	for (; sva < eva; sva += NBPG, pte++) {
+		uint32_t entry = pte->pt_entry;
+		if (!mips_pg_v(entry))
+			continue;
+		if (MIPS_HAS_R4K_MMU && entry & mips_pg_m_bit())
+			mips_dcache_wbinv_range_index(sva, PAGE_SIZE);
+		entry = (entry & ~(mips_pg_m_bit() | mips_pg_ro_bit())) | p;
+		pte->pt_entry = entry;
+		/*
+		 * Update the TLB if the given address is in the cache.
+		 */
+		if (needupdate)
+			MachTLBUpdate(sva | asid, entry);
+	}
+	return false;
+}
+
 /*
  *	Set the physical protection on the
  *	specified range of this map as requested.
@@ -919,11 +856,8 @@
 void
 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
 {
-	vaddr_t nssva;
 	pt_entry_t *pte;
-	unsigned entry;
 	u_int p;
-	unsigned asid, needupdate;
 
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
@@ -952,7 +886,7 @@
 #endif
 		pte = kvtopte(sva);
 		for (; sva < eva; sva += NBPG, pte++) {
-			entry = pte->pt_entry;
+			uint32_t entry = pte->pt_entry;
 			if (!mips_pg_v(entry))
 				continue;
 			if (MIPS_HAS_R4K_MMU && entry & mips_pg_m_bit())
@@ -979,40 +913,11 @@
 		}
 	}
 #endif
-	asid = pmap->pm_asid << MIPS_TLB_PID_SHIFT;
-	needupdate = (pmap->pm_asidgen == pmap_asid_generation);
-	while (sva < eva) {
-		nssva = mips_trunc_seg(sva) + NBSEG;
-		if (nssva == 0 || nssva > eva)
-			nssva = eva;
-		/*
-		 * If VA belongs to an unallocated segment,
-		 * skip to the next segment boundary.
-		 */
-		if (!(pte = pmap_segmap(pmap, sva))) {
-			sva = nssva;
-			continue;
-		}
-		/*
-		 * Change protection on every valid mapping within this segment.
-		 */
-		pte += (sva >> PGSHIFT) & (NPTEPG - 1);
-		for (; sva < nssva; sva += NBPG, pte++) {
-			entry = pte->pt_entry;
-			if (!mips_pg_v(entry))
-				continue;
-			if (MIPS_HAS_R4K_MMU && entry & mips_pg_m_bit())
-				mips_dcache_wbinv_range_index(sva, PAGE_SIZE);
-			entry = (entry & ~(mips_pg_m_bit() |
-			    mips_pg_ro_bit())) | p;
-			pte->pt_entry = entry;
-			/*
-			 * Update the TLB if the given address is in the cache.
-			 */
-			if (needupdate)
-				MachTLBUpdate(sva | asid, entry);
-		}
-	}
+
+	/*
+	 * Change protection on every valid mapping within this segment.
+	 */
+	pmap_pte_process(pmap, sva, eva, pmap_pte_protect, p);
 }
 
 /*
@@ -1048,10 +953,7 @@
 		if (pmap == pmap_kernel()) {
 			pte = kvtopte(va);
 		} else {
-			if (!(pte = pmap_segmap(pmap, va))) {
-				return;
-			}
-			pte += (va >> PGSHIFT) & (NPTEPG - 1);
+			pte = pmap_pte_lookup(pmap, va);
 		}
 		entry = pte->pt_entry;
 		if (!mips_pg_v(entry))
@@ -1118,10 +1020,9 @@
 			}
 		} else {
 
-			pte = pmap_segmap(pv->pv_pmap, pv->pv_va);
+			pte = pmap_pte_lookup(pv->pv_pmap, pv->pv_va);
 			if (pte == NULL)
 				continue;
-			pte += (pv->pv_va >> PGSHIFT) & (NPTEPG - 1);
 			entry = pte->pt_entry;
 			if (entry & MIPS3_PG_V) {
 				entry = (entry & ~MIPS3_PG_CACHEMODE) | newmode;
@@ -1152,7 +1053,7 @@
 {
 	pt_entry_t *pte;
 	u_int npte;
-	struct vm_page *pg, *mem;
+	struct vm_page *pg;
 	unsigned asid;
 #if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
 	int cached = 1;
@@ -1308,41 +1209,15 @@
 		return 0;
 	}
 
-	if (!(pte = pmap_segmap(pmap, va))) {
-		paddr_t phys;
-		mem = uvm_pagealloc(NULL, 0, NULL,
-				    UVM_PGA_USERESERVE|UVM_PGA_ZERO);
-		if (mem == NULL) {
-			if (flags & PMAP_CANFAIL)
-				return ENOMEM;
-			panic("pmap_enter: cannot allocate segmap");
-		}
-
-		phys = VM_PAGE_TO_PHYS(mem);
-#ifdef _LP64
-		KASSERT(mips3_xkphys_cached);
-		pte = (pt_entry_t *)MIPS_PHYS_TO_XKPHYS_CACHED(phys);
-#else
-		pte = (pt_entry_t *)MIPS_PHYS_TO_KSEG0(phys);
-#endif
-		pmap_segmap(pmap, va) = pte;
-#ifdef PARANOIADIAG
-	    {
-		int i;
-		for (i = 0; i < NPTEPG; i++) {
-			if ((pte+i)->pt_entry)
-				panic("pmap_enter: new segmap not empty");
-		}
-	    }
-#endif
+	pte = pmap_pte_reserve(pmap, va, flags);
+	if (__predict_false(pte == NULL)) {
+		return ENOMEM;
 	}
 
 	/* Done after case that may sleep/return. */
 	if (pg)
 		pmap_enter_pv(pmap, va, pg, &npte);
 
-	pte += (va >> PGSHIFT) & (NPTEPG - 1);
-
 	/*
 	 * Now validate mapping with desired protection/wiring.
 	 * Assume uniform modified and referenced status for all
@@ -1514,13 +1389,12 @@
 #endif
 		pte = kvtopte(va);
 	} else {
-		pte = pmap_segmap(pmap, va);
+		pte = pmap_pte_lookup(pmap, va);
 #ifdef DIAGNOSTIC
 		if (pte == NULL)
 			panic("pmap_unwire: pmap %p va %#"PRIxVADDR" invalid STE",
 			    pmap, va);
 #endif
-		pte += (va >> PGSHIFT) & (NPTEPG - 1);
 	}
 
 #ifdef DIAGNOSTIC
@@ -1575,14 +1449,13 @@
 		else
 			pte = kvtopte(va);
 	} else {
-		if (!(pte = pmap_segmap(pmap, va))) {
+		if (!(pte = pmap_pte_lookup(pmap, va))) {
 #ifdef DEBUG
 			if (pmapdebug & PDB_FOLLOW)
 				printf("not in segmap\n");
 #endif
 			return false;
 		}
-		pte += (va >> PGSHIFT) & (NPTEPG - 1);
 	}
 	if (!mips_pg_v(pte->pt_entry)) {
 #ifdef DEBUG
@@ -1844,9 +1717,8 @@
 			pte = kvtopte(va);
 			asid = 0;
 		} else {
-			pte = pmap_segmap(pmap, va);
+			pte = pmap_pte_lookup(pmap, va);
 			KASSERT(pte);
-			pte += ((va >> PGSHIFT) & (NPTEPG - 1));
 			asid = pmap->pm_asid << MIPS_TLB_PID_SHIFT;
 		}
 		if ((pte->pt_entry & mips_pg_m_bit()) == 0) {
@@ -1910,12 +1782,12 @@
 
 	if (pmap->pm_asid == PMAP_ASID_RESERVED ||
 	    pmap->pm_asidgen != pmap_asid_generation) {
-		if (pmap_next_asid == pmap_max_asid) {
+		if (pmap_asid_next == pmap_asid_max) {
 			MIPS_TBIAP();
 			pmap_asid_generation++; /* ok to wrap to 0 */
-			pmap_next_asid = 1;	/* 0 means invalid */
+			pmap_asid_next = 1;	/* 0 means invalid */
 		}
-		pmap->pm_asid = pmap_next_asid++;
+		pmap->pm_asid = pmap_asid_next++;
 		pmap->pm_asidgen = pmap_asid_generation;
 	}
 
@@ -2044,10 +1916,8 @@
 				if (pmap == pmap_kernel())
 					entry = kvtopte(va)->pt_entry;
 				else {
-					pte = pmap_segmap(pmap, va);
+					pte = pmap_pte_lookup(pmap, va);
 					if (pte) {
-						pte += (va >> PGSHIFT) &
-						    (NPTEPG - 1);
 						entry = pte->pt_entry;
 					} else
 						entry = 0;
@@ -2120,7 +1990,6 @@
 			 * Copy current modified and referenced status to
 			 * the following entry before copying.
 			 */
-
 			npv->pv_flags |=
 			    pv->pv_flags & (PV_MODIFIED | PV_REFERENCED);
 			*pv = *npv;
@@ -2176,28 +2045,20 @@
 void *
 pmap_pv_page_alloc(struct pool *pp, int flags)
 {
-	struct vm_page *pg;
-	paddr_t phys;
-#if defined(MIPS3_PLUS)
-	pv_entry_t pv;
-#endif
-	vaddr_t va;
-
-	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
+	const struct vm_page *pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
 	if (pg == NULL)
 		return NULL;
 
-	phys = VM_PAGE_TO_PHYS(pg);
+	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
 #ifdef _LP64
 	KASSERT(mips3_xkphys_cached);
-	va = MIPS_PHYS_TO_XKPHYS_CACHED(phys);
+	const vaddr_t va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
 #else
-	va = MIPS_PHYS_TO_KSEG0(phys);
+	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
 #endif
 #if defined(MIPS3_PLUS)
 	if (mips_cache_virtual_alias) {
-		pg = PHYS_TO_VM_PAGE(phys);
-		pv = pg->mdpage.pvh_list;
+		pv_entry_t pv = pg->mdpage.pvh_list;
 		if ((pv->pv_flags & PV_UNCACHED) == 0 &&
 		    mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va))
 			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
@@ -2232,12 +2093,12 @@
 pt_entry_t *
 pmap_pte(pmap_t pmap, vaddr_t va)
 {
-	pt_entry_t *pte = NULL;
+	pt_entry_t *pte;
 
 	if (pmap == pmap_kernel())
 		pte = kvtopte(va);
-	else if ((pte = pmap_segmap(pmap, va)) != NULL)
-		pte += (va >> PGSHIFT) & (NPTEPG - 1);
+	else
+		pte = pmap_pte_lookup(pmap, va);
 	return pte;
 }
 
@@ -2264,6 +2125,22 @@
 }
 #endif	/* MIPS3_PLUS */
 
+struct vm_page *
+mips_pmap_alloc_poolpage(int flags)
+{
+#ifndef _LP64
+	/*
+	 * On 32bit kernels, we must make sure that we only allocate pages that
+	 * can be mapped via KSEG0.  If all memory is in KSEG0, then we can just
+	 * use the default freelist; otherwise we must use the poolpage freelist.
+	 */
+	if (mips_poolpage_vmfreelist != VM_FREELIST_DEFAULT)
+		return uvm_pagealloc_strat(NULL, 0, NULL, flags,
+		    UVM_PGA_STRAT_ONLY, mips_poolpage_vmfreelist);
+#endif
+	return uvm_pagealloc(NULL, 0, NULL, flags);
+}
+
 vaddr_t
 mips_pmap_map_poolpage(paddr_t pa)
 {
@@ -2277,11 +2154,11 @@
 	KASSERT(mips3_xkphys_cached);
 	va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
 #else
-	if (pa <= MIPS_PHYS_MASK)
-		va = MIPS_PHYS_TO_KSEG0(pa);
-	else
+	if (pa > MIPS_PHYS_MASK)
 		panic("mips_pmap_map_poolpage: "
 		    "pa #%"PRIxPADDR" can not be mapped into KSEG0", pa);
+
+	va = MIPS_PHYS_TO_KSEG0(pa);
 #endif
 #if defined(MIPS3_PLUS)
 	if (mips_cache_virtual_alias) {

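The pool hooks now funnel through mips_pmap_alloc_poolpage(), so a
pool backend such as pmap_pv_page_alloc() above never sees a page a
32-bit kernel cannot reach through KSEG0.  The shape every such
backend takes (sketch only):

	struct vm_page * const pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
	if (pg == NULL)
		return NULL;
	/* Direct-mapped (KSEG0 or XKPHYS), so no pmap_enter() is needed. */
	return (void *)PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
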
Index: src/sys/arch/mips/mips/trap.c
diff -u src/sys/arch/mips/mips/trap.c:1.217.12.11 src/sys/arch/mips/mips/trap.c:1.217.12.12
--- src/sys/arch/mips/mips/trap.c:1.217.12.11	Sat Nov 14 21:52:08 2009
+++ src/sys/arch/mips/mips/trap.c	Wed Dec 30 04:51:26 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: trap.c,v 1.217.12.11 2009/11/14 21:52:08 matt Exp $	*/
+/*	$NetBSD: trap.c,v 1.217.12.12 2009/12/30 04:51:26 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -78,7 +78,7 @@
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.217.12.11 2009/11/14 21:52:08 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.217.12.12 2009/12/30 04:51:26 matt Exp $");
 
 #include "opt_cputype.h"	/* which mips CPU levels do we support? */
 #include "opt_ddb.h"
@@ -308,9 +308,8 @@
 		pmap_t pmap;
 
 		pmap  = p->p_vmspace->vm_map.pmap;
-		if (!(pte = pmap_segmap(pmap, vaddr)))
+		if (!(pte = pmap_pte_lookup(pmap, vaddr)))
 			panic("utlbmod: invalid segmap");
-		pte += (vaddr >> PGSHIFT) & (NPTEPG - 1);
 		entry = pte->pt_entry;
 		if (!mips_pg_v(entry) || (entry & mips_pg_m_bit()))
 			panic("utlbmod: invalid pte");
@@ -381,7 +380,7 @@
 #ifdef VMFAULT_TRACE
 		printf(
 		    "uvm_fault(%p (pmap %p), %#"PRIxVADDR
-		    " (0x%x), %d) -> %d at pc %#"PRIxVADDR"\n",
+		    " (%"PRIxVADDR"), %d) -> %d at pc %#"PRIxVADDR"\n",
 		    map, vm->vm_map.pmap, va, vaddr, ftype, rv, opc);
 #endif
 		/*

Added files:

Index: src/sys/arch/mips/mips/pmap_segtab.c
diff -u /dev/null src/sys/arch/mips/mips/pmap_segtab.c:1.1.2.1
--- /dev/null	Wed Dec 30 04:51:27 2009
+++ src/sys/arch/mips/mips/pmap_segtab.c	Wed Dec 30 04:51:26 2009
@@ -0,0 +1,395 @@
+/*	$NetBSD: pmap_segtab.c,v 1.1.2.1 2009/12/30 04:51:26 matt Exp $	*/
+
+/*-
+ * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center and by Chris G. Demetriou.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1992, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
+ */
+
+#include <sys/cdefs.h>
+
+__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.1.2.1 2009/12/30 04:51:26 matt Exp $");
+
+/*
+ *	Manages physical address maps.
+ *
+ *	In addition to hardware address maps, this
+ *	module is called upon to provide software-use-only
+ *	maps which may or may not be stored in the same
+ *	form as hardware maps.  These pseudo-maps are
+ *	used to store intermediate results from copy
+ *	operations to and from address spaces.
+ *
+ *	Since the information managed by this module is
+ *	also stored by the logical address mapping module,
+ *	this module may throw away valid virtual-to-physical
+ *	mappings at almost any time.  However, invalidations
+ *	of virtual-to-physical mappings must be done as
+ *	requested.
+ *
+ *	In order to cope with hardware architectures which
+ *	make virtual-to-physical map invalidates expensive,
+ *	this module may delay invalidate or reduced protection
+ *	operations until such time as they are actually
+ *	necessary.  This module is given full information as
+ *	to which processors are currently using which maps,
+ *	and to when physical maps must be made correct.
+ */
+
+/* XXX simonb 2002/02/26
+ *
+ * MIPS3_PLUS is used to conditionally compile the r4k MMU support.
+ * This is bogus - for example, some IDT MIPS-II CPUs have r4k style
+ * MMUs (and 32-bit ones at that).
+ *
+ * On the other hand, it's not likely that we'll ever support the R6000
+ * (is it?), so maybe that can be an "if MIPS2 or greater" check.
+ *
+ * Also along these lines are using totally separate functions for
+ * r3k-style and r4k-style MMUs and removing all the MIPS_HAS_R4K_MMU
+ * checks in the current functions.
+ *
+ * These warnings probably apply to other files under sys/arch/mips.
+ */
+
+#include "opt_sysv.h"
+#include "opt_cputype.h"
+#include "opt_mips_cache.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/buf.h>
+#include <sys/pool.h>
+#include <sys/mutex.h>
+#include <sys/atomic.h>
+#ifdef SYSVSHM
+#include <sys/shm.h>
+#endif
+#include <sys/socketvar.h>	/* XXX: for sock_loan_thresh */
+
+#include <uvm/uvm.h>
+
+#include <mips/cache.h>
+#include <mips/cpuregs.h>
+#include <mips/locore.h>
+#include <mips/pte.h>
+
+CTASSERT(NBPG >= sizeof(struct segtab));
+
+struct segtab	*free_segtab;		/* free list kept locally */
+
+static inline struct vm_page *
+pmap_pte_pagealloc(void)
+{
+	return mips_pmap_alloc_poolpage(UVM_PGA_ZERO|UVM_PGA_USERESERVE);
+}
+
+static inline pt_entry_t * 
+pmap_segmap(struct pmap *pmap, vaddr_t va)
+{
+	struct segtab *stp = pmap->pm_segtab;
+	return stp->seg_tab[va >> SEGSHIFT];
+}
+
+pt_entry_t *
+pmap_pte_lookup(pmap_t pmap, vaddr_t va)
+{
+	pt_entry_t *pte = pmap_segmap(pmap, va);
+	if (pte == NULL)
+		return NULL;
+
+	return pte + ((va >> PGSHIFT) & (NPTEPG - 1));
+}
+
+/*
+ *	Allocate a top-level segment table for the given pmap, taking
+ *	one from the local freelist if possible, otherwise carving a
+ *	freshly allocated page into segtabs.
+ */
+void
+pmap_segtab_alloc(pmap_t pmap)
+{
+	struct segtab *stp;
+
+	for (;;) {
+		stp = free_segtab;
+		if (stp == NULL)
+			break;
+		struct segtab * const next_stp =
+		    (struct segtab *)stp->seg_tab[0];
+		if (stp == atomic_cas_ptr(&free_segtab, stp, next_stp))
+			break;
+	}
+	
+	if (__predict_true(stp != NULL)) {
+		stp->seg_tab[0] = NULL;
+	} else {
+		paddr_t stp_pa;
+
+		for (;;) {
+			struct vm_page * const stp_pg = pmap_pte_pagealloc();
+			if (stp_pg != NULL) {
+				stp_pa = VM_PAGE_TO_PHYS(stp_pg);
+				break;
+			}
+			/*
+			 * XXX What else can we do?  Could we
+			 * XXX deadlock here?
+			 */
+			uvm_wait("pmap_create");
+		}
+
+#ifdef _LP64
+		KASSERT(mips3_xkphys_cached);
+		stp = (struct segtab *)MIPS_PHYS_TO_XKPHYS_CACHED(stp_pa);
+#else
+		stp = (struct segtab *)MIPS_PHYS_TO_KSEG0(stp_pa);
+#endif
+		const size_t n = NBPG / sizeof(struct segtab);
+		if (n > 1) {
+			/*
+			 * link all the segtabs in this page together
+			 */
+			for (size_t i = 1; i < n - 1; i++) {
+				stp[i].seg_tab[0] = (void *)&stp[i+1];
+			}
+			/*
+			 * Now link the new segtabs into the free segtab list.
+			 */
+			for (;;) {
+				void *tmp = free_segtab;
+				stp[n-1].seg_tab[0] = tmp;
+				if (tmp == atomic_cas_ptr(&free_segtab, tmp, stp+1))
+					break;
+			}
+		}
+	}
+
+#ifdef PARANOIADIAG
+	for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
+		if (stp->seg_tab[i] != 0)
+			panic("%s: pm_segtab.seg_tab[%zu] != 0", __func__, i);
+	}
+	}
+#endif
+
+	pmap->pm_segtab = stp;
+}
+
+/*
+ *	Retire the given physical map from service.
+ *	Should only be called if the map contains
+ *	no valid mappings.
+ */
+void
+pmap_segtab_free(pmap_t pmap)
+{
+	struct segtab *stp = pmap->pm_segtab;
+
+	if (stp == NULL)
+		return;
+
+	for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
+		paddr_t pa;
+		/* get pointer to segment map */
+		pt_entry_t *pte = stp->seg_tab[i];
+		if (pte == NULL)
+			continue;
+#ifdef PARANOIADIAG
+		for (size_t j = 0; j < NPTEPG; j++) {
+			if ((pte + j)->pt_entry)
+				panic("pmap_destroy: segmap not empty");
+		}
+#endif
+
+#ifdef MIPS3_PLUS	/* XXX mmu XXX */
+		/*
+		 * The pica pmap.c flushed the segmap pages here.  I'm
+		 * not sure why, but I suspect it's because the page(s)
+		 * were being accessed by KSEG0 (cached) addresses and
+		 * may cause cache coherency problems when the page
+		 * is reused with KSEG2 (mapped) addresses.  This may
+		 * cause problems on machines without VCED/VCEI.
+		 */
+		if (mips_cache_virtual_alias)
+			mips_dcache_inv_range((vaddr_t)pte, PAGE_SIZE);
+#endif	/* MIPS3_PLUS */
+#ifdef _LP64
+		KASSERT(MIPS_XKPHYS_P(pte));
+		pa = MIPS_XKPHYS_TO_PHYS(pte);
+#else
+		pa = MIPS_KSEG0_TO_PHYS(pte);
+#endif
+		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
+
+		stp->seg_tab[i] = NULL;
+	}
+
+	/*
+	 * Insert the segtab into the segtab freelist.
+	 */
+	for (;;) {
+		void *tmp = free_segtab;
+		stp->seg_tab[0] = tmp;
+		if (tmp == atomic_cas_ptr(&free_segtab, tmp, stp))
+			break;
+	}
+}
+
+/*
+ *	Make a new pmap (vmspace) active for the given process.
+ */
+void
+pmap_segtab_activate(struct lwp *l)
+{
+	if (l == curlwp) {
+		pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
+		segbase = pmap->pm_segtab;	/* XXX needs to be in curcpu() */
+	}
+}
+
+/*
+ *	Act on the given range of addresses from the specified map.
+ *
+ *	It is assumed that the start and end are properly rounded to
+ *	the page size.
+ */
+void
+pmap_pte_process(pmap_t pmap, vaddr_t sva, vaddr_t eva,
+	pte_callback_t callback, uintptr_t flags)
+{
+#if 0
+	printf("%s: %p, %"PRIxVADDR", %"PRIxVADDR", %p, %"PRIxPTR"\n",
+	    __func__, pmap, sva, eva, callback, flags);
+#endif
+	while (sva < eva) {
+		vaddr_t lastseg_va = mips_trunc_seg(sva) + NBSEG;
+		KASSERT(lastseg_va != 0);
+		if (lastseg_va > eva)
+			lastseg_va = eva;
+
+		/*
+		 * If VA belongs to an unallocated segment,
+		 * skip to the next segment boundary.
+		 */
+		pt_entry_t * const pte = pmap_pte_lookup(pmap, sva);
+		if (pte != NULL) {
+			/*
+			 * Callback to deal with the ptes for this segment.
+			 */
+			(*callback)(pmap, sva, lastseg_va, pte, flags);
+		}
+		/*
+		 * In theory we could release pages with no entries,
+		 * but that takes more effort than we want here.
+		 */
+		sva = lastseg_va;
+	}
+}
+
+/*
+ *	Return a pointer for the pte that corresponds to the specified virtual
+ *	address (va) in the target physical map, allocating if needed.
+ */
+pt_entry_t *
+pmap_pte_reserve(pmap_t pmap, vaddr_t va, int flags)
+{
+	struct segtab *stp = pmap->pm_segtab;
+	pt_entry_t *pte;
+
+	pte = pmap_pte_lookup(pmap, va);
+	if (__predict_false(pte == NULL)) {
+		struct vm_page * const pg = pmap_pte_pagealloc();
+		if (pg == NULL) {
+			if (flags & PMAP_CANFAIL)
+				return NULL;
+			panic("%s: cannot allocate page table page "
+			    "for va %" PRIxVADDR, __func__, va);
+		}
+
+		const paddr_t pa = VM_PAGE_TO_PHYS(pg);
+#ifdef _LP64
+		KASSERT(mips3_xkphys_cached);
+		pte = (pt_entry_t *)MIPS_PHYS_TO_XKPHYS_CACHED(pa);
+#else
+		pte = (pt_entry_t *)MIPS_PHYS_TO_KSEG0(pa);
+#endif
+		stp->seg_tab[va >> SEGSHIFT] = pte;
+
+		pte += (va >> PGSHIFT) & (NPTEPG - 1);
+#ifdef PARANOIADIAG
+		for (size_t i = 0; i < NPTEPG; i++) {
+			if ((pte+i)->pt_entry)
+				panic("pmap_enter: new segmap not empty");
+		}
+#endif
+	}
+
+	return pte;
+}

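The segtab freelist above is kept lock-free with atomic_cas_ptr(9),
threading the next pointer through seg_tab[0] of each free segtab.
Reduced to its essentials, the two operations look like this (a sketch
of the pattern, not the code itself):

	/* Pop: retry until the head we read is the head we swap out. */
	struct segtab *stp;
	do {
		stp = free_segtab;
		if (stp == NULL)
			break;
	} while (atomic_cas_ptr(&free_segtab, stp,
	    (struct segtab *)stp->seg_tab[0]) != stp);

	/* Push: point the segtab at the current head, then publish it. */
	void *head;
	do {
		head = free_segtab;
		stp->seg_tab[0] = head;
	} while (atomic_cas_ptr(&free_segtab, head, stp) != head);

This is the usual Treiber-stack pattern, with the usual ABA caveat;
reading stp->seg_tab[0] after losing a race is safe here only because
segtab pages are never returned to UVM.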