Module Name: src Committed By: matt Date: Thu Feb 17 19:27:13 UTC 2011
Modified Files: src/sys/kern: kern_kthread.c src/sys/uvm: uvm_extern.h uvm_glue.c Log Message: Add support for cpu-specific uarea allocation routines. Allows different allocation for user and system lwps. MIPS will use this to map uareas of system lwps using direct-mapped addresses (to reduce the overhead of switching to kernel threads). ibm4xx could use this to map uareas via direct-mapped addresses and avoid the problem of having the kernel stack not in the TLB. To generate a diff of this commit: cvs rdiff -u -r1.30 -r1.31 src/sys/kern/kern_kthread.c cvs rdiff -u -r1.170 -r1.171 src/sys/uvm/uvm_extern.h cvs rdiff -u -r1.147 -r1.148 src/sys/uvm/uvm_glue.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/kern/kern_kthread.c diff -u src/sys/kern/kern_kthread.c:1.30 src/sys/kern/kern_kthread.c:1.31 --- src/sys/kern/kern_kthread.c:1.30 Sun Jun 13 04:13:31 2010 +++ src/sys/kern/kern_kthread.c Thu Feb 17 19:27:13 2011 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_kthread.c,v 1.30 2010/06/13 04:13:31 yamt Exp $ */ +/* $NetBSD: kern_kthread.c,v 1.31 2011/02/17 19:27:13 matt Exp $ */ /*- * Copyright (c) 1998, 1999, 2007, 2009 The NetBSD Foundation, Inc. @@ -31,7 +31,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.30 2010/06/13 04:13:31 yamt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.31 2011/02/17 19:27:13 matt Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -70,8 +70,8 @@ va_list ap; lwp_flags = LWP_DETACHED; - - uaddr = uvm_uarea_alloc(); + + uaddr = uvm_uarea_system_alloc(); if (uaddr == 0) { return ENOMEM; } @@ -88,7 +88,7 @@ error = lwp_create(&lwp0, &proc0, uaddr, lwp_flags, NULL, 0, func, arg, &l, lc); if (error) { - uvm_uarea_free(uaddr); + uvm_uarea_system_free(uaddr); return error; } if (fmt != NULL) { Index: src/sys/uvm/uvm_extern.h diff -u src/sys/uvm/uvm_extern.h:1.170 src/sys/uvm/uvm_extern.h:1.171 --- src/sys/uvm/uvm_extern.h:1.170 Thu Feb 10 14:46:44 2011 +++ src/sys/uvm/uvm_extern.h Thu Feb 17 19:27:13 2011 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_extern.h,v 1.170 2011/02/10 14:46:44 pooka Exp $ */ +/* $NetBSD: uvm_extern.h,v 1.171 2011/02/17 19:27:13 matt Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. 
@@ -623,6 +623,8 @@ __dead void uvm_scheduler(void); vaddr_t uvm_uarea_alloc(void); void uvm_uarea_free(vaddr_t); +vaddr_t uvm_uarea_system_alloc(void); +void uvm_uarea_system_free(vaddr_t); vaddr_t uvm_lwp_getuarea(lwp_t *); void uvm_lwp_setuarea(lwp_t *, vaddr_t); int uvm_vslock(struct vmspace *, void *, size_t, vm_prot_t); Index: src/sys/uvm/uvm_glue.c diff -u src/sys/uvm/uvm_glue.c:1.147 src/sys/uvm/uvm_glue.c:1.148 --- src/sys/uvm/uvm_glue.c:1.147 Wed Feb 2 15:25:27 2011 +++ src/sys/uvm/uvm_glue.c Thu Feb 17 19:27:13 2011 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_glue.c,v 1.147 2011/02/02 15:25:27 chuck Exp $ */ +/* $NetBSD: uvm_glue.c,v 1.148 2011/02/17 19:27:13 matt Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -62,7 +62,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.147 2011/02/02 15:25:27 chuck Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.148 2011/02/17 19:27:13 matt Exp $"); #include "opt_kgdb.h" #include "opt_kstack.h" @@ -238,6 +238,11 @@ #endif static pool_cache_t uvm_uarea_cache; +#if defined(__HAVE_CPU_UAREA_ROUTINES) +static pool_cache_t uvm_uarea_system_cache; +#else +#define uvm_uarea_system_cache uvm_uarea_cache +#endif static void * uarea_poolpage_alloc(struct pool *pp, int flags) @@ -257,6 +262,11 @@ return (void *)va; } #endif +#if defined(__HAVE_CPU_UAREA_ROUTINES) + void *va = cpu_uarea_alloc(false); + if (va) + return (void *)va; +#endif return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz, USPACE_ALIGN, UVM_KMF_WIRED | ((flags & PR_WAITOK) ? 
UVM_KMF_WAITVA : @@ -276,6 +286,10 @@ return; } #endif +#if defined(__HAVE_CPU_UAREA_ROUTINES) + if (cpu_uarea_free(addr)) + return; +#endif uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz, UVM_KMF_WIRED); } @@ -286,6 +300,29 @@ .pa_pagesz = USPACE, }; +#if defined(__HAVE_CPU_UAREA_ROUTINES) +static void * +uarea_system_poolpage_alloc(struct pool *pp, int flags) +{ + void * const va = cpu_uarea_alloc(true); + KASSERT(va != NULL); + return va; +} + +static void +uarea_system_poolpage_free(struct pool *pp, void *addr) +{ + if (!cpu_uarea_free(addr)) + panic("%s: failed to free uarea %p", __func__, addr); +} + +static struct pool_allocator uvm_uarea_system_allocator = { + .pa_alloc = uarea_system_poolpage_alloc, + .pa_free = uarea_system_poolpage_free, + .pa_pagesz = USPACE, +}; +#endif /* __HAVE_CPU_UAREA_ROUTINES */ + void uvm_uarea_init(void) { @@ -304,6 +341,11 @@ uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0, flags, "uarea", &uvm_uarea_allocator, IPL_NONE, NULL, NULL, NULL); + if (uvm_uarea_system_cache != uvm_uarea_cache) { + uvm_uarea_system_cache = pool_cache_init(USPACE, USPACE_ALIGN, + 0, flags, "uareasys", &uvm_uarea_system_allocator, + IPL_NONE, NULL, NULL, NULL); + } } /* @@ -317,6 +359,13 @@ return (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK); } +vaddr_t +uvm_uarea_system_alloc(void) +{ + + return (vaddr_t)pool_cache_get(uvm_uarea_system_cache, PR_WAITOK); +} + /* * uvm_uarea_free: free a u-area */ @@ -328,6 +377,13 @@ pool_cache_put(uvm_uarea_cache, (void *)uaddr); } +void +uvm_uarea_system_free(vaddr_t uaddr) +{ + + pool_cache_put(uvm_uarea_system_cache, (void *)uaddr); +} + vaddr_t uvm_lwp_getuarea(lwp_t *l) { @@ -374,8 +430,12 @@ uvm_lwp_exit(struct lwp *l) { vaddr_t va = uvm_lwp_getuarea(l); + bool system = (l->l_flag & LW_SYSTEM) != 0; - uvm_uarea_free(va); + if (system) + uvm_uarea_system_free(va); + else + uvm_uarea_free(va); #ifdef DIAGNOSTIC uvm_lwp_setuarea(l, (vaddr_t)NULL); #endif