Module Name:    src
Committed By:   ad
Date:           Sun Sep 10 14:45:53 UTC 2023

Modified Files:
        src/common/lib/libc/gen: radixtree.c
        src/sys/kern: init_main.c kern_descrip.c kern_lwp.c kern_mutex_obj.c
            kern_resource.c kern_rwlock_obj.c kern_turnstile.c subr_kcpuset.c
            vfs_cwd.c vfs_init.c vfs_lockf.c
        src/sys/rump/librump/rumpkern: rump.c
        src/sys/rump/librump/rumpvfs: rump_vfs.c
        src/sys/sys: namei.src
        src/sys/uvm: uvm_init.c uvm_map.c uvm_readahead.c

Log Message:
- Do away with the separate pool_caches for some kernel objects that have no
  special requirements and use the general-purpose allocator (kmem(9)) instead;
  a sketch of the conversion pattern follows below.  On one of my test systems
  this gives a small (~1%) but repeatable reduction in system time during
  builds, presumably because it shrinks the kernel's cache and memory-bandwidth
  footprint a little.
- vfs_lockf: cache a pointer to the uidinfo and put the mutex in the data
  segment.
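
For readers skimming the diff, here is a minimal, hedged sketch of the
conversion pattern applied throughout the commit.  "struct foo" and the
foo_* names are hypothetical and not part of the change; the sketch only
shows a dedicated pool_cache being replaced by kmem_alloc(9)/kmem_free(9),
with any one-time construction (mutexes, condvars) moved from the cache
ctor/dtor into the alloc/free paths.

/*
 * Hypothetical example only: "struct foo" and the foo_* functions do not
 * exist in the tree; they illustrate the pool_cache -> kmem(9) conversion.
 */
#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>

struct foo {
	kmutex_t	f_lock;
	u_int		f_refcnt;
};

#if 0	/* Before: a dedicated per-type cache set up at boot. */
static pool_cache_t foo_cache;

void
foo_init(void)
{
	/* Some caches also passed a ctor/dtor here for one-time init. */
	foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
	    0, 0, "foopl", NULL, IPL_NONE, NULL, NULL, NULL);
}
#endif

/* After: no per-type cache; initialize the object where it is allocated. */
struct foo *
foo_alloc(void)
{
	struct foo *f;

	/* KM_SLEEP never fails; use KM_NOSLEEP where sleeping is not allowed. */
	f = kmem_alloc(sizeof(*f), KM_SLEEP);
	mutex_init(&f->f_lock, MUTEX_DEFAULT, IPL_NONE);
	f->f_refcnt = 1;
	return f;
}

void
foo_free(struct foo *f)
{

	mutex_destroy(&f->f_lock);
	/* kmem_free() must be given the same size passed to kmem_alloc(). */
	kmem_free(f, sizeof(*f));
}

The vfs_lockf change follows the same idea but additionally stores the
struct uidinfo pointer in the lock itself, so lf_free() can decrement
ui_lockcnt without a uid_find() lookup, and it replaces the dynamically
allocated lockf_lock with a __cacheline_aligned kmutex_t in the data
segment.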


To generate a diff of this commit:
cvs rdiff -u -r1.29 -r1.30 src/common/lib/libc/gen/radixtree.c
cvs rdiff -u -r1.543 -r1.544 src/sys/kern/init_main.c
cvs rdiff -u -r1.258 -r1.259 src/sys/kern/kern_descrip.c
cvs rdiff -u -r1.252 -r1.253 src/sys/kern/kern_lwp.c
cvs rdiff -u -r1.11 -r1.12 src/sys/kern/kern_mutex_obj.c
cvs rdiff -u -r1.191 -r1.192 src/sys/kern/kern_resource.c
cvs rdiff -u -r1.9 -r1.10 src/sys/kern/kern_rwlock_obj.c
cvs rdiff -u -r1.46 -r1.47 src/sys/kern/kern_turnstile.c
cvs rdiff -u -r1.16 -r1.17 src/sys/kern/subr_kcpuset.c
cvs rdiff -u -r1.8 -r1.9 src/sys/kern/vfs_cwd.c
cvs rdiff -u -r1.61 -r1.62 src/sys/kern/vfs_init.c
cvs rdiff -u -r1.78 -r1.79 src/sys/kern/vfs_lockf.c
cvs rdiff -u -r1.357 -r1.358 src/sys/rump/librump/rumpkern/rump.c
cvs rdiff -u -r1.94 -r1.95 src/sys/rump/librump/rumpvfs/rump_vfs.c
cvs rdiff -u -r1.61 -r1.62 src/sys/sys/namei.src
cvs rdiff -u -r1.56 -r1.57 src/sys/uvm/uvm_init.c
cvs rdiff -u -r1.407 -r1.408 src/sys/uvm/uvm_map.c
cvs rdiff -u -r1.13 -r1.14 src/sys/uvm/uvm_readahead.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Index: src/common/lib/libc/gen/radixtree.c
diff -u src/common/lib/libc/gen/radixtree.c:1.29 src/common/lib/libc/gen/radixtree.c:1.30
--- src/common/lib/libc/gen/radixtree.c:1.29	Mon Mar  6 21:39:06 2023
+++ src/common/lib/libc/gen/radixtree.c	Sun Sep 10 14:45:52 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: radixtree.c,v 1.29 2023/03/06 21:39:06 andvar Exp $	*/
+/*	$NetBSD: radixtree.c,v 1.30 2023/09/10 14:45:52 ad Exp $	*/
 
 /*-
  * Copyright (c)2011,2012,2013 YAMAMOTO Takashi,
@@ -43,7 +43,7 @@
  *
  * Intermediate nodes are automatically allocated and freed internally and
  * basically users don't need to care about them.  The allocation is done via
- * pool_cache_get(9) for _KERNEL, malloc(3) for userland, and alloc() for
+ * kmem_zalloc(9) for _KERNEL, malloc(3) for userland, and alloc() for
  * _STANDALONE environment.  Only radix_tree_insert_node function can allocate
  * memory for intermediate nodes and thus can fail for ENOMEM.
  *
@@ -112,17 +112,17 @@
 #include <sys/cdefs.h>
 
 #if defined(_KERNEL) || defined(_STANDALONE)
-__KERNEL_RCSID(0, "$NetBSD: radixtree.c,v 1.29 2023/03/06 21:39:06 andvar Exp $");
+__KERNEL_RCSID(0, "$NetBSD: radixtree.c,v 1.30 2023/09/10 14:45:52 ad Exp $");
 #include <sys/param.h>
 #include <sys/errno.h>
-#include <sys/pool.h>
+#include <sys/kmem.h>
 #include <sys/radixtree.h>
 #include <lib/libkern/libkern.h>
 #if defined(_STANDALONE)
 #include <lib/libsa/stand.h>
 #endif /* defined(_STANDALONE) */
 #else /* defined(_KERNEL) || defined(_STANDALONE) */
-__RCSID("$NetBSD: radixtree.c,v 1.29 2023/03/06 21:39:06 andvar Exp $");
+__RCSID("$NetBSD: radixtree.c,v 1.30 2023/09/10 14:45:52 ad Exp $");
 #include <assert.h>
 #include <errno.h>
 #include <stdbool.h>
@@ -303,18 +303,6 @@ radix_tree_node_init(struct radix_tree_n
 }
 
 #if defined(_KERNEL)
-pool_cache_t radix_tree_node_cache __read_mostly;
-
-static int
-radix_tree_node_ctor(void *dummy, void *item, int flags)
-{
-	struct radix_tree_node *n = item;
-
-	KASSERT(dummy == NULL);
-	radix_tree_node_init(n);
-	return 0;
-}
-
 /*
  * radix_tree_init:
  *
@@ -325,10 +313,7 @@ void
 radix_tree_init(void)
 {
 
-	radix_tree_node_cache = pool_cache_init(sizeof(struct radix_tree_node),
-	    coherency_unit, 0, PR_LARGECACHE, "radixnode", NULL, IPL_NONE,
-	    radix_tree_node_ctor, NULL, NULL);
-	KASSERT(radix_tree_node_cache != NULL);
+	/* nothing right now */
 }
 
 /*
@@ -346,10 +331,10 @@ radix_tree_await_memory(void)
 	int i;
 
 	for (i = 0; i < __arraycount(nodes); i++) {
-		nodes[i] = pool_cache_get(radix_tree_node_cache, PR_WAITOK);
+		nodes[i] = kmem_alloc(sizeof(struct radix_tree_node), KM_SLEEP);
 	}
 	while (--i >= 0) {
-		pool_cache_put(radix_tree_node_cache, nodes[i]);
+		kmem_free(nodes[i], sizeof(struct radix_tree_node));
 	}
 }
 
@@ -424,11 +409,10 @@ radix_tree_alloc_node(void)
 
 #if defined(_KERNEL)
 	/*
-	 * note that pool_cache_get can block.
+	 * note that kmem_alloc can block.
 	 */
-	n = pool_cache_get(radix_tree_node_cache, PR_NOWAIT);
-#else /* defined(_KERNEL) */
-#if defined(_STANDALONE)
+	n = kmem_alloc(sizeof(struct radix_tree_node), KM_SLEEP);
+#elif defined(_STANDALONE)
 	n = alloc(sizeof(*n));
 #else /* defined(_STANDALONE) */
 	n = malloc(sizeof(*n));
@@ -436,7 +420,6 @@ radix_tree_alloc_node(void)
 	if (n != NULL) {
 		radix_tree_node_init(n);
 	}
-#endif /* defined(_KERNEL) */
 	KASSERT(n == NULL || radix_tree_sum_node(n) == 0);
 	return n;
 }
@@ -447,7 +430,7 @@ radix_tree_free_node(struct radix_tree_n
 
 	KASSERT(radix_tree_sum_node(n) == 0);
 #if defined(_KERNEL)
-	pool_cache_put(radix_tree_node_cache, n);
+	kmem_free(n, sizeof(struct radix_tree_node));
 #elif defined(_STANDALONE)
 	dealloc(n, sizeof(*n));
 #else

Index: src/sys/kern/init_main.c
diff -u src/sys/kern/init_main.c:1.543 src/sys/kern/init_main.c:1.544
--- src/sys/kern/init_main.c:1.543	Sat Sep  2 17:44:59 2023
+++ src/sys/kern/init_main.c	Sun Sep 10 14:45:52 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: init_main.c,v 1.543 2023/09/02 17:44:59 riastradh Exp $	*/
+/*	$NetBSD: init_main.c,v 1.544 2023/09/10 14:45:52 ad Exp $	*/
 
 /*-
- * Copyright (c) 2008, 2009, 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2008, 2009, 2019, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -97,7 +97,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.543 2023/09/02 17:44:59 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.544 2023/09/10 14:45:52 ad Exp $");
 
 #include "opt_cnmagic.h"
 #include "opt_ddb.h"
@@ -327,9 +327,6 @@ main(void)
 
 	percpu_init();
 
-	/* Initialize lock caches. */
-	mutex_obj_init();
-
 	/* Initialize radix trees (used by numerous subsystems). */
 	radix_tree_init();
 
@@ -503,14 +500,10 @@ main(void)
 	fstrans_init();
 
 	vfsinit();
-	lf_init();
 
 	/* Initialize the file descriptor system. */
 	fd_sys_init();
 
-	/* Initialize cwd structures */
-	cwd_sys_init();
-
 	/* Initialize kqueue. */
 	kqueue_init();
 

Index: src/sys/kern/kern_descrip.c
diff -u src/sys/kern/kern_descrip.c:1.258 src/sys/kern/kern_descrip.c:1.259
--- src/sys/kern/kern_descrip.c:1.258	Sun Sep 10 14:44:08 2023
+++ src/sys/kern/kern_descrip.c	Sun Sep 10 14:45:52 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: kern_descrip.c,v 1.258 2023/09/10 14:44:08 ad Exp $	*/
+/*	$NetBSD: kern_descrip.c,v 1.259 2023/09/10 14:45:52 ad Exp $	*/
 
 /*-
- * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
+ * Copyright (c) 2008, 2009, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -70,7 +70,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_descrip.c,v 1.258 2023/09/10 14:44:08 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_descrip.c,v 1.259 2023/09/10 14:45:52 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -106,12 +106,11 @@ kmutex_t		filelist_lock	__cacheline_alig
 
 static pool_cache_t	filedesc_cache	__read_mostly;
 static pool_cache_t	file_cache	__read_mostly;
-static pool_cache_t	fdfile_cache	__read_mostly;
 
 static int	file_ctor(void *, void *, int);
 static void	file_dtor(void *, void *);
-static int	fdfile_ctor(void *, void *, int);
-static void	fdfile_dtor(void *, void *);
+static void	fdfile_ctor(fdfile_t *);
+static void	fdfile_dtor(fdfile_t *);
 static int	filedesc_ctor(void *, void *, int);
 static void	filedesc_dtor(void *, void *);
 static int	filedescopen(dev_t, int, int, lwp_t *);
@@ -157,11 +156,6 @@ fd_sys_init(void)
 	    0, "file", NULL, IPL_NONE, file_ctor, file_dtor, NULL);
 	KASSERT(file_cache != NULL);
 
-	fdfile_cache = pool_cache_init(sizeof(fdfile_t), coherency_unit, 0,
-	    PR_LARGECACHE, "fdfile", NULL, IPL_NONE, fdfile_ctor, fdfile_dtor,
-	    NULL);
-	KASSERT(fdfile_cache != NULL);
-
 	filedesc_cache = pool_cache_init(sizeof(filedesc_t), coherency_unit,
 	    0, 0, "filedesc", NULL, IPL_NONE, filedesc_ctor, filedesc_dtor,
 	    NULL);
@@ -788,7 +782,8 @@ fd_dup2(file_t *fp, unsigned newfd, int 
 	while (newfd >= atomic_load_consume(&fdp->fd_dt)->dt_nfiles) {
 		fd_tryexpand(curproc);
 	}
-	ff = pool_cache_get(fdfile_cache, PR_WAITOK);
+	ff = kmem_alloc(sizeof(*ff), KM_SLEEP);
+	fdfile_ctor(ff);
 
 	/*
 	 * If there is already a file open, close it.  If the file is
@@ -824,7 +819,8 @@ fd_dup2(file_t *fp, unsigned newfd, int 
 	/* Slot is now allocated.  Insert copy of the file. */
 	fd_affix(curproc, fp, newfd);
 	if (ff != NULL) {
-		pool_cache_put(fdfile_cache, ff);
+		cv_destroy(&ff->ff_closing);
+		kmem_free(ff, sizeof(*ff));
 	}
 	return 0;
 }
@@ -875,6 +871,8 @@ closef(file_t *fp)
 
 /*
  * Allocate a file descriptor for the process.
+ *
+ * Future idea for experimentation: replace all of this with radixtree.
  */
 int
 fd_alloc(proc_t *p, int want, int *result)
@@ -896,6 +894,7 @@ fd_alloc(proc_t *p, int want, int *resul
 	KASSERT(dt->dt_ff[0] == (fdfile_t *)fdp->fd_dfdfile[0]);
 	lim = uimin((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles);
 	last = uimin(dt->dt_nfiles, lim);
+
 	for (;;) {
 		if ((i = want) < fdp->fd_freefile)
 			i = fdp->fd_freefile;
@@ -920,7 +919,8 @@ fd_alloc(proc_t *p, int want, int *resul
 		}
 		if (dt->dt_ff[i] == NULL) {
 			KASSERT(i >= NDFDFILE);
-			dt->dt_ff[i] = pool_cache_get(fdfile_cache, PR_WAITOK);
+			dt->dt_ff[i] = kmem_alloc(sizeof(fdfile_t), KM_SLEEP);
+			fdfile_ctor(dt->dt_ff[i]);
 		}
 		KASSERT(dt->dt_ff[i]->ff_file == NULL);
 		fd_used(fdp, i);
@@ -1267,21 +1267,17 @@ file_dtor(void *arg, void *obj)
 	mutex_destroy(&fp->f_lock);
 }
 
-static int
-fdfile_ctor(void *arg, void *obj, int flags)
+static void
+fdfile_ctor(fdfile_t *ff)
 {
-	fdfile_t *ff = obj;
 
 	memset(ff, 0, sizeof(*ff));
 	cv_init(&ff->ff_closing, "fdclose");
-
-	return 0;
 }
 
 static void
-fdfile_dtor(void *arg, void *obj)
+fdfile_dtor(fdfile_t *ff)
 {
-	fdfile_t *ff = obj;
 
 	cv_destroy(&ff->ff_closing);
 }
@@ -1367,8 +1363,7 @@ filedesc_ctor(void *arg, void *obj, int 
 
 	CTASSERT(sizeof(fdp->fd_dfdfile[0]) >= sizeof(fdfile_t));
 	for (i = 0, ffp = fdp->fd_dt->dt_ff; i < NDFDFILE; i++, ffp++) {
-		*ffp = (fdfile_t *)fdp->fd_dfdfile[i];
-		(void)fdfile_ctor(NULL, fdp->fd_dfdfile[i], PR_WAITOK);
+		fdfile_ctor(*ffp = (fdfile_t *)fdp->fd_dfdfile[i]);
 	}
 
 	return 0;
@@ -1381,7 +1376,7 @@ filedesc_dtor(void *arg, void *obj)
 	int i;
 
 	for (i = 0; i < NDFDFILE; i++) {
-		fdfile_dtor(NULL, fdp->fd_dfdfile[i]);
+		fdfile_dtor((fdfile_t *)fdp->fd_dfdfile[i]);
 	}
 
 	mutex_destroy(&fdp->fd_lock);
@@ -1516,7 +1511,8 @@ fd_copy(void)
 
 		/* Allocate an fdfile_t to represent it. */
 		if (i >= NDFDFILE) {
-			ff2 = pool_cache_get(fdfile_cache, PR_WAITOK);
+			ff2 = kmem_alloc(sizeof(*ff2), KM_SLEEP);
+			fdfile_ctor(ff2);
 			*nffp = ff2;
 		} else {
 			ff2 = newdt->dt_ff[i];
@@ -1605,7 +1601,8 @@ fd_free(void)
 		KASSERT(!ff->ff_exclose);
 		KASSERT(!ff->ff_allocated);
 		if (fd >= NDFDFILE) {
-			pool_cache_put(fdfile_cache, ff);
+			cv_destroy(&ff->ff_closing);
+			kmem_free(ff, sizeof(*ff));
 			dt->dt_ff[fd] = NULL;
 		}
 	}

Index: src/sys/kern/kern_lwp.c
diff -u src/sys/kern/kern_lwp.c:1.252 src/sys/kern/kern_lwp.c:1.253
--- src/sys/kern/kern_lwp.c:1.252	Sun Apr  9 09:18:09 2023
+++ src/sys/kern/kern_lwp.c	Sun Sep 10 14:45:52 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: kern_lwp.c,v 1.252 2023/04/09 09:18:09 riastradh Exp $	*/
+/*	$NetBSD: kern_lwp.c,v 1.253 2023/09/10 14:45:52 ad Exp $	*/
 
 /*-
- * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020
+ * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020, 2023
  *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
@@ -217,7 +217,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.252 2023/04/09 09:18:09 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.253 2023/09/10 14:45:52 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -397,7 +397,8 @@ lwp_ctor(void *arg, void *obj, int flags
 	l->l_stat = LSIDL;
 	l->l_cpu = curcpu();
 	l->l_mutex = l->l_cpu->ci_schedstate.spc_lwplock;
-	l->l_ts = pool_get(&turnstile_pool, flags);
+	l->l_ts = kmem_alloc(sizeof(*l->l_ts), flags == PR_WAITOK ?
+	    KM_SLEEP : KM_NOSLEEP);
 
 	if (l->l_ts == NULL) {
 		return ENOMEM;
@@ -422,7 +423,7 @@ lwp_dtor(void *arg, void *obj)
 	 * so if it comes up just drop it quietly and move on.
 	 */
 	if (l->l_ts != &turnstile0)
-		pool_put(&turnstile_pool, l->l_ts);
+		kmem_free(l->l_ts, sizeof(*l->l_ts));
 }
 
 /*

Index: src/sys/kern/kern_mutex_obj.c
diff -u src/sys/kern/kern_mutex_obj.c:1.11 src/sys/kern/kern_mutex_obj.c:1.12
--- src/sys/kern/kern_mutex_obj.c:1.11	Fri Feb 24 11:02:27 2023
+++ src/sys/kern/kern_mutex_obj.c	Sun Sep 10 14:45:52 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: kern_mutex_obj.c,v 1.11 2023/02/24 11:02:27 riastradh Exp $	*/
+/*	$NetBSD: kern_mutex_obj.c,v 1.12 2023/09/10 14:45:52 ad Exp $	*/
 
 /*-
- * Copyright (c) 2008, 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2008, 2019, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -30,12 +30,12 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_mutex_obj.c,v 1.11 2023/02/24 11:02:27 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_mutex_obj.c,v 1.12 2023/09/10 14:45:52 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
 #include <sys/mutex.h>
-#include <sys/pool.h>
+#include <sys/kmem.h>
 
 /* Mutex cache */
 #define	MUTEX_OBJ_MAGIC	0x5aa3c85d
@@ -43,41 +43,10 @@ struct kmutexobj {
 	kmutex_t	mo_lock;
 	u_int		mo_magic;
 	u_int		mo_refcnt;
+	uint8_t		mo_pad[COHERENCY_UNIT - sizeof(kmutex_t) -
+	    sizeof(u_int) * 2];
 };
 
-static int	mutex_obj_ctor(void *, void *, int);
-
-static pool_cache_t	mutex_obj_cache		__read_mostly;
-
-/*
- * mutex_obj_init:
- *
- *	Initialize the mutex object store.
- */
-void
-mutex_obj_init(void)
-{
-
-	mutex_obj_cache = pool_cache_init(sizeof(struct kmutexobj),
-	    coherency_unit, 0, 0, "mutex", NULL, IPL_NONE, mutex_obj_ctor,
-	    NULL, NULL);
-}
-
-/*
- * mutex_obj_ctor:
- *
- *	Initialize a new lock for the cache.
- */
-static int
-mutex_obj_ctor(void *arg, void *obj, int flags)
-{
-	struct kmutexobj * mo = obj;
-
-	mo->mo_magic = MUTEX_OBJ_MAGIC;
-
-	return 0;
-}
-
 /*
  * mutex_obj_alloc:
  *
@@ -88,9 +57,11 @@ mutex_obj_alloc(kmutex_type_t type, int 
 {
 	struct kmutexobj *mo;
 
-	mo = pool_cache_get(mutex_obj_cache, PR_WAITOK);
+	mo = kmem_alloc(sizeof(*mo), KM_SLEEP);
+	KASSERT(ALIGNED_POINTER(mo, coherency_unit));
 	_mutex_init(&mo->mo_lock, type, ipl,
 	    (uintptr_t)__builtin_return_address(0));
+	mo->mo_magic = MUTEX_OBJ_MAGIC;
 	mo->mo_refcnt = 1;
 
 	return (kmutex_t *)mo;
@@ -106,10 +77,12 @@ mutex_obj_tryalloc(kmutex_type_t type, i
 {
 	struct kmutexobj *mo;
 
-	mo = pool_cache_get(mutex_obj_cache, PR_NOWAIT);
+	mo = kmem_alloc(sizeof(*mo), KM_NOSLEEP);
+	KASSERT(ALIGNED_POINTER(mo, coherency_unit));
 	if (__predict_true(mo != NULL)) {
 		_mutex_init(&mo->mo_lock, type, ipl,
 		    (uintptr_t)__builtin_return_address(0));
+		mo->mo_magic = MUTEX_OBJ_MAGIC;
 		mo->mo_refcnt = 1;
 	}
 
@@ -161,7 +134,7 @@ mutex_obj_free(kmutex_t *lock)
 	}
 	membar_acquire();
 	mutex_destroy(&mo->mo_lock);
-	pool_cache_put(mutex_obj_cache, mo);
+	kmem_free(mo, sizeof(*mo));
 	return true;
 }
 

Index: src/sys/kern/kern_resource.c
diff -u src/sys/kern/kern_resource.c:1.191 src/sys/kern/kern_resource.c:1.192
--- src/sys/kern/kern_resource.c:1.191	Sat Jul  8 20:02:10 2023
+++ src/sys/kern/kern_resource.c	Sun Sep 10 14:45:52 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_resource.c,v 1.191 2023/07/08 20:02:10 riastradh Exp $	*/
+/*	$NetBSD: kern_resource.c,v 1.192 2023/09/10 14:45:52 ad Exp $	*/
 
 /*-
  * Copyright (c) 1982, 1986, 1991, 1993
@@ -37,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.191 2023/07/08 20:02:10 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.192 2023/09/10 14:45:52 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -65,9 +65,6 @@ __KERNEL_RCSID(0, "$NetBSD: kern_resourc
 rlim_t			maxdmap = MAXDSIZ;
 rlim_t			maxsmap = MAXSSIZ;
 
-static pool_cache_t	plimit_cache	__read_mostly;
-static pool_cache_t	pstats_cache	__read_mostly;
-
 static kauth_listener_t	resource_listener;
 static struct sysctllog	*proc_sysctllog;
 
@@ -141,11 +138,6 @@ void
 resource_init(void)
 {
 
-	plimit_cache = pool_cache_init(sizeof(struct plimit), 0, 0, 0,
-	    "plimitpl", NULL, IPL_NONE, NULL, NULL, NULL);
-	pstats_cache = pool_cache_init(sizeof(struct pstats), 0, 0, 0,
-	    "pstatspl", NULL, IPL_NONE, NULL, NULL, NULL);
-
 	resource_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
 	    resource_listener_cb, NULL);
 
@@ -690,7 +682,7 @@ lim_copy(struct plimit *lim)
 	char *corename;
 	size_t alen, len;
 
-	newlim = pool_cache_get(plimit_cache, PR_WAITOK);
+	newlim = kmem_alloc(sizeof(*newlim), KM_SLEEP);
 	mutex_init(&newlim->pl_lock, MUTEX_DEFAULT, IPL_NONE);
 	newlim->pl_writeable = false;
 	newlim->pl_refcnt = 1;
@@ -811,7 +803,7 @@ lim_free(struct plimit *lim)
 		}
 		sv_lim = lim->pl_sv_limit;
 		mutex_destroy(&lim->pl_lock);
-		pool_cache_put(plimit_cache, lim);
+		kmem_free(lim, sizeof(*lim));
 	} while ((lim = sv_lim) != NULL);
 }
 
@@ -821,7 +813,7 @@ pstatscopy(struct pstats *ps)
 	struct pstats *nps;
 	size_t len;
 
-	nps = pool_cache_get(pstats_cache, PR_WAITOK);
+	nps = kmem_alloc(sizeof(*nps), KM_SLEEP);
 
 	len = (char *)&nps->pstat_endzero - (char *)&nps->pstat_startzero;
 	memset(&nps->pstat_startzero, 0, len);
@@ -836,7 +828,7 @@ void
 pstatsfree(struct pstats *ps)
 {
 
-	pool_cache_put(pstats_cache, ps);
+	kmem_free(ps, sizeof(*ps));
 }
 
 /*

Index: src/sys/kern/kern_rwlock_obj.c
diff -u src/sys/kern/kern_rwlock_obj.c:1.9 src/sys/kern/kern_rwlock_obj.c:1.10
--- src/sys/kern/kern_rwlock_obj.c:1.9	Fri Feb 24 11:02:27 2023
+++ src/sys/kern/kern_rwlock_obj.c	Sun Sep 10 14:45:52 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: kern_rwlock_obj.c,v 1.9 2023/02/24 11:02:27 riastradh Exp $	*/
+/*	$NetBSD: kern_rwlock_obj.c,v 1.10 2023/09/10 14:45:52 ad Exp $	*/
 
 /*-
- * Copyright (c) 2008, 2009, 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2008, 2009, 2019, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -30,11 +30,11 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_rwlock_obj.c,v 1.9 2023/02/24 11:02:27 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_rwlock_obj.c,v 1.10 2023/09/10 14:45:52 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
-#include <sys/pool.h>
+#include <sys/kmem.h>
 #include <sys/rwlock.h>
 
 /* Mutex cache */
@@ -43,41 +43,10 @@ struct krwobj {
 	krwlock_t	ro_lock;
 	u_int		ro_magic;
 	u_int		ro_refcnt;
+	uint8_t		mo_pad[COHERENCY_UNIT - sizeof(krwlock_t) -
+	    sizeof(u_int) * 2];
 };
 
-static int	rw_obj_ctor(void *, void *, int);
-
-static pool_cache_t	rw_obj_cache	__read_mostly;
-
-/*
- * rw_obj_init:
- *
- *	Initialize the rw object store.
- */
-void
-rw_obj_init(void)
-{
-
-	rw_obj_cache = pool_cache_init(sizeof(struct krwobj),
-	    coherency_unit, 0, 0, "rwlock", NULL, IPL_NONE, rw_obj_ctor,
-	    NULL, NULL);
-}
-
-/*
- * rw_obj_ctor:
- *
- *	Initialize a new lock for the cache.
- */
-static int
-rw_obj_ctor(void *arg, void *obj, int flags)
-{
-	struct krwobj * ro = obj;
-
-	ro->ro_magic = RW_OBJ_MAGIC;
-
-	return 0;
-}
-
 /*
  * rw_obj_alloc:
  *
@@ -88,8 +57,10 @@ rw_obj_alloc(void)
 {
 	struct krwobj *ro;
 
-	ro = pool_cache_get(rw_obj_cache, PR_WAITOK);
+	ro = kmem_alloc(sizeof(*ro), KM_SLEEP);
+	KASSERT(ALIGNED_POINTER(ro, coherency_unit));
 	_rw_init(&ro->ro_lock, (uintptr_t)__builtin_return_address(0));
+	ro->ro_magic = RW_OBJ_MAGIC;
 	ro->ro_refcnt = 1;
 
 	return (krwlock_t *)ro;
@@ -105,9 +76,11 @@ rw_obj_tryalloc(void)
 {
 	struct krwobj *ro;
 
-	ro = pool_cache_get(rw_obj_cache, PR_NOWAIT);
+	ro = kmem_alloc(sizeof(*ro), KM_NOSLEEP);
+	KASSERT(ALIGNED_POINTER(ro, coherency_unit));
 	if (__predict_true(ro != NULL)) {
 		_rw_init(&ro->ro_lock, (uintptr_t)__builtin_return_address(0));
+		ro->ro_magic = RW_OBJ_MAGIC;
 		ro->ro_refcnt = 1;
 	}
 
@@ -151,7 +124,7 @@ rw_obj_free(krwlock_t *lock)
 	}
 	membar_acquire();
 	rw_destroy(&ro->ro_lock);
-	pool_cache_put(rw_obj_cache, ro);
+	kmem_free(ro, sizeof(*ro));
 	return true;
 }
 

Index: src/sys/kern/kern_turnstile.c
diff -u src/sys/kern/kern_turnstile.c:1.46 src/sys/kern/kern_turnstile.c:1.47
--- src/sys/kern/kern_turnstile.c:1.46	Sun Apr  9 09:18:09 2023
+++ src/sys/kern/kern_turnstile.c	Sun Sep 10 14:45:52 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: kern_turnstile.c,v 1.46 2023/04/09 09:18:09 riastradh Exp $	*/
+/*	$NetBSD: kern_turnstile.c,v 1.47 2023/09/10 14:45:52 ad Exp $	*/
 
 /*-
- * Copyright (c) 2002, 2006, 2007, 2009, 2019, 2020
+ * Copyright (c) 2002, 2006, 2007, 2009, 2019, 2020, 2023
  *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
@@ -61,11 +61,10 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.46 2023/04/09 09:18:09 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.47 2023/09/10 14:45:52 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/lockdebug.h>
-#include <sys/pool.h>
 #include <sys/proc.h>
 #include <sys/sleepq.h>
 #include <sys/sleeptab.h>
@@ -81,7 +80,6 @@ __KERNEL_RCSID(0, "$NetBSD: kern_turnsti
 #define	TS_HASH(obj)	(((uintptr_t)(obj) >> 6) & TS_HASH_MASK)
 
 static tschain_t	turnstile_chains[TS_HASH_SIZE] __cacheline_aligned;
-struct pool		turnstile_pool;
 
 static union {
 	kmutex_t	lock;
@@ -103,9 +101,6 @@ turnstile_init(void)
 		mutex_init(&turnstile_locks[i].lock, MUTEX_DEFAULT, IPL_SCHED);
 	}
 
-	pool_init(&turnstile_pool, sizeof(turnstile_t), coherency_unit,
-	    0, 0, "tstile", NULL, IPL_NONE);
-
 	turnstile_ctor(&turnstile0);
 }
 

Index: src/sys/kern/subr_kcpuset.c
diff -u src/sys/kern/subr_kcpuset.c:1.16 src/sys/kern/subr_kcpuset.c:1.17
--- src/sys/kern/subr_kcpuset.c:1.16	Fri Sep  1 16:57:33 2023
+++ src/sys/kern/subr_kcpuset.c	Sun Sep 10 14:45:52 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: subr_kcpuset.c,v 1.16 2023/09/01 16:57:33 skrll Exp $	*/
+/*	$NetBSD: subr_kcpuset.c,v 1.17 2023/09/10 14:45:52 ad Exp $	*/
 
 /*-
- * Copyright (c) 2011 The NetBSD Foundation, Inc.
+ * Copyright (c) 2011, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -41,7 +41,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.16 2023/09/01 16:57:33 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.17 2023/09/10 14:45:52 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -49,7 +49,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_kcpuset
 #include <sys/atomic.h>
 #include <sys/sched.h>
 #include <sys/kcpuset.h>
-#include <sys/pool.h>
+#include <sys/kmem.h>
 
 /* Number of CPUs to support. */
 #define	KC_MAXCPUS		roundup2(MAXCPUS, 32)
@@ -96,8 +96,7 @@ static bool			kc_initialised = false;
  */
 static size_t			kc_bitsize __read_mostly = KC_BITSIZE_EARLY;
 static size_t			kc_nfields __read_mostly = KC_NFIELDS_EARLY;
-
-static pool_cache_t		kc_cache __read_mostly;
+static size_t			kc_memsize __read_mostly;
 
 static kcpuset_t *		kcpuset_create_raw(bool);
 
@@ -114,12 +113,10 @@ kcpuset_sysinit(void)
 	/* Set a kcpuset_t sizes. */
 	kc_nfields = (KC_MAXCPUS >> KC_SHIFT);
 	kc_bitsize = sizeof(uint32_t) * kc_nfields;
+	kc_memsize = sizeof(kcpuset_impl_t) + kc_bitsize;
 	KASSERT(kc_nfields != 0);
 	KASSERT(kc_bitsize != 0);
 
-	kc_cache = pool_cache_init(sizeof(kcpuset_impl_t) + kc_bitsize,
-	    coherency_unit, 0, 0, "kcpuset", NULL, IPL_NONE, NULL, NULL, NULL);
-
 	/* First, pre-allocate kcpuset entries. */
 	for (i = 0; i < kc_last_idx; i++) {
 		kcp = kcpuset_create_raw(true);
@@ -195,7 +192,7 @@ kcpuset_create_raw(bool zero)
 {
 	kcpuset_impl_t *kc;
 
-	kc = pool_cache_get(kc_cache, PR_WAITOK);
+	kc = kmem_alloc(kc_memsize, KM_SLEEP);
 	kc->kc_refcnt = 1;
 	kc->kc_next = NULL;
 
@@ -229,6 +226,7 @@ kcpuset_clone(kcpuset_t **retkcp, const 
 void
 kcpuset_destroy(kcpuset_t *kcp)
 {
+	const size_t size = kc_memsize;
 	kcpuset_impl_t *kc;
 
 	KASSERT(kc_initialised);
@@ -237,7 +235,7 @@ kcpuset_destroy(kcpuset_t *kcp)
 	do {
 		kc = KC_GETSTRUCT(kcp);
 		kcp = kc->kc_next;
-		pool_cache_put(kc_cache, kc);
+		kmem_free(kc, size);
 	} while (kcp);
 }
 

Index: src/sys/kern/vfs_cwd.c
diff -u src/sys/kern/vfs_cwd.c:1.8 src/sys/kern/vfs_cwd.c:1.9
--- src/sys/kern/vfs_cwd.c:1.8	Sat Apr  9 23:38:33 2022
+++ src/sys/kern/vfs_cwd.c	Sun Sep 10 14:45:52 2023
@@ -1,7 +1,7 @@
-/*	$NetBSD: vfs_cwd.c,v 1.8 2022/04/09 23:38:33 riastradh Exp $	*/
+/*	$NetBSD: vfs_cwd.c,v 1.9 2023/09/10 14:45:52 ad Exp $	*/
 
 /*-
- * Copyright (c) 2008, 2020 The NetBSD Foundation, Inc.
+ * Copyright (c) 2008, 2020, 2023 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -31,27 +31,14 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_cwd.c,v 1.8 2022/04/09 23:38:33 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_cwd.c,v 1.9 2023/09/10 14:45:52 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
 #include <sys/filedesc.h>
 #include <sys/proc.h>
 #include <sys/vnode.h>
-
-static int	cwdi_ctor(void *, void *, int);
-static void	cwdi_dtor(void *, void *);
-
-static pool_cache_t cwdi_cache;
-
-void
-cwd_sys_init(void)
-{
-
-	cwdi_cache = pool_cache_init(sizeof(struct cwdinfo), coherency_unit,
-	    0, 0, "cwdi", NULL, IPL_NONE, cwdi_ctor, cwdi_dtor, NULL);
-	KASSERT(cwdi_cache != NULL);
-}
+#include <sys/kmem.h>
 
 /*
  * Create an initial cwdinfo structure, using the same current and root
@@ -63,7 +50,9 @@ cwdinit(void)
 	struct cwdinfo *cwdi;
 	struct cwdinfo *copy;
 
-	cwdi = pool_cache_get(cwdi_cache, PR_WAITOK);
+	cwdi = kmem_alloc(sizeof(*cwdi), KM_SLEEP);
+	KASSERT(ALIGNED_POINTER(cwdi, COHERENCY_UNIT));
+	rw_init(&cwdi->cwdi_lock);
 	copy = curproc->p_cwdi;
 
 	rw_enter(&copy->cwdi_lock, RW_READER);
@@ -76,31 +65,14 @@ cwdinit(void)
 	cwdi->cwdi_edir = copy->cwdi_edir;
 	if (cwdi->cwdi_edir)
 		vref(cwdi->cwdi_edir);
+	rw_exit(&copy->cwdi_lock);
+
 	cwdi->cwdi_cmask = copy->cwdi_cmask;
 	cwdi->cwdi_refcnt = 1;
-	rw_exit(&copy->cwdi_lock);
 
 	return (cwdi);
 }
 
-static int
-cwdi_ctor(void *arg, void *obj, int flags)
-{
-	struct cwdinfo *cwdi = obj;
-
-	rw_init(&cwdi->cwdi_lock);
-
-	return 0;
-}
-
-static void
-cwdi_dtor(void *arg, void *obj)
-{
-	struct cwdinfo *cwdi = obj;
-
-	rw_destroy(&cwdi->cwdi_lock);
-}
-
 /*
  * Make p2 share p1's cwdinfo.
  */
@@ -144,11 +116,12 @@ cwdfree(struct cwdinfo *cwdi)
 	membar_acquire();
 
 	vrele(cwdi->cwdi_cdir);
+	rw_destroy(&cwdi->cwdi_lock);
 	if (cwdi->cwdi_rdir)
 		vrele(cwdi->cwdi_rdir);
 	if (cwdi->cwdi_edir)
 		vrele(cwdi->cwdi_edir);
-	pool_cache_put(cwdi_cache, cwdi);
+	kmem_free(cwdi, sizeof(*cwdi));
 }
 
 void

Index: src/sys/kern/vfs_init.c
diff -u src/sys/kern/vfs_init.c:1.61 src/sys/kern/vfs_init.c:1.62
--- src/sys/kern/vfs_init.c:1.61	Sat Apr 29 10:07:22 2023
+++ src/sys/kern/vfs_init.c	Sun Sep 10 14:45:52 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: vfs_init.c,v 1.61 2023/04/29 10:07:22 riastradh Exp $	*/
+/*	$NetBSD: vfs_init.c,v 1.62 2023/09/10 14:45:52 ad Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2000, 2008 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_init.c,v 1.61 2023/04/29 10:07:22 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_init.c,v 1.62 2023/09/10 14:45:52 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -104,8 +104,6 @@ __KERNEL_RCSID(0, "$NetBSD: vfs_init.c,v
 
 SDT_PROVIDER_DEFINE(vfs);
 
-pool_cache_t pnbuf_cache;
-
 /*
  * These vnodeopv_descs are listed here because they are not
  * associated with any particular file system, and thus cannot
@@ -408,13 +406,6 @@ vfsinit(void)
 	sysctl_vfs_setup();
 
 	/*
-	 * Initialize the namei pathname buffer pool and cache.
-	 */
-	pnbuf_cache = pool_cache_init(MAXPATHLEN, 0, 0, 0, "pnbufpl",
-	    NULL, IPL_NONE, NULL, NULL, NULL);
-	KASSERT(pnbuf_cache != NULL);
-
-	/*
 	 * Initialize the vnode table
 	 */
 	vntblinit();

Index: src/sys/kern/vfs_lockf.c
diff -u src/sys/kern/vfs_lockf.c:1.78 src/sys/kern/vfs_lockf.c:1.79
--- src/sys/kern/vfs_lockf.c:1.78	Fri Nov 25 16:15:39 2022
+++ src/sys/kern/vfs_lockf.c	Sun Sep 10 14:45:52 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: vfs_lockf.c,v 1.78 2022/11/25 16:15:39 riastradh Exp $	*/
+/*	$NetBSD: vfs_lockf.c,v 1.79 2023/09/10 14:45:52 ad Exp $	*/
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.78 2022/11/25 16:15:39 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.79 2023/09/10 14:45:52 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -43,7 +43,7 @@ __KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,
 #include <sys/file.h>
 #include <sys/proc.h>
 #include <sys/vnode.h>
-#include <sys/pool.h>
+#include <sys/kmem.h>
 #include <sys/fcntl.h>
 #include <sys/lockf.h>
 #include <sys/atomic.h>
@@ -69,6 +69,7 @@ struct lockf {
 	kcondvar_t lf_cv;	 /* Signalling */
 	short	lf_flags;	 /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
 	short	lf_type;	 /* Lock type: F_RDLCK, F_WRLCK */
+	uid_t	lf_uid;		 /* User ID responsible */
 	off_t	lf_start;	 /* The byte # of the start of the lock */
 	off_t	lf_end;		 /* The byte # of the end of the lock (-1=EOF)*/
 	void	*lf_id;		 /* process or file description holding lock */
@@ -76,14 +77,13 @@ struct lockf {
 	struct	lockf *lf_next;	 /* Next lock on this vnode, or blocking lock */
 	struct  locklist lf_blkhd; /* List of requests blocked on this lock */
 	TAILQ_ENTRY(lockf) lf_block;/* A request waiting for a lock */
-	uid_t	lf_uid;		 /* User ID responsible */
+	struct	uidinfo *lf_uip; /* Cached pointer to uidinfo */
 };
 
 /* Maximum length of sleep chains to traverse to try and detect deadlock. */
 #define MAXDEPTH 50
 
-static pool_cache_t lockf_cache;
-static kmutex_t *lockf_lock;
+static kmutex_t lockf_lock __cacheline_aligned;
 static char lockstr[] = "lockf";
 
 /*
@@ -205,39 +205,20 @@ lf_alloc(int allowfail)
 		return NULL;
 	}
 
-	lock = pool_cache_get(lockf_cache, PR_WAITOK);
+	lock = kmem_alloc(sizeof(*lock), KM_SLEEP);
 	lock->lf_uid = uid;
+	lock->lf_uip = uip;
+	cv_init(&lock->lf_cv, lockstr);
 	return lock;
 }
 
 static void
 lf_free(struct lockf *lock)
 {
-	struct uidinfo *uip;
 
-	uip = uid_find(lock->lf_uid);
-	atomic_dec_ulong(&uip->ui_lockcnt);
-	pool_cache_put(lockf_cache, lock);
-}
-
-static int
-lf_ctor(void *arg, void *obj, int flag)
-{
-	struct lockf *lock;
-
-	lock = obj;
-	cv_init(&lock->lf_cv, lockstr);
-
-	return 0;
-}
-
-static void
-lf_dtor(void *arg, void *obj)
-{
-	struct lockf *lock;
-
-	lock = obj;
+	atomic_dec_ulong(&lock->lf_uip->ui_lockcnt);
 	cv_destroy(&lock->lf_cv);
+	kmem_free(lock, sizeof(*lock));
 }
 
 /*
@@ -811,7 +792,7 @@ lf_advlock(struct vop_advlock_args *ap, 
 	struct flock *fl = ap->a_fl;
 	struct lockf *lock = NULL;
 	struct lockf *sparelock;
-	kmutex_t *interlock = lockf_lock;
+	kmutex_t *interlock = &lockf_lock;
 	off_t start, end;
 	int error = 0;
 
@@ -973,17 +954,3 @@ quit:
 
 	return error;
 }
-
-/*
- * Initialize subsystem.   XXX We use a global lock.  This could be the
- * vnode interlock, but the deadlock detection code may need to inspect
- * locks belonging to other files.
- */
-void
-lf_init(void)
-{
-
-	lockf_cache = pool_cache_init(sizeof(struct lockf), 0, 0, 0, "lockf",
- 	    NULL, IPL_NONE, lf_ctor, lf_dtor, NULL);
-        lockf_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
-}

Index: src/sys/rump/librump/rumpkern/rump.c
diff -u src/sys/rump/librump/rumpkern/rump.c:1.357 src/sys/rump/librump/rumpkern/rump.c:1.358
--- src/sys/rump/librump/rumpkern/rump.c:1.357	Sat Aug  5 11:51:47 2023
+++ src/sys/rump/librump/rumpkern/rump.c	Sun Sep 10 14:45:52 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: rump.c,v 1.357 2023/08/05 11:51:47 riastradh Exp $	*/
+/*	$NetBSD: rump.c,v 1.358 2023/09/10 14:45:52 ad Exp $	*/
 
 /*
  * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rump.c,v 1.357 2023/08/05 11:51:47 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rump.c,v 1.358 2023/09/10 14:45:52 ad Exp $");
 
 #include <sys/systm.h>
 #define ELFSIZE ARCH_ELFSIZE
@@ -293,8 +293,6 @@ rump_init_callback(void (*cpuinit_callba
 	uvm_ra_init();
 	uao_init();
 
-	mutex_obj_init();
-	rw_obj_init();
 	callout_startup();
 
 	kprintf_init();

Index: src/sys/rump/librump/rumpvfs/rump_vfs.c
diff -u src/sys/rump/librump/rumpvfs/rump_vfs.c:1.94 src/sys/rump/librump/rumpvfs/rump_vfs.c:1.95
--- src/sys/rump/librump/rumpvfs/rump_vfs.c:1.94	Wed Oct 26 23:39:10 2022
+++ src/sys/rump/librump/rumpvfs/rump_vfs.c	Sun Sep 10 14:45:53 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: rump_vfs.c,v 1.94 2022/10/26 23:39:10 riastradh Exp $	*/
+/*	$NetBSD: rump_vfs.c,v 1.95 2023/09/10 14:45:53 ad Exp $	*/
 
 /*
  * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
@@ -29,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rump_vfs.c,v 1.94 2022/10/26 23:39:10 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rump_vfs.c,v 1.95 2023/09/10 14:45:53 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/buf.h>
@@ -124,8 +124,6 @@ RUMP_COMPONENT(RUMP__FACTION_VFS)
 	fstrans_init();
 	vfsinit();
 	bufinit();
-	cwd_sys_init();
-	lf_init();
 	spec_init();
 
 	root_device = &rump_rootdev;

Index: src/sys/sys/namei.src
diff -u src/sys/sys/namei.src:1.61 src/sys/sys/namei.src:1.62
--- src/sys/sys/namei.src:1.61	Sat Sep  9 18:27:59 2023
+++ src/sys/sys/namei.src	Sun Sep 10 14:45:53 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: namei.src,v 1.61 2023/09/09 18:27:59 ad Exp $	*/
+/*	$NetBSD: namei.src,v 1.62 2023/09/10 14:45:53 ad Exp $	*/
 
 /*
  * Copyright (c) 1985, 1989, 1991, 1993
@@ -244,15 +244,13 @@ struct namecache {
 #endif /* __NAMECACHE_PRIVATE */
 
 #ifdef _KERNEL
-#include <sys/pool.h>
+#include <sys/kmem.h>
 
 struct mount;
 struct cpu_info;
 
-extern pool_cache_t pnbuf_cache;	/* pathname buffer cache */
-
-#define	PNBUF_GET()	((char *)pool_cache_get(pnbuf_cache, PR_WAITOK))
-#define	PNBUF_PUT(pnb)	pool_cache_put(pnbuf_cache, (void *)(pnb))
+#define	PNBUF_GET()	((char *)kmem_alloc(MAXPATHLEN, KM_SLEEP))
+#define	PNBUF_PUT(pnb)	kmem_free((pnb), MAXPATHLEN)
 
 /*
  * Typesafe flags for namei_simple/nameiat_simple.

Index: src/sys/uvm/uvm_init.c
diff -u src/sys/uvm/uvm_init.c:1.56 src/sys/uvm/uvm_init.c:1.57
--- src/sys/uvm/uvm_init.c:1.56	Mon Jul 17 12:55:37 2023
+++ src/sys/uvm/uvm_init.c	Sun Sep 10 14:45:53 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_init.c,v 1.56 2023/07/17 12:55:37 riastradh Exp $	*/
+/*	$NetBSD: uvm_init.c,v 1.57 2023/09/10 14:45:53 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.56 2023/07/17 12:55:37 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.57 2023/09/10 14:45:53 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -174,7 +174,6 @@ uvm_init(void)
 	 * so initialize that first.
 	 */
 
-	rw_obj_init();
 	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
 	    UAO_FLAG_KERNSWAP);
 

Index: src/sys/uvm/uvm_map.c
diff -u src/sys/uvm/uvm_map.c:1.407 src/sys/uvm/uvm_map.c:1.408
--- src/sys/uvm/uvm_map.c:1.407	Thu Aug  3 03:15:48 2023
+++ src/sys/uvm/uvm_map.c	Sun Sep 10 14:45:53 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_map.c,v 1.407 2023/08/03 03:15:48 rin Exp $	*/
+/*	$NetBSD: uvm_map.c,v 1.408 2023/09/10 14:45:53 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.407 2023/08/03 03:15:48 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.408 2023/09/10 14:45:53 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_pax.h"
@@ -144,12 +144,6 @@ UVMMAP_EVCNT_DEFINE(mlk_treeloop)
 const char vmmapbsy[] = "vmmapbsy";
 
 /*
- * cache for vmspace structures.
- */
-
-static struct pool_cache uvm_vmspace_cache;
-
-/*
  * cache for dynamically-allocated map entries.
  */
 
@@ -931,8 +925,6 @@ uvm_map_init_caches(void)
 	pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
 	    coherency_unit, 0, PR_LARGECACHE, "vmmpepl", NULL, IPL_NONE, NULL,
 	    NULL, NULL);
-	pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
-	    0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
 }
 
 /*
@@ -4112,7 +4104,7 @@ uvmspace_alloc(vaddr_t vmin, vaddr_t vma
 	struct vmspace *vm;
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
-	vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
+	vm = kmem_alloc(sizeof(*vm), KM_SLEEP);
 	uvmspace_init(vm, NULL, vmin, vmax, topdown);
 	UVMHIST_LOG(maphist,"<- done (vm=%#jx)", (uintptr_t)vm, 0, 0, 0);
 	return (vm);
@@ -4368,7 +4360,7 @@ uvmspace_free(struct vmspace *vm)
 	rw_destroy(&map->lock);
 	cv_destroy(&map->cv);
 	pmap_destroy(map->pmap);
-	pool_cache_put(&uvm_vmspace_cache, vm);
+	kmem_free(vm, sizeof(*vm));
 }
 
 static struct vm_map_entry *

Index: src/sys/uvm/uvm_readahead.c
diff -u src/sys/uvm/uvm_readahead.c:1.13 src/sys/uvm/uvm_readahead.c:1.14
--- src/sys/uvm/uvm_readahead.c:1.13	Tue May 19 21:45:35 2020
+++ src/sys/uvm/uvm_readahead.c	Sun Sep 10 14:45:53 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_readahead.c,v 1.13 2020/05/19 21:45:35 ad Exp $	*/
+/*	$NetBSD: uvm_readahead.c,v 1.14 2023/09/10 14:45:53 ad Exp $	*/
 
 /*-
  * Copyright (c)2003, 2005, 2009 YAMAMOTO Takashi,
@@ -40,10 +40,10 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.13 2020/05/19 21:45:35 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.14 2023/09/10 14:45:53 ad Exp $");
 
 #include <sys/param.h>
-#include <sys/pool.h>
+#include <sys/kmem.h>
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_readahead.h>
@@ -83,8 +83,6 @@ static off_t ra_startio(struct uvm_objec
 static struct uvm_ractx *ra_allocctx(void);
 static void ra_freectx(struct uvm_ractx *);
 
-static struct pool_cache ractx_cache;
-
 /*
  * uvm_ra_init: initialize readahead module.
  */
@@ -93,22 +91,20 @@ void
 uvm_ra_init(void)
 {
 
-	pool_cache_bootstrap(&ractx_cache, sizeof(struct uvm_ractx), 0, 0, 0,
-	    "ractx", NULL, IPL_NONE, NULL, NULL, NULL);
 }
 
 static struct uvm_ractx *
 ra_allocctx(void)
 {
 
-	return pool_cache_get(&ractx_cache, PR_NOWAIT);
+	return kmem_alloc(sizeof(struct uvm_ractx), KM_NOSLEEP);
 }
 
 static void
 ra_freectx(struct uvm_ractx *ra)
 {
 
-	pool_cache_put(&ractx_cache, ra);
+	kmem_free(ra, sizeof(struct uvm_ractx));
 }
 
 /*
