Module Name:    src
Committed By:   oster
Date:           Fri Jul 23 00:54:45 UTC 2021

Modified Files:
        src/sys/dev/raidframe: rf_alloclist.c rf_aselect.c rf_callback.c
            rf_callback.h rf_copyback.c rf_dagdegrd.c rf_dagdegwr.c
            rf_dagffrd.c rf_dagffwr.c rf_dagutils.c rf_dagutils.h
            rf_diskqueue.c rf_diskqueue.h rf_driver.c rf_evenodd.c rf_map.c
            rf_map.h rf_mcpair.c rf_mcpair.h rf_netbsd.h rf_netbsdkintf.c
            rf_paritylogDiskMgr.c rf_parityscan.c rf_psstatus.c rf_psstatus.h
            rf_raid.h rf_raid1.c rf_reconbuffer.c rf_reconstruct.c
            rf_reconstruct.h rf_reconutil.c rf_revent.c rf_revent.h rf_states.c
            rf_stripelocks.c rf_stripelocks.h

Log Message:
Extensive mechanical changes to the pools used in RAIDframe.

The alloclist pool remains global rather than per-RAID, so that pool
is now initialized separately (and differently) from the rest.
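
As a concrete illustration (taken verbatim from the rf_alloclist.c
hunk below), the alloclist pool is now set up directly with pool(9)
calls instead of rf_pool_init():

  pool_init(&rf_alloclist_pool, sizeof(RF_AllocListElem_t), 0, 0, 0,
            "rf_alloclist_pl", NULL, IPL_BIO);
  pool_sethiwat(&rf_alloclist_pool, RF_AL_FREELIST_MAX);
  pool_prime(&rf_alloclist_pool, RF_AL_FREELIST_MIN);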

The remaining pools in RF_Pools_s are now per-RAID pools.  The changes
to the functions that allocate and destroy the per-RAID pools are
mostly mechanical, though raidPtr had to be made available in a number
of places so that the per-RAID pools can be found.
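
For example (see the rf_mcpair.c hunk below), allocators that used to
reach for a global pool now take raidPtr and use the per-RAID pool:

  /* before */
  t = pool_get(&rf_pools.mcpair, PR_WAITOK);

  /* after: rf_AllocMCPair() now takes raidPtr */
  t = pool_get(&raidPtr->pools.mcpair, PR_WAITOK);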

Extend rf_pool_init() to also populate a per-RAID wchan value that is
unique to each pool of a given RAID device.
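
The new prototype and a typical per-RAID call (from the rf_netbsd.h
and rf_revent.c hunks below) look like:

  void rf_pool_init(RF_Raid_t *, char *, struct pool *, size_t,
                    const char *, size_t, size_t);

  rf_pool_init(raidPtr, raidPtr->poolNames.revent, &raidPtr->pools.revent,
               sizeof(RF_ReconEvent_t), "revent",
               RF_MIN_FREE_REVENT, RF_MAX_FREE_REVENT);

where the buffer at raidPtr->poolNames.revent receives the generated
per-RAID name, which presumably also serves as that pool's wchan.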

TODO: Complete the analysis of the minimum number of items that are
required for each pool to allow IO to progress (i.e. so that a request
for pool resources can always be satisfied), and dynamically scale
minimum pool sizes based on RAID configuration.


To generate a diff of this commit:
cvs rdiff -u -r1.28 -r1.29 src/sys/dev/raidframe/rf_alloclist.c \
    src/sys/dev/raidframe/rf_revent.c
cvs rdiff -u -r1.29 -r1.30 src/sys/dev/raidframe/rf_aselect.c \
    src/sys/dev/raidframe/rf_reconstruct.h
cvs rdiff -u -r1.24 -r1.25 src/sys/dev/raidframe/rf_callback.c \
    src/sys/dev/raidframe/rf_mcpair.c
cvs rdiff -u -r1.7 -r1.8 src/sys/dev/raidframe/rf_callback.h
cvs rdiff -u -r1.53 -r1.54 src/sys/dev/raidframe/rf_copyback.c
cvs rdiff -u -r1.31 -r1.32 src/sys/dev/raidframe/rf_dagdegrd.c
cvs rdiff -u -r1.35 -r1.36 src/sys/dev/raidframe/rf_dagdegwr.c
cvs rdiff -u -r1.21 -r1.22 src/sys/dev/raidframe/rf_dagffrd.c \
    src/sys/dev/raidframe/rf_dagutils.h
cvs rdiff -u -r1.36 -r1.37 src/sys/dev/raidframe/rf_dagffwr.c \
    src/sys/dev/raidframe/rf_netbsd.h src/sys/dev/raidframe/rf_parityscan.c
cvs rdiff -u -r1.57 -r1.58 src/sys/dev/raidframe/rf_dagutils.c
cvs rdiff -u -r1.59 -r1.60 src/sys/dev/raidframe/rf_diskqueue.c
cvs rdiff -u -r1.26 -r1.27 src/sys/dev/raidframe/rf_diskqueue.h \
    src/sys/dev/raidframe/rf_reconbuffer.c
cvs rdiff -u -r1.137 -r1.138 src/sys/dev/raidframe/rf_driver.c
cvs rdiff -u -r1.22 -r1.23 src/sys/dev/raidframe/rf_evenodd.c
cvs rdiff -u -r1.50 -r1.51 src/sys/dev/raidframe/rf_map.c
cvs rdiff -u -r1.13 -r1.14 src/sys/dev/raidframe/rf_map.h
cvs rdiff -u -r1.10 -r1.11 src/sys/dev/raidframe/rf_mcpair.h \
    src/sys/dev/raidframe/rf_revent.h
cvs rdiff -u -r1.394 -r1.395 src/sys/dev/raidframe/rf_netbsdkintf.c
cvs rdiff -u -r1.30 -r1.31 src/sys/dev/raidframe/rf_paritylogDiskMgr.c
cvs rdiff -u -r1.37 -r1.38 src/sys/dev/raidframe/rf_psstatus.c \
    src/sys/dev/raidframe/rf_raid1.c src/sys/dev/raidframe/rf_reconutil.c
cvs rdiff -u -r1.15 -r1.16 src/sys/dev/raidframe/rf_psstatus.h
cvs rdiff -u -r1.48 -r1.49 src/sys/dev/raidframe/rf_raid.h
cvs rdiff -u -r1.125 -r1.126 src/sys/dev/raidframe/rf_reconstruct.c
cvs rdiff -u -r1.51 -r1.52 src/sys/dev/raidframe/rf_states.c
cvs rdiff -u -r1.34 -r1.35 src/sys/dev/raidframe/rf_stripelocks.c
cvs rdiff -u -r1.9 -r1.10 src/sys/dev/raidframe/rf_stripelocks.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/dev/raidframe/rf_alloclist.c
diff -u src/sys/dev/raidframe/rf_alloclist.c:1.28 src/sys/dev/raidframe/rf_alloclist.c:1.29
--- src/sys/dev/raidframe/rf_alloclist.c:1.28	Sun Feb 10 17:13:33 2019
+++ src/sys/dev/raidframe/rf_alloclist.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_alloclist.c,v 1.28 2019/02/10 17:13:33 christos Exp $	*/
+/*	$NetBSD: rf_alloclist.c,v 1.29 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -37,7 +37,7 @@
  ***************************************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_alloclist.c,v 1.28 2019/02/10 17:13:33 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_alloclist.c,v 1.29 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -59,15 +59,17 @@ static void rf_ShutdownAllocList(void *)
 
 static void rf_ShutdownAllocList(void *ignored)
 {
-	pool_destroy(&rf_pools.alloclist);
+	pool_destroy(&rf_alloclist_pool);
 }
 
 int
 rf_ConfigureAllocList(RF_ShutdownList_t **listp)
 {
 
-	rf_pool_init(&rf_pools.alloclist, sizeof(RF_AllocListElem_t),
-		     "rf_alloclist_pl", RF_AL_FREELIST_MIN, RF_AL_FREELIST_MAX);
+	pool_init(&rf_alloclist_pool, sizeof(RF_AllocListElem_t), 0, 0, 0, "rf_alloclist_pl", NULL, IPL_BIO);
+	pool_sethiwat(&rf_alloclist_pool, RF_AL_FREELIST_MAX);
+	pool_prime(&rf_alloclist_pool, RF_AL_FREELIST_MIN);
+
 	rf_ShutdownCreate(listp, rf_ShutdownAllocList, NULL);
 
 	return (0);
@@ -115,12 +117,12 @@ rf_FreeAllocList(RF_AllocListElem_t *l)
 	while (l) {
 		temp = l;
 		l = l->next;
-		pool_put(&rf_pools.alloclist, temp);
+		pool_put(&rf_alloclist_pool, temp);
 	}
 }
 
 RF_AllocListElem_t *
 rf_real_MakeAllocList(void)
 {
-	return pool_get(&rf_pools.alloclist, PR_WAITOK | PR_ZERO);
+	return pool_get(&rf_alloclist_pool, PR_WAITOK | PR_ZERO);
 }
Index: src/sys/dev/raidframe/rf_revent.c
diff -u src/sys/dev/raidframe/rf_revent.c:1.28 src/sys/dev/raidframe/rf_revent.c:1.29
--- src/sys/dev/raidframe/rf_revent.c:1.28	Mon May  2 01:07:24 2011
+++ src/sys/dev/raidframe/rf_revent.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_revent.c,v 1.28 2011/05/02 01:07:24 mrg Exp $	*/
+/*	$NetBSD: rf_revent.c,v 1.29 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_revent.c,v 1.28 2011/05/02 01:07:24 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_revent.c,v 1.29 2021/07/23 00:54:45 oster Exp $");
 
 #include <sys/errno.h>
 
@@ -51,20 +51,25 @@ __KERNEL_RCSID(0, "$NetBSD: rf_revent.c,
 static void rf_ShutdownReconEvent(void *);
 
 static RF_ReconEvent_t *
-GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type);
+GetReconEventDesc(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg, RF_Revent_t type);
 
-static void rf_ShutdownReconEvent(void *ignored)
+static void rf_ShutdownReconEvent(void *arg)
 {
-	pool_destroy(&rf_pools.revent);
+	RF_Raid_t *raidPtr;
+
+	raidPtr = (RF_Raid_t *) arg;
+	
+	pool_destroy(&raidPtr->pools.revent);
 }
 
 int
-rf_ConfigureReconEvent(RF_ShutdownList_t **listp)
+rf_ConfigureReconEvent(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
+		       RF_Config_t *cfgPtr)
 {
 
-	rf_pool_init(&rf_pools.revent, sizeof(RF_ReconEvent_t),
-		     "rf_revent_pl", RF_MIN_FREE_REVENT, RF_MAX_FREE_REVENT);
-	rf_ShutdownCreate(listp, rf_ShutdownReconEvent, NULL);
+	rf_pool_init(raidPtr, raidPtr->poolNames.revent, &raidPtr->pools.revent, sizeof(RF_ReconEvent_t),
+		     "revent", RF_MIN_FREE_REVENT, RF_MAX_FREE_REVENT);
+	rf_ShutdownCreate(listp, rf_ShutdownReconEvent, raidPtr);
 
 	return (0);
 }
@@ -163,7 +168,7 @@ rf_CauseReconEvent(RF_Raid_t *raidPtr, R
 		   RF_Revent_t type)
 {
 	RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
-	RF_ReconEvent_t *event = GetReconEventDesc(col, arg, type);
+	RF_ReconEvent_t *event = GetReconEventDesc(raidPtr, col, arg, type);
 
 	if (type == RF_REVENT_BUFCLEAR) {
 		RF_ASSERT(col != rctrl->fcol);
@@ -180,11 +185,11 @@ rf_CauseReconEvent(RF_Raid_t *raidPtr, R
 }
 /* allocates and initializes a recon event descriptor */
 static RF_ReconEvent_t *
-GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type)
+GetReconEventDesc(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg, RF_Revent_t type)
 {
 	RF_ReconEvent_t *t;
 
-	t = pool_get(&rf_pools.revent, PR_WAITOK);
+	t = pool_get(&raidPtr->pools.revent, PR_WAITOK);
 	t->col = col;
 	t->arg = arg;
 	t->type = type;
@@ -212,13 +217,13 @@ rf_DrainReconEventQueue(RF_RaidReconDesc
 		event->next = NULL;
 		rctrl->eq_count--;
 		/* dump it */
-		rf_FreeReconEventDesc(event);
+		rf_FreeReconEventDesc(reconDesc->raidPtr, event);
 	}
 	rf_unlock_mutex2(rctrl->eq_mutex);
 }
 
 void
-rf_FreeReconEventDesc(RF_ReconEvent_t *event)
+rf_FreeReconEventDesc(RF_Raid_t *raidPtr, RF_ReconEvent_t *event)
 {
-	pool_put(&rf_pools.revent, event);
+	pool_put(&raidPtr->pools.revent, event);
 }

Index: src/sys/dev/raidframe/rf_aselect.c
diff -u src/sys/dev/raidframe/rf_aselect.c:1.29 src/sys/dev/raidframe/rf_aselect.c:1.30
--- src/sys/dev/raidframe/rf_aselect.c:1.29	Wed Jan  4 15:50:34 2017
+++ src/sys/dev/raidframe/rf_aselect.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_aselect.c,v 1.29 2017/01/04 15:50:34 christos Exp $	*/
+/*	$NetBSD: rf_aselect.c,v 1.30 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -33,7 +33,7 @@
  *****************************************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_aselect.c,v 1.29 2017/01/04 15:50:34 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_aselect.c,v 1.30 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -58,7 +58,7 @@ static void
 InitHdrNode(RF_DagHeader_t **hdr, RF_Raid_t *raidPtr, RF_RaidAccessDesc_t *desc)
 {
 	/* create and initialize dag hdr */
-	*hdr = rf_AllocDAGHeader();
+	*hdr = rf_AllocDAGHeader(raidPtr);
 	rf_MakeAllocList((*hdr)->allocList);
 	(*hdr)->status = rf_enable;
 	(*hdr)->numSuccedents = 0;
@@ -149,7 +149,7 @@ rf_SelectAlgorithm(RF_RaidAccessDesc_t *
 	desc->numStripes = 0;
 	for (i = 0, asm_p = asmap; asm_p; asm_p = asm_p->next, i++) {
 		desc->numStripes++;
-		stripeFuncs = rf_AllocFuncList();
+		stripeFuncs = rf_AllocFuncList(raidPtr);
 
 		if (stripeFuncsEnd == NULL) {
 			stripeFuncsList = stripeFuncs;
@@ -166,7 +166,7 @@ rf_SelectAlgorithm(RF_RaidAccessDesc_t *
 			 * unit in the stripe */
 
 			/* create a failed stripe structure to attempt to deal with the failure */
-			failed_stripe = rf_AllocFailedStripeStruct();
+			failed_stripe = rf_AllocFailedStripeStruct(raidPtr);
 			if (failed_stripes_list == NULL) {
 				failed_stripes_list = failed_stripe;
 				failed_stripes_list_end = failed_stripe;
@@ -189,7 +189,7 @@ rf_SelectAlgorithm(RF_RaidAccessDesc_t *
 				length = physPtr->numSector;
 				buffer = physPtr->bufPtr;
 
-				asmhle = rf_AllocASMHeaderListElem();
+				asmhle = rf_AllocASMHeaderListElem(raidPtr);
 				if (failed_stripe->asmh_u == NULL) {
 					failed_stripe->asmh_u = asmhle;      /* we're the head... */
 					failed_stripes_asmh_u_end = asmhle;  /* and the tail      */
@@ -203,7 +203,7 @@ rf_SelectAlgorithm(RF_RaidAccessDesc_t *
 				asmhle->asmh = rf_MapAccess(raidPtr, address, length, buffer, RF_DONT_REMAP);
 				asm_up = asmhle->asmh->stripeMap;
 
-				vfple = rf_AllocVFPListElem();
+				vfple = rf_AllocVFPListElem(raidPtr);
 				if (failed_stripe->vfple == NULL) {
 					failed_stripe->vfple = vfple;
 					failed_stripes_vfple_end = vfple;
@@ -236,7 +236,7 @@ rf_SelectAlgorithm(RF_RaidAccessDesc_t *
 						length = 1;
 						buffer = (char *)physPtr->bufPtr + (k * (1 << raidPtr->logBytesPerSector));
 
-						asmhle = rf_AllocASMHeaderListElem();
+						asmhle = rf_AllocASMHeaderListElem(raidPtr);
 						if (failed_stripe->asmh_b == NULL) {
 							failed_stripe->asmh_b = asmhle;
 							failed_stripes_asmh_b_end = asmhle;
@@ -248,7 +248,7 @@ rf_SelectAlgorithm(RF_RaidAccessDesc_t *
 						asmhle->asmh = rf_MapAccess(raidPtr, address, length, buffer, RF_DONT_REMAP);
 						asm_bp = asmhle->asmh->stripeMap;
 
-						vfple = rf_AllocVFPListElem();
+						vfple = rf_AllocVFPListElem(raidPtr);
 						if (failed_stripe->bvfple == NULL) {
 							failed_stripe->bvfple = vfple;
 							failed_stripes_bvfple_end = vfple;
@@ -286,37 +286,37 @@ rf_SelectAlgorithm(RF_RaidAccessDesc_t *
 					while (asmhle) {
 						tmpasmhle= asmhle;
 						asmhle = tmpasmhle->next;
-						rf_FreeAccessStripeMap(tmpasmhle->asmh);
-						rf_FreeASMHeaderListElem(tmpasmhle);
+						rf_FreeAccessStripeMap(raidPtr, tmpasmhle->asmh);
+						rf_FreeASMHeaderListElem(raidPtr, tmpasmhle);
 					}
 
 					asmhle = failed_stripe->asmh_b;
 					while (asmhle) {
 						tmpasmhle= asmhle;
 						asmhle = tmpasmhle->next;
-						rf_FreeAccessStripeMap(tmpasmhle->asmh);
-						rf_FreeASMHeaderListElem(tmpasmhle);
+						rf_FreeAccessStripeMap(raidPtr, tmpasmhle->asmh);
+						rf_FreeASMHeaderListElem(raidPtr, tmpasmhle);
 					}
 
 					vfple = failed_stripe->vfple;
 					while (vfple) {
 						tmpvfple = vfple;
 						vfple = tmpvfple->next;
-						rf_FreeVFPListElem(tmpvfple);
+						rf_FreeVFPListElem(raidPtr, tmpvfple);
 					}
 
 					vfple = failed_stripe->bvfple;
 					while (vfple) {
 						tmpvfple = vfple;
 						vfple = tmpvfple->next;
-						rf_FreeVFPListElem(tmpvfple);
+						rf_FreeVFPListElem(raidPtr, tmpvfple);
 					}
 
 					stripeNum++;
 					/* only move to the next failed stripe slot if the current one was used */
 					tmpfailed_stripe = failed_stripe;
 					failed_stripe = failed_stripe->next;
-					rf_FreeFailedStripeStruct(tmpfailed_stripe);
+					rf_FreeFailedStripeStruct(raidPtr, tmpfailed_stripe);
 				}
 				stripeFuncs = stripeFuncs->next;
 			}
@@ -325,7 +325,7 @@ rf_SelectAlgorithm(RF_RaidAccessDesc_t *
 		while (stripeFuncsList != NULL) {
 			temp = stripeFuncsList;
 			stripeFuncsList = stripeFuncsList->next;
-			rf_FreeFuncList(temp);
+			rf_FreeFuncList(raidPtr, temp);
 		}
 		desc->numStripes = 0;
 		return (1);
@@ -344,7 +344,7 @@ rf_SelectAlgorithm(RF_RaidAccessDesc_t *
 			/* grab dag header for this stripe */
 			dag_h = NULL;
 
-			dagList = rf_AllocDAGList();
+			dagList = rf_AllocDAGList(raidPtr);
 
 			/* always tack the new dagList onto the end of the list... */
 			if (dagListend == NULL) {
@@ -505,38 +505,38 @@ rf_SelectAlgorithm(RF_RaidAccessDesc_t *
 				while (asmhle) {
 					tmpasmhle= asmhle;
 					asmhle = tmpasmhle->next;
-					rf_FreeASMHeaderListElem(tmpasmhle);
+					rf_FreeASMHeaderListElem(raidPtr, tmpasmhle);
 				}
 
 				asmhle = failed_stripe->asmh_b;
 				while (asmhle) {
 					tmpasmhle= asmhle;
 					asmhle = tmpasmhle->next;
-					rf_FreeASMHeaderListElem(tmpasmhle);
+					rf_FreeASMHeaderListElem(raidPtr, tmpasmhle);
 				}
 				vfple = failed_stripe->vfple;
 				while (vfple) {
 					tmpvfple = vfple;
 					vfple = tmpvfple->next;
-					rf_FreeVFPListElem(tmpvfple);
+					rf_FreeVFPListElem(raidPtr, tmpvfple);
 				}
 
 				vfple = failed_stripe->bvfple;
 				while (vfple) {
 					tmpvfple = vfple;
 					vfple = tmpvfple->next;
-					rf_FreeVFPListElem(tmpvfple);
+					rf_FreeVFPListElem(raidPtr, tmpvfple);
 				}
 
 				tmpfailed_stripe = failed_stripe;
 				failed_stripe = tmpfailed_stripe->next;
-				rf_FreeFailedStripeStruct(tmpfailed_stripe);
+				rf_FreeFailedStripeStruct(raidPtr, tmpfailed_stripe);
 			}
 		}
 		while (stripeFuncsList != NULL) {
 			temp = stripeFuncsList;
 			stripeFuncsList = stripeFuncsList->next;
-			rf_FreeFuncList(temp);
+			rf_FreeFuncList(raidPtr, temp);
 		}
 		return (0);
 	}
Index: src/sys/dev/raidframe/rf_reconstruct.h
diff -u src/sys/dev/raidframe/rf_reconstruct.h:1.29 src/sys/dev/raidframe/rf_reconstruct.h:1.30
--- src/sys/dev/raidframe/rf_reconstruct.h:1.29	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_reconstruct.h	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_reconstruct.h,v 1.29 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_reconstruct.h,v 1.30 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -175,7 +175,7 @@ struct RF_ReconCtrl_s {
 /* the default priority for reconstruction accesses */
 #define RF_IO_RECON_PRIORITY RF_IO_LOW_PRIORITY
 
-int rf_ConfigureReconstruction(RF_ShutdownList_t **);
+int rf_ConfigureReconstruction(RF_ShutdownList_t **, RF_Raid_t *, RF_Config_t *);
 int rf_ReconstructFailedDisk(RF_Raid_t *, RF_RowCol_t);
 int rf_ReconstructFailedDiskBasic(RF_Raid_t *, RF_RowCol_t);
 int rf_ReconstructInPlace(RF_Raid_t *, RF_RowCol_t);

Index: src/sys/dev/raidframe/rf_callback.c
diff -u src/sys/dev/raidframe/rf_callback.c:1.24 src/sys/dev/raidframe/rf_callback.c:1.25
--- src/sys/dev/raidframe/rf_callback.c:1.24	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_callback.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_callback.c,v 1.24 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_callback.c,v 1.25 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -34,7 +34,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_callback.c,v 1.24 2019/10/10 03:43:59 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_callback.c,v 1.25 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 #include <sys/pool.h>
@@ -46,51 +46,57 @@ __KERNEL_RCSID(0, "$NetBSD: rf_callback.
 #include "rf_general.h"
 #include "rf_shutdown.h"
 #include "rf_netbsd.h"
+#include "rf_raid.h"
 
 #define RF_MAX_FREE_CALLBACK 64
 #define RF_MIN_FREE_CALLBACK 32
 
 static void rf_ShutdownCallback(void *);
 static void
-rf_ShutdownCallback(void *ignored)
+rf_ShutdownCallback(void *arg)
 {
-	pool_destroy(&rf_pools.callbackf);
-	pool_destroy(&rf_pools.callbackv);
+	RF_Raid_t *raidPtr;
+
+	raidPtr = (RF_Raid_t *) arg;
+	
+	pool_destroy(&raidPtr->pools.callbackf);
+	pool_destroy(&raidPtr->pools.callbackv);
 }
 
 int
-rf_ConfigureCallback(RF_ShutdownList_t **listp)
+rf_ConfigureCallback(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
+		     RF_Config_t *cfgPtr)
 {
 
-	rf_pool_init(&rf_pools.callbackf, sizeof(RF_CallbackFuncDesc_t),
-		     "rf_callbackfpl", RF_MIN_FREE_CALLBACK, RF_MAX_FREE_CALLBACK);
-	rf_pool_init(&rf_pools.callbackv, sizeof(RF_CallbackValueDesc_t),
-		     "rf_callbackvpl", RF_MIN_FREE_CALLBACK, RF_MAX_FREE_CALLBACK);
-	rf_ShutdownCreate(listp, rf_ShutdownCallback, NULL);
+	rf_pool_init(raidPtr, raidPtr->poolNames.callbackf, &raidPtr->pools.callbackf, sizeof(RF_CallbackFuncDesc_t),
+		     "callbackf", RF_MIN_FREE_CALLBACK, RF_MAX_FREE_CALLBACK);
+	rf_pool_init(raidPtr, raidPtr->poolNames.callbackv, &raidPtr->pools.callbackv, sizeof(RF_CallbackValueDesc_t),
+		     "callbackv", RF_MIN_FREE_CALLBACK, RF_MAX_FREE_CALLBACK);
+	rf_ShutdownCreate(listp, rf_ShutdownCallback, raidPtr);
 
 	return (0);
 }
 
 RF_CallbackFuncDesc_t *
-rf_AllocCallbackFuncDesc(void)
+rf_AllocCallbackFuncDesc(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.callbackf, PR_WAITOK);
+	return pool_get(&raidPtr->pools.callbackf, PR_WAITOK);
 }
 
 void
-rf_FreeCallbackFuncDesc(RF_CallbackFuncDesc_t *p)
+rf_FreeCallbackFuncDesc(RF_Raid_t *raidPtr, RF_CallbackFuncDesc_t *p)
 {
-	pool_put(&rf_pools.callbackf, p);
+	pool_put(&raidPtr->pools.callbackf, p);
 }
 
 RF_CallbackValueDesc_t *
-rf_AllocCallbackValueDesc(void)
+rf_AllocCallbackValueDesc(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.callbackv, PR_WAITOK);
+	return pool_get(&raidPtr->pools.callbackv, PR_WAITOK);
 }
 
 void
-rf_FreeCallbackValueDesc(RF_CallbackValueDesc_t *p)
+rf_FreeCallbackValueDesc(RF_Raid_t *raidPtr, RF_CallbackValueDesc_t *p)
 {
-	pool_put(&rf_pools.callbackv, p);
+	pool_put(&raidPtr->pools.callbackv, p);
 }
Index: src/sys/dev/raidframe/rf_mcpair.c
diff -u src/sys/dev/raidframe/rf_mcpair.c:1.24 src/sys/dev/raidframe/rf_mcpair.c:1.25
--- src/sys/dev/raidframe/rf_mcpair.c:1.24	Sun May  1 01:09:05 2011
+++ src/sys/dev/raidframe/rf_mcpair.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_mcpair.c,v 1.24 2011/05/01 01:09:05 mrg Exp $	*/
+/*	$NetBSD: rf_mcpair.c,v 1.25 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_mcpair.c,v 1.24 2011/05/01 01:09:05 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_mcpair.c,v 1.25 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -43,6 +43,7 @@ __KERNEL_RCSID(0, "$NetBSD: rf_mcpair.c,
 #include "rf_general.h"
 #include "rf_shutdown.h"
 #include "rf_netbsd.h"
+#include "rf_raid.h"
 
 #include <sys/pool.h>
 #include <sys/proc.h>
@@ -53,28 +54,33 @@ __KERNEL_RCSID(0, "$NetBSD: rf_mcpair.c,
 static void rf_ShutdownMCPair(void *);
 
 static void
-rf_ShutdownMCPair(void *ignored)
+rf_ShutdownMCPair(void *arg)
 {
-	pool_destroy(&rf_pools.mcpair);
+	RF_Raid_t *raidPtr;
+
+	raidPtr = (RF_Raid_t *) arg;
+	
+	pool_destroy(&raidPtr->pools.mcpair);
 }
 
 int
-rf_ConfigureMCPair(RF_ShutdownList_t **listp)
+rf_ConfigureMCPair(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
+		   RF_Config_t *cfgPtr)
 {
 
-	rf_pool_init(&rf_pools.mcpair, sizeof(RF_MCPair_t),
-		     "rf_mcpair_pl", RF_MIN_FREE_MCPAIR, RF_MAX_FREE_MCPAIR);
-	rf_ShutdownCreate(listp, rf_ShutdownMCPair, NULL);
+	rf_pool_init(raidPtr, raidPtr->poolNames.mcpair, &raidPtr->pools.mcpair, sizeof(RF_MCPair_t),
+		     "mcpair", RF_MIN_FREE_MCPAIR, RF_MAX_FREE_MCPAIR);
+	rf_ShutdownCreate(listp, rf_ShutdownMCPair, raidPtr);
 
 	return (0);
 }
 
 RF_MCPair_t *
-rf_AllocMCPair(void)
+rf_AllocMCPair(RF_Raid_t *raidPtr)
 {
 	RF_MCPair_t *t;
 
-	t = pool_get(&rf_pools.mcpair, PR_WAITOK);
+	t = pool_get(&raidPtr->pools.mcpair, PR_WAITOK);
 	rf_init_mutex2(t->mutex, IPL_VM);
 	rf_init_cond2(t->cond, "mcpair");
 	t->flag = 0;
@@ -83,11 +89,11 @@ rf_AllocMCPair(void)
 }
 
 void
-rf_FreeMCPair(RF_MCPair_t *t)
+rf_FreeMCPair(RF_Raid_t *raidPtr, RF_MCPair_t *t)
 {
 	rf_destroy_cond2(t->cond);
 	rf_destroy_mutex2(t->mutex);
-	pool_put(&rf_pools.mcpair, t);
+	pool_put(&raidPtr->pools.mcpair, t);
 }
 
 /* the callback function used to wake you up when you use an mcpair to

Index: src/sys/dev/raidframe/rf_callback.h
diff -u src/sys/dev/raidframe/rf_callback.h:1.7 src/sys/dev/raidframe/rf_callback.h:1.8
--- src/sys/dev/raidframe/rf_callback.h:1.7	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_callback.h	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_callback.h,v 1.7 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_callback.h,v 1.8 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -59,10 +59,11 @@ struct RF_CallbackValueDesc_s {
 	RF_CallbackValueDesc_t *next;/* next entry in list */
 };
 
-int     rf_ConfigureCallback(RF_ShutdownList_t ** listp);
-RF_CallbackFuncDesc_t *rf_AllocCallbackFuncDesc(void);
-void    rf_FreeCallbackFuncDesc(RF_CallbackFuncDesc_t * p);
-RF_CallbackValueDesc_t *rf_AllocCallbackValueDesc(void);
-void    rf_FreeCallbackValueDesc(RF_CallbackValueDesc_t * p);
+int     rf_ConfigureCallback(RF_ShutdownList_t ** listp, RF_Raid_t *raidPtr,
+			     RF_Config_t *cfgPtr);
+RF_CallbackFuncDesc_t *rf_AllocCallbackFuncDesc(RF_Raid_t *raidPtr);
+void    rf_FreeCallbackFuncDesc(RF_Raid_t *raidPtr, RF_CallbackFuncDesc_t * p);
+RF_CallbackValueDesc_t *rf_AllocCallbackValueDesc(RF_Raid_t *raidPtr);
+void    rf_FreeCallbackValueDesc(RF_Raid_t *raidPtr, RF_CallbackValueDesc_t * p);
 
 #endif				/* !_RF__RF_CALLBACK_H_ */

Index: src/sys/dev/raidframe/rf_copyback.c
diff -u src/sys/dev/raidframe/rf_copyback.c:1.53 src/sys/dev/raidframe/rf_copyback.c:1.54
--- src/sys/dev/raidframe/rf_copyback.c:1.53	Sun Dec  8 12:14:40 2019
+++ src/sys/dev/raidframe/rf_copyback.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_copyback.c,v 1.53 2019/12/08 12:14:40 mlelstv Exp $	*/
+/*	$NetBSD: rf_copyback.c,v 1.54 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -38,7 +38,7 @@
  ****************************************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_copyback.c,v 1.53 2019/12/08 12:14:40 mlelstv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_copyback.c,v 1.54 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -198,7 +198,7 @@ rf_CopybackReconstructedData(RF_Raid_t *
 	desc->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
 	desc->sectPerStripe = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.numDataCol;
 	desc->databuf = databuf;
-	desc->mcpair = rf_AllocMCPair();
+	desc->mcpair = rf_AllocMCPair(raidPtr);
 
 	/* quiesce the array, since we don't want to code support for user
 	 * accs here */
@@ -422,7 +422,7 @@ rf_CopybackComplete(RF_CopybackDesc_t *d
 		       raidPtr->raidid, status);
 
 	RF_Free(desc->databuf, rf_RaidAddressToByte(raidPtr, desc->sectPerSU));
-	rf_FreeMCPair(desc->mcpair);
+	rf_FreeMCPair(raidPtr, desc->mcpair);
 	RF_Free(desc, sizeof(*desc));
 
 	rf_copyback_in_progress = 0;

Index: src/sys/dev/raidframe/rf_dagdegrd.c
diff -u src/sys/dev/raidframe/rf_dagdegrd.c:1.31 src/sys/dev/raidframe/rf_dagdegrd.c:1.32
--- src/sys/dev/raidframe/rf_dagdegrd.c:1.31	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_dagdegrd.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_dagdegrd.c,v 1.31 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_dagdegrd.c,v 1.32 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -33,7 +33,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_dagdegrd.c,v 1.31 2019/10/10 03:43:59 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_dagdegrd.c,v 1.32 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -141,19 +141,19 @@ rf_CreateRaidOneDegradedReadDAG(RF_Raid_
 
 	/* total number of nodes = 1 + (block + commit + terminator) */
 
-	rdNode = rf_AllocDAGNode();
+	rdNode = rf_AllocDAGNode(raidPtr);
 	rdNode->list_next = dag_h->nodes;
 	dag_h->nodes = rdNode;
 
-	blockNode = rf_AllocDAGNode();
+	blockNode = rf_AllocDAGNode(raidPtr);
 	blockNode->list_next = dag_h->nodes;
 	dag_h->nodes = blockNode;
 
-	commitNode = rf_AllocDAGNode();
+	commitNode = rf_AllocDAGNode(raidPtr);
 	commitNode->list_next = dag_h->nodes;
 	dag_h->nodes = commitNode;
 
-	termNode = rf_AllocDAGNode();
+	termNode = rf_AllocDAGNode(raidPtr);
 	termNode->list_next = dag_h->nodes;
 	dag_h->nodes = termNode;
 
@@ -315,35 +315,35 @@ rf_CreateDegradedReadDAG(RF_Raid_t *raid
 	nRrdNodes = ((new_asm_h[0]) ? new_asm_h[0]->stripeMap->numStripeUnitsAccessed : 0) +
 	    ((new_asm_h[1]) ? new_asm_h[1]->stripeMap->numStripeUnitsAccessed : 0);
 
-	blockNode = rf_AllocDAGNode();
+	blockNode = rf_AllocDAGNode(raidPtr);
 	blockNode->list_next = dag_h->nodes;
 	dag_h->nodes = blockNode;
 
-	commitNode = rf_AllocDAGNode();
+	commitNode = rf_AllocDAGNode(raidPtr);
 	commitNode->list_next = dag_h->nodes;
 	dag_h->nodes = commitNode;
 
-	xorNode = rf_AllocDAGNode();
+	xorNode = rf_AllocDAGNode(raidPtr);
 	xorNode->list_next = dag_h->nodes;
 	dag_h->nodes = xorNode;
 
-	rpNode = rf_AllocDAGNode();
+	rpNode = rf_AllocDAGNode(raidPtr);
 	rpNode->list_next = dag_h->nodes;
 	dag_h->nodes = rpNode;
 
-	termNode = rf_AllocDAGNode();
+	termNode = rf_AllocDAGNode(raidPtr);
 	termNode->list_next = dag_h->nodes;
 	dag_h->nodes = termNode;
 
 	for (i = 0; i < nRudNodes; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
 	rudNodes = dag_h->nodes;
 
 	for (i = 0; i < nRrdNodes; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
@@ -420,7 +420,7 @@ rf_CreateDegradedReadDAG(RF_Raid_t *raid
 		}
 	}
 	/* make a PDA for the parity unit */
-	parityPDA = rf_AllocPhysDiskAddr();
+	parityPDA = rf_AllocPhysDiskAddr(raidPtr);
 	parityPDA->next = dag_h->pda_cleanup_list;
 	dag_h->pda_cleanup_list = parityPDA;
 	parityPDA->col = asmap->parityInfo->col;
@@ -453,7 +453,7 @@ rf_CreateDegradedReadDAG(RF_Raid_t *raid
 		/* any Rud nodes that overlap the failed access need to be
 		 * xored in */
 		if (overlappingPDAs[i]) {
-			pda = rf_AllocPhysDiskAddr();
+			pda = rf_AllocPhysDiskAddr(raidPtr);
 			memcpy((char *) pda, (char *) tmprudNode->params[0].p, sizeof(RF_PhysDiskAddr_t));
 			/* add it into the pda_cleanup_list *after* the copy, TYVM */
 			pda->next = dag_h->pda_cleanup_list;

Index: src/sys/dev/raidframe/rf_dagdegwr.c
diff -u src/sys/dev/raidframe/rf_dagdegwr.c:1.35 src/sys/dev/raidframe/rf_dagdegwr.c:1.36
--- src/sys/dev/raidframe/rf_dagdegwr.c:1.35	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_dagdegwr.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_dagdegwr.c,v 1.35 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_dagdegwr.c,v 1.36 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_dagdegwr.c,v 1.35 2019/10/10 03:43:59 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_dagdegwr.c,v 1.36 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -232,39 +232,39 @@ rf_CommonCreateSimpleDegradedWriteDAG(RF
 		rdnodesFaked = 0;
 	}
 
-	blockNode = rf_AllocDAGNode();
+	blockNode = rf_AllocDAGNode(raidPtr);
 	blockNode->list_next = dag_h->nodes;
 	dag_h->nodes = blockNode;
 
-	commitNode = rf_AllocDAGNode();
+	commitNode = rf_AllocDAGNode(raidPtr);
 	commitNode->list_next = dag_h->nodes;
 	dag_h->nodes = commitNode;
 
-	unblockNode = rf_AllocDAGNode();
+	unblockNode = rf_AllocDAGNode(raidPtr);
 	unblockNode->list_next = dag_h->nodes;
 	dag_h->nodes = unblockNode;
 
-	termNode = rf_AllocDAGNode();
+	termNode = rf_AllocDAGNode(raidPtr);
 	termNode->list_next = dag_h->nodes;
 	dag_h->nodes = termNode;
 
-	xorNode = rf_AllocDAGNode();
+	xorNode = rf_AllocDAGNode(raidPtr);
 	xorNode->list_next = dag_h->nodes;
 	dag_h->nodes = xorNode;
 
-	wnpNode = rf_AllocDAGNode();
+	wnpNode = rf_AllocDAGNode(raidPtr);
 	wnpNode->list_next = dag_h->nodes;
 	dag_h->nodes = wnpNode;
 
 	for (i = 0; i < nWndNodes; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
 	wndNodes = dag_h->nodes;
 
 	for (i = 0; i < nRrdNodes; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
@@ -272,7 +272,7 @@ rf_CommonCreateSimpleDegradedWriteDAG(RF
 
 #if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0)
 	if (nfaults == 2) {
-		wnqNode = rf_AllocDAGNode();
+		wnqNode = rf_AllocDAGNode(raidPtr);
 		wnqNode->list_next = dag_h->nodes;
 		dag_h->nodes = wnqNode;
 	} else {
@@ -359,7 +359,7 @@ rf_CommonCreateSimpleDegradedWriteDAG(RF
 	 * asmap->parityInfo describes the failed unit and the copy can also
 	 * be avoided. */
 
-	parityPDA = rf_AllocPhysDiskAddr();
+	parityPDA = rf_AllocPhysDiskAddr(raidPtr);
 	parityPDA->next = dag_h->pda_cleanup_list;
 	dag_h->pda_cleanup_list = parityPDA;
 	parityPDA->col = asmap->parityInfo->col;
@@ -436,7 +436,7 @@ rf_CommonCreateSimpleDegradedWriteDAG(RF
 		/* any Wnd nodes that overlap the failed access need to be
 		 * xored in */
 		if (overlappingPDAs[i]) {
-			pda = rf_AllocPhysDiskAddr();
+			pda = rf_AllocPhysDiskAddr(raidPtr);
 			memcpy((char *) pda, (char *) tmpwndNode->params[0].p, sizeof(RF_PhysDiskAddr_t));
 			/* add it into the pda_cleanup_list *after* the copy, TYVM */
 			pda->next = dag_h->pda_cleanup_list;

Index: src/sys/dev/raidframe/rf_dagffrd.c
diff -u src/sys/dev/raidframe/rf_dagffrd.c:1.21 src/sys/dev/raidframe/rf_dagffrd.c:1.22
--- src/sys/dev/raidframe/rf_dagffrd.c:1.21	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_dagffrd.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_dagffrd.c,v 1.21 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_dagffrd.c,v 1.22 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_dagffrd.c,v 1.21 2019/10/10 03:43:59 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_dagffrd.c,v 1.22 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -185,21 +185,21 @@ rf_CreateNonredundantDAG(RF_Raid_t *raid
 	RF_ASSERT(n > 0);
 
 	for (i = 0; i < n; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
 	diskNodes = dag_h->nodes;
 
-	blockNode = rf_AllocDAGNode();
+	blockNode = rf_AllocDAGNode(raidPtr);
 	blockNode->list_next = dag_h->nodes;
 	dag_h->nodes = blockNode;
 
-	commitNode = rf_AllocDAGNode();
+	commitNode = rf_AllocDAGNode(raidPtr);
 	commitNode->list_next = dag_h->nodes;
 	dag_h->nodes = commitNode;
 
-	termNode = rf_AllocDAGNode();
+	termNode = rf_AllocDAGNode(raidPtr);
 	termNode->list_next = dag_h->nodes;
 	dag_h->nodes = termNode;
 
@@ -356,21 +356,21 @@ CreateMirrorReadDAG(RF_Raid_t *raidPtr, 
 	RF_ASSERT(n > 0);
 
 	for (i = 0; i < n; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
 	readNodes = dag_h->nodes;
 
-	blockNode = rf_AllocDAGNode();
+	blockNode = rf_AllocDAGNode(raidPtr);
 	blockNode->list_next = dag_h->nodes;
 	dag_h->nodes = blockNode;
 
-	commitNode = rf_AllocDAGNode();
+	commitNode = rf_AllocDAGNode(raidPtr);
 	commitNode->list_next = dag_h->nodes;
 	dag_h->nodes = commitNode;
 
-	termNode = rf_AllocDAGNode();
+	termNode = rf_AllocDAGNode(raidPtr);
 	termNode->list_next = dag_h->nodes;
 	dag_h->nodes = termNode;
 
Index: src/sys/dev/raidframe/rf_dagutils.h
diff -u src/sys/dev/raidframe/rf_dagutils.h:1.21 src/sys/dev/raidframe/rf_dagutils.h:1.22
--- src/sys/dev/raidframe/rf_dagutils.h:1.21	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_dagutils.h	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_dagutils.h,v 1.21 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_dagutils.h,v 1.22 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -64,22 +64,22 @@ void rf_InitNode(RF_DagNode_t *, RF_Node
 		 const char *, RF_AllocListElem_t *);
 
 void rf_FreeDAG(RF_DagHeader_t *);
-int rf_ConfigureDAGs(RF_ShutdownList_t **);
+int rf_ConfigureDAGs(RF_ShutdownList_t **, RF_Raid_t *, RF_Config_t *);
 
-RF_DagHeader_t *rf_AllocDAGHeader(void);
-void    rf_FreeDAGHeader(RF_DagHeader_t * dh);
+RF_DagHeader_t *rf_AllocDAGHeader(RF_Raid_t *);
+void    rf_FreeDAGHeader(RF_Raid_t *raidPtr, RF_DagHeader_t * dh);
 
-RF_DagNode_t *rf_AllocDAGNode(void);
-void rf_FreeDAGNode(RF_DagNode_t *);
+RF_DagNode_t *rf_AllocDAGNode(RF_Raid_t *);
+void rf_FreeDAGNode(RF_Raid_t *, RF_DagNode_t *);
 
-RF_DagList_t *rf_AllocDAGList(void);
-void rf_FreeDAGList(RF_DagList_t *);
+RF_DagList_t *rf_AllocDAGList(RF_Raid_t *);
+void rf_FreeDAGList(RF_Raid_t *, RF_DagList_t *);
 
-void *rf_AllocDAGPCache(void);
-void rf_FreeDAGPCache(void *);
+void *rf_AllocDAGPCache(RF_Raid_t *);
+void rf_FreeDAGPCache(RF_Raid_t *, void *);
 
-RF_FuncList_t *rf_AllocFuncList(void);
-void rf_FreeFuncList(RF_FuncList_t *);
+RF_FuncList_t *rf_AllocFuncList(RF_Raid_t *);
+void rf_FreeFuncList(RF_Raid_t *, RF_FuncList_t *);
 
 void *rf_AllocBuffer(RF_Raid_t *, RF_DagHeader_t *, int);
 void *rf_AllocIOBuffer(RF_Raid_t *, int);

Index: src/sys/dev/raidframe/rf_dagffwr.c
diff -u src/sys/dev/raidframe/rf_dagffwr.c:1.36 src/sys/dev/raidframe/rf_dagffwr.c:1.37
--- src/sys/dev/raidframe/rf_dagffwr.c:1.36	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_dagffwr.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_dagffwr.c,v 1.36 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_dagffwr.c,v 1.37 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_dagffwr.c,v 1.36 2019/10/10 03:43:59 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_dagffwr.c,v 1.37 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -201,29 +201,29 @@ rf_CommonCreateLargeWriteDAG(RF_Raid_t *
 	nWndNodes = asmap->numStripeUnitsAccessed;
 
 	for (i = 0; i < nWndNodes; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
 	wndNodes = dag_h->nodes;
 
-	xorNode = rf_AllocDAGNode();
+	xorNode = rf_AllocDAGNode(raidPtr);
 	xorNode->list_next = dag_h->nodes;
 	dag_h->nodes = xorNode;
 
-	wnpNode = rf_AllocDAGNode();
+	wnpNode = rf_AllocDAGNode(raidPtr);
 	wnpNode->list_next = dag_h->nodes;
 	dag_h->nodes = wnpNode;
 
-	blockNode = rf_AllocDAGNode();
+	blockNode = rf_AllocDAGNode(raidPtr);
 	blockNode->list_next = dag_h->nodes;
 	dag_h->nodes = blockNode;
 
-	commitNode = rf_AllocDAGNode();
+	commitNode = rf_AllocDAGNode(raidPtr);
 	commitNode->list_next = dag_h->nodes;
 	dag_h->nodes = commitNode;
 
-	termNode = rf_AllocDAGNode();
+	termNode = rf_AllocDAGNode(raidPtr);
 	termNode->list_next = dag_h->nodes;
 	dag_h->nodes = termNode;
 
@@ -239,7 +239,7 @@ rf_CommonCreateLargeWriteDAG(RF_Raid_t *
 					&eosBuffer, allocList);
 	if (nRodNodes > 0) {
 		for (i = 0; i < nRodNodes; i++) {
-			tmpNode = rf_AllocDAGNode();
+			tmpNode = rf_AllocDAGNode(raidPtr);
 			tmpNode->list_next = dag_h->nodes;
 			dag_h->nodes = tmpNode;
 		}
@@ -591,71 +591,71 @@ rf_CommonCreateSmallWriteDAG(RF_Raid_t *
          * Step 2. create the nodes
          */
 
-	blockNode = rf_AllocDAGNode();
+	blockNode = rf_AllocDAGNode(raidPtr);
 	blockNode->list_next = dag_h->nodes;
 	dag_h->nodes = blockNode;
 
-	commitNode = rf_AllocDAGNode();
+	commitNode = rf_AllocDAGNode(raidPtr);
 	commitNode->list_next = dag_h->nodes;
 	dag_h->nodes = commitNode;
 
 	for (i = 0; i < numDataNodes; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
 	readDataNodes = dag_h->nodes;
 
 	for (i = 0; i < numParityNodes; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
 	readParityNodes = dag_h->nodes;
 
 	for (i = 0; i < numDataNodes; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
 	writeDataNodes = dag_h->nodes;
 
 	for (i = 0; i < numParityNodes; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
 	writeParityNodes = dag_h->nodes;
 
 	for (i = 0; i < numParityNodes; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
 	xorNodes = dag_h->nodes;
 
-	termNode = rf_AllocDAGNode();
+	termNode = rf_AllocDAGNode(raidPtr);
 	termNode->list_next = dag_h->nodes;
 	dag_h->nodes = termNode;
 
 #if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0)
 	if (nfaults == 2) {
 		for (i = 0; i < numParityNodes; i++) {
-			tmpNode = rf_AllocDAGNode();
+			tmpNode = rf_AllocDAGNode(raidPtr);
 			tmpNode->list_next = dag_h->nodes;
 			dag_h->nodes = tmpNode;
 		}
 		readQNodes = dag_h->nodes;
 
 		for (i = 0; i < numParityNodes; i++) {
-			tmpNode = rf_AllocDAGNode();
+			tmpNode = rf_AllocDAGNode(raidPtr);
 			tmpNode->list_next = dag_h->nodes;
 			dag_h->nodes = tmpNode;
 		}
 		writeQNodes = dag_h->nodes;
 
 		for (i = 0; i < numParityNodes; i++) {
-			tmpNode = rf_AllocDAGNode();
+			tmpNode = rf_AllocDAGNode(raidPtr);
 			tmpNode->list_next = dag_h->nodes;
 			dag_h->nodes = tmpNode;
 		}
@@ -1230,28 +1230,28 @@ rf_CreateRaidOneWriteDAG(RF_Raid_t *raid
 	/* total number of nodes = nWndNodes + nWmirNodes + (commit + unblock
 	 * + terminator) */
 	for (i = 0; i < nWndNodes; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
 	wndNode = dag_h->nodes;
 
 	for (i = 0; i < nWmirNodes; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
 	wmirNode = dag_h->nodes;
 
-	commitNode = rf_AllocDAGNode();
+	commitNode = rf_AllocDAGNode(raidPtr);
 	commitNode->list_next = dag_h->nodes;
 	dag_h->nodes = commitNode;
 
-	unblockNode = rf_AllocDAGNode();
+	unblockNode = rf_AllocDAGNode(raidPtr);
 	unblockNode->list_next = dag_h->nodes;
 	dag_h->nodes = unblockNode;
 
-	termNode = rf_AllocDAGNode();
+	termNode = rf_AllocDAGNode(raidPtr);
 	termNode->list_next = dag_h->nodes;
 	dag_h->nodes = termNode;
 
Index: src/sys/dev/raidframe/rf_netbsd.h
diff -u src/sys/dev/raidframe/rf_netbsd.h:1.36 src/sys/dev/raidframe/rf_netbsd.h:1.37
--- src/sys/dev/raidframe/rf_netbsd.h:1.36	Fri Jul 23 00:26:19 2021
+++ src/sys/dev/raidframe/rf_netbsd.h	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_netbsd.h,v 1.36 2021/07/23 00:26:19 oster Exp $	*/
+/*	$NetBSD: rf_netbsd.h,v 1.37 2021/07/23 00:54:45 oster Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -54,10 +54,9 @@ struct raidcinfo {
 
 
 /* a little structure to serve as a container for all the various
-   global pools used in RAIDframe */
+   per-device pools used in RAIDframe */
 
 struct RF_Pools_s {
-	struct pool alloclist;   /* AllocList */
 	struct pool asm_hdr;     /* Access Stripe Map Header */
 	struct pool asmap;       /* Access Stripe Map */
 	struct pool asmhle;      /* Access Stripe Map Header List Elements */
@@ -83,8 +82,35 @@ struct RF_Pools_s {
 	struct pool vple;        /* VoidPointer List Elements */
 };
 
-extern struct RF_Pools_s rf_pools;
-void rf_pool_init(struct pool *, size_t, const char *, size_t, size_t);
+#define RF_MAX_POOLNAMELEN 30
+struct RF_PoolNames_s {
+	char asm_hdr[RF_MAX_POOLNAMELEN];     /* Access Stripe Map Header */
+	char asmap[RF_MAX_POOLNAMELEN];       /* Access Stripe Map */
+	char asmhle[RF_MAX_POOLNAMELEN];      /* Access Stripe Map Header List Elements */
+	char bufio[RF_MAX_POOLNAMELEN];       /* Buffer IO Pool */
+	char callbackf[RF_MAX_POOLNAMELEN];   /* Callback function descriptors */
+	char callbackv[RF_MAX_POOLNAMELEN];   /* Callback value descriptors */
+	char dagh[RF_MAX_POOLNAMELEN];        /* DAG headers */
+	char dagnode[RF_MAX_POOLNAMELEN];     /* DAG nodes */
+	char daglist[RF_MAX_POOLNAMELEN];     /* DAG lists */
+	char dagpcache[RF_MAX_POOLNAMELEN];   /* DAG pointer/param cache */
+	char dqd[RF_MAX_POOLNAMELEN];         /* Disk Queue Data */
+	char fss[RF_MAX_POOLNAMELEN];         /* Failed Stripe Structures */
+	char funclist[RF_MAX_POOLNAMELEN];    /* Function Lists */
+	char mcpair[RF_MAX_POOLNAMELEN];      /* Mutex/Cond Pairs */
+	char pda[RF_MAX_POOLNAMELEN];         /* Physical Disk Access structures */
+	char pss[RF_MAX_POOLNAMELEN];         /* Parity Stripe Status */
+	char pss_issued[RF_MAX_POOLNAMELEN];  /* Parity Stripe Status Issued */
+	char rad[RF_MAX_POOLNAMELEN];         /* Raid Access Descriptors */
+	char reconbuffer[RF_MAX_POOLNAMELEN]; /* reconstruction buffer (header) pool */
+	char revent[RF_MAX_POOLNAMELEN];      /* reconstruct events */
+	char stripelock[RF_MAX_POOLNAMELEN];  /* StripeLock */
+	char vfple[RF_MAX_POOLNAMELEN];       /* VoidFunctionPtr List Elements */
+	char vple[RF_MAX_POOLNAMELEN];        /* VoidPointer List Elements */
+};
+
+extern struct pool rf_alloclist_pool;   /* AllocList */
+void rf_pool_init(RF_Raid_t *, char *, struct pool *, size_t, const char *, size_t, size_t);
 int rf_buf_queue_check(RF_Raid_t *);
 
 /* XXX probably belongs in a different .h file. */
Index: src/sys/dev/raidframe/rf_parityscan.c
diff -u src/sys/dev/raidframe/rf_parityscan.c:1.36 src/sys/dev/raidframe/rf_parityscan.c:1.37
--- src/sys/dev/raidframe/rf_parityscan.c:1.36	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_parityscan.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_parityscan.c,v 1.36 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_parityscan.c,v 1.37 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -33,7 +33,7 @@
  ****************************************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_parityscan.c,v 1.36 2019/10/10 03:43:59 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_parityscan.c,v 1.37 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -136,7 +136,7 @@ rf_RewriteParityRange(RF_Raid_t *raidPtr
 			printf("Bad rc=%d from VerifyParity in RewriteParity\n", rc);
 			ret_val = 1;
 		}
-		rf_FreeAccessStripeMap(asm_h);
+		rf_FreeAccessStripeMap(raidPtr, asm_h);
 	}
 	return (ret_val);
 }
@@ -218,7 +218,7 @@ rf_VerifyParityBasic(RF_Raid_t *raidPtr,
 
 	retcode = RF_PARITY_OKAY;
 
-	mcpair = rf_AllocMCPair();
+	mcpair = rf_AllocMCPair(raidPtr);
 	rf_MakeAllocList(alloclist);
 	bf = RF_MallocAndAdd(numbytes
 	    * (layoutPtr->numDataCol + layoutPtr->numParityCol), alloclist);
@@ -329,10 +329,10 @@ rf_VerifyParityBasic(RF_Raid_t *raidPtr,
 			retcode = RF_PARITY_CORRECTED;
 	}
 out:
-	rf_FreeAccessStripeMap(asm_h);
+	rf_FreeAccessStripeMap(raidPtr, asm_h);
 	rf_FreeAllocList(alloclist);
 	rf_FreeDAG(rd_dag_h);
-	rf_FreeMCPair(mcpair);
+	rf_FreeMCPair(raidPtr, mcpair);
 	return (retcode);
 }
 
@@ -428,7 +428,7 @@ rf_MakeSimpleDAG(RF_Raid_t *raidPtr, int
 
 	/* grab a DAG header... */
 
-	dag_h = rf_AllocDAGHeader();
+	dag_h = rf_AllocDAGHeader(raidPtr);
 	dag_h->raidPtr = (void *) raidPtr;
 	dag_h->allocList = NULL;/* we won't use this alloc list */
 	dag_h->status = rf_enable;
@@ -444,21 +444,21 @@ rf_MakeSimpleDAG(RF_Raid_t *raidPtr, int
 	 * node */
 
 	for (i = 0; i < nNodes; i++) {
-		tmpNode = rf_AllocDAGNode();
+		tmpNode = rf_AllocDAGNode(raidPtr);
 		tmpNode->list_next = dag_h->nodes;
 		dag_h->nodes = tmpNode;
 	}
 	nodes = dag_h->nodes;
 
-	blockNode = rf_AllocDAGNode();
+	blockNode = rf_AllocDAGNode(raidPtr);
 	blockNode->list_next = dag_h->nodes;
 	dag_h->nodes = blockNode;
 
-	unblockNode = rf_AllocDAGNode();
+	unblockNode = rf_AllocDAGNode(raidPtr);
 	unblockNode->list_next = dag_h->nodes;
 	dag_h->nodes = unblockNode;
 
-	termNode = rf_AllocDAGNode();
+	termNode = rf_AllocDAGNode(raidPtr);
 	termNode->list_next = dag_h->nodes;
 	dag_h->nodes = termNode;
 

Index: src/sys/dev/raidframe/rf_dagutils.c
diff -u src/sys/dev/raidframe/rf_dagutils.c:1.57 src/sys/dev/raidframe/rf_dagutils.c:1.58
--- src/sys/dev/raidframe/rf_dagutils.c:1.57	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_dagutils.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_dagutils.c,v 1.57 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_dagutils.c,v 1.58 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -33,7 +33,7 @@
  *****************************************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_dagutils.c,v 1.57 2019/10/10 03:43:59 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_dagutils.c,v 1.58 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -99,7 +99,8 @@ rf_InitNode(RF_DagNode_t *node, RF_NodeS
 {
 	void  **ptrs;
 	int     nptrs;
-
+	RF_Raid_t *raidPtr;
+	
 	if (nAnte > RF_MAX_ANTECEDENTS)
 		RF_PANIC();
 	node->status = initstatus;
@@ -122,6 +123,9 @@ rf_InitNode(RF_DagNode_t *node, RF_NodeS
 	node->big_dag_params = NULL;
 	node->visited = 0;
 
+	RF_ASSERT(hdr != NULL);
+	raidPtr = hdr->raidPtr;
+	
 	/* allocate all the pointers with one call to malloc */
 	nptrs = nSucc + nAnte + nResult + nSucc;
 
@@ -139,7 +143,7 @@ rf_InitNode(RF_DagNode_t *node, RF_NodeS
 	         */
 		ptrs = (void **) node->dag_ptrs;
 	} else if (nptrs <= (RF_DAGPCACHE_SIZE / sizeof(RF_DagNode_t *))) {
-		node->big_dag_ptrs = rf_AllocDAGPCache();
+		node->big_dag_ptrs = rf_AllocDAGPCache(raidPtr);
 		ptrs = (void **) node->big_dag_ptrs;
 	} else {
 		ptrs = RF_MallocAndAdd(nptrs * sizeof(*ptrs), alist);
@@ -153,7 +157,7 @@ rf_InitNode(RF_DagNode_t *node, RF_NodeS
 		if (nParam <= RF_DAG_PARAMCACHESIZE) {
 			node->params = (RF_DagParam_t *) node->dag_params;
 		} else if (nParam <= (RF_DAGPCACHE_SIZE / sizeof(RF_DagParam_t))) {
-			node->big_dag_params = rf_AllocDAGPCache();
+			node->big_dag_params = rf_AllocDAGPCache(raidPtr);
 			node->params = node->big_dag_params;
 		} else {
 			node->params = RF_MallocAndAdd(
@@ -179,26 +183,30 @@ rf_FreeDAG(RF_DagHeader_t *dag_h)
 	RF_PhysDiskAddr_t *pda;
 	RF_DagNode_t *tmpnode;
 	RF_DagHeader_t *nextDag;
+	RF_Raid_t *raidPtr;
 
+	if (dag_h)
+		raidPtr = dag_h->raidPtr;
+	
 	while (dag_h) {
 		nextDag = dag_h->next;
 		rf_FreeAllocList(dag_h->allocList);
 		for (asmap = dag_h->asmList; asmap;) {
 			t_asmap = asmap;
 			asmap = asmap->next;
-			rf_FreeAccessStripeMap(t_asmap);
+			rf_FreeAccessStripeMap(raidPtr, t_asmap);
 		}
 		while (dag_h->pda_cleanup_list) {
 			pda = dag_h->pda_cleanup_list;
 			dag_h->pda_cleanup_list = dag_h->pda_cleanup_list->next;
-			rf_FreePhysDiskAddr(pda);
+			rf_FreePhysDiskAddr(raidPtr, pda);
 		}
 		while (dag_h->nodes) {
 			tmpnode = dag_h->nodes;
 			dag_h->nodes = dag_h->nodes->list_next;
-			rf_FreeDAGNode(tmpnode);
+			rf_FreeDAGNode(raidPtr, tmpnode);
 		}
-		rf_FreeDAGHeader(dag_h);
+		rf_FreeDAGHeader(raidPtr, dag_h);
 		dag_h = nextDag;
 	}
 }
@@ -223,98 +231,103 @@ rf_FreeDAG(RF_DagHeader_t *dag_h)
 
 static void rf_ShutdownDAGs(void *);
 static void
-rf_ShutdownDAGs(void *ignored)
+rf_ShutdownDAGs(void *arg)
 {
-	pool_destroy(&rf_pools.dagh);
-	pool_destroy(&rf_pools.dagnode);
-	pool_destroy(&rf_pools.daglist);
-	pool_destroy(&rf_pools.dagpcache);
-	pool_destroy(&rf_pools.funclist);
+	RF_Raid_t *raidPtr;
+
+	raidPtr = (RF_Raid_t *) arg;
+
+	pool_destroy(&raidPtr->pools.dagh);
+	pool_destroy(&raidPtr->pools.dagnode);
+	pool_destroy(&raidPtr->pools.daglist);
+	pool_destroy(&raidPtr->pools.dagpcache);
+	pool_destroy(&raidPtr->pools.funclist);
 }
 
 int
-rf_ConfigureDAGs(RF_ShutdownList_t **listp)
+rf_ConfigureDAGs(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
+		 RF_Config_t *cfgPtr)
 {
 
-	rf_pool_init(&rf_pools.dagnode, sizeof(RF_DagNode_t),
-		     "rf_dagnode_pl", RF_MIN_FREE_DAGNODE, RF_MAX_FREE_DAGNODE);
-	rf_pool_init(&rf_pools.dagh, sizeof(RF_DagHeader_t),
-		     "rf_dagh_pl", RF_MIN_FREE_DAGH, RF_MAX_FREE_DAGH);
-	rf_pool_init(&rf_pools.daglist, sizeof(RF_DagList_t),
-		     "rf_daglist_pl", RF_MIN_FREE_DAGLIST, RF_MAX_FREE_DAGLIST);
-	rf_pool_init(&rf_pools.dagpcache, RF_DAGPCACHE_SIZE,
-		     "rf_dagpcache_pl", RF_MIN_FREE_DAGPCACHE, RF_MAX_FREE_DAGPCACHE);
-	rf_pool_init(&rf_pools.funclist, sizeof(RF_FuncList_t),
-		     "rf_funclist_pl", RF_MIN_FREE_FUNCLIST, RF_MAX_FREE_FUNCLIST);
-	rf_ShutdownCreate(listp, rf_ShutdownDAGs, NULL);
+	rf_pool_init(raidPtr, raidPtr->poolNames.dagnode, &raidPtr->pools.dagnode, sizeof(RF_DagNode_t),
+		     "dagnode", RF_MIN_FREE_DAGNODE, RF_MAX_FREE_DAGNODE);
+	rf_pool_init(raidPtr, raidPtr->poolNames.dagh, &raidPtr->pools.dagh, sizeof(RF_DagHeader_t),
+		     "dagh", RF_MIN_FREE_DAGH, RF_MAX_FREE_DAGH);
+	rf_pool_init(raidPtr, raidPtr->poolNames.daglist, &raidPtr->pools.daglist, sizeof(RF_DagList_t),
+		     "daglist", RF_MIN_FREE_DAGLIST, RF_MAX_FREE_DAGLIST);
+	rf_pool_init(raidPtr, raidPtr->poolNames.dagpcache, &raidPtr->pools.dagpcache, RF_DAGPCACHE_SIZE,
+		     "dagpcache", RF_MIN_FREE_DAGPCACHE, RF_MAX_FREE_DAGPCACHE);
+	rf_pool_init(raidPtr, raidPtr->poolNames.funclist, &raidPtr->pools.funclist, sizeof(RF_FuncList_t),
+		     "funclist", RF_MIN_FREE_FUNCLIST, RF_MAX_FREE_FUNCLIST);
+	rf_ShutdownCreate(listp, rf_ShutdownDAGs, raidPtr);
 
 	return (0);
 }
 
 RF_DagHeader_t *
-rf_AllocDAGHeader(void)
+rf_AllocDAGHeader(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.dagh, PR_WAITOK | PR_ZERO);
+	return pool_get(&raidPtr->pools.dagh, PR_WAITOK | PR_ZERO);
 }
 
 void
-rf_FreeDAGHeader(RF_DagHeader_t * dh)
+rf_FreeDAGHeader(RF_Raid_t *raidPtr, RF_DagHeader_t * dh)
 {
-	pool_put(&rf_pools.dagh, dh);
+	pool_put(&raidPtr->pools.dagh, dh);
 }
 
 RF_DagNode_t *
-rf_AllocDAGNode(void)
+rf_AllocDAGNode(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.dagnode, PR_WAITOK | PR_ZERO);
+	return pool_get(&raidPtr->pools.dagnode, PR_WAITOK | PR_ZERO);
 }
 
 void
-rf_FreeDAGNode(RF_DagNode_t *node)
+rf_FreeDAGNode(RF_Raid_t *raidPtr, RF_DagNode_t *node)
 {
 	if (node->big_dag_ptrs) {
-		rf_FreeDAGPCache(node->big_dag_ptrs);
+		rf_FreeDAGPCache(raidPtr, node->big_dag_ptrs);
 	}
 	if (node->big_dag_params) {
-		rf_FreeDAGPCache(node->big_dag_params);
+		rf_FreeDAGPCache(raidPtr, node->big_dag_params);
 	}
-	pool_put(&rf_pools.dagnode, node);
+	pool_put(&raidPtr->pools.dagnode, node);
 }
 
 RF_DagList_t *
-rf_AllocDAGList(void)
+rf_AllocDAGList(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.daglist, PR_WAITOK | PR_ZERO);
+	return pool_get(&raidPtr->pools.daglist, PR_WAITOK | PR_ZERO);
 }
 
 void
-rf_FreeDAGList(RF_DagList_t *dagList)
+rf_FreeDAGList(RF_Raid_t *raidPtr, RF_DagList_t *dagList)
 {
-	pool_put(&rf_pools.daglist, dagList);
+	pool_put(&raidPtr->pools.daglist, dagList);
 }
 
 void *
-rf_AllocDAGPCache(void)
+rf_AllocDAGPCache(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.dagpcache, PR_WAITOK | PR_ZERO);
+	return pool_get(&raidPtr->pools.dagpcache, PR_WAITOK | PR_ZERO);
 }
 
 void
-rf_FreeDAGPCache(void *p)
+rf_FreeDAGPCache(RF_Raid_t *raidPtr, void *p)
 {
-	pool_put(&rf_pools.dagpcache, p);
+	pool_put(&raidPtr->pools.dagpcache, p);
 }
 
 RF_FuncList_t *
-rf_AllocFuncList(void)
+rf_AllocFuncList(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.funclist, PR_WAITOK | PR_ZERO);
+	return pool_get(&raidPtr->pools.funclist, PR_WAITOK | PR_ZERO);
 }
 
 void
-rf_FreeFuncList(RF_FuncList_t *funcList)
+rf_FreeFuncList(RF_Raid_t *raidPtr, RF_FuncList_t *funcList)
 {
-	pool_put(&rf_pools.funclist, funcList);
+	pool_put(&raidPtr->pools.funclist, funcList);
 }
 
 /* allocates a stripe buffer -- a buffer large enough to hold all the data
@@ -340,7 +353,7 @@ rf_AllocStripeBuffer(RF_Raid_t *raidPtr,
 			vple = raidPtr->stripebuf;
 			raidPtr->stripebuf = vple->next;
 			p = vple->p;
-			rf_FreeVPListElem(vple);
+			rf_FreeVPListElem(raidPtr, vple);
 			raidPtr->stripebuf_count--;
 		} else {
 #ifdef DIAGNOSTIC
@@ -357,7 +370,7 @@ rf_AllocStripeBuffer(RF_Raid_t *raidPtr,
 	}
 	memset(p, 0, raidPtr->numCol * (raidPtr->Layout.sectorsPerStripeUnit << raidPtr->logBytesPerSector));
 
-	vple = rf_AllocVPListElem();
+	vple = rf_AllocVPListElem(raidPtr);
 	vple->p = p;
         vple->next = dag_h->desc->stripebufs;
         dag_h->desc->stripebufs = vple;
@@ -377,7 +390,7 @@ rf_FreeStripeBuffer(RF_Raid_t *raidPtr, 
 		raidPtr->stripebuf_count++;
 	} else {
 		free(vple->p, M_RAIDFRAME);
-		rf_FreeVPListElem(vple);
+		rf_FreeVPListElem(raidPtr, vple);
 	}
 	rf_unlock_mutex2(raidPtr->mutex);
 }
@@ -393,7 +406,7 @@ rf_AllocBuffer(RF_Raid_t *raidPtr, RF_Da
 	void *p;
 
 	p = rf_AllocIOBuffer(raidPtr, size);
-	vple = rf_AllocVPListElem();
+	vple = rf_AllocVPListElem(raidPtr);
 	vple->p = p;
 	vple->next = dag_h->desc->iobufs;
 	dag_h->desc->iobufs = vple;
@@ -419,7 +432,7 @@ rf_AllocIOBuffer(RF_Raid_t *raidPtr, int
 			vple = raidPtr->iobuf;
 			raidPtr->iobuf = vple->next;
 			p = vple->p;
-			rf_FreeVPListElem(vple);
+			rf_FreeVPListElem(raidPtr, vple);
 			raidPtr->iobuf_count--;
 		} else {
 #ifdef DIAGNOSTIC
@@ -450,7 +463,7 @@ rf_FreeIOBuffer(RF_Raid_t *raidPtr, RF_V
 		raidPtr->iobuf_count++;
 	} else {
 		free(vple->p, M_RAIDFRAME);
-		rf_FreeVPListElem(vple);
+		rf_FreeVPListElem(raidPtr, vple);
 	}
 	rf_unlock_mutex2(raidPtr->mutex);
 }

Index: src/sys/dev/raidframe/rf_diskqueue.c
diff -u src/sys/dev/raidframe/rf_diskqueue.c:1.59 src/sys/dev/raidframe/rf_diskqueue.c:1.60
--- src/sys/dev/raidframe/rf_diskqueue.c:1.59	Fri Jul 23 00:26:19 2021
+++ src/sys/dev/raidframe/rf_diskqueue.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_diskqueue.c,v 1.59 2021/07/23 00:26:19 oster Exp $	*/
+/*	$NetBSD: rf_diskqueue.c,v 1.60 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -66,7 +66,7 @@
  ****************************************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_diskqueue.c,v 1.59 2021/07/23 00:26:19 oster Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_diskqueue.c,v 1.60 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -193,21 +193,27 @@ rf_ConfigureDiskQueue(RF_Raid_t *raidPtr
 }
 
 static void
-rf_ShutdownDiskQueueSystem(void *ignored)
+rf_ShutdownDiskQueueSystem(void *arg)
 {
-	pool_destroy(&rf_pools.dqd);
-	pool_destroy(&rf_pools.bufio);
+	RF_Raid_t *raidPtr;
+
+	raidPtr = (RF_Raid_t *) arg;
+	
+	pool_destroy(&raidPtr->pools.dqd);
+	pool_destroy(&raidPtr->pools.bufio);
 }
 
 int
-rf_ConfigureDiskQueueSystem(RF_ShutdownList_t **listp)
+rf_ConfigureDiskQueueSystem(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
+			    RF_Config_t *cfgPtr)
+
 {
 
-	rf_pool_init(&rf_pools.dqd, sizeof(RF_DiskQueueData_t),
-		     "rf_dqd_pl", RF_MIN_FREE_DQD, RF_MAX_FREE_DQD);
-	rf_pool_init(&rf_pools.bufio, sizeof(buf_t),
-		     "rf_bufio_pl", RF_MIN_FREE_BUFIO, RF_MAX_FREE_BUFIO);
-	rf_ShutdownCreate(listp, rf_ShutdownDiskQueueSystem, NULL);
+	rf_pool_init(raidPtr, raidPtr->poolNames.dqd, &raidPtr->pools.dqd, sizeof(RF_DiskQueueData_t),
+		     "dqd", RF_MIN_FREE_DQD, RF_MAX_FREE_DQD);
+	rf_pool_init(raidPtr, raidPtr->poolNames.bufio, &raidPtr->pools.bufio, sizeof(buf_t),
+		     "bufio", RF_MIN_FREE_BUFIO, RF_MAX_FREE_BUFIO);
+	rf_ShutdownCreate(listp, rf_ShutdownDiskQueueSystem, raidPtr);
 
 	return (0);
 }
@@ -377,7 +383,7 @@ rf_CreateDiskQueueData(RF_IoType_t typ, 
 {
 	RF_DiskQueueData_t *p;
 
-	p = pool_get(&rf_pools.dqd, PR_WAITOK | PR_ZERO);
+	p = pool_get(&raidPtr->pools.dqd, PR_WAITOK | PR_ZERO);
 	KASSERT(p != NULL);
 
 	/* Obtain a buffer from our own pool.  It is possible for the
@@ -386,7 +392,7 @@ rf_CreateDiskQueueData(RF_IoType_t typ, 
 	   doesn't have a good way to recover if memory allocation
 	   fails here.
 	*/
-	p->bp = pool_get(&rf_pools.bufio, PR_WAITOK | PR_ZERO);
+	p->bp = pool_get(&raidPtr->pools.bufio, PR_WAITOK | PR_ZERO);
 	KASSERT(p->bp != NULL);
 	
 	buf_init(p->bp);
@@ -416,6 +422,6 @@ rf_CreateDiskQueueData(RF_IoType_t typ, 
 void
 rf_FreeDiskQueueData(RF_DiskQueueData_t *p)
 {
-	pool_put(&rf_pools.bufio, p->bp);
-	pool_put(&rf_pools.dqd, p);
+	pool_put(&p->raidPtr->pools.bufio, p->bp);
+	pool_put(&p->raidPtr->pools.dqd, p);
 }
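
Each pool-owning module converted here follows the same shape as rf_ConfigureDiskQueueSystem() above: the Configure routine now takes (listp, raidPtr, cfgPtr), initializes its pools as per-RAID pools via rf_pool_init(), and registers a shutdown hook that is handed raidPtr instead of NULL so it can destroy those same pools later. A minimal sketch of that shape, using a hypothetical pool named "foo" (the member names, element type and min/max constants are placeholders, not part of this commit):

static void
rf_ShutdownFooModule(void *arg)
{
	RF_Raid_t *raidPtr = (RF_Raid_t *) arg;

	/* the shutdown hook receives the raidPtr it was registered with */
	pool_destroy(&raidPtr->pools.foo);
}

int
rf_ConfigureFooModule(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
		      RF_Config_t *cfgPtr)
{
	/* per-RAID pool; the wchan buffer lives in raidPtr->poolNames */
	rf_pool_init(raidPtr, raidPtr->poolNames.foo, &raidPtr->pools.foo,
		     sizeof(RF_FooThing_t), "foo",
		     RF_MIN_FREE_FOO, RF_MAX_FREE_FOO);
	rf_ShutdownCreate(listp, rf_ShutdownFooModule, raidPtr);
	return (0);
}

Note that rf_FreeDiskQueueData() above did not need a raidPtr argument at all: an RF_DiskQueueData_t already carries its raidPtr, so the free path reaches the per-RAID pools through p->raidPtr.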

Index: src/sys/dev/raidframe/rf_diskqueue.h
diff -u src/sys/dev/raidframe/rf_diskqueue.h:1.26 src/sys/dev/raidframe/rf_diskqueue.h:1.27
--- src/sys/dev/raidframe/rf_diskqueue.h:1.26	Fri Jun 19 19:29:39 2020
+++ src/sys/dev/raidframe/rf_diskqueue.h	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_diskqueue.h,v 1.26 2020/06/19 19:29:39 jdolecek Exp $	*/
+/*	$NetBSD: rf_diskqueue.h,v 1.27 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -131,7 +131,7 @@ struct RF_DiskQueue_s {
   (RF_QUEUE_EMPTY(_q_) || \
     (!RF_QUEUE_FULL(_q_) && ((_r_)->priority >= (_q_)->curPriority)))
 
-int rf_ConfigureDiskQueueSystem(RF_ShutdownList_t **);
+int rf_ConfigureDiskQueueSystem(RF_ShutdownList_t **, RF_Raid_t *, RF_Config_t *);
 int rf_ConfigureDiskQueues(RF_ShutdownList_t **, RF_Raid_t *, RF_Config_t *);
 void rf_DiskIOEnqueue(RF_DiskQueue_t *, RF_DiskQueueData_t *, int);
 void rf_DiskIOComplete(RF_DiskQueue_t *, RF_DiskQueueData_t *, int);
Index: src/sys/dev/raidframe/rf_reconbuffer.c
diff -u src/sys/dev/raidframe/rf_reconbuffer.c:1.26 src/sys/dev/raidframe/rf_reconbuffer.c:1.27
--- src/sys/dev/raidframe/rf_reconbuffer.c:1.26	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_reconbuffer.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_reconbuffer.c,v 1.26 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_reconbuffer.c,v 1.27 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -33,7 +33,7 @@
  ***************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_reconbuffer.c,v 1.26 2019/10/10 03:43:59 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_reconbuffer.c,v 1.27 2021/07/23 00:54:45 oster Exp $");
 
 #include "rf_raid.h"
 #include "rf_reconbuffer.h"
@@ -233,8 +233,8 @@ rf_SubmitReconBufferBasic(RF_ReconBuffer
 			RF_PANIC();
 		}
 		pssPtr->flags |= RF_PSS_BUFFERWAIT;
-		cb = rf_AllocCallbackValueDesc();/* append to buf wait list in
-						 * recon ctrl structure */
+		cb = rf_AllocCallbackValueDesc(raidPtr); /* append to buf wait list in
+							  * recon ctrl structure */
 		cb->col = rbuf->col;
 		cb->v = rbuf->parityStripeID;
 		cb->next = NULL;
@@ -413,7 +413,7 @@ rf_ReleaseFloatingReconBuffer(RF_Raid_t 
 		rcPtr->bufferWaitList = cb->next;
 		rf_CauseReconEvent(raidPtr, cb->col, (void *) 1, RF_REVENT_BUFCLEAR);	/* arg==1 => we've
 												 * committed a buffer */
-		rf_FreeCallbackValueDesc(cb);
+		rf_FreeCallbackValueDesc(raidPtr, cb);
 		raidPtr->procsInBufWait--;
 	} else {
 		rbuf->next = rcPtr->floatingRbufs;

Index: src/sys/dev/raidframe/rf_driver.c
diff -u src/sys/dev/raidframe/rf_driver.c:1.137 src/sys/dev/raidframe/rf_driver.c:1.138
--- src/sys/dev/raidframe/rf_driver.c:1.137	Wed May 26 06:11:50 2021
+++ src/sys/dev/raidframe/rf_driver.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_driver.c,v 1.137 2021/05/26 06:11:50 mrg Exp $	*/
+/*	$NetBSD: rf_driver.c,v 1.138 2021/07/23 00:54:45 oster Exp $	*/
 /*-
  * Copyright (c) 1999 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -66,7 +66,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_driver.c,v 1.137 2021/05/26 06:11:50 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_driver.c,v 1.138 2021/07/23 00:54:45 oster Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_raid_diagnostic.h"
@@ -128,7 +128,7 @@ static void rf_ConfigureDebug(RF_Config_
 static void set_debug_option(char *name, long val);
 static void rf_UnconfigureArray(void);
 static void rf_ShutdownRDFreeList(void *);
-static int rf_ConfigureRDFreeList(RF_ShutdownList_t **);
+static int rf_ConfigureRDFreeList(RF_ShutdownList_t **, RF_Raid_t *, RF_Config_t *);
 
 rf_declare_mutex2(rf_printf_mutex);	/* debug only:  avoids interleaved
 					 * printfs by different stripes */
@@ -147,7 +147,7 @@ static rf_declare_mutex2(configureMutex)
 static RF_ShutdownList_t *globalShutdown;	/* non array-specific
 						 * stuff */
 
-static int rf_ConfigureRDFreeList(RF_ShutdownList_t ** listp);
+static int rf_ConfigureRDFreeList(RF_ShutdownList_t ** listp, RF_Raid_t *raidPtr, RF_Config_t *cfgPtr);
 static int rf_AllocEmergBuffers(RF_Raid_t *);
 static void rf_FreeEmergBuffers(RF_Raid_t *);
 static void rf_destroy_mutex_cond(RF_Raid_t *);
@@ -309,7 +309,6 @@ rf_Configure(RF_Raid_t *raidPtr, RF_Conf
 		rf_init_mutex2(rf_printf_mutex, IPL_VM);
 
 		/* initialize globals */
-
 		DO_INIT_CONFIGURE(rf_ConfigureAllocList);
 
 		/*
@@ -321,19 +320,9 @@ rf_Configure(RF_Raid_t *raidPtr, RF_Conf
 #if RF_ACC_TRACE > 0
 		DO_INIT_CONFIGURE(rf_ConfigureAccessTrace);
 #endif
-		DO_INIT_CONFIGURE(rf_ConfigureMapModule);
-		DO_INIT_CONFIGURE(rf_ConfigureReconEvent);
-		DO_INIT_CONFIGURE(rf_ConfigureCallback);
-		DO_INIT_CONFIGURE(rf_ConfigureRDFreeList);
 		DO_INIT_CONFIGURE(rf_ConfigureNWayXor);
-		DO_INIT_CONFIGURE(rf_ConfigureStripeLockFreeList);
-		DO_INIT_CONFIGURE(rf_ConfigureMCPair);
-		DO_INIT_CONFIGURE(rf_ConfigureDAGs);
 		DO_INIT_CONFIGURE(rf_ConfigureDAGFuncs);
-		DO_INIT_CONFIGURE(rf_ConfigureReconstruction);
 		DO_INIT_CONFIGURE(rf_ConfigureCopyback);
-		DO_INIT_CONFIGURE(rf_ConfigureDiskQueueSystem);
-		DO_INIT_CONFIGURE(rf_ConfigurePSStatus);
 		isconfigged = 1;
 	}
 	rf_unlock_mutex2(configureMutex);
@@ -358,6 +347,17 @@ rf_Configure(RF_Raid_t *raidPtr, RF_Conf
 	raidPtr->status = rf_rs_optimal;
 	raidPtr->reconControl = NULL;
 
+	DO_RAID_INIT_CONFIGURE(rf_ConfigureMapModule);
+	DO_RAID_INIT_CONFIGURE(rf_ConfigureReconEvent);
+	DO_RAID_INIT_CONFIGURE(rf_ConfigureCallback);
+	DO_RAID_INIT_CONFIGURE(rf_ConfigureRDFreeList);
+	DO_RAID_INIT_CONFIGURE(rf_ConfigureStripeLockFreeList);
+	DO_RAID_INIT_CONFIGURE(rf_ConfigureMCPair);
+	DO_RAID_INIT_CONFIGURE(rf_ConfigureDAGs);
+	DO_RAID_INIT_CONFIGURE(rf_ConfigureReconstruction);
+	DO_RAID_INIT_CONFIGURE(rf_ConfigureDiskQueueSystem);
+	DO_RAID_INIT_CONFIGURE(rf_ConfigurePSStatus);
+	
 	DO_RAID_INIT_CONFIGURE(rf_ConfigureEngine);
 	DO_RAID_INIT_CONFIGURE(rf_ConfigureStripeLocks);
 
@@ -379,6 +379,9 @@ rf_Configure(RF_Raid_t *raidPtr, RF_Conf
 
 	DO_RAID_INIT_CONFIGURE(rf_ConfigureLayout);
 
+
+	
+	
 	/* Initialize per-RAID PSS bits */
 	rf_InitPSStatus(raidPtr);
 
@@ -491,7 +494,7 @@ rf_AllocEmergBuffers(RF_Raid_t *raidPtr)
 				 raidPtr->logBytesPerSector,
 				 M_RAIDFRAME, M_WAITOK);
 		if (tmpbuf) {
-			vple = rf_AllocVPListElem();
+			vple = rf_AllocVPListElem(raidPtr);
 			vple->p= tmpbuf;
 			vple->next = raidPtr->iobuf;
 			raidPtr->iobuf = vple;
@@ -510,7 +513,7 @@ rf_AllocEmergBuffers(RF_Raid_t *raidPtr)
                                  raidPtr->logBytesPerSector),
                                  M_RAIDFRAME, M_WAITOK);
                 if (tmpbuf) {
-                        vple = rf_AllocVPListElem();
+                        vple = rf_AllocVPListElem(raidPtr);
                         vple->p= tmpbuf;
                         vple->next = raidPtr->stripebuf;
                         raidPtr->stripebuf = vple;
@@ -535,7 +538,7 @@ rf_FreeEmergBuffers(RF_Raid_t *raidPtr)
 		tmp = raidPtr->iobuf;
 		raidPtr->iobuf = raidPtr->iobuf->next;
 		free(tmp->p, M_RAIDFRAME);
-		rf_FreeVPListElem(tmp);
+		rf_FreeVPListElem(raidPtr,tmp);
 	}
 
 	/* Free the emergency stripe buffers */
@@ -543,24 +546,29 @@ rf_FreeEmergBuffers(RF_Raid_t *raidPtr)
 		tmp = raidPtr->stripebuf;
 		raidPtr->stripebuf = raidPtr->stripebuf->next;
 		free(tmp->p, M_RAIDFRAME);
-		rf_FreeVPListElem(tmp);
+		rf_FreeVPListElem(raidPtr, tmp);
 	}
 }
 
 
 static void
-rf_ShutdownRDFreeList(void *ignored)
+rf_ShutdownRDFreeList(void *arg)
 {
-	pool_destroy(&rf_pools.rad);
+	RF_Raid_t *raidPtr;
+
+	raidPtr = (RF_Raid_t *) arg;
+	
+	pool_destroy(&raidPtr->pools.rad);
 }
 
 static int
-rf_ConfigureRDFreeList(RF_ShutdownList_t **listp)
+rf_ConfigureRDFreeList(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
+		       RF_Config_t *cfgPtr)
 {
 
-	rf_pool_init(&rf_pools.rad, sizeof(RF_RaidAccessDesc_t),
-		     "rf_rad_pl", RF_MIN_FREE_RAD, RF_MAX_FREE_RAD);
-	rf_ShutdownCreate(listp, rf_ShutdownRDFreeList, NULL);
+	rf_pool_init(raidPtr, raidPtr->poolNames.rad, &raidPtr->pools.rad, sizeof(RF_RaidAccessDesc_t),
+		     "rad", RF_MIN_FREE_RAD, RF_MAX_FREE_RAD);
+	rf_ShutdownCreate(listp, rf_ShutdownRDFreeList, raidPtr);
 	return (0);
 }
 
@@ -572,7 +580,7 @@ rf_AllocRaidAccDesc(RF_Raid_t *raidPtr, 
 {
 	RF_RaidAccessDesc_t *desc;
 
-	desc = pool_get(&rf_pools.rad, PR_WAITOK);
+	desc = pool_get(&raidPtr->pools.rad, PR_WAITOK);
 
 	rf_lock_mutex2(raidPtr->rad_lock);
 	if (raidPtr->waitShutdown) {
@@ -582,7 +590,7 @@ rf_AllocRaidAccDesc(RF_Raid_t *raidPtr, 
 	         */
 
 		rf_unlock_mutex2(raidPtr->rad_lock);
-		pool_put(&rf_pools.rad, desc);
+		pool_put(&raidPtr->pools.rad, desc);
 		return (NULL);
 	}
 	raidPtr->nAccOutstanding++;
@@ -628,7 +636,7 @@ rf_FreeRaidAccDesc(RF_RaidAccessDesc_t *
 	while(dagList != NULL) {
 		temp = dagList;
 		dagList = dagList->next;
-		rf_FreeDAGList(temp);
+		rf_FreeDAGList(raidPtr, temp);
 	}
 
 	while (desc->iobufs) {
@@ -643,7 +651,7 @@ rf_FreeRaidAccDesc(RF_RaidAccessDesc_t *
 		rf_FreeStripeBuffer(raidPtr, tmp);
 	}
 
-	pool_put(&rf_pools.rad, desc);
+	pool_put(&raidPtr->pools.rad, desc);
 	rf_lock_mutex2(raidPtr->rad_lock);
 	raidPtr->nAccOutstanding--;
 	if (raidPtr->waitShutdown) {
@@ -848,7 +856,7 @@ rf_ResumeNewRequests(RF_Raid_t *raidPtr)
 		t = cb;
 		cb = cb->next;
 		(t->callbackFunc) (t->callbackArg);
-		rf_FreeCallbackFuncDesc(t);
+		rf_FreeCallbackFuncDesc(raidPtr, t);
 	}
 }
 /*****************************************************************************************

Index: src/sys/dev/raidframe/rf_evenodd.c
diff -u src/sys/dev/raidframe/rf_evenodd.c:1.22 src/sys/dev/raidframe/rf_evenodd.c:1.23
--- src/sys/dev/raidframe/rf_evenodd.c:1.22	Sat Feb  9 03:34:00 2019
+++ src/sys/dev/raidframe/rf_evenodd.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_evenodd.c,v 1.22 2019/02/09 03:34:00 christos Exp $	*/
+/*	$NetBSD: rf_evenodd.c,v 1.23 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -33,7 +33,7 @@
  ****************************************************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_evenodd.c,v 1.22 2019/02/09 03:34:00 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_evenodd.c,v 1.23 2021/07/23 00:54:45 oster Exp $");
 
 #include "rf_archs.h"
 
@@ -356,7 +356,7 @@ rf_VerifyParityEvenOdd(RF_Raid_t *raidPt
 
 	retcode = RF_PARITY_OKAY;
 
-	mcpair = rf_AllocMCPair();
+	mcpair = rf_AllocMCPair(raidPtr);
 	rf_MakeAllocList(alloclist);
 	buf = RF_MallocAndAdd(
 	    numbytes * (layoutPtr->numDataCol + layoutPtr->numParityCol),
@@ -526,10 +526,10 @@ rf_VerifyParityEvenOdd(RF_Raid_t *raidPt
 
 
 out:
-	rf_FreeAccessStripeMap(asm_h);
+	rf_FreeAccessStripeMap(raidPtr, asm_h);
 	rf_FreeAllocList(alloclist);
 	rf_FreeDAG(rd_dag_h);
-	rf_FreeMCPair(mcpair);
+	rf_FreeMCPair(raidPtr, mcpair);
 	return (retcode);
 }
 #endif				/* RF_INCLUDE_EVENODD > 0 */

Index: src/sys/dev/raidframe/rf_map.c
diff -u src/sys/dev/raidframe/rf_map.c:1.50 src/sys/dev/raidframe/rf_map.c:1.51
--- src/sys/dev/raidframe/rf_map.c:1.50	Tue May 28 08:59:35 2019
+++ src/sys/dev/raidframe/rf_map.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_map.c,v 1.50 2019/05/28 08:59:35 msaitoh Exp $	*/
+/*	$NetBSD: rf_map.c,v 1.51 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -33,7 +33,7 @@
  **************************************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_map.c,v 1.50 2019/05/28 08:59:35 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_map.c,v 1.51 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -43,8 +43,8 @@ __KERNEL_RCSID(0, "$NetBSD: rf_map.c,v 1
 #include "rf_map.h"
 #include "rf_shutdown.h"
 
-static void rf_FreePDAList(RF_PhysDiskAddr_t *pda_list);
-static void rf_FreeASMList(RF_AccessStripeMap_t *asm_list);
+static void rf_FreePDAList(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *pda_list);
+static void rf_FreeASMList(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asm_list);
 
 /***************************************************************************
  *
@@ -108,10 +108,10 @@ rf_MapAccess(RF_Raid_t *raidPtr, RF_Raid
 	SUID = rf_RaidAddressToStripeUnitID(layoutPtr, raidAddress);
 	lastSUID = rf_RaidAddressToStripeUnitID(layoutPtr, lastRaidAddr);
 
-	asmList = rf_AllocASMList(totStripes);
+	asmList = rf_AllocASMList(raidPtr, totStripes);
 
 	/* may also need pda(s) per stripe for parity */
-	pdaList = rf_AllocPDAList(lastSUID - SUID + 1 +
+	pdaList = rf_AllocPDAList(raidPtr, lastSUID - SUID + 1 +
 				  faultsTolerated * totStripes);
 
 
@@ -209,7 +209,7 @@ rf_MapAccess(RF_Raid_t *raidPtr, RF_Raid
 			/* raidAddr may be needed to find unit to redirect to */
 			pda_p->raidAddress = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, startAddrWithinStripe);
 			rf_ASMCheckStatus(raidPtr, pda_p, asm_p, disks, 1);
-			rf_ASMParityAdjust(asm_p->parityInfo, startAddrWithinStripe, endAddress, layoutPtr, asm_p);
+			rf_ASMParityAdjust(raidPtr, asm_p->parityInfo, startAddrWithinStripe, endAddress, layoutPtr, asm_p);
 
 			break;
 #if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0)
@@ -236,15 +236,15 @@ rf_MapAccess(RF_Raid_t *raidPtr, RF_Raid
 			/* failure mode stuff */
 			rf_ASMCheckStatus(raidPtr, pda_p, asm_p, disks, 1);
 			rf_ASMCheckStatus(raidPtr, pda_q, asm_p, disks, 1);
-			rf_ASMParityAdjust(asm_p->parityInfo, startAddrWithinStripe, endAddress, layoutPtr, asm_p);
-			rf_ASMParityAdjust(asm_p->qInfo, startAddrWithinStripe, endAddress, layoutPtr, asm_p);
+			rf_ASMParityAdjust(raidPtr, asm_p->parityInfo, startAddrWithinStripe, endAddress, layoutPtr, asm_p);
+			rf_ASMParityAdjust(raidPtr, asm_p->qInfo, startAddrWithinStripe, endAddress, layoutPtr, asm_p);
 			break;
 #endif
 		}
 	}
 	RF_ASSERT(asmList == NULL && pdaList == NULL);
 	/* make the header structure */
-	asm_hdr = rf_AllocAccessStripeMapHeader();
+	asm_hdr = rf_AllocAccessStripeMapHeader(raidPtr);
 	RF_ASSERT(numStripes == totStripes);
 	asm_hdr->numStripes = numStripes;
 	asm_hdr->stripeMap = asm_list;
@@ -339,103 +339,108 @@ rf_MarkFailuresInASMList(RF_Raid_t *raid
    release all the free lists */
 static void rf_ShutdownMapModule(void *);
 static void
-rf_ShutdownMapModule(void *ignored)
+rf_ShutdownMapModule(void *arg)
 {
-	pool_destroy(&rf_pools.asm_hdr);
-	pool_destroy(&rf_pools.asmap);
-	pool_destroy(&rf_pools.asmhle);
-	pool_destroy(&rf_pools.pda);
-	pool_destroy(&rf_pools.fss);
-	pool_destroy(&rf_pools.vfple);
-	pool_destroy(&rf_pools.vple);
+	RF_Raid_t *raidPtr;
+
+	raidPtr = (RF_Raid_t *) arg;
+
+	pool_destroy(&raidPtr->pools.asm_hdr);
+	pool_destroy(&raidPtr->pools.asmap);
+	pool_destroy(&raidPtr->pools.asmhle);
+	pool_destroy(&raidPtr->pools.pda);
+	pool_destroy(&raidPtr->pools.fss);
+	pool_destroy(&raidPtr->pools.vfple);
+	pool_destroy(&raidPtr->pools.vple);
 }
 
 int
-rf_ConfigureMapModule(RF_ShutdownList_t **listp)
+rf_ConfigureMapModule(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
+		      RF_Config_t *cfgPtr)
 {
 
-	rf_pool_init(&rf_pools.asm_hdr, sizeof(RF_AccessStripeMapHeader_t),
-		     "rf_asmhdr_pl", RF_MIN_FREE_ASMHDR, RF_MAX_FREE_ASMHDR);
-	rf_pool_init(&rf_pools.asmap, sizeof(RF_AccessStripeMap_t),
-		     "rf_asm_pl", RF_MIN_FREE_ASM, RF_MAX_FREE_ASM);
-	rf_pool_init(&rf_pools.asmhle, sizeof(RF_ASMHeaderListElem_t),
-		     "rf_asmhle_pl", RF_MIN_FREE_ASMHLE, RF_MAX_FREE_ASMHLE);
-	rf_pool_init(&rf_pools.pda, sizeof(RF_PhysDiskAddr_t),
-		     "rf_pda_pl", RF_MIN_FREE_PDA, RF_MAX_FREE_PDA);
-	rf_pool_init(&rf_pools.fss, sizeof(RF_FailedStripe_t),
-		     "rf_fss_pl", RF_MIN_FREE_FSS, RF_MAX_FREE_FSS);
-	rf_pool_init(&rf_pools.vfple, sizeof(RF_VoidFunctionPointerListElem_t),
-		     "rf_vfple_pl", RF_MIN_FREE_VFPLE, RF_MAX_FREE_VFPLE);
-	rf_pool_init(&rf_pools.vple, sizeof(RF_VoidPointerListElem_t),
-		     "rf_vple_pl", RF_MIN_FREE_VPLE, RF_MAX_FREE_VPLE);
-	rf_ShutdownCreate(listp, rf_ShutdownMapModule, NULL);
+	rf_pool_init(raidPtr, raidPtr->poolNames.asm_hdr, &raidPtr->pools.asm_hdr, sizeof(RF_AccessStripeMapHeader_t),
+		     "asmhdr", RF_MIN_FREE_ASMHDR, RF_MAX_FREE_ASMHDR);
+	rf_pool_init(raidPtr, raidPtr->poolNames.asmap, &raidPtr->pools.asmap, sizeof(RF_AccessStripeMap_t),
+		     "asmap", RF_MIN_FREE_ASM, RF_MAX_FREE_ASM);
+	rf_pool_init(raidPtr, raidPtr->poolNames.asmhle, &raidPtr->pools.asmhle, sizeof(RF_ASMHeaderListElem_t),
+		     "asmhle", RF_MIN_FREE_ASMHLE, RF_MAX_FREE_ASMHLE);
+	rf_pool_init(raidPtr, raidPtr->poolNames.pda, &raidPtr->pools.pda, sizeof(RF_PhysDiskAddr_t),
+		     "pda", RF_MIN_FREE_PDA, RF_MAX_FREE_PDA);
+	rf_pool_init(raidPtr, raidPtr->poolNames.fss, &raidPtr->pools.fss, sizeof(RF_FailedStripe_t),
+		     "fss", RF_MIN_FREE_FSS, RF_MAX_FREE_FSS);
+	rf_pool_init(raidPtr, raidPtr->poolNames.vfple, &raidPtr->pools.vfple, sizeof(RF_VoidFunctionPointerListElem_t),
+		     "vfple", RF_MIN_FREE_VFPLE, RF_MAX_FREE_VFPLE);
+	rf_pool_init(raidPtr, raidPtr->poolNames.vple, &raidPtr->pools.vple, sizeof(RF_VoidPointerListElem_t),
+		     "vple", RF_MIN_FREE_VPLE, RF_MAX_FREE_VPLE);
+	rf_ShutdownCreate(listp, rf_ShutdownMapModule, raidPtr);
 
 	return (0);
 }
 
 RF_AccessStripeMapHeader_t *
-rf_AllocAccessStripeMapHeader(void)
+rf_AllocAccessStripeMapHeader(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.asm_hdr, PR_WAITOK | PR_ZERO);
+	return pool_get(&raidPtr->pools.asm_hdr, PR_WAITOK | PR_ZERO);
 }
 
 void
-rf_FreeAccessStripeMapHeader(RF_AccessStripeMapHeader_t *p)
+rf_FreeAccessStripeMapHeader(RF_Raid_t *raidPtr, RF_AccessStripeMapHeader_t *p)
 {
-	pool_put(&rf_pools.asm_hdr, p);
+	pool_put(&raidPtr->pools.asm_hdr, p);
 }
 
 
 RF_VoidFunctionPointerListElem_t *
-rf_AllocVFPListElem(void)
+rf_AllocVFPListElem(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.vfple, PR_WAITOK | PR_ZERO);
+	return pool_get(&raidPtr->pools.vfple, PR_WAITOK | PR_ZERO);
 }
 
 void
-rf_FreeVFPListElem(RF_VoidFunctionPointerListElem_t *p)
+rf_FreeVFPListElem(RF_Raid_t *raidPtr, RF_VoidFunctionPointerListElem_t *p)
 {
 
-	pool_put(&rf_pools.vfple, p);
+	pool_put(&raidPtr->pools.vfple, p);
 }
 
 
 RF_VoidPointerListElem_t *
-rf_AllocVPListElem(void)
+rf_AllocVPListElem(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.vple, PR_WAITOK | PR_ZERO);
+	return pool_get(&raidPtr->pools.vple, PR_WAITOK | PR_ZERO);
 }
 
 void
-rf_FreeVPListElem(RF_VoidPointerListElem_t *p)
+rf_FreeVPListElem(RF_Raid_t *raidPtr, RF_VoidPointerListElem_t *p)
 {
 
-	pool_put(&rf_pools.vple, p);
+	pool_put(&raidPtr->pools.vple, p);
 }
 
 RF_ASMHeaderListElem_t *
-rf_AllocASMHeaderListElem(void)
+rf_AllocASMHeaderListElem(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.asmhle, PR_WAITOK | PR_ZERO);
+	return pool_get(&raidPtr->pools.asmhle, PR_WAITOK | PR_ZERO);
 }
 
 void
-rf_FreeASMHeaderListElem(RF_ASMHeaderListElem_t *p)
+rf_FreeASMHeaderListElem(RF_Raid_t *raidPtr, RF_ASMHeaderListElem_t *p)
 {
 
-	pool_put(&rf_pools.asmhle, p);
+	pool_put(&raidPtr->pools.asmhle, p);
 }
 
 RF_FailedStripe_t *
-rf_AllocFailedStripeStruct(void)
+rf_AllocFailedStripeStruct(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.fss, PR_WAITOK | PR_ZERO);
+	return pool_get(&raidPtr->pools.fss, PR_WAITOK | PR_ZERO);
 }
 
 void
-rf_FreeFailedStripeStruct(RF_FailedStripe_t *p)
+rf_FreeFailedStripeStruct(RF_Raid_t *raidPtr, RF_FailedStripe_t *p)
 {
-	pool_put(&rf_pools.fss, p);
+	pool_put(&raidPtr->pools.fss, p);
 }
 
 
@@ -443,9 +448,9 @@ rf_FreeFailedStripeStruct(RF_FailedStrip
 
 
 RF_PhysDiskAddr_t *
-rf_AllocPhysDiskAddr(void)
+rf_AllocPhysDiskAddr(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.pda, PR_WAITOK | PR_ZERO);
+	return pool_get(&raidPtr->pools.pda, PR_WAITOK | PR_ZERO);
 }
 /* allocates a list of PDAs, locking the free list only once when we
  * have to call calloc, we do it one component at a time to simplify
@@ -453,7 +458,7 @@ rf_AllocPhysDiskAddr(void)
  * not be much of a performance hit, because it should be very
  * infrequently executed.  */
 RF_PhysDiskAddr_t *
-rf_AllocPDAList(int count)
+rf_AllocPDAList(RF_Raid_t *raidPtr, int count)
 {
 	RF_PhysDiskAddr_t *p, *prev;
 	int i;
@@ -461,7 +466,7 @@ rf_AllocPDAList(int count)
 	p = NULL;
 	prev = NULL;
 	for (i = 0; i < count; i++) {
-		p = pool_get(&rf_pools.pda, PR_WAITOK);
+		p = pool_get(&raidPtr->pools.pda, PR_WAITOK);
 		p->next = prev;
 		prev = p;
 	}
@@ -470,20 +475,20 @@ rf_AllocPDAList(int count)
 }
 
 void
-rf_FreePhysDiskAddr(RF_PhysDiskAddr_t *p)
+rf_FreePhysDiskAddr(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *p)
 {
-	pool_put(&rf_pools.pda, p);
+	pool_put(&raidPtr->pools.pda, p);
 }
 
 static void
-rf_FreePDAList(RF_PhysDiskAddr_t *pda_list)
+rf_FreePDAList(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *pda_list)
 {
 	RF_PhysDiskAddr_t *p, *tmp;
 
 	p=pda_list;
 	while (p) {
 		tmp = p->next;
-		pool_put(&rf_pools.pda, p);
+		pool_put(&raidPtr->pools.pda, p);
 		p = tmp;
 	}
 }
@@ -494,7 +499,7 @@ rf_FreePDAList(RF_PhysDiskAddr_t *pda_li
  * shutdown.  This should not be much of a performance hit, because it
  * should be very infrequently executed.  */
 RF_AccessStripeMap_t *
-rf_AllocASMList(int count)
+rf_AllocASMList(RF_Raid_t *raidPtr, int count)
 {
 	RF_AccessStripeMap_t *p, *prev;
 	int i;
@@ -502,7 +507,7 @@ rf_AllocASMList(int count)
 	p = NULL;
 	prev = NULL;
 	for (i = 0; i < count; i++) {
-		p = pool_get(&rf_pools.asmap, PR_WAITOK);
+		p = pool_get(&raidPtr->pools.asmap, PR_WAITOK);
 		p->next = prev;
 		prev = p;
 	}
@@ -510,20 +515,20 @@ rf_AllocASMList(int count)
 }
 
 static void
-rf_FreeASMList(RF_AccessStripeMap_t *asm_list)
+rf_FreeASMList(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asm_list)
 {
 	RF_AccessStripeMap_t *p, *tmp;
 
 	p=asm_list;
 	while (p) {
 		tmp = p->next;
-		pool_put(&rf_pools.asmap, p);
+		pool_put(&raidPtr->pools.asmap, p);
 		p = tmp;
 	}
 }
 
 void
-rf_FreeAccessStripeMap(RF_AccessStripeMapHeader_t *hdr)
+rf_FreeAccessStripeMap(RF_Raid_t *raidPtr, RF_AccessStripeMapHeader_t *hdr)
 {
 	RF_AccessStripeMap_t *p;
 	RF_PhysDiskAddr_t *pdp, *trailer, *pdaList = NULL, *pdaEnd = NULL;
@@ -576,9 +581,9 @@ rf_FreeAccessStripeMap(RF_AccessStripeMa
 	RF_ASSERT(t == count);
 
 	if (pdaList)
-		rf_FreePDAList(pdaList);
-	rf_FreeASMList(hdr->stripeMap);
-	rf_FreeAccessStripeMapHeader(hdr);
+		rf_FreePDAList(raidPtr, pdaList);
+	rf_FreeASMList(raidPtr, hdr->stripeMap);
+	rf_FreeAccessStripeMapHeader(raidPtr, hdr);
 }
 /* We can't use the large write optimization if there are any failures
  * in the stripe.  In the declustered layout, there is no way to
@@ -760,7 +765,8 @@ rf_PrintRaidAddressInfo(RF_Raid_t *raidP
  * range restrict the parity descriptor to touch only the correct
  * stuff.  */
 void
-rf_ASMParityAdjust(RF_PhysDiskAddr_t *toAdjust,
+rf_ASMParityAdjust(RF_Raid_t *raidPtr,
+		   RF_PhysDiskAddr_t *toAdjust,
 		   RF_StripeNum_t startAddrWithinStripe,
 		   RF_SectorNum_t endAddress,
 		   RF_RaidLayout_t *layoutPtr,
@@ -795,7 +801,7 @@ rf_ASMParityAdjust(RF_PhysDiskAddr_t *to
 			RF_ASSERT(toAdjust->next == NULL);
 			/* the following will get freed in rf_FreeAccessStripeMap() via
 			   rf_FreePDAList() */
-			new_pda = toAdjust->next = rf_AllocPhysDiskAddr();
+			new_pda = toAdjust->next = rf_AllocPhysDiskAddr(raidPtr);
 			*new_pda = *toAdjust;	/* structure assignment */
 			new_pda->next = NULL;
 

Index: src/sys/dev/raidframe/rf_map.h
diff -u src/sys/dev/raidframe/rf_map.h:1.13 src/sys/dev/raidframe/rf_map.h:1.14
--- src/sys/dev/raidframe/rf_map.h:1.13	Sun Mar  4 06:02:38 2007
+++ src/sys/dev/raidframe/rf_map.h	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_map.h,v 1.13 2007/03/04 06:02:38 christos Exp $	*/
+/*	$NetBSD: rf_map.h,v 1.14 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -40,31 +40,31 @@
 RF_AccessStripeMapHeader_t *rf_MapAccess(RF_Raid_t *, RF_RaidAddr_t,
 					 RF_SectorCount_t, void *, int);
 void rf_MarkFailuresInASMList(RF_Raid_t *, RF_AccessStripeMapHeader_t *);
-int rf_ConfigureMapModule(RF_ShutdownList_t **);
-RF_AccessStripeMapHeader_t *rf_AllocAccessStripeMapHeader(void);
-void rf_FreeAccessStripeMapHeader(RF_AccessStripeMapHeader_t *);
-RF_PhysDiskAddr_t *rf_AllocPhysDiskAddr(void);
-RF_PhysDiskAddr_t *rf_AllocPDAList(int);
-void rf_FreePhysDiskAddr(RF_PhysDiskAddr_t *);
-RF_AccessStripeMap_t *rf_AllocASMList(int);
-void rf_FreeAccessStripeMap(RF_AccessStripeMapHeader_t *);
+int rf_ConfigureMapModule(RF_ShutdownList_t **, RF_Raid_t *, RF_Config_t *);
+RF_AccessStripeMapHeader_t *rf_AllocAccessStripeMapHeader(RF_Raid_t *);
+void rf_FreeAccessStripeMapHeader(RF_Raid_t *, RF_AccessStripeMapHeader_t *);
+RF_PhysDiskAddr_t *rf_AllocPhysDiskAddr(RF_Raid_t *);
+RF_PhysDiskAddr_t *rf_AllocPDAList(RF_Raid_t *, int);
+void rf_FreePhysDiskAddr(RF_Raid_t *, RF_PhysDiskAddr_t *);
+RF_AccessStripeMap_t *rf_AllocASMList(RF_Raid_t *, int);
+void rf_FreeAccessStripeMap(RF_Raid_t *, RF_AccessStripeMapHeader_t *);
 int rf_CheckStripeForFailures(RF_Raid_t *, RF_AccessStripeMap_t *);
 int rf_NumFailedDataUnitsInStripe(RF_Raid_t *, RF_AccessStripeMap_t *);
 void rf_PrintAccessStripeMap(RF_AccessStripeMapHeader_t *);
 void rf_PrintFullAccessStripeMap(RF_AccessStripeMapHeader_t *, int);
 void rf_PrintRaidAddressInfo(RF_Raid_t *, RF_RaidAddr_t, RF_SectorCount_t);
-void rf_ASMParityAdjust(RF_PhysDiskAddr_t *, RF_StripeNum_t, RF_SectorNum_t,
+void rf_ASMParityAdjust(RF_Raid_t *, RF_PhysDiskAddr_t *, RF_StripeNum_t, RF_SectorNum_t,
 			RF_RaidLayout_t *, RF_AccessStripeMap_t *);
 void rf_ASMCheckStatus(RF_Raid_t *, RF_PhysDiskAddr_t *, RF_AccessStripeMap_t *,
 		       RF_RaidDisk_t *, int);
 
-RF_VoidFunctionPointerListElem_t *rf_AllocVFPListElem(void);
-void rf_FreeVFPListElem(RF_VoidFunctionPointerListElem_t *);
-RF_VoidPointerListElem_t *rf_AllocVPListElem(void);
-void rf_FreeVPListElem(RF_VoidPointerListElem_t *);
-RF_ASMHeaderListElem_t *rf_AllocASMHeaderListElem(void);
-void rf_FreeASMHeaderListElem(RF_ASMHeaderListElem_t *);
-RF_FailedStripe_t *rf_AllocFailedStripeStruct(void);
-void rf_FreeFailedStripeStruct(RF_FailedStripe_t *);
+RF_VoidFunctionPointerListElem_t *rf_AllocVFPListElem(RF_Raid_t *);
+void rf_FreeVFPListElem(RF_Raid_t *, RF_VoidFunctionPointerListElem_t *);
+RF_VoidPointerListElem_t *rf_AllocVPListElem(RF_Raid_t *);
+void rf_FreeVPListElem(RF_Raid_t *, RF_VoidPointerListElem_t *);
+RF_ASMHeaderListElem_t *rf_AllocASMHeaderListElem(RF_Raid_t *);
+void rf_FreeASMHeaderListElem(RF_Raid_t *, RF_ASMHeaderListElem_t *);
+RF_FailedStripe_t *rf_AllocFailedStripeStruct(RF_Raid_t *);
+void rf_FreeFailedStripeStruct(RF_Raid_t *, RF_FailedStripe_t *);
 
 #endif				/* !_RF__RF_MAP_H_ */
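
With these signatures, callers simply thread the raidPtr through to reach the per-RAID pools. For example, the single-element PDA allocations in rf_paritylogDiskMgr.c below reduce to the following (a sketch only, reusing calls shown in this commit):

RF_PhysDiskAddr_t *pda;

pda = rf_AllocPDAList(raidPtr, 1);	/* one-element list from raidPtr->pools.pda */
/* ... fill in col, startSector, numSector ... */
rf_FreePhysDiskAddr(raidPtr, pda);	/* returns it to the same per-RAID pool */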

Index: src/sys/dev/raidframe/rf_mcpair.h
diff -u src/sys/dev/raidframe/rf_mcpair.h:1.10 src/sys/dev/raidframe/rf_mcpair.h:1.11
--- src/sys/dev/raidframe/rf_mcpair.h:1.10	Sun May  1 01:09:05 2011
+++ src/sys/dev/raidframe/rf_mcpair.h	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_mcpair.h,v 1.10 2011/05/01 01:09:05 mrg Exp $	*/
+/*	$NetBSD: rf_mcpair.h,v 1.11 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -45,9 +45,9 @@ struct RF_MCPair_s {
 #define RF_LOCK_MCPAIR(_mcp)	rf_lock_mutex2((_mcp)->mutex)
 #define RF_UNLOCK_MCPAIR(_mcp)	rf_unlock_mutex2((_mcp)->mutex)
 
-int     rf_ConfigureMCPair(RF_ShutdownList_t ** listp);
-RF_MCPair_t *rf_AllocMCPair(void);
-void    rf_FreeMCPair(RF_MCPair_t * t);
+int     rf_ConfigureMCPair(RF_ShutdownList_t ** listp, RF_Raid_t *raidPtr, RF_Config_t *cfgPtr);
+RF_MCPair_t *rf_AllocMCPair(RF_Raid_t *);
+void    rf_FreeMCPair(RF_Raid_t *raidPtr, RF_MCPair_t * t);
 void    rf_MCPairWakeupFunc(RF_MCPair_t * t);
 
 #endif				/* !_RF__RF_MCPAIR_H_ */
Index: src/sys/dev/raidframe/rf_revent.h
diff -u src/sys/dev/raidframe/rf_revent.h:1.10 src/sys/dev/raidframe/rf_revent.h:1.11
--- src/sys/dev/raidframe/rf_revent.h:1.10	Sun Dec 11 12:23:37 2005
+++ src/sys/dev/raidframe/rf_revent.h	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_revent.h,v 1.10 2005/12/11 12:23:37 christos Exp $	*/
+/*	$NetBSD: rf_revent.h,v 1.11 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -37,10 +37,10 @@
 
 #include <dev/raidframe/raidframevar.h>
 
-int rf_ConfigureReconEvent(RF_ShutdownList_t **);
+int rf_ConfigureReconEvent(RF_ShutdownList_t **, RF_Raid_t *, RF_Config_t *);
 RF_ReconEvent_t *rf_GetNextReconEvent(RF_RaidReconDesc_t *);
 void rf_CauseReconEvent(RF_Raid_t *, RF_RowCol_t, void *, RF_Revent_t);
 void rf_DrainReconEventQueue(RF_RaidReconDesc_t *r);
-void rf_FreeReconEventDesc(RF_ReconEvent_t *);
+void rf_FreeReconEventDesc(RF_Raid_t *,RF_ReconEvent_t *);
 
 #endif				/* !_RF__RF_REVENT_H_ */

Index: src/sys/dev/raidframe/rf_netbsdkintf.c
diff -u src/sys/dev/raidframe/rf_netbsdkintf.c:1.394 src/sys/dev/raidframe/rf_netbsdkintf.c:1.395
--- src/sys/dev/raidframe/rf_netbsdkintf.c:1.394	Wed May 26 06:11:50 2021
+++ src/sys/dev/raidframe/rf_netbsdkintf.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_netbsdkintf.c,v 1.394 2021/05/26 06:11:50 mrg Exp $	*/
+/*	$NetBSD: rf_netbsdkintf.c,v 1.395 2021/07/23 00:54:45 oster Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2008-2011 The NetBSD Foundation, Inc.
@@ -101,7 +101,7 @@
  ***********************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.394 2021/05/26 06:11:50 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.395 2021/07/23 00:54:45 oster Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_raid_autoconfig.h"
@@ -331,7 +331,7 @@ int raidautoconfig = 0;
 #endif
 static bool raidautoconfigdone = false;
 
-struct RF_Pools_s rf_pools;
+struct pool rf_alloclist_pool;   /* AllocList */
 
 static LIST_HEAD(, raid_softc) raids = LIST_HEAD_INITIALIZER(raids);
 static kmutex_t raid_lock;
@@ -3558,15 +3558,19 @@ rf_auto_config_set(RF_ConfigSet_t *cset)
 }
 
 void
-rf_pool_init(struct pool *p, size_t size, const char *w_chan,
+rf_pool_init(RF_Raid_t *raidPtr, char *w_chan, struct pool *p, size_t size, const char *pool_name,
 	     size_t xmin, size_t xmax)
 {
 
+	/* Format: raid%d_foo */
+	snprintf(w_chan, RF_MAX_POOLNAMELEN, "raid%d_%s", raidPtr->raidid, pool_name);
+	
 	pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
 	pool_sethiwat(p, xmax);
 	pool_prime(p, xmin);
 }
 
+
 /*
  * rf_buf_queue_check(RF_Raid_t raidPtr) -- looks into the buffer queue
  * to see if there is IO pending and if that IO could possibly be done

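rf_pool_init() now handles the naming as well: it takes the owning raidPtr and a caller-supplied name buffer, formats a per-RAID wchan of the form "raid%d_<name>" into that buffer, and then initializes and primes the pool as before. For a set with raidid 0, the dqd pool configured in rf_diskqueue.c above therefore sleeps on "raid0_dqd":

/* from rf_ConfigureDiskQueueSystem() above; for raidid 0 this leaves
   "raid0_dqd" in raidPtr->poolNames.dqd and uses it as the pool's wchan */
rf_pool_init(raidPtr, raidPtr->poolNames.dqd, &raidPtr->pools.dqd,
	     sizeof(RF_DiskQueueData_t), "dqd",
	     RF_MIN_FREE_DQD, RF_MAX_FREE_DQD);
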
Index: src/sys/dev/raidframe/rf_paritylogDiskMgr.c
diff -u src/sys/dev/raidframe/rf_paritylogDiskMgr.c:1.30 src/sys/dev/raidframe/rf_paritylogDiskMgr.c:1.31
--- src/sys/dev/raidframe/rf_paritylogDiskMgr.c:1.30	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_paritylogDiskMgr.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_paritylogDiskMgr.c,v 1.30 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_paritylogDiskMgr.c,v 1.31 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_paritylogDiskMgr.c,v 1.30 2019/10/10 03:43:59 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_paritylogDiskMgr.c,v 1.31 2021/07/23 00:54:45 oster Exp $");
 
 #include "rf_archs.h"
 
@@ -134,7 +134,7 @@ ReadRegionLog(
 				      RF_IO_NORMAL_PRIORITY);
 
 	/* create and initialize PDA for the core log */
-	*rrd_pda = rf_AllocPDAList(1);
+	*rrd_pda = rf_AllocPDAList(raidPtr, 1);
 	rf_MapLogParityLogging(raidPtr, regionID, 0,
 			       &((*rrd_pda)->col), &((*rrd_pda)->startSector));
 	(*rrd_pda)->numSector = raidPtr->regionInfo[regionID].capacity;
@@ -184,7 +184,7 @@ WriteCoreLog(
 				      rf_DiskWriteFunc, rf_DiskWriteUndoFunc,
 	    "Wcl", *fwr_alloclist, RF_DAG_FLAGS_NONE, RF_IO_NORMAL_PRIORITY);
 
-	*fwr_pda = rf_AllocPDAList(1);
+	*fwr_pda = rf_AllocPDAList(raidPtr, 1);
 	regionOffset = log->diskOffset;
 	rf_MapLogParityLogging(raidPtr, regionID, regionOffset,
 			       &((*fwr_pda)->col),
@@ -232,7 +232,7 @@ ReadRegionParity(
 				      RF_IO_NORMAL_PRIORITY);
 
 	/* create and initialize PDA for region parity */
-	*prd_pda = rf_AllocPDAList(1);
+	*prd_pda = rf_AllocPDAList(raidPtr, 1);
 	rf_MapRegionParity(raidPtr, regionID,
 			   &((*prd_pda)->col), &((*prd_pda)->startSector),
 			   &((*prd_pda)->numSector));
@@ -287,7 +287,7 @@ WriteRegionParity(
 				      RF_IO_NORMAL_PRIORITY);
 
 	/* create and initialize PDA for region parity */
-	*pwr_pda = rf_AllocPDAList(1);
+	*pwr_pda = rf_AllocPDAList(raidPtr, 1);
 	rf_MapRegionParity(raidPtr, regionID,
 			   &((*pwr_pda)->col), &((*pwr_pda)->startSector),
 			   &((*pwr_pda)->numSector));
@@ -327,7 +327,7 @@ FlushLogsToDisk(
 	RF_AllocListElem_t *fwr_alloclist;
 	RF_PhysDiskAddr_t *fwr_pda;
 
-	fwr_mcpair = rf_AllocMCPair();
+	fwr_mcpair = rf_AllocMCPair(raidPtr);
 	RF_LOCK_MCPAIR(fwr_mcpair);
 
 	RF_ASSERT(logList);
@@ -350,14 +350,14 @@ FlushLogsToDisk(
 			RF_ASSERT(0);
 		}
 		/* RF_Free(fwr_pda, sizeof(RF_PhysDiskAddr_t)); */
-		rf_FreePhysDiskAddr(fwr_pda);
+		rf_FreePhysDiskAddr(raidPtr, fwr_pda);
 		rf_FreeDAG(fwr_dag_h);
 		rf_FreeAllocList(fwr_alloclist);
 
 		log = log->next;
 	}
 	RF_UNLOCK_MCPAIR(fwr_mcpair);
-	rf_FreeMCPair(fwr_mcpair);
+	rf_FreeMCPair(raidPtr, fwr_mcpair);
 	rf_ReleaseParityLogs(raidPtr, logList);
 }
 
@@ -391,7 +391,7 @@ ReintegrateRegion(
 	if (rf_parityLogDebug)
 		printf("[initiating read of parity for region %d]\n",regionID);
 	parityBuffer = AcquireReintBuffer(&raidPtr->parityBufferPool);
-	prd_mcpair = rf_AllocMCPair();
+	prd_mcpair = rf_AllocMCPair(raidPtr);
 	RF_LOCK_MCPAIR(prd_mcpair);
 	prd_mcpair->flag = RF_FALSE;
 	ReadRegionParity(regionID, prd_mcpair, parityBuffer, raidPtr,
@@ -403,7 +403,7 @@ ReintegrateRegion(
 			printf("[initiating read of disk log for region %d]\n",
 			       regionID);
 		regionBuffer = AcquireReintBuffer(&raidPtr->regionBufferPool);
-		rrd_mcpair = rf_AllocMCPair();
+		rrd_mcpair = rf_AllocMCPair(raidPtr);
 		RF_LOCK_MCPAIR(rrd_mcpair);
 		rrd_mcpair->flag = RF_FALSE;
 		ReadRegionLog(regionID, rrd_mcpair, regionBuffer, raidPtr,
@@ -436,17 +436,17 @@ ReintegrateRegion(
 		/* ApplyRegionToParity(regionID, regionBuffer, parityBuffer); */
 		/* release resources associated with region log */
 		/* RF_Free(rrd_pda, sizeof(RF_PhysDiskAddr_t)); */
-		rf_FreePhysDiskAddr(rrd_pda);
+		rf_FreePhysDiskAddr(raidPtr, rrd_pda);
 		rf_FreeDAG(rrd_dag_h);
 		rf_FreeAllocList(rrd_alloclist);
-		rf_FreeMCPair(rrd_mcpair);
+		rf_FreeMCPair(raidPtr, rrd_mcpair);
 		ReleaseReintBuffer(&raidPtr->regionBufferPool, regionBuffer);
 	}
 	/* write reintegrated parity to disk */
 	if (rf_parityLogDebug)
 		printf("[initiating write of parity for region %d]\n",
 		       regionID);
-	pwr_mcpair = rf_AllocMCPair();
+	pwr_mcpair = rf_AllocMCPair(raidPtr);
 	RF_LOCK_MCPAIR(pwr_mcpair);
 	pwr_mcpair->flag = RF_FALSE;
 	WriteRegionParity(regionID, pwr_mcpair, parityBuffer, raidPtr,
@@ -461,18 +461,18 @@ ReintegrateRegion(
 	}
 	/* release resources associated with read of old parity */
 	/* RF_Free(prd_pda, sizeof(RF_PhysDiskAddr_t)); */
-	rf_FreePhysDiskAddr(prd_pda);
+	rf_FreePhysDiskAddr(raidPtr, prd_pda);
 	rf_FreeDAG(prd_dag_h);
 	rf_FreeAllocList(prd_alloclist);
-	rf_FreeMCPair(prd_mcpair);
+	rf_FreeMCPair(raidPtr, prd_mcpair);
 
 	/* release resources associated with write of new parity */
 	ReleaseReintBuffer(&raidPtr->parityBufferPool, parityBuffer);
 	/* RF_Free(pwr_pda, sizeof(RF_PhysDiskAddr_t)); */
-	rf_FreePhysDiskAddr(pwr_pda);
+	rf_FreePhysDiskAddr(raidPtr, pwr_pda);
 	rf_FreeDAG(pwr_dag_h);
 	rf_FreeAllocList(pwr_alloclist);
-	rf_FreeMCPair(pwr_mcpair);
+	rf_FreeMCPair(raidPtr, pwr_mcpair);
 
 	if (rf_parityLogDebug)
 		printf("[finished reintegrating region %d]\n", regionID);

Index: src/sys/dev/raidframe/rf_psstatus.c
diff -u src/sys/dev/raidframe/rf_psstatus.c:1.37 src/sys/dev/raidframe/rf_psstatus.c:1.38
--- src/sys/dev/raidframe/rf_psstatus.c:1.37	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_psstatus.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_psstatus.c,v 1.37 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_psstatus.c,v 1.38 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -37,7 +37,7 @@
  *****************************************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_psstatus.c,v 1.37 2019/10/10 03:43:59 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_psstatus.c,v 1.38 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -69,17 +69,21 @@ static void rf_ShutdownPSStatus(void *);
 static void
 rf_ShutdownPSStatus(void *arg)
 {
+	RF_Raid_t *raidPtr;
 
-	pool_destroy(&rf_pools.pss);
+	raidPtr = (RF_Raid_t *) arg;
+
+	pool_destroy(&raidPtr->pools.pss);
 }
 
 int
-rf_ConfigurePSStatus(RF_ShutdownList_t **listp)
+rf_ConfigurePSStatus(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
+		     RF_Config_t *cfgPtr)
 {
 
-	rf_pool_init(&rf_pools.pss, sizeof(RF_ReconParityStripeStatus_t),
-		     "raidpsspl", RF_MIN_FREE_PSS, RF_MAX_FREE_PSS);
-	rf_ShutdownCreate(listp, rf_ShutdownPSStatus, NULL);
+	rf_pool_init(raidPtr, raidPtr->poolNames.pss, &raidPtr->pools.pss, sizeof(RF_ReconParityStripeStatus_t),
+		     "pss", RF_MIN_FREE_PSS, RF_MAX_FREE_PSS);
+	rf_ShutdownCreate(listp, rf_ShutdownPSStatus, raidPtr);
 
 	return (0);
 }
@@ -257,7 +261,7 @@ rf_RemoveFromActiveReconTable(RF_Raid_t 
 		Dprintf1("Waking up access waiting on parity stripe ID %ld\n", p->parityStripeID);
 		cb1 = cb->next;
 		(cb->callbackFunc) (cb->callbackArg);
-		rf_FreeCallbackFuncDesc(cb);
+		rf_FreeCallbackFuncDesc(raidPtr, cb);
 		cb = cb1;
 	}
 
@@ -267,7 +271,7 @@ rf_RemoveFromActiveReconTable(RF_Raid_t 
 RF_ReconParityStripeStatus_t *
 rf_AllocPSStatus(RF_Raid_t *raidPtr)
 {
-	return pool_get(&rf_pools.pss, PR_WAITOK | PR_ZERO);
+	return pool_get(&raidPtr->pools.pss, PR_WAITOK | PR_ZERO);
 }
 
 void
@@ -277,7 +281,7 @@ rf_FreePSStatus(RF_Raid_t *raidPtr, RF_R
 	RF_ASSERT(p->blockWaitList == NULL);
 	RF_ASSERT(p->bufWaitList == NULL);
 
-	pool_put(&rf_pools.pss, p);
+	pool_put(&raidPtr->pools.pss, p);
 }
 
 static void
Index: src/sys/dev/raidframe/rf_raid1.c
diff -u src/sys/dev/raidframe/rf_raid1.c:1.37 src/sys/dev/raidframe/rf_raid1.c:1.38
--- src/sys/dev/raidframe/rf_raid1.c:1.37	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_raid1.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_raid1.c,v 1.37 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_raid1.c,v 1.38 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -33,7 +33,7 @@
  *****************************************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_raid1.c,v 1.37 2019/10/10 03:43:59 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_raid1.c,v 1.38 2021/07/23 00:54:45 oster Exp $");
 
 #include "rf_raid.h"
 #include "rf_raid1.h"
@@ -286,7 +286,7 @@ rf_VerifyParityRAID1(RF_Raid_t *raidPtr,
 	rf_MakeAllocList(allocList);
 	if (allocList == NULL)
 		return (RF_PARITY_COULD_NOT_VERIFY);
-	mcpair = rf_AllocMCPair();
+	mcpair = rf_AllocMCPair(raidPtr);
 	if (mcpair == NULL)
 		goto done;
 	RF_ASSERT(layoutPtr->numDataCol == layoutPtr->numParityCol);
@@ -522,13 +522,13 @@ done:
          * so cleanup what we have to and return our running status.
          */
 	if (asm_h)
-		rf_FreeAccessStripeMap(asm_h);
+		rf_FreeAccessStripeMap(raidPtr, asm_h);
 	if (rd_dag_h)
 		rf_FreeDAG(rd_dag_h);
 	if (wr_dag_h)
 		rf_FreeDAG(wr_dag_h);
 	if (mcpair)
-		rf_FreeMCPair(mcpair);
+		rf_FreeMCPair(raidPtr, mcpair);
 	rf_FreeAllocList(allocList);
 #if RF_DEBUG_VERIFYPARITY
 	if (rf_verifyParityDebug) {
@@ -646,7 +646,7 @@ rf_SubmitReconBufferRAID1(RF_ReconBuffer
 			RF_PANIC();
 		}
 		pssPtr->flags |= RF_PSS_BUFFERWAIT;
-		cb = rf_AllocCallbackValueDesc();
+		cb = rf_AllocCallbackValueDesc(raidPtr);
 		cb->col = rbuf->col;
 		cb->v = rbuf->parityStripeID;
 		cb->next = NULL;
Index: src/sys/dev/raidframe/rf_reconutil.c
diff -u src/sys/dev/raidframe/rf_reconutil.c:1.37 src/sys/dev/raidframe/rf_reconutil.c:1.38
--- src/sys/dev/raidframe/rf_reconutil.c:1.37	Sat Feb  9 03:34:00 2019
+++ src/sys/dev/raidframe/rf_reconutil.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_reconutil.c,v 1.37 2019/02/09 03:34:00 christos Exp $	*/
+/*	$NetBSD: rf_reconutil.c,v 1.38 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -31,7 +31,7 @@
  ********************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_reconutil.c,v 1.37 2019/02/09 03:34:00 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_reconutil.c,v 1.38 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -231,7 +231,7 @@ rf_MakeReconBuffer(RF_Raid_t *raidPtr, R
 	RF_ReconBuffer_t *t;
 	u_int   recon_buffer_size = rf_RaidAddressToByte(raidPtr, layoutPtr->SUsPerRU * layoutPtr->sectorsPerStripeUnit);
 
-	t = pool_get(&rf_pools.reconbuffer, PR_WAITOK);
+	t = pool_get(&raidPtr->pools.reconbuffer, PR_WAITOK);
 	t->buffer = RF_Malloc(recon_buffer_size);
 	t->raidPtr = raidPtr;
 	t->col = col;
@@ -253,7 +253,7 @@ rf_FreeReconBuffer(RF_ReconBuffer_t *rbu
 	recon_buffer_size = rf_RaidAddressToByte(raidPtr, raidPtr->Layout.SUsPerRU * raidPtr->Layout.sectorsPerStripeUnit);
 
 	RF_Free(rbuf->buffer, recon_buffer_size);
-	pool_put(&rf_pools.reconbuffer, rbuf);
+	pool_put(&raidPtr->pools.reconbuffer, rbuf);
 }
 
 #if RF_DEBUG_RECON

Index: src/sys/dev/raidframe/rf_psstatus.h
diff -u src/sys/dev/raidframe/rf_psstatus.h:1.15 src/sys/dev/raidframe/rf_psstatus.h:1.16
--- src/sys/dev/raidframe/rf_psstatus.h:1.15	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_psstatus.h	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_psstatus.h,v 1.15 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_psstatus.h,v 1.16 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -122,7 +122,7 @@ struct RF_PSStatusHeader_s {
 #define RF_PSS_BUFFERWAIT      0x00000020	/* someone is waiting for a
 						 * buffer for this RU */
 
-int rf_ConfigurePSStatus(RF_ShutdownList_t **);
+int rf_ConfigurePSStatus(RF_ShutdownList_t **, RF_Raid_t *, RF_Config_t *);
 void rf_InitPSStatus(RF_Raid_t *);
 RF_PSStatusHeader_t *rf_MakeParityStripeStatusTable(RF_Raid_t *);
 void rf_FreeParityStripeStatusTable(RF_Raid_t *, RF_PSStatusHeader_t *);

Index: src/sys/dev/raidframe/rf_raid.h
diff -u src/sys/dev/raidframe/rf_raid.h:1.48 src/sys/dev/raidframe/rf_raid.h:1.49
--- src/sys/dev/raidframe/rf_raid.h:1.48	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_raid.h	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_raid.h,v 1.48 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_raid.h,v 1.49 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -304,6 +304,8 @@ struct RF_Raid_s {
 
 #endif				/* RF_INCLUDE_PARITYLOGGING > 0 */
 	struct rf_paritymap *parity_map;
+	struct RF_Pools_s pools;
+	struct RF_PoolNames_s poolNames;
 };
 
 struct raid_softc {

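RF_Raid_s now embeds both the per-RAID pools and a parallel block of name buffers that rf_pool_init() writes the "raid%d_..." wchan strings into. The actual RF_Pools_s and RF_PoolNames_s definitions live in rf_netbsd.h (modified by this commit but not quoted here); the assumed shape is roughly:

/* Assumed shape only -- see rf_netbsd.h for the committed definitions. */
struct RF_PoolNames_s {
	char dqd[RF_MAX_POOLNAMELEN];	/* e.g. "raid0_dqd" */
	char bufio[RF_MAX_POOLNAMELEN];
	/* ... one buffer per pool in RF_Pools_s ... */
};
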
Index: src/sys/dev/raidframe/rf_reconstruct.c
diff -u src/sys/dev/raidframe/rf_reconstruct.c:1.125 src/sys/dev/raidframe/rf_reconstruct.c:1.126
--- src/sys/dev/raidframe/rf_reconstruct.c:1.125	Mon Feb 15 23:27:03 2021
+++ src/sys/dev/raidframe/rf_reconstruct.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_reconstruct.c,v 1.125 2021/02/15 23:27:03 oster Exp $	*/
+/*	$NetBSD: rf_reconstruct.c,v 1.126 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -33,7 +33,7 @@
  ************************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_reconstruct.c,v 1.125 2021/02/15 23:27:03 oster Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_reconstruct.c,v 1.126 2021/07/23 00:54:45 oster Exp $");
 
 #include <sys/param.h>
 #include <sys/time.h>
@@ -145,18 +145,23 @@ struct RF_ReconDoneProc_s {
  *
  **************************************************************************/
 static void
-rf_ShutdownReconstruction(void *ignored)
+rf_ShutdownReconstruction(void *arg)
 {
-	pool_destroy(&rf_pools.reconbuffer);
+	RF_Raid_t *raidPtr;
+
+	raidPtr = (RF_Raid_t *) arg;
+	
+	pool_destroy(&raidPtr->pools.reconbuffer);
 }
 
 int
-rf_ConfigureReconstruction(RF_ShutdownList_t **listp)
+rf_ConfigureReconstruction(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
+			   RF_Config_t *cfgPtr)
 {
 
-	rf_pool_init(&rf_pools.reconbuffer, sizeof(RF_ReconBuffer_t),
-		     "rf_reconbuffer_pl", RF_MIN_FREE_RECONBUFFER, RF_MAX_FREE_RECONBUFFER);
-	rf_ShutdownCreate(listp, rf_ShutdownReconstruction, NULL);
+	rf_pool_init(raidPtr, raidPtr->poolNames.reconbuffer, &raidPtr->pools.reconbuffer, sizeof(RF_ReconBuffer_t),
+		     "reconbuf", RF_MIN_FREE_RECONBUFFER, RF_MAX_FREE_RECONBUFFER);
+	rf_ShutdownCreate(listp, rf_ShutdownReconstruction, raidPtr);
 
 	return (0);
 }
@@ -1138,7 +1143,7 @@ ProcessReconEvent(RF_Raid_t *raidPtr, RF
 	default:
 		RF_PANIC();
 	}
-	rf_FreeReconEventDesc(event);
+	rf_FreeReconEventDesc(raidPtr, event);
 	return (retcode);
 }
 /*****************************************************************************
@@ -1611,7 +1616,7 @@ CheckForNewMinHeadSep(RF_Raid_t *raidPtr
 			reconCtrlPtr->headSepCBList = p->next;
 			p->next = NULL;
 			rf_CauseReconEvent(raidPtr, p->col, NULL, RF_REVENT_HEADSEPCLEAR);
-			rf_FreeCallbackValueDesc(p);
+			rf_FreeCallbackValueDesc(raidPtr, p);
 		}
 
 	}
@@ -1662,7 +1667,7 @@ CheckHeadSeparation(RF_Raid_t *raidPtr, 
 			 raidPtr->raidid, col, ctrl->headSepCounter,
 			 reconCtrlPtr->minHeadSepCounter,
 			 raidPtr->headSepLimit);
-		cb = rf_AllocCallbackValueDesc();
+		cb = rf_AllocCallbackValueDesc(raidPtr);
 		/* the minHeadSepCounter value we have to get to before we'll
 		 * wake up.  build in 20% hysteresis. */
 		cb->v = (ctrl->headSepCounter - raidPtr->headSepLimit + raidPtr->headSepLimit / 5);
@@ -1718,9 +1723,9 @@ CheckForcedOrBlockedReconstruction(RF_Ra
 	else
 		if (pssPtr->flags & RF_PSS_RECON_BLOCKED) {
 			Dprintf3("RECON: col %d blocked at psid %ld ru %d\n", col, psid, which_ru);
-			cb = rf_AllocCallbackValueDesc();	/* append ourselves to
-							 * the blockage-wait
-							 * list */
+			cb = rf_AllocCallbackValueDesc(raidPtr);	/* append ourselves to
+									 * the blockage-wait
+									 * list */
 			cb->col = col;
 			cb->next = pssPtr->blockWaitList;
 			pssPtr->blockWaitList = cb;
@@ -1838,7 +1843,7 @@ rf_ForceOrBlockRecon(RF_Raid_t *raidPtr,
 	}
 	/* install a callback descriptor to be invoked when recon completes on
 	 * this parity stripe. */
-	cb = rf_AllocCallbackFuncDesc();
+	cb = rf_AllocCallbackFuncDesc(raidPtr);
 	cb->callbackFunc = cbFunc;
 	cb->callbackArg = cbArg;
 	cb->next = pssPtr->procWaitList;
@@ -1917,7 +1922,7 @@ rf_UnblockRecon(RF_Raid_t *raidPtr, RF_A
 			pssPtr->blockWaitList = cb->next;
 			cb->next = NULL;
 			rf_CauseReconEvent(raidPtr, cb->col, NULL, RF_REVENT_BLOCKCLEAR);
-			rf_FreeCallbackValueDesc(cb);
+			rf_FreeCallbackValueDesc(raidPtr, cb);
 		}
 		if (!(pssPtr->flags & RF_PSS_UNDER_RECON)) {
 			/* if no recon was requested while recon was blocked */
@@ -1948,7 +1953,7 @@ rf_WakeupHeadSepCBWaiters(RF_Raid_t *rai
 		raidPtr->reconControl->headSepCBList = p->next;
 		p->next = NULL;
 		rf_CauseReconEvent(raidPtr, p->col, NULL, RF_REVENT_HEADSEPCLEAR);
-		rf_FreeCallbackValueDesc(p);
+		rf_FreeCallbackValueDesc(raidPtr, p);
 	}
 	rf_lock_mutex2(raidPtr->reconControl->rb_mutex);
 	raidPtr->reconControl->rb_lock = 0;

Index: src/sys/dev/raidframe/rf_states.c
diff -u src/sys/dev/raidframe/rf_states.c:1.51 src/sys/dev/raidframe/rf_states.c:1.52
--- src/sys/dev/raidframe/rf_states.c:1.51	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_states.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_states.c,v 1.51 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_states.c,v 1.52 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_states.c,v 1.51 2019/10/10 03:43:59 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_states.c,v 1.52 2021/07/23 00:54:45 oster Exp $");
 
 #include <sys/errno.h>
 
@@ -306,7 +306,7 @@ rf_State_Quiesce(RF_RaidAccessDesc_t *de
 	if (need_cb) {
 		/* create a callback if we might need it...
 		   and we likely do. */
-		cb = rf_AllocCallbackFuncDesc();
+		cb = rf_AllocCallbackFuncDesc(raidPtr);
 	}
 
 	rf_lock_mutex2(raidPtr->access_suspend_mutex);
@@ -321,7 +321,7 @@ rf_State_Quiesce(RF_RaidAccessDesc_t *de
 	rf_unlock_mutex2(raidPtr->access_suspend_mutex);
 
 	if ((need_cb == 1) && (used_cb == 0)) {
-		rf_FreeCallbackFuncDesc(cb);
+		rf_FreeCallbackFuncDesc(raidPtr, cb);
 	}
 
 #if RF_ACC_TRACE > 0
@@ -395,7 +395,7 @@ rf_State_Lock(RF_RaidAccessDesc_t *desc)
 			RF_INIT_LOCK_REQ_DESC(asm_p->lockReqDesc, desc->type,
 					      rf_ContinueRaidAccess, desc, asm_p,
 					      raidPtr->Layout.dataSectorsPerStripe);
-			if (rf_AcquireStripeLock(raidPtr->lockTable, asm_p->stripeID,
+			if (rf_AcquireStripeLock(raidPtr, raidPtr->lockTable, asm_p->stripeID,
 						 &asm_p->lockReqDesc)) {
 				suspended = RF_TRUE;
 				break;
@@ -617,7 +617,7 @@ rf_State_ProcessDAG(RF_RaidAccessDesc_t 
 				rf_FreeDAG(dagList->dags);
 				temp = dagList;
 				dagList = dagList->next;
-				rf_FreeDAGList(temp);
+				rf_FreeDAGList(raidPtr, temp);
 			}
 			desc->dagList = NULL;
 
@@ -709,7 +709,8 @@ rf_State_Cleanup(RF_RaidAccessDesc_t *de
 		    asm_p->parityInfo &&
 		    !(desc->flags & RF_DAG_SUPPRESS_LOCKS)) {
 			RF_ASSERT_VALID_LOCKREQ(&asm_p->lockReqDesc);
-			rf_ReleaseStripeLock(raidPtr->lockTable,
+			rf_ReleaseStripeLock(raidPtr,
+					     raidPtr->lockTable,
 					     asm_p->stripeID,
 					     &asm_p->lockReqDesc);
 		}
@@ -724,7 +725,7 @@ rf_State_Cleanup(RF_RaidAccessDesc_t *de
 
 	RF_ETIMER_START(timer);
 #endif
-	rf_FreeAccessStripeMap(asmh);
+	rf_FreeAccessStripeMap(raidPtr, asmh);
 #if RF_ACC_TRACE > 0
 	RF_ETIMER_STOP(timer);
 	RF_ETIMER_EVAL(timer);

Index: src/sys/dev/raidframe/rf_stripelocks.c
diff -u src/sys/dev/raidframe/rf_stripelocks.c:1.34 src/sys/dev/raidframe/rf_stripelocks.c:1.35
--- src/sys/dev/raidframe/rf_stripelocks.c:1.34	Thu Jul 11 03:49:51 2019
+++ src/sys/dev/raidframe/rf_stripelocks.c	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_stripelocks.c,v 1.34 2019/07/11 03:49:51 msaitoh Exp $	*/
+/*	$NetBSD: rf_stripelocks.c,v 1.35 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -57,7 +57,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_stripelocks.c,v 1.34 2019/07/11 03:49:51 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_stripelocks.c,v 1.35 2021/07/23 00:54:45 oster Exp $");
 
 #include <dev/raidframe/raidframevar.h>
 
@@ -99,8 +99,8 @@ __KERNEL_RCSID(0, "$NetBSD: rf_stripeloc
 
 static void AddToWaitersQueue(RF_StripeLockDesc_t * lockDesc,
 			      RF_LockReqDesc_t * lockReqDesc);
-static RF_StripeLockDesc_t *AllocStripeLockDesc(RF_StripeNum_t stripeID);
-static void FreeStripeLockDesc(RF_StripeLockDesc_t * p);
+static RF_StripeLockDesc_t *AllocStripeLockDesc(RF_Raid_t *raidPtr, RF_StripeNum_t stripeID);
+static void FreeStripeLockDesc(RF_Raid_t *raidPtr, RF_StripeLockDesc_t * p);
 static RF_LockTableEntry_t *rf_MakeLockTable(void);
 #if RF_DEBUG_STRIPELOCK
 static void PrintLockedStripes(RF_LockTableEntry_t * lockTable);
@@ -158,19 +158,24 @@ static void rf_ShutdownStripeLockFreeLis
 static void rf_RaidShutdownStripeLocks(void *);
 
 static void
-rf_ShutdownStripeLockFreeList(void *ignored)
+rf_ShutdownStripeLockFreeList(void *arg)
 {
-	pool_destroy(&rf_pools.stripelock);
+	RF_Raid_t *raidPtr;
+
+	raidPtr = (RF_Raid_t *) arg;
+	
+	pool_destroy(&raidPtr->pools.stripelock);
 }
 
 int
-rf_ConfigureStripeLockFreeList(RF_ShutdownList_t **listp)
+rf_ConfigureStripeLockFreeList(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
+			       RF_Config_t *cfgPtr)
 {
 	unsigned mask;
 
-	rf_pool_init(&rf_pools.stripelock, sizeof(RF_StripeLockDesc_t),
-		     "rf_stripelock_pl", RF_MIN_FREE_STRIPELOCK, RF_MAX_FREE_STRIPELOCK);
-	rf_ShutdownCreate(listp, rf_ShutdownStripeLockFreeList, NULL);
+	rf_pool_init(raidPtr, raidPtr->poolNames.stripelock, &raidPtr->pools.stripelock, sizeof(RF_StripeLockDesc_t),
+		     "strplock", RF_MIN_FREE_STRIPELOCK, RF_MAX_FREE_STRIPELOCK);
+	rf_ShutdownCreate(listp, rf_ShutdownStripeLockFreeList, raidPtr);
 
 	for (mask = 0x1; mask; mask <<= 1)
 		if (rf_lockTableSize == mask)
@@ -245,7 +250,7 @@ rf_ConfigureStripeLocks(RF_ShutdownList_
  * *releaseTag that you need to give back to us when you release the
  * lock.  */
 int
-rf_AcquireStripeLock(RF_LockTableEntry_t *lockTable, RF_StripeNum_t stripeID,
+rf_AcquireStripeLock(RF_Raid_t *raidPtr, RF_LockTableEntry_t *lockTable, RF_StripeNum_t stripeID,
 		     RF_LockReqDesc_t *lockReqDesc)
 {
 	RF_StripeLockDesc_t *lockDesc;
@@ -275,7 +280,7 @@ rf_AcquireStripeLock(RF_LockTableEntry_t
 	if (stripeID == -1)
 		return (0);
 	lockReqDesc->next = NULL;	/* just to be sure */
-	newlockDesc = AllocStripeLockDesc(stripeID);
+	newlockDesc = AllocStripeLockDesc(raidPtr, stripeID);
 
 	rf_lock_mutex2(lockTable[hashval].mutex);
 	for (lockDesc = lockTable[hashval].descList; lockDesc;
@@ -301,7 +306,7 @@ rf_AcquireStripeLock(RF_LockTableEntry_t
 #endif
 	} else {
 		/* we won't be needing newlockDesc after all.. pity.. */
-		FreeStripeLockDesc(newlockDesc);
+		FreeStripeLockDesc(raidPtr, newlockDesc);
 
 		if (lockReqDesc->type == RF_IO_TYPE_WRITE)
 			lockDesc->nWriters++;
@@ -368,7 +373,7 @@ rf_AcquireStripeLock(RF_LockTableEntry_t
 }
 
 void
-rf_ReleaseStripeLock(RF_LockTableEntry_t *lockTable, RF_StripeNum_t stripeID,
+rf_ReleaseStripeLock(RF_Raid_t *raidPtr, RF_LockTableEntry_t *lockTable, RF_StripeNum_t stripeID,
 		     RF_LockReqDesc_t *lockReqDesc)
 {
 	RF_StripeLockDesc_t *lockDesc, *ld_t;
@@ -606,7 +611,7 @@ rf_ReleaseStripeLock(RF_LockTableEntry_t
 			RF_ASSERT(lockDesc == lockTable[hashval].descList);
 			lockTable[hashval].descList = lockDesc->next;
 		}
-		FreeStripeLockDesc(lockDesc);
+		FreeStripeLockDesc(raidPtr, lockDesc);
 		lockDesc = NULL;/* only for the ASSERT below */
 	}
 	rf_unlock_mutex2(lockTable[hashval].mutex);
@@ -637,11 +642,11 @@ AddToWaitersQueue(RF_StripeLockDesc_t *l
 }
 
 static RF_StripeLockDesc_t *
-AllocStripeLockDesc(RF_StripeNum_t stripeID)
+AllocStripeLockDesc(RF_Raid_t *raidPtr, RF_StripeNum_t stripeID)
 {
 	RF_StripeLockDesc_t *p;
 
-	p = pool_get(&rf_pools.stripelock, PR_WAITOK);
+	p = pool_get(&raidPtr->pools.stripelock, PR_WAITOK);
 	if (p) {
 		p->stripeID = stripeID;
 		p->granted = NULL;
@@ -654,9 +659,9 @@ AllocStripeLockDesc(RF_StripeNum_t strip
 }
 
 static void
-FreeStripeLockDesc(RF_StripeLockDesc_t *p)
+FreeStripeLockDesc(RF_Raid_t *raidPtr, RF_StripeLockDesc_t *p)
 {
-	pool_put(&rf_pools.stripelock, p);
+	pool_put(&raidPtr->pools.stripelock, p);
 }
 
 #if RF_DEBUG_STRIPELOCK

Index: src/sys/dev/raidframe/rf_stripelocks.h
diff -u src/sys/dev/raidframe/rf_stripelocks.h:1.9 src/sys/dev/raidframe/rf_stripelocks.h:1.10
--- src/sys/dev/raidframe/rf_stripelocks.h:1.9	Thu Oct 10 03:43:59 2019
+++ src/sys/dev/raidframe/rf_stripelocks.h	Fri Jul 23 00:54:45 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: rf_stripelocks.h,v 1.9 2019/10/10 03:43:59 christos Exp $	*/
+/*	$NetBSD: rf_stripelocks.h,v 1.10 2021/07/23 00:54:45 oster Exp $	*/
 /*
  * Copyright (c) 1995 Carnegie-Mellon University.
  * All rights reserved.
@@ -104,9 +104,9 @@ struct RF_LockTableEntry_s {
     (_lrd).cbArg   = (void *) (_cba);                                                                        \
   }
 
-int rf_ConfigureStripeLockFreeList(RF_ShutdownList_t **);
-int rf_ConfigureStripeLocks(RF_ShutdownList_t **, RF_Raid_t *, RF_Config_t *);
-int rf_AcquireStripeLock(RF_LockTableEntry_t *, RF_StripeNum_t, RF_LockReqDesc_t *);
-void rf_ReleaseStripeLock(RF_LockTableEntry_t *, RF_StripeNum_t, RF_LockReqDesc_t *);
+int rf_ConfigureStripeLockFreeList(RF_ShutdownList_t **, RF_Raid_t *, RF_Config_t *);
+int rf_ConfigureStripeLocks(RF_ShutdownList_t **, RF_Raid_t *, RF_Config_t *); 
+int rf_AcquireStripeLock(RF_Raid_t *, RF_LockTableEntry_t *, RF_StripeNum_t, RF_LockReqDesc_t *);
+void rf_ReleaseStripeLock(RF_Raid_t *, RF_LockTableEntry_t *, RF_StripeNum_t, RF_LockReqDesc_t *);
 
 #endif				/* !_RF__RF_STRIPELOCKS_H_ */
