i have a few things left to do on the pools per cpu caches, one of
which is making their activity visible. to that end, here's a diff
that provides a way for userland to request stats from the per cpu
caches, and uses that in systat so you can watch them.

there are two added pool sysctls. one copies out an array of stats,
one entry for each cpu's cache. the interesting bits in those stats
are how many items each cpu handled, and how many list operations
the cpu did against the global pool cache.
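
for illustration, a userland consumer ends up looking roughly like
this sketch (error handling trimmed; "pool" is the pool index and
ncpusfound comes from the hw.ncpufound sysctl, both assumed to have
been fetched already; systat's pool_get_cache_cpus() in the diff
below does the same dance):

	/* needs <sys/param.h>, <sys/sysctl.h>, <sys/pool.h>, <err.h>, <stdlib.h> */
	int mib[] = { CTL_KERN, KERN_POOL, KERN_POOL_CACHE_CPUS, pool };
	struct kinfo_pool_cache_cpu *kpcc;
	size_t len = ncpusfound * sizeof(*kpcc);

	kpcc = calloc(ncpusfound, sizeof(*kpcc));
	if (kpcc == NULL)
		err(1, "calloc");
	if (sysctl(mib, nitems(mib), kpcc, &len, NULL, 0) == -1)
		err(1, "sysctl(KERN_POOL_CACHE_CPUS)");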

the second sysctl reports stats about the global pool cache.
currently these are the target length for the lists the cpus build,
how many lists it's holding, and how many times the gc has moved a
list of items back into the pool for recovery.
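
reading the global cache stats is similar (same assumptions; cf
pool_get_cache() in the diff below):

	int mib[] = { CTL_KERN, KERN_POOL, KERN_POOL_CACHE, pool };
	struct kinfo_pool_cache kpc;
	size_t len = sizeof(kpc);

	if (sysctl(mib, nitems(mib), &kpc, &len, NULL, 0) == -1)
		err(1, "sysctl(KERN_POOL_CACHE)");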

these are used by systat for a new view i've called pcaches, short
for pool caches. it looks like this:

   2 users Load 1.42 0.78 0.35 (1-88 of 158)       v880.embarrassm.net 13:48:40

NAME             LEN   NL  NGC  CPU         REQ        REL       LREQ       LREL
mbufpl             8   72    0    0      955967    2622839        794     209151
                                  1      907108     362300      88367      20266
                                  2      915933     356927      90031      20155
                                  3      878324     339160      86646      19250
                                  4       30620       7851       3301        454
                                  5        1401        885        117         52
                                  6          56         57          0          0
                                  7          37         45          0          0
mtagpl             8    0    0    0           0          0          0          0
                                  1           0          0          0          0
                                  2           0          0          0          0
                                  3           0          0          0          0
                                  4           0          0          0          0
                                  5           0          0          0          0
                                  6           0          0          0          0
                                  7           0          0          0          0
mcl2k              8   32    0    0      944634       7776     117605        497
                                  1         122     318421          0      39786
                                  2         114     313498          0      39171
                                  3         105     298051          0      37242
                                  4          22       6834          0        850
                                  5           0        744          0         91
                                  6           0          1          0          0
                                  7           1          3          0          0
mcl2k2             8    0    0    0           0          0          0          0
                                  1           0          0          0          0
                                  2           0          0          0          0
                                  3           0          0          0          0
                                  4           0          0          0          0
                                  5           0          0          0          0
                                  6           0          0          0          0
                                  7           0          0          0          0
mcl4k              8    2    0    0          20         44          1          2
                                  1        1978       2072         85         96
                                  2        2100       1988         97         83
                                  3        1910       1986         83         91
                                  4          87         61          6          2
                                  5           1          9          0          0
                                  6           0          0          0          0
                                  7           0          0          0          0
mcl8k              8   17    0    0          97        956         11        117
                                  1       19517      20418       2254       2366
                                  2       20934      20704       2441       2412
                                  3       20294      19734       2347       2275
                                  4        1234        399        150         45
                                  5           0         54          0          5
                                  6           0          0          0          0
                                  7           0          0          0          0
mcl9k              8    0    0    0           1          5          0          0
                                  1         145        176          2          4
                                  2         156        162          2          2
                                  3         159        154          1          0
                                  4           3          2          1          0
                                  5           0          1          0          0
                                  6           0          0          0          0
                                  7           0          0          0          0
mcl12k             8    2    0    0           2          8          0          0
                                  1         271        294          6          8
                                  2         302        256          8          2
                                  3         223        294          1          9
                                  4          20          4          2          0
                                  5           0          2          0          0
                                  6           0          0          0          0
                                  7           0          0          0          0
mcl16k             8    2    0    0           7         34          0          2
                                  1        1108       1137         39         42
                                  2        1095       1141         40         45
                                  3        1021        998         39         35
                                  4          51         27          4          0
                                  5           1          3          0          0
                                  6           0          0          0          0
                                  7           0          0          0          0
mcl64k             8    3    0    0          68        327          3         34
                                  1       17492      18495       1694       1819
                                  2       18369      18135       1842       1812
                                  3       18017      17252       1808       1711
                                  4         618        409         69         41
                                  5          22         41          1          3
                                  6           0          0          0          0
                                  7           0          0          0          0


the systat view could be improved, but i'd prefer to do that in tree.

ok?

Index: sys/kern/subr_pool.c
===================================================================
RCS file: /cvs/src/sys/kern/subr_pool.c,v
retrieving revision 1.209
diff -u -p -r1.209 subr_pool.c
--- sys/kern/subr_pool.c        13 Jun 2017 11:41:11 -0000      1.209
+++ sys/kern/subr_pool.c        14 Jun 2017 03:13:47 -0000
@@ -122,9 +122,12 @@ struct pool_cache {
        struct pool_cache_item  *pc_prev;       /* previous list of items */
 
        uint64_t                 pc_gen;        /* generation number */
-       uint64_t                 pc_gets;
-       uint64_t                 pc_puts;
-       uint64_t                 pc_fails;
+       uint64_t                 pc_nget;       /* # of successful requests */
+       uint64_t                 pc_nfail;      /* # of unsuccessful reqs */
+       uint64_t                 pc_nput;       /* # of releases */
+       uint64_t                 pc_nlget;      /* # of list requests */
+       uint64_t                 pc_nlfail;     /* # of fails getting a list */
+       uint64_t                 pc_nlput;      /* # of list releases */
 
        int                      pc_nout;
 };
@@ -133,7 +136,9 @@ void        *pool_cache_get(struct pool *);
 void    pool_cache_put(struct pool *, void *);
 void    pool_cache_destroy(struct pool *);
 #endif
-void    pool_cache_info(struct pool *, struct kinfo_pool *);
+void    pool_cache_pool_info(struct pool *, struct kinfo_pool *);
+int     pool_cache_info(struct pool *, void *, size_t *);
+int     pool_cache_cpus_info(struct pool *, void *, size_t *);
 
 #ifdef POOL_DEBUG
 int    pool_debug = 1;
@@ -1379,6 +1384,8 @@ sysctl_dopool(int *name, u_int namelen, 
 
        case KERN_POOL_NAME:
        case KERN_POOL_POOL:
+       case KERN_POOL_CACHE:
+       case KERN_POOL_CACHE_CPUS:
                break;
        default:
                return (EOPNOTSUPP);
@@ -1423,10 +1430,18 @@ sysctl_dopool(int *name, u_int namelen, 
                pi.pr_nidle = pp->pr_nidle;
                mtx_leave(&pp->pr_mtx);
 
-               pool_cache_info(pp, &pi);
+               pool_cache_pool_info(pp, &pi);
 
                rv = sysctl_rdstruct(oldp, oldlenp, NULL, &pi, sizeof(pi));
                break;
+
+       case KERN_POOL_CACHE:
+               rv = pool_cache_info(pp, oldp, oldlenp);
+               break;
+
+       case KERN_POOL_CACHE_CPUS:
+               rv = pool_cache_cpus_info(pp, oldp, oldlenp);
+               break;
        }
 
 done:
@@ -1608,7 +1623,7 @@ pool_cache_init(struct pool *pp)
                    IPL_NONE, PR_WAITOK, "plcache", NULL);
        }
 
-        /* must be able to use the pool items as cache list items */
+       /* must be able to use the pool items as cache list entries */
        KASSERT(pp->pr_size >= sizeof(struct pool_cache_item));
 
        cm = cpumem_get(&pool_caches);
@@ -1625,9 +1640,12 @@ pool_cache_init(struct pool *pp)
                pc->pc_nactv = 0;
                pc->pc_prev = NULL;
 
-               pc->pc_gets = 0;
-               pc->pc_puts = 0;
-               pc->pc_fails = 0;
+               pc->pc_nget = 0;
+               pc->pc_nfail = 0;
+               pc->pc_nput = 0;
+               pc->pc_nlget = 0;
+               pc->pc_nlfail = 0;
+               pc->pc_nlput = 0;
                pc->pc_nout = 0;
        }
 
@@ -1694,7 +1712,10 @@ pool_cache_list_alloc(struct pool *pp, s
                pp->pr_cache_nlist--;
 
                pool_cache_item_magic(pp, pl);
-       }
+
+               pc->pc_nlget++;
+       } else
+               pc->pc_nlfail++;
 
        /* fold this cpus nout into the global while we have the lock */
        pp->pr_cache_nout += pc->pc_nout;
@@ -1712,6 +1733,8 @@ pool_cache_list_free(struct pool *pp, st
        TAILQ_INSERT_TAIL(&pp->pr_cache_lists, ci, ci_nextl);
        pp->pr_cache_nlist++;
 
+       pc->pc_nlput++;
+
        /* fold this cpus nout into the global while we have the lock */
        pp->pr_cache_nout += pc->pc_nout;
        pc->pc_nout = 0;
@@ -1753,7 +1776,7 @@ pool_cache_get(struct pool *pp)
                ci = pc->pc_prev;
                pc->pc_prev = NULL;
        } else if ((ci = pool_cache_list_alloc(pp, pc)) == NULL) {
-               pc->pc_fails++;
+               pc->pc_nfail++;
                goto done;
        }
 
@@ -1778,7 +1801,7 @@ pool_cache_get(struct pool *pp)
 
        pc->pc_actv = ci->ci_next;
        pc->pc_nactv = POOL_CACHE_ITEM_NITEMS(ci) - 1;
-       pc->pc_gets++;
+       pc->pc_nget++;
        pc->pc_nout++;
 
 done:
@@ -1825,7 +1848,7 @@ pool_cache_put(struct pool *pp, void *v)
        pc->pc_actv = ci;
        pc->pc_nactv = nitems;
 
-       pc->pc_puts++;
+       pc->pc_nput++;
        pc->pc_nout--;
 
        pool_cache_leave(pp, pc, s);
@@ -1874,7 +1897,7 @@ pool_cache_destroy(struct pool *pp)
 }
 
 void
-pool_cache_info(struct pool *pp, struct kinfo_pool *pi)
+pool_cache_pool_info(struct pool *pp, struct kinfo_pool *pi)
 {
        struct pool_cache *pc;
        struct cpumem_iter i;
@@ -1892,8 +1915,8 @@ pool_cache_info(struct pool *pp, struct 
                        while ((gen = pc->pc_gen) & 1)
                                yield();
 
-                       nget = pc->pc_gets;
-                       nput = pc->pc_puts;
+                       nget = pc->pc_nget;
+                       nput = pc->pc_nput;
                } while (gen != pc->pc_gen);
 
                pi->pr_nget += nget;
@@ -1908,6 +1931,80 @@ pool_cache_info(struct pool *pp, struct 
        pi->pr_nout += pp->pr_cache_nout;
        mtx_leave(&pp->pr_cache_mtx);
 }
+
+int
+pool_cache_info(struct pool *pp, void *oldp, size_t *oldlenp)
+{
+       struct kinfo_pool_cache kpc;
+
+       if (pp->pr_cache == NULL)
+               return (EOPNOTSUPP);
+
+       memset(&kpc, 0, sizeof(kpc)); /* don't leak padding */
+
+       mtx_enter(&pp->pr_cache_mtx);
+       kpc.pr_ngc = 0; /* notyet */
+       kpc.pr_len = pp->pr_cache_items;
+       kpc.pr_nlist = pp->pr_cache_nlist;
+       mtx_leave(&pp->pr_cache_mtx);
+
+       return (sysctl_rdstruct(oldp, oldlenp, NULL, &kpc, sizeof(kpc)));
+}
+
+int
+pool_cache_cpus_info(struct pool *pp, void *oldp, size_t *oldlenp)
+{
+       struct pool_cache *pc;
+       struct kinfo_pool_cache_cpu *kpcc, *info;
+       unsigned int cpu = 0;
+       struct cpumem_iter i;
+       int error = 0;
+       size_t len;
+
+       if (pp->pr_cache == NULL)
+               return (EOPNOTSUPP);
+       if (*oldlenp % sizeof(*kpcc))
+               return (EINVAL);
+
+       kpcc = mallocarray(ncpusfound, sizeof(*kpcc), M_TEMP,
+           M_WAITOK|M_CANFAIL|M_ZERO);
+       if (kpcc == NULL)
+               return (EIO);
+
+       len = ncpusfound * sizeof(*kpcc);
+
+       CPUMEM_FOREACH(pc, &i, pp->pr_cache) {
+               uint64_t gen;
+
+               if (cpu >= ncpusfound) {
+                       error = EIO;
+                       goto err;
+               }
+
+               info = &kpcc[cpu];
+               info->pr_cpu = cpu;
+
+               do {
+                       while ((gen = pc->pc_gen) & 1)
+                               yield();
+
+                       info->pr_nget = pc->pc_nget;
+                       info->pr_nfail = pc->pc_nfail;
+                       info->pr_nput = pc->pc_nput;
+                       info->pr_nlget = pc->pc_nlget;
+                       info->pr_nlfail = pc->pc_nlfail;
+                       info->pr_nlput = pc->pc_nlput;
+               } while (gen != pc->pc_gen);
+
+               cpu++;
+       }
+
+       error = sysctl_rdstruct(oldp, oldlenp, NULL, kpcc, len);
+err:
+       free(kpcc, M_TEMP, len);
+
+       return (error);
+}
 #else /* MULTIPROCESSOR */
 void
 pool_cache_init(struct pool *pp)
@@ -1916,8 +2013,20 @@ pool_cache_init(struct pool *pp)
 }
 
 void
-pool_cache_info(struct pool *pp, struct kinfo_pool *pi)
+pool_cache_pool_info(struct pool *pp, struct kinfo_pool *pi)
 {
        /* nop */
+}
+
+int
+pool_cache_info(struct pool *pp, void *oldp, size_t *oldlenp)
+{
+       return (EOPNOTSUPP);
+}
+
+int
+pool_cache_cpus_info(struct pool *pp, void *oldp, size_t *oldlenp)
+{
+       return (EOPNOTSUPP);
 }
 #endif /* MULTIPROCESSOR */
Index: sys/sys/pool.h
===================================================================
RCS file: /cvs/src/sys/sys/pool.h,v
retrieving revision 1.69
diff -u -p -r1.69 pool.h
--- sys/sys/pool.h      7 Feb 2017 05:39:17 -0000       1.69
+++ sys/sys/pool.h      14 Jun 2017 03:13:47 -0000
@@ -43,6 +43,8 @@
 #define KERN_POOL_NPOOLS       1
 #define KERN_POOL_NAME         2
 #define KERN_POOL_POOL         3
+#define KERN_POOL_CACHE                4       /* global pool cache info */
+#define KERN_POOL_CACHE_CPUS   5       /* all cpus cache info */
 
 struct kinfo_pool {
        unsigned int    pr_size;        /* size of a pool item */
@@ -66,6 +68,30 @@ struct kinfo_pool {
        unsigned long   pr_nidle;       /* # of idle pages */
 };
 
+struct kinfo_pool_cache {
+       uint64_t        pr_ngc;         /* # of times a list has been gc'ed */
+       unsigned int    pr_len;         /* current target for list len */
+       unsigned int    pr_nlist;       /* # of lists in the pool */
+};
+
+/*
+ * KERN_POOL_CACHE_CPUS provides access to an array, not a single
+ * struct. ie, it provides struct kinfo_pool_cache_cpu kppc[ncpusfound].
+ */
+struct kinfo_pool_cache_cpu {
+       unsigned int    pr_cpu;         /* which cpu is this a cache on */
+
+       /* counters for times items were handled by the cache */
+       uint64_t        pr_nget;        /* # of requests */
+       uint64_t        pr_nfail;       /* # of unsuccessful requests */
+       uint64_t        pr_nput;        /* # of releases */
+
+       /* counters for times the cache interacted with the pool */
+       uint64_t        pr_nlget;       /* # of list requests */
+       uint64_t        pr_nlfail;      /* # of unsuccessful list requests */
+       uint64_t        pr_nlput;       /* # of list releases */
+};
+
 #if defined(_KERNEL) || defined(_LIBKVM)
 
 #include <sys/queue.h>
@@ -159,8 +185,8 @@ struct pool {
        struct mutex    pr_cache_mtx;
        struct pool_cache_lists
                        pr_cache_lists;
-       u_int           pr_cache_nlist;
-       u_int           pr_cache_items;
+       u_int           pr_cache_nlist; /* # of lists */
+       u_int           pr_cache_items; /* target list length */
        u_int           pr_cache_contention;
        int             pr_cache_nout;
 
Index: usr.bin/systat/pool.c
===================================================================
RCS file: /cvs/src/usr.bin/systat/pool.c,v
retrieving revision 1.11
diff -u -p -r1.11 pool.c
--- usr.bin/systat/pool.c       12 Mar 2016 12:45:27 -0000      1.11
+++ usr.bin/systat/pool.c       14 Jun 2017 03:13:48 -0000
@@ -26,6 +26,19 @@
 
 #include "systat.h"
 
+#ifndef nitems
+#define nitems(_a) (sizeof((_a)) / sizeof((_a)[0]))
+#endif
+
+static int sysctl_rdint(const int *, unsigned int);
+static int hw_ncpusfound(void);
+
+static int pool_get_npools(void);
+static int pool_get_name(int, char *, size_t);
+static int pool_get_cache(int, struct kinfo_pool_cache *);
+static int pool_get_cache_cpus(int, struct kinfo_pool_cache_cpu *,
+    unsigned int);
+
 void print_pool(void);
 int  read_pool(void);
 void  sort_pool(void);
@@ -44,11 +57,25 @@ struct pool_info {
        struct kinfo_pool pool;
 };
 
+/*
+ * the kernel gives an array of ncpusfound * kinfo_pool_cache_cpu
+ * structs, but its idea of how big that struct is may differ from
+ * ours. we fetch ncpusfound via sysctl and then allocate the memory
+ * for the per cpu stats here.
+ */
+struct pool_cache_info {
+       char name[32];
+       struct kinfo_pool_cache cache;
+       struct kinfo_pool_cache_cpu *cache_cpus;
+};
 
 int print_all = 0;
 int num_pools = 0;
 struct pool_info *pools = NULL;
+int num_pool_caches = 0;
+struct pool_cache_info *pool_caches = NULL;
 
+int ncpusfound = 0;
 
 field_def fields_pool[] = {
        {"NAME", 12, 32, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
@@ -65,7 +92,6 @@ field_def fields_pool[] = {
        {"IDLE", 8, 24, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}
 };
 
-
 #define FLD_POOL_NAME  FIELD_ADDR(fields_pool,0)
 #define FLD_POOL_SIZE  FIELD_ADDR(fields_pool,1)
 #define FLD_POOL_REQS  FIELD_ADDR(fields_pool,2)
@@ -100,11 +126,80 @@ struct view_manager pool_mgr = {
        print_pool, pool_keyboard_callback, pool_order_list, pool_order_list
 };
 
-field_view views_pool[] = {
-       {view_pool_0, "pool", '5', &pool_mgr},
+field_view pool_view = {
+       view_pool_0,
+       "pool",
+       '5',
+       &pool_mgr
+};
+
+void   pool_cache_print(void);
+int    pool_cache_read(void);
+void   pool_cache_sort(void);
+void   pool_cache_show(const struct pool_cache_info *);
+int    pool_cache_kbd_cb(int);
+
+field_def pool_cache_fields[] = {
+       {"NAME", 12, 32, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
+       {"LEN", 4, 4, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
+       {"NL", 4, 4, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
+       {"NGC", 4, 4, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
+       {"CPU",  4, 4, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
+       {"REQ", 8, 12, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
+       {"REL", 8, 12, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
+       {"LREQ", 8, 12, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
+       {"LREL", 8, 12, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
+};
+
+#define FLD_POOL_CACHE_NAME    FIELD_ADDR(pool_cache_fields, 0)
+#define FLD_POOL_CACHE_LEN     FIELD_ADDR(pool_cache_fields, 1)
+#define FLD_POOL_CACHE_NL      FIELD_ADDR(pool_cache_fields, 2)
+#define FLD_POOL_CACHE_NGC     FIELD_ADDR(pool_cache_fields, 3)
+#define FLD_POOL_CACHE_CPU     FIELD_ADDR(pool_cache_fields, 4)
+#define FLD_POOL_CACHE_GET     FIELD_ADDR(pool_cache_fields, 5)
+#define FLD_POOL_CACHE_PUT     FIELD_ADDR(pool_cache_fields, 6)
+#define FLD_POOL_CACHE_LGET    FIELD_ADDR(pool_cache_fields, 7)
+#define FLD_POOL_CACHE_LPUT    FIELD_ADDR(pool_cache_fields, 8)
+
+field_def *view_pool_cache_0[] = {
+       FLD_POOL_CACHE_NAME,
+       FLD_POOL_CACHE_LEN,
+       FLD_POOL_CACHE_NL,
+       FLD_POOL_CACHE_NGC,
+       FLD_POOL_CACHE_CPU,
+       FLD_POOL_CACHE_GET,
+       FLD_POOL_CACHE_PUT,
+       FLD_POOL_CACHE_LGET,
+       FLD_POOL_CACHE_LPUT,
+       NULL,
+};
+
+order_type pool_cache_order_list[] = {
+       {"name", "name", 'N', sort_name_callback},
+       {"requests", "requests", 'G', sort_req_callback},
+       {"releases", "releases", 'P', sort_req_callback},
        {NULL, NULL, 0, NULL}
 };
 
+/* Define view managers */
+struct view_manager pool_cache_mgr = {
+       "PoolCache",
+       select_pool,
+       pool_cache_read,
+       pool_cache_sort,
+       print_header,
+       pool_cache_print,
+       pool_keyboard_callback,
+       pool_cache_order_list,
+       pool_cache_order_list
+};
+
+field_view pool_cache_view = {
+       view_pool_cache_0,
+       "pcaches",
+       '5',
+       &pool_cache_mgr
+};
 
 int
 sort_name_callback(const void *s1, const void *s2)
@@ -200,30 +295,31 @@ select_pool(void)
 int
 read_pool(void)
 {
-       int mib[4], np, i;
+       int mib[] = { CTL_KERN, KERN_POOL, KERN_POOL_POOL, 0 };
+       struct pool_info *p;
+       int np, i;
        size_t size;
 
-       mib[0] = CTL_KERN;
-       mib[1] = KERN_POOL;
-       mib[2] = KERN_POOL_NPOOLS;
-       size = sizeof(np);
-
-       if (sysctl(mib, 3, &np, &size, NULL, 0) < 0) {
+       np = pool_get_npools();
+       if (np == -1) {
                error("sysctl(npools): %s", strerror(errno));
                return (-1);
        }
 
-       if (np <= 0) {
+       if (np == 0) {
+               free(pools);
+               pools = NULL;
                num_pools = 0;
                return (0);
        }
 
        if (np > num_pools || pools == NULL) {
-               struct pool_info *p = reallocarray(pools, np, sizeof(*pools));
+               p = reallocarray(pools, np, sizeof(*pools));
                if (p == NULL) {
                        error("realloc: %s", strerror(errno));
                        return (-1);
                }
+               /* commit */
                pools = p;
                num_pools = np;
        }
@@ -231,26 +327,19 @@ read_pool(void)
        num_disp = num_pools;
 
        for (i = 0; i < num_pools; i++) {
-               mib[0] = CTL_KERN;
-               mib[1] = KERN_POOL;
-               mib[2] = KERN_POOL_POOL;
-               mib[3] = i + 1;
+               p = &pools[i];
+               np = i + 1;
+
+               mib[3] = np;
                size = sizeof(pools[i].pool);
-               if (sysctl(mib, 4, &pools[i].pool, &size, NULL, 0) < 0) {
-                       memset(&pools[i], 0, sizeof(pools[i]));
+               if (sysctl(mib, nitems(mib), &p->pool, &size, NULL, 0) < 0) {
+                       p->name[0] = '\0';
                        num_disp--;
                        continue;
                }
-               mib[2] = KERN_POOL_NAME;
-               size = sizeof(pools[i].name);
-               if (sysctl(mib, 4, &pools[i].name, &size, NULL, 0) < 0) {
-                       snprintf(pools[i].name, size, "#%d#", mib[3]);
-               }
-       }
 
-       if (i != num_pools) {
-               memset(pools, 0, sizeof(*pools) * num_pools);
-               return (-1);
+               if (pool_get_name(np, p->name, sizeof(p->name)) < 0)
+                       snprintf(p->name, sizeof(p->name), "#%d#", i + 1);
        }
 
        return 0;
@@ -289,11 +378,18 @@ initpool(void)
 {
        field_view *v;
 
-       for (v = views_pool; v->name != NULL; v++)
-               add_view(v);
-
+       add_view(&pool_view);
        read_pool();
 
+       ncpusfound = hw_ncpusfound();
+       if (ncpusfound == -1) {
+               error("sysctl(ncpusfound): %s", strerror(errno));
+               exit(1);
+       }
+
+       add_view(&pool_cache_view);
+       pool_cache_read();
+
        return(0);
 }
 
@@ -340,4 +436,195 @@ pool_keyboard_callback(int ch)
        };
 
        return (1);
+}
+
+int
+pool_cache_read(void)
+{
+       struct pool_cache_info *pc;
+       int np, i;
+
+       np = pool_get_npools();
+       if (np == -1) {
+               error("sysctl(npools): %s", strerror(errno));
+               return (-1);
+       }
+
+       if (np > num_pool_caches) {
+               pc = reallocarray(pool_caches, np, sizeof(*pc));
+               if (pc == NULL) {
+                       error("realloc: %s", strerror(errno));
+                       return (-1);
+               }
+               /* commit to using the new memory */
+               pool_caches = pc;
+
+               for (i = num_pool_caches; i < np; i++) {
+                       pc = &pool_caches[i];
+                       pc->name[0] = '\0';
+
+                       pc->cache_cpus = reallocarray(NULL, ncpusfound,
+                           sizeof(*pc->cache_cpus));
+                       if (pc->cache_cpus == NULL) {
+                               error("malloc cache cpus: %s", strerror(errno));
+                               goto unalloc;
+                       }
+               }
+
+               /* commit to using the new cache_infos */
+               num_pool_caches = np;
+       }
+
+       for (i = 0; i < num_pool_caches; i++) {
+               pc = &pool_caches[i];
+               np = i + 1;
+
+               if (pool_get_cache(np, &pc->cache) < 0 ||
+                   pool_get_cache_cpus(np, pc->cache_cpus, ncpusfound) < 0) {
+                       pc->name[0] = '\0';
+                       continue;
+               }
+
+               if (pool_get_name(np, pc->name, sizeof(pc->name)) < 0)
+                       snprintf(pc->name, sizeof(pc->name), "#%d#", i + 1);
+       }
+
+       return 0;
+
+unalloc:
+       while (i > num_pool_caches) {
+               pc = &pool_caches[--i];
+               free(pc->cache_cpus);
+       }
+       return (-1);
+}
+
+void
+pool_cache_sort(void)
+{
+       /* XXX */
+       order_type *ordering;
+
+       if (curr_mgr == NULL)
+               return;
+
+       ordering = curr_mgr->order_curr;
+
+       if (ordering == NULL)
+               return;
+       if (ordering->func == NULL)
+               return;
+       if (pools == NULL)
+               return;
+       if (num_pools <= 0)
+               return;
+
+       mergesort(pools, num_pools, sizeof(struct pool_info), ordering->func);
+}
+
+void
+pool_cache_print(void)
+{
+       struct pool_cache_info *pc;
+       int i, n, count = 0;
+
+       if (pool_caches == NULL)
+               return;
+
+       for (n = i = 0; i < num_pool_caches; i++) {
+               pc = &pool_caches[i];
+               if (pc->name[0] == '\0')
+                       continue;
+
+               if (n++ < dispstart)
+                       continue;
+
+               pool_cache_show(pc);
+               count++;
+               if (maxprint > 0 && count >= maxprint)
+                       break;
+       }
+}
+
+void
+pool_cache_show(const struct pool_cache_info *pc)
+{
+       const struct kinfo_pool_cache *kpc;
+       const struct kinfo_pool_cache_cpu *kpcc;
+       int cpu;
+
+       kpc = &pc->cache;
+
+       print_fld_str(FLD_POOL_CACHE_NAME, pc->name);
+       print_fld_uint(FLD_POOL_CACHE_LEN, kpc->pr_len);
+       print_fld_uint(FLD_POOL_CACHE_NL, kpc->pr_nlist);
+       print_fld_uint(FLD_POOL_CACHE_NGC, kpc->pr_ngc);
+
+       for (cpu = 0; cpu < ncpusfound; cpu++) {
+               kpcc = &pc->cache_cpus[cpu];
+
+               print_fld_uint(FLD_POOL_CACHE_CPU, kpcc->pr_cpu);
+
+               print_fld_size(FLD_POOL_CACHE_GET, kpcc->pr_nget);
+               print_fld_size(FLD_POOL_CACHE_PUT, kpcc->pr_nput);
+               print_fld_size(FLD_POOL_CACHE_LGET, kpcc->pr_nlget);
+               print_fld_size(FLD_POOL_CACHE_LPUT, kpcc->pr_nlput);
+               end_line();
+       }
+
+}
+
+static int
+pool_get_npools(void)
+{
+       int mib[] = { CTL_KERN, KERN_POOL, KERN_POOL_NPOOLS };
+
+       return (sysctl_rdint(mib, nitems(mib)));
+}
+
+static int
+pool_get_cache(int pool, struct kinfo_pool_cache *kpc)
+{
+       int mib[] = { CTL_KERN, KERN_POOL, KERN_POOL_CACHE, pool };
+       size_t len = sizeof(*kpc);
+
+       return (sysctl(mib, nitems(mib), kpc, &len, NULL, 0));
+}
+
+static int
+pool_get_cache_cpus(int pool, struct kinfo_pool_cache_cpu *kpcc,
+    unsigned int ncpus)
+{
+       int mib[] = { CTL_KERN, KERN_POOL, KERN_POOL_CACHE_CPUS, pool };
+       size_t len = sizeof(*kpcc) * ncpus;
+
+       return (sysctl(mib, nitems(mib), kpcc, &len, NULL, 0));
+}
+
+static int
+pool_get_name(int pool, char *name, size_t len)
+{
+       int mib[] = { CTL_KERN, KERN_POOL, KERN_POOL_NAME, pool };
+
+       return (sysctl(mib, nitems(mib), name, &len, NULL, 0));
+}
+
+static int
+hw_ncpusfound(void)
+{
+       int mib[] = { CTL_HW, HW_NCPUFOUND };
+
+       return (sysctl_rdint(mib, nitems(mib)));
+}
+
+static int
+sysctl_rdint(const int *mib, unsigned int nmib)
+{
+       int i;
+       size_t size = sizeof(i);
+
+       if (sysctl(mib, nmib, &i, &size, NULL, 0) == -1)
+               return (-1);
+
+       return (i);
 }
Index: usr.bin/systat/systat.1
===================================================================
RCS file: /cvs/src/usr.bin/systat/systat.1,v
retrieving revision 1.101
diff -u -p -r1.101 systat.1
--- usr.bin/systat/systat.1     12 Mar 2015 01:03:00 -0000      1.101
+++ usr.bin/systat/systat.1     14 Jun 2017 03:13:48 -0000
@@ -136,6 +136,7 @@ argument expects to be one of:
 .Ic queues ,
 .Ic pf ,
 .Ic pool ,
+.Ic pcaches ,
 .Ic malloc ,
 .Ic buckets ,
 .Ic nfsclient ,
@@ -379,6 +380,10 @@ and
 By default only the statistics of active pools are displayed but pressing
 .Ic A
 changes the view to show all of them.
+.It Ic pcaches
+Display kernel
+.Xr pool 9
+per CPU cache statistics.
 .It Ic queues
 Display statistics about the active queues,
 similar to the output of
