On Fri, Oct 18, 2019 at 03:55:39PM +1000, David Gwynne wrote:
> why? is it significantly faster? page allocation should be in the slow path.

At least it was not slower.  Performance was slightly higher, but
the changes are well below measurement tolerance.

It is usually a good idea to avoid locks and mutexes where possible.
Atomic operations cannot create deadlocks.

In this case the code becomes shorter.

I have put back the unprotected check in m_pool_alloc().  It avoids
possibly expensive atomic operations when we are already over the
limit.  Do we want this?  I could not measure any difference.
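
If it helps to see the pattern outside the kernel, here is a minimal
userland sketch in C11.  The names mem_alloc, mem_limit, reserve()
and release() are invented for illustration, and atomic_fetch_add()/
atomic_fetch_sub() stand in for atomic_add_long_nv()/atomic_sub_long():

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_long mem_alloc;                   /* bytes currently reserved */
    static const long mem_limit = 4L * 1024 * 1024; /* arbitrary cap for the sketch */

    static bool
    reserve(long size)
    {
            long alloc;

            /*
             * Unlocked fast-path check, like the one kept in
             * m_pool_alloc(): a stale read here is harmless because
             * the atomic add below re-checks the limit.
             */
            if (atomic_load_explicit(&mem_alloc, memory_order_relaxed) +
                size > mem_limit)
                    return false;

            /* atomic_fetch_add() returns the old value, so add size back on. */
            alloc = atomic_fetch_add(&mem_alloc, size) + size;
            if (alloc > mem_limit) {
                    /* Over the limit after all; undo the reservation. */
                    atomic_fetch_sub(&mem_alloc, size);
                    return false;
            }
            return true;
    }

    static void
    release(long size)
    {
            atomic_fetch_sub(&mem_alloc, size);
    }

The window where mem_alloc transiently exceeds the limit between the
add and the rollback only makes a racing reserve() fail early, which
is the safe direction.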

bluhm

Index: kern/uipc_mbuf.c
===================================================================
RCS file: /data/mirror/openbsd/cvs/src/sys/kern/uipc_mbuf.c,v
retrieving revision 1.272
diff -u -p -r1.272 uipc_mbuf.c
--- kern/uipc_mbuf.c    19 Jul 2019 09:03:03 -0000      1.272
+++ kern/uipc_mbuf.c    18 Oct 2019 12:33:43 -0000
@@ -133,7 +133,6 @@ struct      mutex m_extref_mtx = MUTEX_INITIA
 void   m_extfree(struct mbuf *);
 void   m_zero(struct mbuf *);

-struct mutex m_pool_mtx = MUTEX_INITIALIZER(IPL_NET);
 unsigned long mbuf_mem_limit;  /* how much memory can be allocated */
 unsigned long mbuf_mem_alloc;  /* how much memory has been allocated */

@@ -1473,30 +1472,23 @@ m_microtime(const struct mbuf *m, struct
 void *
 m_pool_alloc(struct pool *pp, int flags, int *slowdown)
 {
-       void *v = NULL;
-       int avail = 1;
+       void *v;
+       long alloc;

        if (mbuf_mem_alloc + pp->pr_pgsize > mbuf_mem_limit)
                return (NULL);

-       mtx_enter(&m_pool_mtx);
-       if (mbuf_mem_alloc + pp->pr_pgsize > mbuf_mem_limit)
-               avail = 0;
-       else
-               mbuf_mem_alloc += pp->pr_pgsize;
-       mtx_leave(&m_pool_mtx);
-
-       if (avail) {
-               v = (*pool_allocator_multi.pa_alloc)(pp, flags, slowdown);
+       alloc = atomic_add_long_nv(&mbuf_mem_alloc, pp->pr_pgsize);
+       if (alloc > mbuf_mem_limit)
+               goto fail;

-               if (v == NULL) {
-                       mtx_enter(&m_pool_mtx);
-                       mbuf_mem_alloc -= pp->pr_pgsize;
-                       mtx_leave(&m_pool_mtx);
-               }
-       }
+       v = (*pool_allocator_multi.pa_alloc)(pp, flags, slowdown);
+       if (v != NULL)
+               return (v);

-       return (v);
+ fail:
+       atomic_sub_long(&mbuf_mem_alloc, pp->pr_pgsize);
+       return (NULL);
 }

 void
@@ -1504,9 +1496,7 @@ m_pool_free(struct pool *pp, void *v)
 {
        (*pool_allocator_multi.pa_free)(pp, v);

-       mtx_enter(&m_pool_mtx);
-       mbuf_mem_alloc -= pp->pr_pgsize;
-       mtx_leave(&m_pool_mtx);
+       atomic_sub_long(&mbuf_mem_alloc, pp->pr_pgsize);
 }

 void
