Why? Is it significantly faster? Page allocation should be in the slow path.
dlg

> On 18 Oct 2019, at 08:33, Alexander Bluhm <[email protected]> wrote:
>
> Hi,
>
> Can we replace the mutex that protects the mbuf allocation limit
> by an atomic operation?
>
> ok?
>
> bluhm
>
> Index: kern/uipc_mbuf.c
> ===================================================================
> RCS file: /data/mirror/openbsd/cvs/src/sys/kern/uipc_mbuf.c,v
> retrieving revision 1.272
> diff -u -p -r1.272 uipc_mbuf.c
> --- kern/uipc_mbuf.c	19 Jul 2019 09:03:03 -0000	1.272
> +++ kern/uipc_mbuf.c	17 Oct 2019 22:29:17 -0000
> @@ -133,7 +133,6 @@ struct mutex m_extref_mtx = MUTEX_INITIA
>  void	m_extfree(struct mbuf *);
>  void	m_zero(struct mbuf *);
>
> -struct mutex m_pool_mtx = MUTEX_INITIALIZER(IPL_NET);
>  unsigned long mbuf_mem_limit;	/* how much memory can be allocated */
>  unsigned long mbuf_mem_alloc;	/* how much memory has been allocated */
>
> @@ -1473,30 +1472,20 @@ m_microtime(const struct mbuf *m, struct
>  void *
>  m_pool_alloc(struct pool *pp, int flags, int *slowdown)
>  {
> -	void *v = NULL;
> -	int avail = 1;
> +	void *v;
> +	long alloc;
>
> -	if (mbuf_mem_alloc + pp->pr_pgsize > mbuf_mem_limit)
> -		return (NULL);
> -
> -	mtx_enter(&m_pool_mtx);
> -	if (mbuf_mem_alloc + pp->pr_pgsize > mbuf_mem_limit)
> -		avail = 0;
> -	else
> -		mbuf_mem_alloc += pp->pr_pgsize;
> -	mtx_leave(&m_pool_mtx);
> +	alloc = atomic_add_long_nv(&mbuf_mem_alloc, pp->pr_pgsize);
> +	if (alloc > mbuf_mem_limit)
> +		goto fail;
>
> -	if (avail) {
> -		v = (*pool_allocator_multi.pa_alloc)(pp, flags, slowdown);
> +	v = (*pool_allocator_multi.pa_alloc)(pp, flags, slowdown);
> +	if (v != NULL)
> +		return (v);
>
> -		if (v == NULL) {
> -			mtx_enter(&m_pool_mtx);
> -			mbuf_mem_alloc -= pp->pr_pgsize;
> -			mtx_leave(&m_pool_mtx);
> -		}
> -	}
> -
> -	return (v);
> + fail:
> +	atomic_sub_long(&mbuf_mem_alloc, pp->pr_pgsize);
> +	return (NULL);
>  }
>
>  void
> @@ -1504,9 +1493,7 @@ m_pool_free(struct pool *pp, void *v)
>  {
>  	(*pool_allocator_multi.pa_free)(pp, v);
>
> -	mtx_enter(&m_pool_mtx);
> -	mbuf_mem_alloc -= pp->pr_pgsize;
> -	mtx_leave(&m_pool_mtx);
> +	atomic_sub_long(&mbuf_mem_alloc, pp->pr_pgsize);
>  }
>
>  void
>
