The branch main has been updated by np:

URL: https://cgit.FreeBSD.org/src/commit/?id=96054b97b418d1ff2b781e9e23cc86a79d91909a

commit 96054b97b418d1ff2b781e9e23cc86a79d91909a
Author:     John Baldwin <[email protected]>
AuthorDate: 2025-09-29 15:10:42 +0000
Commit:     Navdeep Parhar <[email protected]>
CommitDate: 2025-09-29 15:19:13 +0000

    cxgbe mp_ring: Add mp_ring_enqueue_only
    
    Unlike mp_ring_enqueue, this function is meant to be called from
    within a drain routine to enqueue more packets to the queue currently
    being drained.  To avoid recursion, it only enqueues additional items
    without becoming a consumer.
    
    MFC after:      3 days
    Sponsored by:   Chelsio Communications
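For illustration, a minimal sketch of a caller follows.  The helper name,
the packet array, and the error handling are hypothetical; only
mp_ring_enqueue_only(), its errno-style return, and the ENOBUFS case come
from this commit.

    /* Hypothetical helper invoked from within a ring's drain routine. */
    static void
    requeue_from_drain(struct mp_ring *r, void **pkts, int npkts)
    {
            int rc;

            /*
             * Safe here because the caller is the current consumer: the
             * ring is already in a busy state, so mp_ring_enqueue_only()
             * adds the items without trying to drain the ring, and thus
             * without recursing into the drain routine.
             */
            rc = mp_ring_enqueue_only(r, pkts, npkts);
            if (rc == ENOBUFS) {
                    /*
                     * No room in the ring.  The items were already counted
                     * as dropped, so dispose of them here.
                     */
            }
    }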
---
 sys/dev/cxgbe/t4_mp_ring.c | 81 +++++++++++++++++++++++++++++++++++++++++++++-
 sys/dev/cxgbe/t4_mp_ring.h |  1 +
 2 files changed, 81 insertions(+), 1 deletion(-)

diff --git a/sys/dev/cxgbe/t4_mp_ring.c b/sys/dev/cxgbe/t4_mp_ring.c
index 531fd356728e..916c363a0c2a 100644
--- a/sys/dev/cxgbe/t4_mp_ring.c
+++ b/sys/dev/cxgbe/t4_mp_ring.c
@@ -305,7 +305,6 @@ failed:
 }
 
 void
-
 mp_ring_free(struct mp_ring *r)
 {
        int i;
@@ -472,6 +471,86 @@ mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
        return (0);
 }
 
+/*
+ * Enqueue n items but never drain the ring.  Can be called
+ * to enqueue new items while draining the ring.
+ *
+ * Returns an errno.
+ */
+int
+mp_ring_enqueue_only(struct mp_ring *r, void **items, int n)
+{
+       union ring_state os, ns;
+       uint16_t pidx_start, pidx_stop;
+       int i;
+
+       MPASS(items != NULL);
+       MPASS(n > 0);
+
+       /*
+        * Reserve room for the new items.  Our reservation, if successful, is
+        * from 'pidx_start' to 'pidx_stop'.
+        */
+       os.state = atomic_load_64(&r->state);
+
+       /* Should only be used from the drain callback. */
+       MPASS(os.flags == BUSY || os.flags == TOO_BUSY ||
+           os.flags == TAKING_OVER);
+
+       for (;;) {
+               if (__predict_false(space_available(r, os) < n)) {
+                       /* Not enough room in the ring. */
+                       counter_u64_add(r->dropped, n);
+                       return (ENOBUFS);
+               }
+
+               /* There is room in the ring. */
+
+               ns.state = os.state;
+               ns.pidx_head = increment_idx(r, os.pidx_head, n);
+               critical_enter();
+               if (atomic_fcmpset_64(&r->state, &os.state, ns.state))
+                       break;
+               critical_exit();
+               cpu_spinwait();
+       };
+
+       pidx_start = os.pidx_head;
+       pidx_stop = ns.pidx_head;
+
+       /*
+        * Wait for other producers who got in ahead of us to enqueue their
+        * items, one producer at a time.  It is our turn when the ring's
+        * pidx_tail reaches the beginning of our reservation (pidx_start).
+        */
+       while (ns.pidx_tail != pidx_start) {
+               cpu_spinwait();
+               ns.state = atomic_load_64(&r->state);
+       }
+
+       /* Now it is our turn to fill up the area we reserved earlier. */
+       i = pidx_start;
+       do {
+               r->items[i] = *items++;
+               if (__predict_false(++i == r->size))
+                       i = 0;
+       } while (i != pidx_stop);
+
+       /*
+        * Update the ring's pidx_tail.  The release style atomic guarantees
+        * that the items are visible to any thread that sees the updated pidx.
+        */
+       os.state = atomic_load_64(&r->state);
+       do {
+               ns.state = os.state;
+               ns.pidx_tail = pidx_stop;
+       } while (atomic_fcmpset_rel_64(&r->state, &os.state, ns.state) == 0);
+       critical_exit();
+
+       counter_u64_add(r->not_consumer, 1);
+       return (0);
+}
+
 void
 mp_ring_check_drainage(struct mp_ring *r, int budget)
 {
diff --git a/sys/dev/cxgbe/t4_mp_ring.h b/sys/dev/cxgbe/t4_mp_ring.h
index 949174b9056d..07b15906cd43 100644
--- a/sys/dev/cxgbe/t4_mp_ring.h
+++ b/sys/dev/cxgbe/t4_mp_ring.h
@@ -62,6 +62,7 @@ int mp_ring_alloc(struct mp_ring **, int, void *, ring_drain_t,
     ring_can_drain_t, struct malloc_type *, struct mtx *, int);
 void mp_ring_free(struct mp_ring *);
 int mp_ring_enqueue(struct mp_ring *, void **, int, int);
+int mp_ring_enqueue_only(struct mp_ring *, void **, int);
 void mp_ring_check_drainage(struct mp_ring *, int);
 void mp_ring_reset_stats(struct mp_ring *);
 bool mp_ring_is_idle(struct mp_ring *);

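For readers without the full source at hand: r->state packs the ring's
bookkeeping into a single 64-bit word, which is what lets the new function
reserve slots (atomic_fcmpset_64) and publish them (atomic_fcmpset_rel_64)
with lock-free compare-and-swap loops, while critical_enter()/critical_exit()
keep the producer from being preempted between reserving its slots and
publishing them, so later producers spinning on pidx_tail are not stalled
behind a descheduled thread.  A plausible layout, reconstructed from the
fields the diff touches (the cidx member is an assumption, not part of this
commit; the flag names appear in the MPASS above):

    union ring_state {
            struct {
                    uint16_t pidx_head;  /* next slot a producer may claim */
                    uint16_t pidx_tail;  /* last slot published so far */
                    uint16_t cidx;       /* consumer index (assumed) */
                    uint16_t flags;      /* e.g. BUSY, TOO_BUSY, TAKING_OVER */
            };
            uint64_t state;
    };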