On Thu, Jan 8, 2026 at 5:06 AM Simon Schippers
<[email protected]> wrote:
>
> Introduce {tun,tap}_ring_consume() helpers that wrap __ptr_ring_consume()
> and wake the corresponding netdev subqueue when consuming an entry frees
> space in the underlying ptr_ring.
>
> Stopping of the netdev queue when the ptr_ring is full will be introduced
> in an upcoming commit.
>
> Co-developed-by: Tim Gebauer <[email protected]>
> Signed-off-by: Tim Gebauer <[email protected]>
> Signed-off-by: Simon Schippers <[email protected]>
> ---
> drivers/net/tap.c | 23 ++++++++++++++++++++++-
> drivers/net/tun.c | 25 +++++++++++++++++++++++--
> 2 files changed, 45 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/tap.c b/drivers/net/tap.c
> index 1197f245e873..2442cf7ac385 100644
> --- a/drivers/net/tap.c
> +++ b/drivers/net/tap.c
> @@ -753,6 +753,27 @@ static ssize_t tap_put_user(struct tap_queue *q,
> return ret ? ret : total;
> }
>
> +static void *tap_ring_consume(struct tap_queue *q)
> +{
> + struct ptr_ring *ring = &q->ring;
> + struct net_device *dev;
> + void *ptr;
> +
> + spin_lock(&ring->consumer_lock);
> +
> + ptr = __ptr_ring_consume(ring);
> + if (unlikely(ptr && __ptr_ring_consume_created_space(ring, 1))) {
> + rcu_read_lock();
> + dev = rcu_dereference(q->tap)->dev;
> + netif_wake_subqueue(dev, q->queue_index);
> + rcu_read_unlock();
> + }
> +
> + spin_unlock(&ring->consumer_lock);
> +
> + return ptr;
> +}
> +
> static ssize_t tap_do_read(struct tap_queue *q,
> struct iov_iter *to,
> int noblock, struct sk_buff *skb)
> @@ -774,7 +795,7 @@ static ssize_t tap_do_read(struct tap_queue *q,
> TASK_INTERRUPTIBLE);
>
> /* Read frames from the queue */
> - skb = ptr_ring_consume(&q->ring);
> + skb = tap_ring_consume(q);
> if (skb)
> break;
> if (noblock) {
> diff --git a/drivers/net/tun.c b/drivers/net/tun.c
> index 8192740357a0..7148f9a844a4 100644
> --- a/drivers/net/tun.c
> +++ b/drivers/net/tun.c
> @@ -2113,13 +2113,34 @@ static ssize_t tun_put_user(struct tun_struct *tun,
> return total;
> }
>
> +static void *tun_ring_consume(struct tun_file *tfile)
> +{
> + struct ptr_ring *ring = &tfile->tx_ring;
> + struct net_device *dev;
> + void *ptr;
> +
> + spin_lock(&ring->consumer_lock);
> +
> + ptr = __ptr_ring_consume(ring);
> + if (unlikely(ptr && __ptr_ring_consume_created_space(ring, 1))) {
I guess it's the "bug" I mentioned in the previous patch that leads to
the check of __ptr_ring_consume_created_space() here. If that's true,
it's another argument for tweaking the current API.
> + rcu_read_lock();
> + dev = rcu_dereference(tfile->tun)->dev;
> + netif_wake_subqueue(dev, tfile->queue_index);
This would cause the producer TX_SOFTIRQ to run on the same CPU, which
I'm not sure is what we want.
> + rcu_read_unlock();
> + }
Btw, this function duplicates a lot of the logic of tap_ring_consume();
we should consider merging the two.
> +
> + spin_unlock(&ring->consumer_lock);
> +
> + return ptr;
> +}
> +
> static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
> {
> DECLARE_WAITQUEUE(wait, current);
> void *ptr = NULL;
> int error = 0;
>
> - ptr = ptr_ring_consume(&tfile->tx_ring);
> + ptr = tun_ring_consume(tfile);
I'm not sure having a separate patch like this helps. For example,
it will introduce a performance regression.
> if (ptr)
> goto out;
> if (noblock) {
> @@ -2131,7 +2152,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int
> noblock, int *err)
>
> while (1) {
> set_current_state(TASK_INTERRUPTIBLE);
> - ptr = ptr_ring_consume(&tfile->tx_ring);
> + ptr = tun_ring_consume(tfile);
> if (ptr)
> break;
> if (signal_pending(current)) {
> --
> 2.43.0
>
Thanks