Re: [PATCH net-next v2] virtio_net: add support for Byte Queue Limits

2024-06-18 Thread Jiri Pirko
Tue, Jun 18, 2024 at 02:53:42AM CEST, [email protected] wrote:
>On Mon, Jun 17, 2024 at 5:18 PM Jiri Pirko  wrote:
>>
>> Mon, Jun 17, 2024 at 04:34:26AM CEST, [email protected] wrote:
>> >On Thu, Jun 13, 2024 at 1:09 AM Jiri Pirko  wrote:
>> >>
>> >> From: Jiri Pirko 
>> >>
>> >> Add support for Byte Queue Limits (BQL).
>> >>
>> >> Tested on qemu emulated virtio_net device with 1, 2 and 4 queues.
>> >> Tested with fq_codel and pfifo_fast. Super netperf with 50 threads is
>> >> running in background. Netperf TCP_RR results:
>> >>
>> >> NOBQL FQC 1q:  159.56  159.33  158.50  154.31agv: 157.925
>> >> NOBQL FQC 2q:  184.64  184.96  174.73  174.15agv: 179.62
>> >> NOBQL FQC 4q:  994.46  441.96  416.50  499.56agv: 588.12
>> >> NOBQL PFF 1q:  148.68  148.92  145.95  149.48agv: 148.2575
>> >> NOBQL PFF 2q:  171.86  171.20  170.42  169.42agv: 170.725
>> >> NOBQL PFF 4q: 1505.23 1137.23 2488.70 3507.99agv: 2159.7875
>> >>   BQL FQC 1q: 1332.80 1297.97 1351.41 1147.57agv: 1282.4375
>> >>   BQL FQC 2q:  768.30  817.72  864.43  974.40agv: 856.2125
>> >>   BQL FQC 4q:  945.66  942.68  878.51  822.82agv: 897.4175
>> >>   BQL PFF 1q:  149.69  151.49  149.40  147.47agv: 149.5125
>> >>   BQL PFF 2q: 2059.32  798.74 1844.12  381.80agv: 1270.995
>> >>   BQL PFF 4q: 1871.98 4420.02 4916.59 13268.16   agv: 6119.1875
>> >>
>> >> Signed-off-by: Jiri Pirko 
>> >> ---
>> >> v1->v2:
>> >> - moved netdev_tx_completed_queue() call into __free_old_xmit(),
>> >>   propagate use_napi flag to __free_old_xmit() and only call
>> >>   netdev_tx_completed_queue() in case it is true
>> >> - added forgotten call to netdev_tx_reset_queue()
>> >> - fixed stats for xdp packets
>> >> - fixed bql accounting when __free_old_xmit() is called from xdp path
>> >> - handle the !use_napi case in start_xmit() kick section
>> >> ---
>> >>  drivers/net/virtio_net.c | 50 +---
>> >>  1 file changed, 32 insertions(+), 18 deletions(-)
>> >>
>> >> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> >> index 61a57d134544..5863c663ccab 100644
>> >> --- a/drivers/net/virtio_net.c
>> >> +++ b/drivers/net/virtio_net.c
>> >> @@ -84,7 +84,9 @@ struct virtnet_stat_desc {
>> >>
>> >>  struct virtnet_sq_free_stats {
>> >> u64 packets;
>> >> +   u64 xdp_packets;
>> >> u64 bytes;
>> >> +   u64 xdp_bytes;
>> >>  };
>> >>
>> >>  struct virtnet_sq_stats {
>> >> @@ -506,29 +508,33 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
>> >> return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
>> >>  }
>> >>
>> >> -static void __free_old_xmit(struct send_queue *sq, bool in_napi,
>> >> +static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
>> >> +   bool in_napi, bool use_napi,
>> >> struct virtnet_sq_free_stats *stats)
>> >>  {
>> >> unsigned int len;
>> >> void *ptr;
>> >>
>> >> while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
>> >> -   ++stats->packets;
>> >> -
>> >> if (!is_xdp_frame(ptr)) {
>> >> struct sk_buff *skb = ptr;
>> >>
>> >> pr_debug("Sent skb %p\n", skb);
>> >>
>> >> +   stats->packets++;
>> >> stats->bytes += skb->len;
>> >> napi_consume_skb(skb, in_napi);
>> >> } else {
>> >> struct xdp_frame *frame = ptr_to_xdp(ptr);
>> >>
>> >> -   stats->bytes += xdp_get_frame_len(frame);
>> >> +   stats->xdp_packets++;
>> >> +   stats->xdp_bytes += xdp_get_frame_len(frame);
>> >> xdp_return_frame(frame);
>> >> }
>> >> }
>> >> +   if (use_napi)
>> >> +   netdev_tx_completed_queue(txq, stats->packets, stats->bytes);
>> >> +
>> >>  }
>> >
>> >I wonder if this works correctly, for example NAPI could be enabled
>> >after queued but before sent. So __netdev_tx_sent_queue() is not
>> >called before.
>>
>> How is that possible? Napi weight can't change when link is up. Or am I
>> missing something?
>
>Something like this:
>
>1) packets were queued
>2) interface brought down
>3) NAPI enabled
>4) interface brought up
>5) packets were sent

Gotcha, will try to fix. Thanks!
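
For reference, one way to close this window is to clear the queue's BQL state
at the point TX NAPI gets enabled, so completions from the pre-BQL period
cannot underflow the counters. Below is only a minimal sketch of that idea,
not the eventual fix; netdev_get_tx_queue() and netdev_tx_reset_queue() are
existing kernel helpers, while the wrapper function name is hypothetical:

#include <linux/netdevice.h>

/* Hypothetical helper: called from whatever driver path (re)enables TX NAPI
 * for queue 'qnum', before any new completions are processed.
 */
static void virtnet_reset_bql_on_napi_enable(struct net_device *dev, int qnum)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);

        /* Drop stale BQL accounting so netdev_tx_completed_queue() cannot
         * complete bytes that were never reported via netdev_tx_sent_queue().
         */
        netdev_tx_reset_queue(txq);
}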


>
>?
>
>Thanks
>
>>
>> >
>> >Thanks
>> >
>>
>



Re: [PATCH net-next v2] virtio_net: add support for Byte Queue Limits

2024-06-18 Thread Jiri Pirko
Mon, Jun 17, 2024 at 06:14:11PM CEST, [email protected] wrote:
>On Wed, Jun 12, 2024 at 07:08:51PM +0200, Jiri Pirko wrote:
>> From: Jiri Pirko 
>> 
>> Add support for Byte Queue Limits (BQL).
>> 
>> Tested on qemu emulated virtio_net device with 1, 2 and 4 queues.
>> Tested with fq_codel and pfifo_fast. Super netperf with 50 threads is
>> running in background. Netperf TCP_RR results:
>> 
>> NOBQL FQC 1q:  159.56  159.33  158.50  154.31agv: 157.925
>> NOBQL FQC 2q:  184.64  184.96  174.73  174.15agv: 179.62
>> NOBQL FQC 4q:  994.46  441.96  416.50  499.56agv: 588.12
>> NOBQL PFF 1q:  148.68  148.92  145.95  149.48agv: 148.2575
>> NOBQL PFF 2q:  171.86  171.20  170.42  169.42agv: 170.725
>> NOBQL PFF 4q: 1505.23 1137.23 2488.70 3507.99agv: 2159.7875
>>   BQL FQC 1q: 1332.80 1297.97 1351.41 1147.57agv: 1282.4375
>>   BQL FQC 2q:  768.30  817.72  864.43  974.40agv: 856.2125
>>   BQL FQC 4q:  945.66  942.68  878.51  822.82agv: 897.4175
>>   BQL PFF 1q:  149.69  151.49  149.40  147.47agv: 149.5125
>>   BQL PFF 2q: 2059.32  798.74 1844.12  381.80agv: 1270.995
>>   BQL PFF 4q: 1871.98 4420.02 4916.59 13268.16   agv: 6119.1875
>> 
>> Signed-off-by: Jiri Pirko 
>> ---
>> v1->v2:
>> - moved netdev_tx_completed_queue() call into __free_old_xmit(),
>>   propagate use_napi flag to __free_old_xmit() and only call
>>   netdev_tx_completed_queue() in case it is true
>> - added forgotten call to netdev_tx_reset_queue()
>> - fixed stats for xdp packets
>> - fixed bql accounting when __free_old_xmit() is called from xdp path
>> - handle the !use_napi case in start_xmit() kick section
>> ---
>>  drivers/net/virtio_net.c | 50 +---
>>  1 file changed, 32 insertions(+), 18 deletions(-)
>> 
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> index 61a57d134544..5863c663ccab 100644
>> --- a/drivers/net/virtio_net.c
>> +++ b/drivers/net/virtio_net.c
>> @@ -84,7 +84,9 @@ struct virtnet_stat_desc {
>>  
>>  struct virtnet_sq_free_stats {
>>  u64 packets;
>> +u64 xdp_packets;
>>  u64 bytes;
>> +u64 xdp_bytes;
>>  };
>>  
>>  struct virtnet_sq_stats {
>> @@ -506,29 +508,33 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
>>  return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
>>  }
>>  
>> -static void __free_old_xmit(struct send_queue *sq, bool in_napi,
>> +static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
>> +bool in_napi, bool use_napi,
>>  struct virtnet_sq_free_stats *stats)
>>  {
>>  unsigned int len;
>>  void *ptr;
>>  
>>  while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
>> -++stats->packets;
>> -
>>  if (!is_xdp_frame(ptr)) {
>>  struct sk_buff *skb = ptr;
>>  
>>  pr_debug("Sent skb %p\n", skb);
>>  
>> +stats->packets++;
>>  stats->bytes += skb->len;
>>  napi_consume_skb(skb, in_napi);
>>  } else {
>>  struct xdp_frame *frame = ptr_to_xdp(ptr);
>>  
>> -stats->bytes += xdp_get_frame_len(frame);
>> +stats->xdp_packets++;
>> +stats->xdp_bytes += xdp_get_frame_len(frame);
>>  xdp_return_frame(frame);
>>  }
>>  }
>> +if (use_napi)
>> +netdev_tx_completed_queue(txq, stats->packets, stats->bytes);
>> +
>>  }
>>  
>>  /* Converting between virtqueue no. and kernel tx/rx queue no.
>> @@ -955,21 +961,22 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
>>  virtnet_rq_free_buf(vi, rq, buf);
>>  }
>>  
>> -static void free_old_xmit(struct send_queue *sq, bool in_napi)
>> +static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
>> +  bool in_napi, bool use_napi)
>>  {
>>  struct virtnet_sq_free_stats stats = {0};
>>  
>> -__free_old_xmit(sq, in_napi, &stats);
>> +__free_old_xmit(sq, txq, in_napi, use_napi, &stats);
>>  
>>  /* Avoid overhead when no packets have been processed
>>   * happens when called speculatively from start_xmit.
>>   */
>> -if (!stats.packets)
>> +if (!stats.packets && !stats.xdp_packets)
>>  return;
>>  
>>  u64_stats_update_begin(&sq->stats.syncp);
>> -u64_stats_add(&sq->stats.bytes, stats.bytes);
>> -u64_stats_add(&sq->stats.packets, stats.packets);
>> +u64_stats_add(&sq->stats.bytes, stats.bytes + stats.xdp_bytes);
>> +u64_stats_add(&sq->stats.packets, stats.packets + stats.xdp_packets);
>>  u64_stats_update_end(&sq->stats.syncp);
>>  }
>>  
>> @@ -1003,7 +1010,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
>>   * early means 16 slots are typically wasted.
>>   */
>>  if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
>> -netif_stop_subqueue(dev,

Re: [PATCH net-next v2] virtio_net: add support for Byte Queue Limits

2024-06-17 Thread Jason Wang
On Mon, Jun 17, 2024 at 5:18 PM Jiri Pirko  wrote:
>
> Mon, Jun 17, 2024 at 04:34:26AM CEST, [email protected] wrote:
> >On Thu, Jun 13, 2024 at 1:09 AM Jiri Pirko  wrote:
> >>
> >> From: Jiri Pirko 
> >>
> >> Add support for Byte Queue Limits (BQL).
> >>
> >> Tested on qemu emulated virtio_net device with 1, 2 and 4 queues.
> >> Tested with fq_codel and pfifo_fast. Super netperf with 50 threads is
> >> running in background. Netperf TCP_RR results:
> >>
> >> NOBQL FQC 1q:  159.56  159.33  158.50  154.31agv: 157.925
> >> NOBQL FQC 2q:  184.64  184.96  174.73  174.15agv: 179.62
> >> NOBQL FQC 4q:  994.46  441.96  416.50  499.56agv: 588.12
> >> NOBQL PFF 1q:  148.68  148.92  145.95  149.48agv: 148.2575
> >> NOBQL PFF 2q:  171.86  171.20  170.42  169.42agv: 170.725
> >> NOBQL PFF 4q: 1505.23 1137.23 2488.70 3507.99agv: 2159.7875
> >>   BQL FQC 1q: 1332.80 1297.97 1351.41 1147.57agv: 1282.4375
> >>   BQL FQC 2q:  768.30  817.72  864.43  974.40agv: 856.2125
> >>   BQL FQC 4q:  945.66  942.68  878.51  822.82agv: 897.4175
> >>   BQL PFF 1q:  149.69  151.49  149.40  147.47agv: 149.5125
> >>   BQL PFF 2q: 2059.32  798.74 1844.12  381.80agv: 1270.995
> >>   BQL PFF 4q: 1871.98 4420.02 4916.59 13268.16   agv: 6119.1875
> >>
> >> Signed-off-by: Jiri Pirko 
> >> ---
> >> v1->v2:
> >> - moved netdev_tx_completed_queue() call into __free_old_xmit(),
> >>   propagate use_napi flag to __free_old_xmit() and only call
> >>   netdev_tx_completed_queue() in case it is true
> >> - added forgotten call to netdev_tx_reset_queue()
> >> - fixed stats for xdp packets
> >> - fixed bql accounting when __free_old_xmit() is called from xdp path
> >> - handle the !use_napi case in start_xmit() kick section
> >> ---
> >>  drivers/net/virtio_net.c | 50 +---
> >>  1 file changed, 32 insertions(+), 18 deletions(-)
> >>
> >> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> >> index 61a57d134544..5863c663ccab 100644
> >> --- a/drivers/net/virtio_net.c
> >> +++ b/drivers/net/virtio_net.c
> >> @@ -84,7 +84,9 @@ struct virtnet_stat_desc {
> >>
> >>  struct virtnet_sq_free_stats {
> >> u64 packets;
> >> +   u64 xdp_packets;
> >> u64 bytes;
> >> +   u64 xdp_bytes;
> >>  };
> >>
> >>  struct virtnet_sq_stats {
> >> @@ -506,29 +508,33 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
> >> return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
> >>  }
> >>
> >> -static void __free_old_xmit(struct send_queue *sq, bool in_napi,
> >> +static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
> >> +   bool in_napi, bool use_napi,
> >> struct virtnet_sq_free_stats *stats)
> >>  {
> >> unsigned int len;
> >> void *ptr;
> >>
> >> while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> >> -   ++stats->packets;
> >> -
> >> if (!is_xdp_frame(ptr)) {
> >> struct sk_buff *skb = ptr;
> >>
> >> pr_debug("Sent skb %p\n", skb);
> >>
> >> +   stats->packets++;
> >> stats->bytes += skb->len;
> >> napi_consume_skb(skb, in_napi);
> >> } else {
> >> struct xdp_frame *frame = ptr_to_xdp(ptr);
> >>
> >> -   stats->bytes += xdp_get_frame_len(frame);
> >> +   stats->xdp_packets++;
> >> +   stats->xdp_bytes += xdp_get_frame_len(frame);
> >> xdp_return_frame(frame);
> >> }
> >> }
> >> +   if (use_napi)
> >> +   netdev_tx_completed_queue(txq, stats->packets, stats->bytes);
> >> +
> >>  }
> >
> >I wonder if this works correctly, for example NAPI could be enabled
> >after queued but before sent. So __netdev_tx_sent_queue() is not
> >called before.
>
> How is that possible? Napi weight can't change when link is up. Or am I
> missing something?

Something like this:

1) packets were queued
2) interface brought down
3) NAPI enabled
4) interface brought up
5) packets were sent

?

Thanks

>
> >
> >Thanks
> >
>




Re: [PATCH net-next v2] virtio_net: add support for Byte Queue Limits

2024-06-17 Thread Jason Xing
On Mon, Jun 17, 2024 at 5:15 PM Jiri Pirko  wrote:
>
> Fri, Jun 14, 2024 at 11:54:04AM CEST, [email protected] wrote:
> >Hello Jiri,
> >
> >On Thu, Jun 13, 2024 at 1:08 AM Jiri Pirko  wrote:
> >>
> >> From: Jiri Pirko 
> >>
> >> Add support for Byte Queue Limits (BQL).
> >>
> >> Tested on qemu emulated virtio_net device with 1, 2 and 4 queues.
> >> Tested with fq_codel and pfifo_fast. Super netperf with 50 threads is
> >> running in background. Netperf TCP_RR results:
> >>
> >> NOBQL FQC 1q:  159.56  159.33  158.50  154.31agv: 157.925
> >> NOBQL FQC 2q:  184.64  184.96  174.73  174.15agv: 179.62
> >> NOBQL FQC 4q:  994.46  441.96  416.50  499.56agv: 588.12
> >> NOBQL PFF 1q:  148.68  148.92  145.95  149.48agv: 148.2575
> >> NOBQL PFF 2q:  171.86  171.20  170.42  169.42agv: 170.725
> >> NOBQL PFF 4q: 1505.23 1137.23 2488.70 3507.99agv: 2159.7875
> >>   BQL FQC 1q: 1332.80 1297.97 1351.41 1147.57agv: 1282.4375
> >>   BQL FQC 2q:  768.30  817.72  864.43  974.40agv: 856.2125
> >>   BQL FQC 4q:  945.66  942.68  878.51  822.82agv: 897.4175
> >>   BQL PFF 1q:  149.69  151.49  149.40  147.47agv: 149.5125
> >>   BQL PFF 2q: 2059.32  798.74 1844.12  381.80agv: 1270.995
> >>   BQL PFF 4q: 1871.98 4420.02 4916.59 13268.16   agv: 6119.1875
> >
> >I cannot get such a huge improvement when I was doing multiple tests
> >between two VMs. I'm pretty sure the BQL feature is working, but the
> >numbers look the same with/without BQL.
> >
> >VM 1 (client):
> >16 cpus, x86_64, 4 queues, the latest net-next kernel with/without
> >this patch, pfifo_fast, napi_tx=true, napi_weight=128
> >
> >VM 2 (server):
> >16 cpus, aarch64, 4 queues, the latest net-next kernel without this
> >patch, pfifo_fast
> >
> >What the 'ping' command shows to me between two VMs is : rtt
> >min/avg/max/mdev = 0.233/0.257/0.300/0.024 ms
> >
> >I started 50 netperfs to communicate the other side with the following 
> >command:
> >#!/bin/bash
> >
> >for i in $(seq 5000 5050);
> >do
> >netperf -p $i -H [ip addr] -l 60 -t TCP_RR -- -r 64,64 > /dev/null 2>&1 &
> >done
> >
> >The results are around 30423.62 txkB/s. If I remove '-r 64 64', they
> >are still the same/similar.
>
> You have to stress the line by parallel TCP_STREAM instances (50 in my
> case). For consistent results, use -p portnum,locport to specify the
> local port.

Thanks. Even though the individual TCP_RR results vary from run to run, I can
see a big improvement in their aggregate under these circumstances.
With BQL, the throughput is 2159.17
Without BQL, it's 1099.33

Please feel free to add the tag:
Tested-by: Jason Xing 

Thanks,
Jason



Re: [PATCH net-next v2] virtio_net: add support for Byte Queue Limits

2024-06-17 Thread Michael S. Tsirkin
On Wed, Jun 12, 2024 at 07:08:51PM +0200, Jiri Pirko wrote:
> From: Jiri Pirko 
> 
> Add support for Byte Queue Limits (BQL).
> 
> Tested on qemu emulated virtio_net device with 1, 2 and 4 queues.
> Tested with fq_codel and pfifo_fast. Super netperf with 50 threads is
> running in background. Netperf TCP_RR results:
> 
> NOBQL FQC 1q:  159.56  159.33  158.50  154.31agv: 157.925
> NOBQL FQC 2q:  184.64  184.96  174.73  174.15agv: 179.62
> NOBQL FQC 4q:  994.46  441.96  416.50  499.56agv: 588.12
> NOBQL PFF 1q:  148.68  148.92  145.95  149.48agv: 148.2575
> NOBQL PFF 2q:  171.86  171.20  170.42  169.42agv: 170.725
> NOBQL PFF 4q: 1505.23 1137.23 2488.70 3507.99agv: 2159.7875
>   BQL FQC 1q: 1332.80 1297.97 1351.41 1147.57agv: 1282.4375
>   BQL FQC 2q:  768.30  817.72  864.43  974.40agv: 856.2125
>   BQL FQC 4q:  945.66  942.68  878.51  822.82agv: 897.4175
>   BQL PFF 1q:  149.69  151.49  149.40  147.47agv: 149.5125
>   BQL PFF 2q: 2059.32  798.74 1844.12  381.80agv: 1270.995
>   BQL PFF 4q: 1871.98 4420.02 4916.59 13268.16   agv: 6119.1875
> 
> Signed-off-by: Jiri Pirko 
> ---
> v1->v2:
> - moved netdev_tx_completed_queue() call into __free_old_xmit(),
>   propagate use_napi flag to __free_old_xmit() and only call
>   netdev_tx_completed_queue() in case it is true
> - added forgotten call to netdev_tx_reset_queue()
> - fixed stats for xdp packets
> - fixed bql accounting when __free_old_xmit() is called from xdp path
> - handle the !use_napi case in start_xmit() kick section
> ---
>  drivers/net/virtio_net.c | 50 +---
>  1 file changed, 32 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 61a57d134544..5863c663ccab 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -84,7 +84,9 @@ struct virtnet_stat_desc {
>  
>  struct virtnet_sq_free_stats {
>   u64 packets;
> + u64 xdp_packets;
>   u64 bytes;
> + u64 xdp_bytes;
>  };
>  
>  struct virtnet_sq_stats {
> @@ -506,29 +508,33 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
>   return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
>  }
>  
> -static void __free_old_xmit(struct send_queue *sq, bool in_napi,
> +static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
> + bool in_napi, bool use_napi,
>   struct virtnet_sq_free_stats *stats)
>  {
>   unsigned int len;
>   void *ptr;
>  
>   while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> - ++stats->packets;
> -
>   if (!is_xdp_frame(ptr)) {
>   struct sk_buff *skb = ptr;
>  
>   pr_debug("Sent skb %p\n", skb);
>  
> + stats->packets++;
>   stats->bytes += skb->len;
>   napi_consume_skb(skb, in_napi);
>   } else {
>   struct xdp_frame *frame = ptr_to_xdp(ptr);
>  
> - stats->bytes += xdp_get_frame_len(frame);
> + stats->xdp_packets++;
> + stats->xdp_bytes += xdp_get_frame_len(frame);
>   xdp_return_frame(frame);
>   }
>   }
> + if (use_napi)
> + netdev_tx_completed_queue(txq, stats->packets, stats->bytes);
> +
>  }
>  
>  /* Converting between virtqueue no. and kernel tx/rx queue no.
> @@ -955,21 +961,22 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
>   virtnet_rq_free_buf(vi, rq, buf);
>  }
>  
> -static void free_old_xmit(struct send_queue *sq, bool in_napi)
> +static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
> +   bool in_napi, bool use_napi)
>  {
>   struct virtnet_sq_free_stats stats = {0};
>  
> - __free_old_xmit(sq, in_napi, &stats);
> + __free_old_xmit(sq, txq, in_napi, use_napi, &stats);
>  
>   /* Avoid overhead when no packets have been processed
>* happens when called speculatively from start_xmit.
>*/
> - if (!stats.packets)
> + if (!stats.packets && !stats.xdp_packets)
>   return;
>  
>   u64_stats_update_begin(&sq->stats.syncp);
> - u64_stats_add(&sq->stats.bytes, stats.bytes);
> - u64_stats_add(&sq->stats.packets, stats.packets);
> + u64_stats_add(&sq->stats.bytes, stats.bytes + stats.xdp_bytes);
> + u64_stats_add(&sq->stats.packets, stats.packets + stats.xdp_packets);
>   u64_stats_update_end(&sq->stats.syncp);
>  }
>  
> @@ -1003,7 +1010,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
>* early means 16 slots are typically wasted.
>*/
>   if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
> - netif_stop_subqueue(dev, qnum);
> + struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
> +
> + netif_tx_stop_queue(txq);
>  

Re: [PATCH net-next v2] virtio_net: add support for Byte Queue Limits

2024-06-17 Thread Jiri Pirko
Mon, Jun 17, 2024 at 04:34:26AM CEST, [email protected] wrote:
>On Thu, Jun 13, 2024 at 1:09 AM Jiri Pirko  wrote:
>>
>> From: Jiri Pirko 
>>
>> Add support for Byte Queue Limits (BQL).
>>
>> Tested on qemu emulated virtio_net device with 1, 2 and 4 queues.
>> Tested with fq_codel and pfifo_fast. Super netperf with 50 threads is
>> running in background. Netperf TCP_RR results:
>>
>> NOBQL FQC 1q:  159.56  159.33  158.50  154.31agv: 157.925
>> NOBQL FQC 2q:  184.64  184.96  174.73  174.15agv: 179.62
>> NOBQL FQC 4q:  994.46  441.96  416.50  499.56agv: 588.12
>> NOBQL PFF 1q:  148.68  148.92  145.95  149.48agv: 148.2575
>> NOBQL PFF 2q:  171.86  171.20  170.42  169.42agv: 170.725
>> NOBQL PFF 4q: 1505.23 1137.23 2488.70 3507.99agv: 2159.7875
>>   BQL FQC 1q: 1332.80 1297.97 1351.41 1147.57agv: 1282.4375
>>   BQL FQC 2q:  768.30  817.72  864.43  974.40agv: 856.2125
>>   BQL FQC 4q:  945.66  942.68  878.51  822.82agv: 897.4175
>>   BQL PFF 1q:  149.69  151.49  149.40  147.47agv: 149.5125
>>   BQL PFF 2q: 2059.32  798.74 1844.12  381.80agv: 1270.995
>>   BQL PFF 4q: 1871.98 4420.02 4916.59 13268.16   agv: 6119.1875
>>
>> Signed-off-by: Jiri Pirko 
>> ---
>> v1->v2:
>> - moved netdev_tx_completed_queue() call into __free_old_xmit(),
>>   propagate use_napi flag to __free_old_xmit() and only call
>>   netdev_tx_completed_queue() in case it is true
>> - added forgotten call to netdev_tx_reset_queue()
>> - fixed stats for xdp packets
>> - fixed bql accounting when __free_old_xmit() is called from xdp path
>> - handle the !use_napi case in start_xmit() kick section
>> ---
>>  drivers/net/virtio_net.c | 50 +---
>>  1 file changed, 32 insertions(+), 18 deletions(-)
>>
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> index 61a57d134544..5863c663ccab 100644
>> --- a/drivers/net/virtio_net.c
>> +++ b/drivers/net/virtio_net.c
>> @@ -84,7 +84,9 @@ struct virtnet_stat_desc {
>>
>>  struct virtnet_sq_free_stats {
>> u64 packets;
>> +   u64 xdp_packets;
>> u64 bytes;
>> +   u64 xdp_bytes;
>>  };
>>
>>  struct virtnet_sq_stats {
>> @@ -506,29 +508,33 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
>> return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
>>  }
>>
>> -static void __free_old_xmit(struct send_queue *sq, bool in_napi,
>> +static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
>> +   bool in_napi, bool use_napi,
>> struct virtnet_sq_free_stats *stats)
>>  {
>> unsigned int len;
>> void *ptr;
>>
>> while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
>> -   ++stats->packets;
>> -
>> if (!is_xdp_frame(ptr)) {
>> struct sk_buff *skb = ptr;
>>
>> pr_debug("Sent skb %p\n", skb);
>>
>> +   stats->packets++;
>> stats->bytes += skb->len;
>> napi_consume_skb(skb, in_napi);
>> } else {
>> struct xdp_frame *frame = ptr_to_xdp(ptr);
>>
>> -   stats->bytes += xdp_get_frame_len(frame);
>> +   stats->xdp_packets++;
>> +   stats->xdp_bytes += xdp_get_frame_len(frame);
>> xdp_return_frame(frame);
>> }
>> }
>> +   if (use_napi)
>> +   netdev_tx_completed_queue(txq, stats->packets, stats->bytes);
>> +
>>  }
>
>I wonder if this works correctly, for example NAPI could be enabled
>after queued but before sent. So __netdev_tx_sent_queue() is not
>called before.

How is that possible? Napi weight can't change when link is up. Or am I
missing something?
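
For context, use_napi in the driver is derived from the TX NAPI weight at
transmit time, which is what backs the statement above. A rough paraphrase
follows, assuming the driver's struct send_queue with its embedded NAPI
instance; this is an illustrative helper, not a quoted hunk from the patch:

static bool virtnet_sq_uses_napi(const struct send_queue *sq)
{
        /* TX NAPI is in use iff the NAPI weight is non-zero; the weight is
         * only changed while the device is down, so it stays stable for the
         * lifetime of the link.
         */
        return sq->napi.weight;
}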

>
>Thanks
>



Re: [PATCH net-next v2] virtio_net: add support for Byte Queue Limits

2024-06-17 Thread Jiri Pirko
Fri, Jun 14, 2024 at 11:54:04AM CEST, [email protected] wrote:
>Hello Jiri,
>
>On Thu, Jun 13, 2024 at 1:08 AM Jiri Pirko  wrote:
>>
>> From: Jiri Pirko 
>>
>> Add support for Byte Queue Limits (BQL).
>>
>> Tested on qemu emulated virtio_net device with 1, 2 and 4 queues.
>> Tested with fq_codel and pfifo_fast. Super netperf with 50 threads is
>> running in background. Netperf TCP_RR results:
>>
>> NOBQL FQC 1q:  159.56  159.33  158.50  154.31agv: 157.925
>> NOBQL FQC 2q:  184.64  184.96  174.73  174.15agv: 179.62
>> NOBQL FQC 4q:  994.46  441.96  416.50  499.56agv: 588.12
>> NOBQL PFF 1q:  148.68  148.92  145.95  149.48agv: 148.2575
>> NOBQL PFF 2q:  171.86  171.20  170.42  169.42agv: 170.725
>> NOBQL PFF 4q: 1505.23 1137.23 2488.70 3507.99agv: 2159.7875
>>   BQL FQC 1q: 1332.80 1297.97 1351.41 1147.57agv: 1282.4375
>>   BQL FQC 2q:  768.30  817.72  864.43  974.40agv: 856.2125
>>   BQL FQC 4q:  945.66  942.68  878.51  822.82agv: 897.4175
>>   BQL PFF 1q:  149.69  151.49  149.40  147.47agv: 149.5125
>>   BQL PFF 2q: 2059.32  798.74 1844.12  381.80agv: 1270.995
>>   BQL PFF 4q: 1871.98 4420.02 4916.59 13268.16   agv: 6119.1875
>
>I cannot get such a huge improvement when I was doing multiple tests
>between two VMs. I'm pretty sure the BQL feature is working, but the
>numbers look the same with/without BQL.
>
>VM 1 (client):
>16 cpus, x86_64, 4 queues, the latest net-next kernel with/without
>this patch, pfifo_fast, napi_tx=true, napi_weight=128
>
>VM 2 (server):
>16 cpus, aarch64, 4 queues, the latest net-next kernel without this
>patch, pfifo_fast
>
>What the 'ping' command shows to me between two VMs is : rtt
>min/avg/max/mdev = 0.233/0.257/0.300/0.024 ms
>
>I started 50 netperfs to communicate the other side with the following command:
>#!/bin/bash
>
>for i in $(seq 5000 5050);
>do
>netperf -p $i -H [ip addr] -l 60 -t TCP_RR -- -r 64,64 > /dev/null 2>&1 &
>done
>
>The results are around 30423.62 txkB/s. If I remove '-r 64 64', they
>are still the same/similar.

You have to stress the link with parallel TCP_STREAM instances (50 in my
case). For consistent results, use -p portnum,locport to specify the
local port.

Then run TCP_RR, also use -p portnum,locport to specify the local port.

Also, double-check that CONFIG_BQL=y is set.


>
>Am I missing something?
>
>Thanks,
>Jason



Re: [PATCH net-next v2] virtio_net: add support for Byte Queue Limits

2024-06-16 Thread Jason Wang
On Thu, Jun 13, 2024 at 1:09 AM Jiri Pirko  wrote:
>
> From: Jiri Pirko 
>
> Add support for Byte Queue Limits (BQL).
>
> Tested on qemu emulated virtio_net device with 1, 2 and 4 queues.
> Tested with fq_codel and pfifo_fast. Super netperf with 50 threads is
> running in background. Netperf TCP_RR results:
>
> NOBQL FQC 1q:  159.56  159.33  158.50  154.31agv: 157.925
> NOBQL FQC 2q:  184.64  184.96  174.73  174.15agv: 179.62
> NOBQL FQC 4q:  994.46  441.96  416.50  499.56agv: 588.12
> NOBQL PFF 1q:  148.68  148.92  145.95  149.48agv: 148.2575
> NOBQL PFF 2q:  171.86  171.20  170.42  169.42agv: 170.725
> NOBQL PFF 4q: 1505.23 1137.23 2488.70 3507.99agv: 2159.7875
>   BQL FQC 1q: 1332.80 1297.97 1351.41 1147.57agv: 1282.4375
>   BQL FQC 2q:  768.30  817.72  864.43  974.40agv: 856.2125
>   BQL FQC 4q:  945.66  942.68  878.51  822.82agv: 897.4175
>   BQL PFF 1q:  149.69  151.49  149.40  147.47agv: 149.5125
>   BQL PFF 2q: 2059.32  798.74 1844.12  381.80agv: 1270.995
>   BQL PFF 4q: 1871.98 4420.02 4916.59 13268.16   agv: 6119.1875
>
> Signed-off-by: Jiri Pirko 
> ---
> v1->v2:
> - moved netdev_tx_completed_queue() call into __free_old_xmit(),
>   propagate use_napi flag to __free_old_xmit() and only call
>   netdev_tx_completed_queue() in case it is true
> - added forgotten call to netdev_tx_reset_queue()
> - fixed stats for xdp packets
> - fixed bql accounting when __free_old_xmit() is called from xdp path
> - handle the !use_napi case in start_xmit() kick section
> ---
>  drivers/net/virtio_net.c | 50 +---
>  1 file changed, 32 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 61a57d134544..5863c663ccab 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -84,7 +84,9 @@ struct virtnet_stat_desc {
>
>  struct virtnet_sq_free_stats {
> u64 packets;
> +   u64 xdp_packets;
> u64 bytes;
> +   u64 xdp_bytes;
>  };
>
>  struct virtnet_sq_stats {
> @@ -506,29 +508,33 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
> return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
>  }
>
> -static void __free_old_xmit(struct send_queue *sq, bool in_napi,
> +static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
> +   bool in_napi, bool use_napi,
> struct virtnet_sq_free_stats *stats)
>  {
> unsigned int len;
> void *ptr;
>
> while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> -   ++stats->packets;
> -
> if (!is_xdp_frame(ptr)) {
> struct sk_buff *skb = ptr;
>
> pr_debug("Sent skb %p\n", skb);
>
> +   stats->packets++;
> stats->bytes += skb->len;
> napi_consume_skb(skb, in_napi);
> } else {
> struct xdp_frame *frame = ptr_to_xdp(ptr);
>
> -   stats->bytes += xdp_get_frame_len(frame);
> +   stats->xdp_packets++;
> +   stats->xdp_bytes += xdp_get_frame_len(frame);
> xdp_return_frame(frame);
> }
> }
> +   if (use_napi)
> +   netdev_tx_completed_queue(txq, stats->packets, stats->bytes);
> +
>  }

I wonder if this works correctly: for example, NAPI could be enabled
after a packet is queued but before it is sent, so __netdev_tx_sent_queue()
was never called for it.

Thanks
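
The sent-side half of the pairing referred to here lives in the driver's
start_xmit path, which is not included in the quoted hunks above. A rough
sketch of how that pairing is expected to look, charging BQL only in the
use_napi case to mirror the completion side (the helper name is made up for
illustration; __netdev_tx_sent_queue() is the existing kernel API):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only: how the submit side of BQL pairs with the
 * netdev_tx_completed_queue() call in __free_old_xmit(). Returns whether
 * the virtqueue should be kicked.
 */
static bool virtnet_bql_sent(struct netdev_queue *txq, struct sk_buff *skb,
                             bool use_napi, bool xmit_more)
{
        if (use_napi)
                /* Charge skb->len to BQL; the return value folds in the
                 * xmit_more hint, telling the caller whether to kick now.
                 */
                return __netdev_tx_sent_queue(txq, skb->len, xmit_more);

        /* Without NAPI, BQL is not used; kick unless more packets follow. */
        return !xmit_more;
}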




Re: [PATCH net-next v2] virtio_net: add support for Byte Queue Limits

2024-06-14 Thread Jason Xing
Hello Jiri,

On Thu, Jun 13, 2024 at 1:08 AM Jiri Pirko  wrote:
>
> From: Jiri Pirko 
>
> Add support for Byte Queue Limits (BQL).
>
> Tested on qemu emulated virtio_net device with 1, 2 and 4 queues.
> Tested with fq_codel and pfifo_fast. Super netperf with 50 threads is
> running in background. Netperf TCP_RR results:
>
> NOBQL FQC 1q:  159.56  159.33  158.50  154.31agv: 157.925
> NOBQL FQC 2q:  184.64  184.96  174.73  174.15agv: 179.62
> NOBQL FQC 4q:  994.46  441.96  416.50  499.56agv: 588.12
> NOBQL PFF 1q:  148.68  148.92  145.95  149.48agv: 148.2575
> NOBQL PFF 2q:  171.86  171.20  170.42  169.42agv: 170.725
> NOBQL PFF 4q: 1505.23 1137.23 2488.70 3507.99agv: 2159.7875
>   BQL FQC 1q: 1332.80 1297.97 1351.41 1147.57agv: 1282.4375
>   BQL FQC 2q:  768.30  817.72  864.43  974.40agv: 856.2125
>   BQL FQC 4q:  945.66  942.68  878.51  822.82agv: 897.4175
>   BQL PFF 1q:  149.69  151.49  149.40  147.47agv: 149.5125
>   BQL PFF 2q: 2059.32  798.74 1844.12  381.80agv: 1270.995
>   BQL PFF 4q: 1871.98 4420.02 4916.59 13268.16   agv: 6119.1875

I could not get such a huge improvement when doing multiple tests
between two VMs. I'm pretty sure the BQL feature is working, but the
numbers look the same with and without BQL.

VM 1 (client):
16 cpus, x86_64, 4 queues, the latest net-next kernel with/without
this patch, pfifo_fast, napi_tx=true, napi_weight=128

VM 2 (server):
16 cpus, aarch64, 4 queues, the latest net-next kernel without this
patch, pfifo_fast

The 'ping' command between the two VMs shows: rtt
min/avg/max/mdev = 0.233/0.257/0.300/0.024 ms

I started 50 netperf instances to talk to the other side with the following command:
#!/bin/bash

for i in $(seq 5000 5050);
do
netperf -p $i -H [ip addr] -l 60 -t TCP_RR -- -r 64,64 > /dev/null 2>&1 &
done

The results are around 30423.62 txkB/s. If I remove '-r 64,64', they
are still about the same.

Am I missing something?

Thanks,
Jason



Re: [PATCH net-next v2] virtio_net: add support for Byte Queue Limits

2024-06-12 Thread Michael S. Tsirkin
On Wed, Jun 12, 2024 at 07:08:51PM +0200, Jiri Pirko wrote:
> From: Jiri Pirko 
> 
> Add support for Byte Queue Limits (BQL).
> 
> Tested on qemu emulated virtio_net device with 1, 2 and 4 queues.
> Tested with fq_codel and pfifo_fast. Super netperf with 50 threads is
> running in background. Netperf TCP_RR results:
> 
> NOBQL FQC 1q:  159.56  159.33  158.50  154.31agv: 157.925
> NOBQL FQC 2q:  184.64  184.96  174.73  174.15agv: 179.62
> NOBQL FQC 4q:  994.46  441.96  416.50  499.56agv: 588.12
> NOBQL PFF 1q:  148.68  148.92  145.95  149.48agv: 148.2575
> NOBQL PFF 2q:  171.86  171.20  170.42  169.42agv: 170.725
> NOBQL PFF 4q: 1505.23 1137.23 2488.70 3507.99agv: 2159.7875
>   BQL FQC 1q: 1332.80 1297.97 1351.41 1147.57agv: 1282.4375
>   BQL FQC 2q:  768.30  817.72  864.43  974.40agv: 856.2125
>   BQL FQC 4q:  945.66  942.68  878.51  822.82agv: 897.4175
>   BQL PFF 1q:  149.69  151.49  149.40  147.47agv: 149.5125
>   BQL PFF 2q: 2059.32  798.74 1844.12  381.80agv: 1270.995
>   BQL PFF 4q: 1871.98 4420.02 4916.59 13268.16   agv: 6119.1875
> 
> Signed-off-by: Jiri Pirko 

I see you now support both NAPI and non-NAPI. Thanks a lot, Jiri!
Just coming out of a national holiday here, as usual with a backlog - please allow
until Monday to review. Thanks!

> ---
> v1->v2:
> - moved netdev_tx_completed_queue() call into __free_old_xmit(),
>   propagate use_napi flag to __free_old_xmit() and only call
>   netdev_tx_completed_queue() in case it is true
> - added forgotten call to netdev_tx_reset_queue()
> - fixed stats for xdp packets
> - fixed bql accounting when __free_old_xmit() is called from xdp path
> - handle the !use_napi case in start_xmit() kick section
> ---
>  drivers/net/virtio_net.c | 50 +---
>  1 file changed, 32 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 61a57d134544..5863c663ccab 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -84,7 +84,9 @@ struct virtnet_stat_desc {
>  
>  struct virtnet_sq_free_stats {
>   u64 packets;
> + u64 xdp_packets;
>   u64 bytes;
> + u64 xdp_bytes;
>  };
>  
>  struct virtnet_sq_stats {
> @@ -506,29 +508,33 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
>   return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
>  }
>  
> -static void __free_old_xmit(struct send_queue *sq, bool in_napi,
> +static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
> + bool in_napi, bool use_napi,
>   struct virtnet_sq_free_stats *stats)
>  {
>   unsigned int len;
>   void *ptr;
>  
>   while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> - ++stats->packets;
> -
>   if (!is_xdp_frame(ptr)) {
>   struct sk_buff *skb = ptr;
>  
>   pr_debug("Sent skb %p\n", skb);
>  
> + stats->packets++;
>   stats->bytes += skb->len;
>   napi_consume_skb(skb, in_napi);
>   } else {
>   struct xdp_frame *frame = ptr_to_xdp(ptr);
>  
> - stats->bytes += xdp_get_frame_len(frame);
> + stats->xdp_packets++;
> + stats->xdp_bytes += xdp_get_frame_len(frame);
>   xdp_return_frame(frame);
>   }
>   }
> + if (use_napi)
> + netdev_tx_completed_queue(txq, stats->packets, stats->bytes);
> +
>  }
>  
>  /* Converting between virtqueue no. and kernel tx/rx queue no.
> @@ -955,21 +961,22 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
>   virtnet_rq_free_buf(vi, rq, buf);
>  }
>  
> -static void free_old_xmit(struct send_queue *sq, bool in_napi)
> +static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
> +   bool in_napi, bool use_napi)
>  {
>   struct virtnet_sq_free_stats stats = {0};
>  
> - __free_old_xmit(sq, in_napi, &stats);
> + __free_old_xmit(sq, txq, in_napi, use_napi, &stats);
>  
>   /* Avoid overhead when no packets have been processed
>* happens when called speculatively from start_xmit.
>*/
> - if (!stats.packets)
> + if (!stats.packets && !stats.xdp_packets)
>   return;
>  
>   u64_stats_update_begin(&sq->stats.syncp);
> - u64_stats_add(&sq->stats.bytes, stats.bytes);
> - u64_stats_add(&sq->stats.packets, stats.packets);
> + u64_stats_add(&sq->stats.bytes, stats.bytes + stats.xdp_bytes);
> + u64_stats_add(&sq->stats.packets, stats.packets + stats.xdp_packets);
>   u64_stats_update_end(&sq->stats.syncp);
>  }
>  
> @@ -1003,7 +1010,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
>* early means 16 slots are typically wasted.
>*/
>   if (sq->vq->num_free < 2+MAX_SKB_FRAGS)