Re: [PATCH net-next v3 4/4] virtio_net: improve dim command request efficiency

2024-06-18 Thread Heng Qi
On Tue, 18 Jun 2024 09:29:48 +0800, Jason Wang  wrote:
> On Mon, Jun 17, 2024 at 4:08 PM Heng Qi  wrote:
> >
> > On Mon, 17 Jun 2024 12:05:30 +0800, Jason Wang  wrote:
> > > On Thu, Jun 6, 2024 at 2:15 PM Heng Qi  wrote:
> > > >
> > > > Currently, control vq handles commands synchronously,
> > > > leading to increased delays for dim commands during multi-queue
> > > > VM configuration and directly impacting dim performance.
> > > >
> > > > To address this, we are shifting to asynchronous processing of
> > > > ctrlq's dim commands.
> > > >
> > > > Signed-off-by: Heng Qi 
> > > > ---
> > > >  drivers/net/virtio_net.c | 233 ++-
> > > >  1 file changed, 208 insertions(+), 25 deletions(-)
> > > >

Hi Jason,

I will incorporate your feedback into the next version.

Thanks

> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index e59e12bb7601..0338528993ab 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -376,6 +376,13 @@ struct control_buf {
> > > > struct completion completion;
> > > >  };
> > > >
> > > > +struct virtnet_coal_node {
> > > > +   struct control_buf ctrl;
> > > > +   struct virtio_net_ctrl_coal_vq coal_vqs;
> > > > +   bool is_coal_wait;
> > > > +   struct list_head list;
> > > > +};
> > > > +
> > > >  struct virtnet_info {
> > > > struct virtio_device *vdev;
> > > > struct virtqueue *cvq;
> > > > @@ -420,6 +427,9 @@ struct virtnet_info {
> > > > /* Lock to protect the control VQ */
> > > > struct mutex cvq_lock;
> > > >
> > > > +   /* Work struct for acquisition of cvq processing results. */
> > > > +   struct work_struct get_cvq;
> > > > +
> > > > /* Host can handle any s/g split between our header and packet 
> > > > data */
> > > > bool any_header_sg;
> > > >
> > > > @@ -464,6 +474,14 @@ struct virtnet_info {
> > > > struct virtnet_interrupt_coalesce intr_coal_tx;
> > > > struct virtnet_interrupt_coalesce intr_coal_rx;
> > > >
> > > > +   /* Free nodes used for concurrent delivery */
> > > > +   struct mutex coal_free_lock;
> > > > +   struct list_head coal_free_list;
> > > > +
> > > > +   /* Filled when there are no free nodes or cvq buffers */
> > > > +   struct mutex coal_wait_lock;
> > > > +   struct list_head coal_wait_list;
> > > > +
> > > > unsigned long guest_offloads;
> > > > unsigned long guest_offloads_capable;
> > > >
> > > > @@ -670,7 +688,7 @@ static void virtnet_cvq_done(struct virtqueue *cvq)
> > > >  {
> > > > struct virtnet_info *vi = cvq->vdev->priv;
> > > >
> > > > -   complete(&vi->ctrl->completion);
> > > > +   schedule_work(&vi->get_cvq);
> > > >  }
> > > >
> > > >  static void skb_xmit_done(struct virtqueue *vq)
> > > > @@ -2696,7 +2714,7 @@ static bool virtnet_send_command_reply(struct 
> > > > virtnet_info *vi,
> > > >struct scatterlist *in)
> > > >  {
> > > > struct scatterlist *sgs[5], hdr, stat;
> > > > -   u32 out_num = 0, tmp, in_num = 0;
> > > > +   u32 out_num = 0, in_num = 0;
> > > > int ret;
> > > >
> > > > /* Caller should know better */
> > > > @@ -2730,14 +2748,14 @@ static bool virtnet_send_command_reply(struct 
> > > > virtnet_info *vi,
> > > > return false;
> > > > }
> > > >
> > > > -   if (unlikely(!virtqueue_kick(vi->cvq)))
> > > > -   goto unlock;
> > > > +   if (unlikely(!virtqueue_kick(vi->cvq))) {
> > > > +   mutex_unlock(&vi->cvq_lock);
> > > > +   return false;
> > > > +   }
> > > > +   mutex_unlock(&vi->cvq_lock);
> > > >
> > > > -   wait_for_completion(&vi->ctrl->completion);
> > > > -   virtqueue_get_buf(vi->cvq, &tmp);
> > > > +   wait_for_completion(&ctrl->completion);
> > > >
> > > > -unlock:
> > > > -   mutex_unlock(&vi->cvq_lock);
> > > > return ctrl->status == VIRTIO_NET_OK;
> > > >  }
> > > >
> > > > @@ -2747,6 +2765,86 @@ static bool virtnet_send_command(struct 
> > > > virtnet_info *vi, u8 class, u8 cmd,
> > > > return virtnet_send_command_reply(vi, class, cmd, vi->ctrl, 
> > > > out, NULL);
> > > >  }
> > > >
> > > > +static void virtnet_process_dim_cmd(struct virtnet_info *vi,
> > > > +   struct virtnet_coal_node *node)
> > > > +{
> > > > +   u16 qnum = le16_to_cpu(node->coal_vqs.vqn) / 2;
> > > > +
> > > > +   mutex_lock(&vi->rq[qnum].dim_lock);
> > > > +   vi->rq[qnum].intr_coal.max_usecs =
> > > > +   le32_to_cpu(node->coal_vqs.coal.max_usecs);
> > > > +   vi->rq[qnum].intr_coal.max_packets =
> > > > +   le32_to_cpu(node->coal_vqs.coal.max_packets);
> > > > +   vi->rq[qnum].dim.state = DIM_START_MEASURE;
> > > > +   mutex_unlock(&vi->rq[qnum].dim_lock);
> > > > +
> > > > +   if (node->is_coal_wait) {
> >

Re: [PATCH net-next v3 4/4] virtio_net: improve dim command request efficiency

2024-06-17 Thread Jason Wang
On Mon, Jun 17, 2024 at 4:08 PM Heng Qi  wrote:
>
> On Mon, 17 Jun 2024 12:05:30 +0800, Jason Wang  wrote:
> > On Thu, Jun 6, 2024 at 2:15 PM Heng Qi  wrote:
> > >
> > > Currently, control vq handles commands synchronously,
> > > leading to increased delays for dim commands during multi-queue
> > > VM configuration and directly impacting dim performance.
> > >
> > > To address this, we are shifting to asynchronous processing of
> > > ctrlq's dim commands.
> > >
> > > Signed-off-by: Heng Qi 
> > > ---
> > >  drivers/net/virtio_net.c | 233 ++-
> > >  1 file changed, 208 insertions(+), 25 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index e59e12bb7601..0338528993ab 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -376,6 +376,13 @@ struct control_buf {
> > > struct completion completion;
> > >  };
> > >
> > > +struct virtnet_coal_node {
> > > +   struct control_buf ctrl;
> > > +   struct virtio_net_ctrl_coal_vq coal_vqs;
> > > +   bool is_coal_wait;
> > > +   struct list_head list;
> > > +};
> > > +
> > >  struct virtnet_info {
> > > struct virtio_device *vdev;
> > > struct virtqueue *cvq;
> > > @@ -420,6 +427,9 @@ struct virtnet_info {
> > > /* Lock to protect the control VQ */
> > > struct mutex cvq_lock;
> > >
> > > +   /* Work struct for acquisition of cvq processing results. */
> > > +   struct work_struct get_cvq;
> > > +
> > > /* Host can handle any s/g split between our header and packet 
> > > data */
> > > bool any_header_sg;
> > >
> > > @@ -464,6 +474,14 @@ struct virtnet_info {
> > > struct virtnet_interrupt_coalesce intr_coal_tx;
> > > struct virtnet_interrupt_coalesce intr_coal_rx;
> > >
> > > +   /* Free nodes used for concurrent delivery */
> > > +   struct mutex coal_free_lock;
> > > +   struct list_head coal_free_list;
> > > +
> > > +   /* Filled when there are no free nodes or cvq buffers */
> > > +   struct mutex coal_wait_lock;
> > > +   struct list_head coal_wait_list;
> > > +
> > > unsigned long guest_offloads;
> > > unsigned long guest_offloads_capable;
> > >
> > > @@ -670,7 +688,7 @@ static void virtnet_cvq_done(struct virtqueue *cvq)
> > >  {
> > > struct virtnet_info *vi = cvq->vdev->priv;
> > >
> > > -   complete(&vi->ctrl->completion);
> > > +   schedule_work(&vi->get_cvq);
> > >  }
> > >
> > >  static void skb_xmit_done(struct virtqueue *vq)
> > > @@ -2696,7 +2714,7 @@ static bool virtnet_send_command_reply(struct 
> > > virtnet_info *vi,
> > >struct scatterlist *in)
> > >  {
> > > struct scatterlist *sgs[5], hdr, stat;
> > > -   u32 out_num = 0, tmp, in_num = 0;
> > > +   u32 out_num = 0, in_num = 0;
> > > int ret;
> > >
> > > /* Caller should know better */
> > > @@ -2730,14 +2748,14 @@ static bool virtnet_send_command_reply(struct 
> > > virtnet_info *vi,
> > > return false;
> > > }
> > >
> > > -   if (unlikely(!virtqueue_kick(vi->cvq)))
> > > -   goto unlock;
> > > +   if (unlikely(!virtqueue_kick(vi->cvq))) {
> > > +   mutex_unlock(&vi->cvq_lock);
> > > +   return false;
> > > +   }
> > > +   mutex_unlock(&vi->cvq_lock);
> > >
> > > -   wait_for_completion(&vi->ctrl->completion);
> > > -   virtqueue_get_buf(vi->cvq, &tmp);
> > > +   wait_for_completion(&ctrl->completion);
> > >
> > > -unlock:
> > > -   mutex_unlock(&vi->cvq_lock);
> > > return ctrl->status == VIRTIO_NET_OK;
> > >  }
> > >
> > > @@ -2747,6 +2765,86 @@ static bool virtnet_send_command(struct 
> > > virtnet_info *vi, u8 class, u8 cmd,
> > > return virtnet_send_command_reply(vi, class, cmd, vi->ctrl, out, 
> > > NULL);
> > >  }
> > >
> > > +static void virtnet_process_dim_cmd(struct virtnet_info *vi,
> > > +   struct virtnet_coal_node *node)
> > > +{
> > > +   u16 qnum = le16_to_cpu(node->coal_vqs.vqn) / 2;
> > > +
> > > +   mutex_lock(&vi->rq[qnum].dim_lock);
> > > +   vi->rq[qnum].intr_coal.max_usecs =
> > > +   le32_to_cpu(node->coal_vqs.coal.max_usecs);
> > > +   vi->rq[qnum].intr_coal.max_packets =
> > > +   le32_to_cpu(node->coal_vqs.coal.max_packets);
> > > +   vi->rq[qnum].dim.state = DIM_START_MEASURE;
> > > +   mutex_unlock(&vi->rq[qnum].dim_lock);
> > > +
> > > +   if (node->is_coal_wait) {
> > > +   mutex_lock(&vi->coal_wait_lock);
> > > +   list_del(&node->list);
> > > +   mutex_unlock(&vi->coal_wait_lock);
> > > +   kfree(node);
> > > +   } else {
> > > +   mutex_lock(&vi->coal_free_lock);
> > > +   list_add(&node->list, &vi->coal_free_list);
> > > +   mutex_unlock(&vi->coal_free

Re: [PATCH net-next v3 4/4] virtio_net: improve dim command request efficiency

2024-06-17 Thread Heng Qi
On Mon, 17 Jun 2024 12:05:30 +0800, Jason Wang  wrote:
> On Thu, Jun 6, 2024 at 2:15 PM Heng Qi  wrote:
> >
> > Currently, control vq handles commands synchronously,
> > leading to increased delays for dim commands during multi-queue
> > VM configuration and directly impacting dim performance.
> >
> > To address this, we are shifting to asynchronous processing of
> > ctrlq's dim commands.
> >
> > Signed-off-by: Heng Qi 
> > ---
> >  drivers/net/virtio_net.c | 233 ++-
> >  1 file changed, 208 insertions(+), 25 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index e59e12bb7601..0338528993ab 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -376,6 +376,13 @@ struct control_buf {
> > struct completion completion;
> >  };
> >
> > +struct virtnet_coal_node {
> > +   struct control_buf ctrl;
> > +   struct virtio_net_ctrl_coal_vq coal_vqs;
> > +   bool is_coal_wait;
> > +   struct list_head list;
> > +};
> > +
> >  struct virtnet_info {
> > struct virtio_device *vdev;
> > struct virtqueue *cvq;
> > @@ -420,6 +427,9 @@ struct virtnet_info {
> > /* Lock to protect the control VQ */
> > struct mutex cvq_lock;
> >
> > +   /* Work struct for acquisition of cvq processing results. */
> > +   struct work_struct get_cvq;
> > +
> > /* Host can handle any s/g split between our header and packet data 
> > */
> > bool any_header_sg;
> >
> > @@ -464,6 +474,14 @@ struct virtnet_info {
> > struct virtnet_interrupt_coalesce intr_coal_tx;
> > struct virtnet_interrupt_coalesce intr_coal_rx;
> >
> > +   /* Free nodes used for concurrent delivery */
> > +   struct mutex coal_free_lock;
> > +   struct list_head coal_free_list;
> > +
> > +   /* Filled when there are no free nodes or cvq buffers */
> > +   struct mutex coal_wait_lock;
> > +   struct list_head coal_wait_list;
> > +
> > unsigned long guest_offloads;
> > unsigned long guest_offloads_capable;
> >
> > @@ -670,7 +688,7 @@ static void virtnet_cvq_done(struct virtqueue *cvq)
> >  {
> > struct virtnet_info *vi = cvq->vdev->priv;
> >
> > -   complete(&vi->ctrl->completion);
> > +   schedule_work(&vi->get_cvq);
> >  }
> >
> >  static void skb_xmit_done(struct virtqueue *vq)
> > @@ -2696,7 +2714,7 @@ static bool virtnet_send_command_reply(struct 
> > virtnet_info *vi,
> >struct scatterlist *in)
> >  {
> > struct scatterlist *sgs[5], hdr, stat;
> > -   u32 out_num = 0, tmp, in_num = 0;
> > +   u32 out_num = 0, in_num = 0;
> > int ret;
> >
> > /* Caller should know better */
> > @@ -2730,14 +2748,14 @@ static bool virtnet_send_command_reply(struct 
> > virtnet_info *vi,
> > return false;
> > }
> >
> > -   if (unlikely(!virtqueue_kick(vi->cvq)))
> > -   goto unlock;
> > +   if (unlikely(!virtqueue_kick(vi->cvq))) {
> > +   mutex_unlock(&vi->cvq_lock);
> > +   return false;
> > +   }
> > +   mutex_unlock(&vi->cvq_lock);
> >
> > -   wait_for_completion(&vi->ctrl->completion);
> > -   virtqueue_get_buf(vi->cvq, &tmp);
> > +   wait_for_completion(&ctrl->completion);
> >
> > -unlock:
> > -   mutex_unlock(&vi->cvq_lock);
> > return ctrl->status == VIRTIO_NET_OK;
> >  }
> >
> > @@ -2747,6 +2765,86 @@ static bool virtnet_send_command(struct virtnet_info 
> > *vi, u8 class, u8 cmd,
> > return virtnet_send_command_reply(vi, class, cmd, vi->ctrl, out, 
> > NULL);
> >  }
> >
> > +static void virtnet_process_dim_cmd(struct virtnet_info *vi,
> > +   struct virtnet_coal_node *node)
> > +{
> > +   u16 qnum = le16_to_cpu(node->coal_vqs.vqn) / 2;
> > +
> > +   mutex_lock(&vi->rq[qnum].dim_lock);
> > +   vi->rq[qnum].intr_coal.max_usecs =
> > +   le32_to_cpu(node->coal_vqs.coal.max_usecs);
> > +   vi->rq[qnum].intr_coal.max_packets =
> > +   le32_to_cpu(node->coal_vqs.coal.max_packets);
> > +   vi->rq[qnum].dim.state = DIM_START_MEASURE;
> > +   mutex_unlock(&vi->rq[qnum].dim_lock);
> > +
> > +   if (node->is_coal_wait) {
> > +   mutex_lock(&vi->coal_wait_lock);
> > +   list_del(&node->list);
> > +   mutex_unlock(&vi->coal_wait_lock);
> > +   kfree(node);
> > +   } else {
> > +   mutex_lock(&vi->coal_free_lock);
> > +   list_add(&node->list, &vi->coal_free_list);
> > +   mutex_unlock(&vi->coal_free_lock);
> > +   }
> > +}
> > +
> > +static int virtnet_add_dim_command(struct virtnet_info *vi,
> > +  struct virtnet_coal_node *coal_node)
> > +{
> > +   struct scatterlist sg;
> > +   int ret;
> > +
> > +   sg_init_one(&sg, &coal_node->coal_vqs, sizeof(coa

Re: [PATCH net-next v3 4/4] virtio_net: improve dim command request efficiency

2024-06-16 Thread Jason Wang
On Thu, Jun 6, 2024 at 2:15 PM Heng Qi  wrote:
>
> Currently, control vq handles commands synchronously,
> leading to increased delays for dim commands during multi-queue
> VM configuration and directly impacting dim performance.
>
> To address this, we are shifting to asynchronous processing of
> ctrlq's dim commands.
>
> Signed-off-by: Heng Qi 
> ---
>  drivers/net/virtio_net.c | 233 ++-
>  1 file changed, 208 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index e59e12bb7601..0338528993ab 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -376,6 +376,13 @@ struct control_buf {
> struct completion completion;
>  };
>
> +struct virtnet_coal_node {
> +   struct control_buf ctrl;
> +   struct virtio_net_ctrl_coal_vq coal_vqs;
> +   bool is_coal_wait;
> +   struct list_head list;
> +};
> +
>  struct virtnet_info {
> struct virtio_device *vdev;
> struct virtqueue *cvq;
> @@ -420,6 +427,9 @@ struct virtnet_info {
> /* Lock to protect the control VQ */
> struct mutex cvq_lock;
>
> +   /* Work struct for acquisition of cvq processing results. */
> +   struct work_struct get_cvq;
> +
> /* Host can handle any s/g split between our header and packet data */
> bool any_header_sg;
>
> @@ -464,6 +474,14 @@ struct virtnet_info {
> struct virtnet_interrupt_coalesce intr_coal_tx;
> struct virtnet_interrupt_coalesce intr_coal_rx;
>
> +   /* Free nodes used for concurrent delivery */
> +   struct mutex coal_free_lock;
> +   struct list_head coal_free_list;
> +
> +   /* Filled when there are no free nodes or cvq buffers */
> +   struct mutex coal_wait_lock;
> +   struct list_head coal_wait_list;
> +
> unsigned long guest_offloads;
> unsigned long guest_offloads_capable;
>
> @@ -670,7 +688,7 @@ static void virtnet_cvq_done(struct virtqueue *cvq)
>  {
> struct virtnet_info *vi = cvq->vdev->priv;
>
> -   complete(&vi->ctrl->completion);
> +   schedule_work(&vi->get_cvq);
>  }
>
>  static void skb_xmit_done(struct virtqueue *vq)
> @@ -2696,7 +2714,7 @@ static bool virtnet_send_command_reply(struct 
> virtnet_info *vi,
>struct scatterlist *in)
>  {
> struct scatterlist *sgs[5], hdr, stat;
> -   u32 out_num = 0, tmp, in_num = 0;
> +   u32 out_num = 0, in_num = 0;
> int ret;
>
> /* Caller should know better */
> @@ -2730,14 +2748,14 @@ static bool virtnet_send_command_reply(struct 
> virtnet_info *vi,
> return false;
> }
>
> -   if (unlikely(!virtqueue_kick(vi->cvq)))
> -   goto unlock;
> +   if (unlikely(!virtqueue_kick(vi->cvq))) {
> +   mutex_unlock(&vi->cvq_lock);
> +   return false;
> +   }
> +   mutex_unlock(&vi->cvq_lock);
>
> -   wait_for_completion(&vi->ctrl->completion);
> -   virtqueue_get_buf(vi->cvq, &tmp);
> +   wait_for_completion(&ctrl->completion);
>
> -unlock:
> -   mutex_unlock(&vi->cvq_lock);
> return ctrl->status == VIRTIO_NET_OK;
>  }
>
> @@ -2747,6 +2765,86 @@ static bool virtnet_send_command(struct virtnet_info 
> *vi, u8 class, u8 cmd,
> return virtnet_send_command_reply(vi, class, cmd, vi->ctrl, out, 
> NULL);
>  }
>
> +static void virtnet_process_dim_cmd(struct virtnet_info *vi,
> +   struct virtnet_coal_node *node)
> +{
> +   u16 qnum = le16_to_cpu(node->coal_vqs.vqn) / 2;
> +
> +   mutex_lock(&vi->rq[qnum].dim_lock);
> +   vi->rq[qnum].intr_coal.max_usecs =
> +   le32_to_cpu(node->coal_vqs.coal.max_usecs);
> +   vi->rq[qnum].intr_coal.max_packets =
> +   le32_to_cpu(node->coal_vqs.coal.max_packets);
> +   vi->rq[qnum].dim.state = DIM_START_MEASURE;
> +   mutex_unlock(&vi->rq[qnum].dim_lock);
> +
> +   if (node->is_coal_wait) {
> +   mutex_lock(&vi->coal_wait_lock);
> +   list_del(&node->list);
> +   mutex_unlock(&vi->coal_wait_lock);
> +   kfree(node);
> +   } else {
> +   mutex_lock(&vi->coal_free_lock);
> +   list_add(&node->list, &vi->coal_free_list);
> +   mutex_unlock(&vi->coal_free_lock);
> +   }
> +}
> +
> +static int virtnet_add_dim_command(struct virtnet_info *vi,
> +  struct virtnet_coal_node *coal_node)
> +{
> +   struct scatterlist sg;
> +   int ret;
> +
> +   sg_init_one(&sg, &coal_node->coal_vqs, sizeof(coal_node->coal_vqs));
> +   ret = virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_NOTF_COAL,
> +VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
> +&coal_node->ctrl, &sg, NULL);
> +   if (!ret) {
> +   dev_warn(&vi->dev->dev,
> + 

Re: [PATCH net-next v3 4/4] virtio_net: improve dim command request efficiency

2024-06-06 Thread kernel test robot
Hi Heng,

kernel test robot noticed the following build warnings:

[auto build test WARNING on net-next/main]

url:
https://github.com/intel-lab-lkp/linux/commits/Heng-Qi/virtio_net-passing-control_buf-explicitly/20240606-141748
base:   net-next/main
patch link:
https://lore.kernel.org/r/20240606061446.127802-5-hengqi%40linux.alibaba.com
patch subject: [PATCH net-next v3 4/4] virtio_net: improve dim command request 
efficiency
config: sh-allmodconfig 
(https://download.01.org/0day-ci/archive/20240607/[email protected]/config)
compiler: sh4-linux-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): 
(https://download.01.org/0day-ci/archive/20240607/[email protected]/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot 
| Closes: 
https://lore.kernel.org/oe-kbuild-all/[email protected]/

All warnings (new ones prefixed by >>):

   drivers/net/virtio_net.c: In function 'virtnet_get_cvq_work':
>> drivers/net/virtio_net.c:2841:20: warning: suggest explicit braces to avoid 
>> ambiguous 'else' [-Wdangling-else]
2841 | if (wait_coal)
 |^


vim +/else +2841 drivers/net/virtio_net.c

  2813  
  2814  static void virtnet_get_cvq_work(struct work_struct *work)
  2815  {
  2816  struct virtnet_info *vi =
  2817  container_of(work, struct virtnet_info, get_cvq);
  2818  struct virtnet_coal_node *wait_coal;
  2819  bool valid = false;
  2820  unsigned int tmp;
  2821  void *res;
  2822  
  2823  mutex_lock(&vi->cvq_lock);
  2824  while ((res = virtqueue_get_buf(vi->cvq, &tmp)) != NULL) {
  2825  complete((struct completion *)res);
  2826  valid = true;
  2827  }
  2828  mutex_unlock(&vi->cvq_lock);
  2829  
  2830  if (!valid)
  2831  return;
  2832  
  2833  while (true) {
  2834  wait_coal = NULL;
  2835  mutex_lock(&vi->coal_wait_lock);
  2836  if (!list_empty(&vi->coal_wait_list))
  2837  wait_coal = 
list_first_entry(&vi->coal_wait_list,
  2838   struct 
virtnet_coal_node,
  2839   list);
  2840  mutex_unlock(&vi->coal_wait_lock);
> 2841  if (wait_coal)
  2842  if (virtnet_add_dim_command(vi, wait_coal))
  2843  break;
  2844  else
  2845  break;
  2846  }
  2847  }
  2848  static int virtnet_set_mac_address(struct net_device *dev, void *p)
  2849  {
  2850  struct virtnet_info *vi = netdev_priv(dev);
  2851  struct virtio_device *vdev = vi->vdev;
  2852  int ret;
  2853  struct sockaddr *addr;
  2854  struct scatterlist sg;
  2855  
  2856  if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
  2857  return -EOPNOTSUPP;
  2858  
  2859  addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
  2860  if (!addr)
  2861  return -ENOMEM;
  2862  
  2863  ret = eth_prepare_mac_addr_change(dev, addr);
  2864  if (ret)
  2865  goto out;
  2866  
  2867  if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
  2868  sg_init_one(&sg, addr->sa_data, dev->addr_len);
  2869  if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
  2870VIRTIO_NET_CTRL_MAC_ADDR_SET, 
&sg)) {
  2871  dev_warn(&vdev->dev,
  2872   "Failed to set mac address by vq 
command.\n");
  2873  ret = -EINVAL;
  2874  goto out;
  2875  }
  2876  } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
  2877 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
  2878  unsigned int i;
  2879  
  2880  /* Naturally, this has an atomicity problem. */
  2881  for (i = 0; i < dev->addr_len; i++)
  2882  virtio_cwrite8(vdev,
  2883 offsetof(struct 
virtio_net_config, mac) +
  2884 i, addr->sa_data[i]);
  2885  }
  2886  
  2887  eth_commit_mac_addr_change(dev, p);
  2888  ret = 0;
  2889  
  2890  out:
  2891  kfree(addr);
  2892  return ret;
  2893  }
  2894  

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki



Re: [PATCH net-next v3 4/4] virtio_net: improve dim command request efficiency

2024-06-06 Thread kernel test robot
Hi Heng,

kernel test robot noticed the following build warnings:

[auto build test WARNING on net-next/main]

url:
https://github.com/intel-lab-lkp/linux/commits/Heng-Qi/virtio_net-passing-control_buf-explicitly/20240606-141748
base:   net-next/main
patch link:
https://lore.kernel.org/r/20240606061446.127802-5-hengqi%40linux.alibaba.com
patch subject: [PATCH net-next v3 4/4] virtio_net: improve dim command request 
efficiency
config: riscv-defconfig 
(https://download.01.org/0day-ci/archive/20240606/[email protected]/config)
compiler: clang version 19.0.0git (https://github.com/llvm/llvm-project 
d7d2d4f53fc79b4b58e8d8d08151b577c3699d4a)
reproduce (this is a W=1 build): 
(https://download.01.org/0day-ci/archive/20240606/[email protected]/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot 
| Closes: 
https://lore.kernel.org/oe-kbuild-all/[email protected]/

All warnings (new ones prefixed by >>):

   In file included from drivers/net/virtio_net.c:7:
   In file included from include/linux/netdevice.h:38:
   In file included from include/net/net_namespace.h:43:
   In file included from include/linux/skbuff.h:17:
   In file included from include/linux/bvec.h:10:
   In file included from include/linux/highmem.h:8:
   In file included from include/linux/cacheflush.h:5:
   In file included from arch/riscv/include/asm/cacheflush.h:9:
   In file included from include/linux/mm.h:2253:
   include/linux/vmstat.h:514:36: warning: arithmetic between different 
enumeration types ('enum node_stat_item' and 'enum lru_list') 
[-Wenum-enum-conversion]
 514 | return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
 |   ~~~ ^ ~~~
>> drivers/net/virtio_net.c:2844:3: warning: add explicit braces to avoid 
>> dangling else [-Wdangling-else]
2844 | else
 | ^
   2 warnings generated.


vim +2844 drivers/net/virtio_net.c

  2813  
  2814  static void virtnet_get_cvq_work(struct work_struct *work)
  2815  {
  2816  struct virtnet_info *vi =
  2817  container_of(work, struct virtnet_info, get_cvq);
  2818  struct virtnet_coal_node *wait_coal;
  2819  bool valid = false;
  2820  unsigned int tmp;
  2821  void *res;
  2822  
  2823  mutex_lock(&vi->cvq_lock);
  2824  while ((res = virtqueue_get_buf(vi->cvq, &tmp)) != NULL) {
  2825  complete((struct completion *)res);
  2826  valid = true;
  2827  }
  2828  mutex_unlock(&vi->cvq_lock);
  2829  
  2830  if (!valid)
  2831  return;
  2832  
  2833  while (true) {
  2834  wait_coal = NULL;
  2835  mutex_lock(&vi->coal_wait_lock);
  2836  if (!list_empty(&vi->coal_wait_list))
  2837  wait_coal = 
list_first_entry(&vi->coal_wait_list,
  2838   struct 
virtnet_coal_node,
  2839   list);
  2840  mutex_unlock(&vi->coal_wait_lock);
  2841  if (wait_coal)
  2842  if (virtnet_add_dim_command(vi, wait_coal))
  2843  break;
> 2844  else
  2845  break;
  2846  }
  2847  }
  2848  static int virtnet_set_mac_address(struct net_device *dev, void *p)
  2849  {
  2850  struct virtnet_info *vi = netdev_priv(dev);
  2851  struct virtio_device *vdev = vi->vdev;
  2852  int ret;
  2853  struct sockaddr *addr;
  2854  struct scatterlist sg;
  2855  
  2856  if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
  2857  return -EOPNOTSUPP;
  2858  
  2859  addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
  2860  if (!addr)
  2861  return -ENOMEM;
  2862  
  2863  ret = eth_prepare_mac_addr_change(dev, addr);
  2864  if (ret)
  2865  goto out;
  2866  
  2867  if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
  2868  sg_init_one(&sg, addr->sa_data, dev->addr_len);
  2869  if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
  2870VIRTIO_NET_CTRL_MAC_ADDR_SET, 
&sg)) {
  2871  dev_warn(&vdev->dev,
  2872   "Failed to set mac address by vq 
command.\n");
  2873  ret = -EINVAL;
  2874  goto out;
  2875  }
  2876  } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
  2877 !virtio_has_feature(vdev, VIRTIO_F_VERSI