On Mon, 21 Mar 2022 14:04:28 +0800 Jason Wang wrote:
> A userspace-triggerable infinite loop could happen in
> mlx5_cvq_kick_handler() if userspace keeps sending a large number of
> cvq requests.
> 
> Fix this by introducing a quota and re-queueing the work if we run
> out of budget. While at it, use a per-device workqueue to avoid
> on-demand memory allocation for the cvq.
> 
> Fixes: 5262912ef3cfc ("vdpa/mlx5: Add support for control VQ and MAC setting")
> Signed-off-by: Jason Wang <jasow...@redhat.com>
> ---
>  drivers/vdpa/mlx5/net/mlx5_vnet.c | 28 +++++++++++++++-------------
>  1 file changed, 15 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index d0f91078600e..d5a6fb3f9c41 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -163,6 +163,7 @@ struct mlx5_vdpa_net {
>       u32 cur_num_vqs;
>       struct notifier_block nb;
>       struct vdpa_callback config_cb;
> +     struct mlx5_vdpa_wq_ent cvq_ent;
>  };
>  
>  static void free_resources(struct mlx5_vdpa_net *ndev);
> @@ -1600,6 +1601,8 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
>       return status;
>  }
>  
> +#define MLX5_CVQ_BUDGET 16
> +

This is not needed: given the singlethread workqueue, a cond_resched()
in the worker context can do the job instead of requeueing the work.
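
Something like the untested sketch below, which only illustrates the
idea and assumes the command handling in the loop body stays as in
your patch:

	while (true) {
		err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov,
					   &cvq->head, GFP_ATOMIC);
		if (err <= 0)
			break;

		/* ... read the command and dispatch it as before ... */

		if (vringh_need_notify_iotlb(&cvq->vring))
			vringh_notify(&cvq->vring);

		/* Yield between requests; the worker runs in process context. */
		cond_resched();
	}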

Hillf

>  static void mlx5_cvq_kick_handler(struct work_struct *work)
>  {
>       virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
> @@ -1609,17 +1612,17 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
>       struct mlx5_control_vq *cvq;
>       struct mlx5_vdpa_net *ndev;
>       size_t read, write;
> -     int err;
> +     int err, n = 0;
>  
>       wqent = container_of(work, struct mlx5_vdpa_wq_ent, work);
>       mvdev = wqent->mvdev;
>       ndev = to_mlx5_vdpa_ndev(mvdev);
>       cvq = &mvdev->cvq;
>       if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
> -             goto out;
> +             return;
>  
>       if (!cvq->ready)
> -             goto out;
> +             return;
>  
>       while (true) {
>               err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head,
> @@ -1653,9 +1656,13 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
>  
>               if (vringh_need_notify_iotlb(&cvq->vring))
>                       vringh_notify(&cvq->vring);
> +
> +             n++;
> +             if (n > MLX5_CVQ_BUDGET) {
> +                     queue_work(mvdev->wq, &wqent->work);
> +                     break;
> +             }
>       }
> -out:
> -     kfree(wqent);
>  }
>  
>  static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
> @@ -1663,7 +1670,6 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
>       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
>       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
>       struct mlx5_vdpa_virtqueue *mvq;
> -     struct mlx5_vdpa_wq_ent *wqent;
>  
>       if (!is_index_valid(mvdev, idx))
>               return;
> @@ -1672,13 +1678,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
>               if (!mvdev->cvq.ready)
>                       return;
>  
> -             wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
> -             if (!wqent)
> -                     return;
> -
> -             wqent->mvdev = mvdev;
> -             INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
> -             queue_work(mvdev->wq, &wqent->work);
> +             queue_work(mvdev->wq, &ndev->cvq_ent.work);
>               return;
>       }
>  
> @@ -2668,6 +2668,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
>       if (err)
>               goto err_mr;
>  
> +     ndev->cvq_ent.mvdev = mvdev;
> +     INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
>       mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
>       if (!mvdev->wq) {
>               err = -ENOMEM;
> -- 
> 2.18.1