> -----Original Message-----
> From: David Marchand <david.march...@redhat.com>
> Sent: Tuesday, February 7, 2023 6:45 PM
> To: dev@dpdk.org
> Cc: maxime.coque...@redhat.com; step...@networkplumber.org; Xia, Chenbo
> <chenbo....@intel.com>; Hu, Jiayu <jiayu...@intel.com>; Wang, YuanX
> <yuanx.w...@intel.com>; Ding, Xuan <xuan.d...@intel.com>;
> m...@smartsharesystems.com
> Subject: [PATCH v6 5/9] vhost: annotate async accesses
> 
> vq->async is initialised and must be accessed under vq->access_lock.
> 
> Signed-off-by: David Marchand <david.march...@redhat.com>
> Acked-by: Morten Brørup <m...@smartsharesystems.com>
> Reviewed-by: Maxime Coquelin <maxime.coque...@redhat.com>
> ---
> Changes since v5:
> - rebased after packed support was added to async code,
> 
> Changes since RFC v3:
> - rebased,
> - fixed annotations vq->access_lock -> &vq->access_lock,
> - reworked free_vq,
> 
> ---
>  lib/vhost/vhost.c      |  4 ++++
>  lib/vhost/vhost.h      |  2 +-
>  lib/vhost/vhost_user.c | 10 +++++++---
>  lib/vhost/virtio_net.c | 41 +++++++++++++++++++++++++++++++++++++++++
>  4 files changed, 53 insertions(+), 4 deletions(-)
> 
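
For readers new to these annotations: __rte_exclusive_locks_required() and
__rte_guarded_var wrap clang's thread-safety attributes, so building with
clang and -Wthread-safety turns the rule in the commit message into a
compile-time check. A minimal sketch of the machinery outside DPDK, with
all names invented for illustration (compile with clang -Wthread-safety):

#include <pthread.h>
#include <stddef.h>

struct __attribute__((capability("mutex"))) my_lock {
        pthread_mutex_t m;
};

static void my_lock_take(struct my_lock *l)
        __attribute__((acquire_capability(l)))
        __attribute__((no_thread_safety_analysis))
{
        pthread_mutex_lock(&l->m);
}

static void my_lock_give(struct my_lock *l)
        __attribute__((release_capability(l)))
        __attribute__((no_thread_safety_analysis))
{
        pthread_mutex_unlock(&l->m);
}

struct my_vq {
        struct my_lock access_lock;
        /* like vq->async: accesses require a held lock */
        void *async __attribute__((guarded_by(access_lock)));
};

/* like the annotations added below: callers must hold the lock */
static void my_free_async(struct my_vq *vq)
        __attribute__((exclusive_locks_required(vq->access_lock)))
{
        vq->async = NULL;       /* ok: held by contract */
}

void my_teardown(struct my_vq *vq)
{
        my_lock_take(&vq->access_lock);
        my_free_async(vq);      /* ok */
        my_lock_give(&vq->access_lock);
        /* calling my_free_async(vq) here would warn:
         * "requires holding mutex 'vq->access_lock' exclusively" */
}
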
> diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
> index 8cd727ca2f..8bccdd8584 100644
> --- a/lib/vhost/vhost.c
> +++ b/lib/vhost/vhost.c
> @@ -369,6 +369,7 @@ cleanup_device(struct virtio_net *dev, int destroy)
> 
>  static void
>  vhost_free_async_mem(struct vhost_virtqueue *vq)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       if (!vq->async)
>               return;
> @@ -393,7 +394,9 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
>       else
>               rte_free(vq->shadow_used_split);
> 
> +     rte_spinlock_lock(&vq->access_lock);
>       vhost_free_async_mem(vq);
> +     rte_spinlock_unlock(&vq->access_lock);
>       rte_free(vq->batch_copy_elems);
>       vhost_user_iotlb_destroy(vq);
>       rte_free(vq->log_cache);
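
Taking the lock around vhost_free_async_mem() here may look superfluous,
since free_vq() runs when no other thread can still touch the virtqueue,
but the analysis has no notion of "no concurrency possible at this point":
the annotation on vhost_free_async_mem() is unconditional, so the cheap
lock/unlock pair is the simplest way to keep the contract checkable (this
is the free_vq rework mentioned in the changelog).
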
> @@ -1669,6 +1672,7 @@ rte_vhost_extern_callback_register(int vid,
> 
>  static __rte_always_inline int
>  async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       struct vhost_async *async;
>       int node = vq->numa_node;
> diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
> index 82fe9b5fda..c05313cf37 100644
> --- a/lib/vhost/vhost.h
> +++ b/lib/vhost/vhost.h
> @@ -326,7 +326,7 @@ struct vhost_virtqueue {
>       struct rte_vhost_resubmit_info *resubmit_inflight;
>       uint64_t                global_counter;
> 
> -     struct vhost_async      *async;
> +     struct vhost_async      *async __rte_guarded_var;
> 
>       int                     notif_enable;
>  #define VIRTIO_UNINITIALIZED_NOTIF   (-1)
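
Note that __rte_guarded_var maps to clang's guarded_var attribute, which
only demands that *some* held capability protect each access. A stricter
alternative would name the lock explicitly, something like the (untested)
declaration below, at the cost of tying the field to a specific member:

        struct vhost_async      *async __rte_guarded_by(&access_lock);

Either way, unannotated accesses to vq->async are now flagged when
building with clang -Wthread-safety.
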
> diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> index 70d221b9f6..8c1d60b76b 100644
> --- a/lib/vhost/vhost_user.c
> +++ b/lib/vhost/vhost_user.c
> @@ -2168,6 +2168,7 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
>                       int main_fd __rte_unused)
>  {
>       struct virtio_net *dev = *pdev;
> +     struct vhost_virtqueue *vq;
>       bool enable = !!ctx->msg.payload.state.num;
>       int index = (int)ctx->msg.payload.state.index;
> 
> @@ -2175,15 +2176,18 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
>               "set queue enable: %d to qp idx: %d\n",
>               enable, index);
> 
> -     if (enable && dev->virtqueue[index]->async) {
> -             if (dev->virtqueue[index]->async->pkts_inflight_n) {
> +     vq = dev->virtqueue[index];
> +     /* vhost_user_lock_all_queue_pairs locked all qps */
> +     vq_assert_lock(dev, vq);
> +     if (enable && vq->async) {
> +             if (vq->async->pkts_inflight_n) {
>                       VHOST_LOG_CONFIG(dev->ifname, ERR,
>                               "failed to enable vring. Inflight packets must be completed first\n");
>                       return RTE_VHOST_MSG_RESULT_ERR;
>               }
>       }
> 
> -     dev->virtqueue[index]->enabled = enable;
> +     vq->enabled = enable;
> 
>       return RTE_VHOST_MSG_RESULT_OK;
>  }
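
The vq_assert_lock() here deserves a word: the lock is actually taken
earlier, in vhost_user_lock_all_queue_pairs() before the per-request
handlers are invoked, which the per-function analysis cannot see across.
The assertion injects the fact "this lock is held" into the checker at
this point, and catches at runtime any path where it is not. A
hypothetical shape for such a helper, reusing the invented names from the
sketch above and assuming the DPDK macro wraps clang's
assert_exclusive_lock attribute:

        #include <stdlib.h>

        static int my_lock_is_held(struct my_lock *l);  /* made-up predicate */

        static void my_vq_assert_lock(struct my_vq *vq)
                __attribute__((assert_exclusive_lock(vq->access_lock)))
        {
                if (!my_lock_is_held(&vq->access_lock))
                        abort();
        }
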
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index f2ab6dba15..6672caac49 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -102,6 +102,7 @@ static __rte_always_inline int64_t
>  vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
>               int16_t dma_id, uint16_t vchan_id, uint16_t flag_idx,
>               struct vhost_iov_iter *pkt)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
>       uint16_t ring_mask = dma_info->ring_mask;
> @@ -151,6 +152,7 @@ static __rte_always_inline uint16_t
>  vhost_async_dma_transfer(struct virtio_net *dev, struct vhost_virtqueue *vq,
>               int16_t dma_id, uint16_t vchan_id, uint16_t head_idx,
>               struct vhost_iov_iter *pkts, uint16_t nr_pkts)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
>       int64_t ret, nr_copies = 0;
> @@ -434,6 +436,7 @@ static __rte_always_inline void
>  vhost_async_shadow_enqueue_packed_batch(struct vhost_virtqueue *vq,
>                                uint64_t *lens,
>                                uint16_t *ids)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint16_t i;
>       struct vhost_async *async = vq->async;
> @@ -450,6 +453,7 @@ vhost_async_shadow_enqueue_packed_batch(struct vhost_virtqueue *vq,
> 
>  static __rte_always_inline void
>  vhost_async_shadow_dequeue_packed_batch(struct vhost_virtqueue *vq, uint16_t *ids)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint16_t i;
>       struct vhost_async *async = vq->async;
> @@ -611,6 +615,7 @@ vhost_async_shadow_enqueue_packed(struct vhost_virtqueue *vq,
>                                  uint16_t *id,
>                                  uint16_t *count,
>                                  uint16_t num_buffers)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint16_t i;
>       struct vhost_async *async = vq->async;
> @@ -1118,6 +1123,7 @@ static __rte_always_inline int
>  async_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
>               struct rte_mbuf *m, uint32_t mbuf_offset,
>               uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       struct vhost_async *async = vq->async;
>       uint64_t mapped_len;
> @@ -1195,6 +1201,7 @@ static __rte_always_inline int
>  mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
>               struct rte_mbuf *m, struct buf_vector *buf_vec,
>               uint16_t nr_vec, uint16_t num_buffers, bool is_async)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint32_t vec_idx = 0;
>       uint32_t mbuf_offset, mbuf_avail;
> @@ -1323,6 +1330,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
>                           struct rte_mbuf *pkt,
>                           struct buf_vector *buf_vec,
>                           uint16_t *nr_descs)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint16_t nr_vec = 0;
>       uint16_t avail_idx = vq->last_avail_idx;
> @@ -1383,6 +1391,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
>  static __rte_noinline uint32_t
>  virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>       struct rte_mbuf **pkts, uint32_t count)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint32_t pkt_idx = 0;
>       uint16_t num_buffers;
> @@ -1610,6 +1619,7 @@ static __rte_always_inline int16_t
>  virtio_dev_rx_single_packed(struct virtio_net *dev,
>                           struct vhost_virtqueue *vq,
>                           struct rte_mbuf *pkt)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       struct buf_vector buf_vec[BUF_VECTOR_MAX];
>       uint16_t nr_descs = 0;
> @@ -1634,6 +1644,7 @@ virtio_dev_rx_packed(struct virtio_net *dev,
>                    struct vhost_virtqueue *__rte_restrict vq,
>                    struct rte_mbuf **__rte_restrict pkts,
>                    uint32_t count)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint32_t pkt_idx = 0;
> 
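
The long tail of annotations that follows is unavoidable: clang's analysis
is strictly intraprocedural and attributes are not inherited through
calls, so every static helper between an entry point that takes the lock
(rte_vhost_enqueue_burst and friends) and the code dereferencing
vq->async must restate the requirement. Reusing the invented names from
the sketch above:

        static void my_leaf(struct my_vq *vq)
                __attribute__((exclusive_locks_required(vq->access_lock)))
        {
                vq->async = NULL;
        }

        /* without its own annotation, my_middle() would warn on the
         * my_leaf() call even though all real callers hold the lock */
        static void my_middle(struct my_vq *vq)
                __attribute__((exclusive_locks_required(vq->access_lock)))
        {
                my_leaf(vq);
        }
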
> @@ -1733,6 +1744,7 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
> 
>  static __rte_always_inline uint16_t
>  async_get_first_inflight_pkt_idx(struct vhost_virtqueue *vq)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       struct vhost_async *async = vq->async;
> 
> @@ -1761,6 +1773,7 @@ store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem
>  static __rte_noinline uint32_t
>  virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>       struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       struct buf_vector buf_vec[BUF_VECTOR_MAX];
>       uint32_t pkt_idx = 0;
> @@ -1867,6 +1880,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
>                           struct buf_vector *buf_vec,
>                           uint16_t *nr_descs,
>                           uint16_t *nr_buffers)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint16_t nr_vec = 0;
>       uint16_t avail_idx = vq->last_avail_idx;
> @@ -1925,6 +1939,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
>  static __rte_always_inline int16_t
>  virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>                           struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       struct buf_vector buf_vec[BUF_VECTOR_MAX];
> 
> @@ -1947,6 +1962,7 @@ virtio_dev_rx_async_packed_batch_enqueue(struct virtio_net *dev,
>                          struct rte_mbuf **pkts,
>                          uint64_t *desc_addrs,
>                          uint64_t *lens)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
>       struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
> @@ -2007,6 +2023,7 @@ virtio_dev_rx_async_packed_batch(struct virtio_net *dev,
>                          struct vhost_virtqueue *vq,
>                          struct rte_mbuf **pkts,
>                          int16_t dma_id, uint16_t vchan_id)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint64_t desc_addrs[PACKED_BATCH_SIZE];
>       uint64_t lens[PACKED_BATCH_SIZE];
> @@ -2022,6 +2039,7 @@ virtio_dev_rx_async_packed_batch(struct virtio_net *dev,
>  static __rte_always_inline void
>  dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
>                       uint32_t nr_err, uint32_t *pkt_idx)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint16_t descs_err = 0;
>       uint16_t buffers_err = 0;
> @@ -2052,6 +2070,7 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
>  static __rte_noinline uint32_t
>  virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>       struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint32_t pkt_idx = 0;
>       uint16_t n_xfer;
> @@ -2124,6 +2143,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue
> 
>  static __rte_always_inline void
>  write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       struct vhost_async *async = vq->async;
>       uint16_t nr_left = n_descs;
> @@ -2156,6 +2176,7 @@ write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
>  static __rte_always_inline void
>  write_back_completed_descs_packed(struct vhost_virtqueue *vq,
>                               uint16_t n_buffers)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       struct vhost_async *async = vq->async;
>       uint16_t from = async->last_buffer_idx_packed;
> @@ -2220,6 +2241,7 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
>  static __rte_always_inline uint16_t
>  vhost_poll_enqueue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>       struct rte_mbuf **pkts, uint16_t count, int16_t dma_id, uint16_t vchan_id)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       struct vhost_async *async = vq->async;
>       struct async_inflight_info *pkts_info = async->pkts_info;
> @@ -2824,6 +2846,7 @@ desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
>                 struct buf_vector *buf_vec, uint16_t nr_vec,
>                 struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
>                 bool legacy_ol_flags, uint16_t slot_idx, bool is_async)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint32_t buf_avail, buf_offset, buf_len;
>       uint64_t buf_addr, buf_iova;
> @@ -3029,6 +3052,7 @@ static uint16_t
>  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>       struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
>       bool legacy_ol_flags)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint16_t i;
>       uint16_t avail_entries;
> @@ -3132,6 +3156,7 @@ static uint16_t
>  virtio_dev_tx_split_legacy(struct virtio_net *dev,
>       struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
>       struct rte_mbuf **pkts, uint16_t count)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
>  }
> @@ -3141,6 +3166,7 @@ static uint16_t
>  virtio_dev_tx_split_compliant(struct virtio_net *dev,
>       struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
>       struct rte_mbuf **pkts, uint16_t count)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
>  }
> @@ -3341,6 +3367,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
>                           uint16_t *buf_id,
>                           uint16_t *desc_count,
>                           bool legacy_ol_flags)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       struct buf_vector buf_vec[BUF_VECTOR_MAX];
>       uint32_t buf_len;
> @@ -3389,6 +3416,7 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
>                           struct rte_mempool *mbuf_pool,
>                           struct rte_mbuf *pkts,
>                           bool legacy_ol_flags)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
> 
>       uint16_t buf_id, desc_count = 0;
> @@ -3419,6 +3447,7 @@ virtio_dev_tx_packed(struct virtio_net *dev,
>                    struct rte_mbuf **__rte_restrict pkts,
>                    uint32_t count,
>                    bool legacy_ol_flags)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint32_t pkt_idx = 0;
> 
> @@ -3462,6 +3491,7 @@ static uint16_t
>  virtio_dev_tx_packed_legacy(struct virtio_net *dev,
>       struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
>       struct rte_mbuf **__rte_restrict pkts, uint32_t count)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
>  }
> @@ -3471,6 +3501,7 @@ static uint16_t
>  virtio_dev_tx_packed_compliant(struct virtio_net *dev,
>       struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
>       struct rte_mbuf **__rte_restrict pkts, uint32_t count)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
>  }
> @@ -3588,6 +3619,7 @@ static __rte_always_inline uint16_t
>  async_poll_dequeue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>               struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
>               uint16_t vchan_id, bool legacy_ol_flags)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint16_t start_idx, from, i;
>       uint16_t nr_cpl_pkts = 0;
> @@ -3634,6 +3666,7 @@ static __rte_always_inline uint16_t
>  virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>               struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
>               int16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       static bool allocerr_warned;
>       bool dropped = false;
> @@ -3780,6 +3813,7 @@ virtio_dev_tx_async_split_legacy(struct virtio_net *dev,
>               struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
>               struct rte_mbuf **pkts, uint16_t count,
>               int16_t dma_id, uint16_t vchan_id)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
>                               pkts, count, dma_id, vchan_id, true);
> @@ -3791,6 +3825,7 @@ virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
>               struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
>               struct rte_mbuf **pkts, uint16_t count,
>               int16_t dma_id, uint16_t vchan_id)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
>                               pkts, count, dma_id, vchan_id, false);
> @@ -3799,6 +3834,7 @@ virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
>  static __rte_always_inline void
>  vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
>                               uint16_t buf_id, uint16_t count)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       struct vhost_async *async = vq->async;
>       uint16_t idx = async->buffer_idx_packed;
> @@ -3820,6 +3856,7 @@ virtio_dev_tx_async_single_packed(struct virtio_net *dev,
>                       struct rte_mbuf *pkts,
>                       uint16_t slot_idx,
>                       bool legacy_ol_flags)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       int err;
>       uint16_t buf_id, desc_count = 0;
> @@ -3871,6 +3908,7 @@ virtio_dev_tx_async_packed_batch(struct virtio_net *dev,
>                          struct vhost_virtqueue *vq,
>                          struct rte_mbuf **pkts, uint16_t slot_idx,
>                          uint16_t dma_id, uint16_t vchan_id)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint16_t avail_idx = vq->last_avail_idx;
>       uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
> @@ -3927,6 +3965,7 @@ static __rte_always_inline uint16_t
>  virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>               struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
>               uint16_t count, uint16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       uint32_t pkt_idx = 0;
>       uint16_t slot_idx = 0;
> @@ -4036,6 +4075,7 @@ static uint16_t
>  virtio_dev_tx_async_packed_legacy(struct virtio_net *dev, struct vhost_virtqueue *vq,
>               struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
>               uint16_t count, uint16_t dma_id, uint16_t vchan_id)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
>                               pkts, count, dma_id, vchan_id, true);
> @@ -4046,6 +4086,7 @@ static uint16_t
>  virtio_dev_tx_async_packed_compliant(struct virtio_net *dev, struct vhost_virtqueue *vq,
>               struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
>               uint16_t count, uint16_t dma_id, uint16_t vchan_id)
> +     __rte_exclusive_locks_required(&vq->access_lock)
>  {
>       return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
>                               pkts, count, dma_id, vchan_id, false);
> --
> 2.39.1

Reviewed-by: Chenbo Xia <chenbo....@intel.com> 
