<snip>

> 
> Use C11 atomics with RELAXED ordering instead of the rte_atomic ops, which
> enforce unnecessary barriers on aarch64.
> 
> Signed-off-by: Phil Yang <phil.y...@arm.com>
Looks good.

Reviewed-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
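
For anyone skimming: a minimal sketch of the pattern this patch moves to, i.e.
a plain uint16_t refcnt driven through the C11/GCC __atomic builtins with
__ATOMIC_RELAXED, so no barrier instructions get emitted on aarch64. The
struct and helper names below are made up for illustration; they are not the
mlx5 ones.

#include <stdint.h>

struct demo_buf {
	uint16_t refcnt; /* Atomically accessed refcnt. */
};

static inline void
demo_buf_get(struct demo_buf *b)
{
	/* Relaxed increment: atomicity only, no ordering implied. */
	__atomic_add_fetch(&b->refcnt, 1, __ATOMIC_RELAXED);
}

static inline int
demo_buf_put(struct demo_buf *b)
{
	/* Returns 1 when the last reference is dropped. */
	return __atomic_sub_fetch(&b->refcnt, 1, __ATOMIC_RELAXED) == 0;
}

Whether relaxed ordering is enough of course depends on how buffer ownership
is synchronized around these calls; per the v4 note below, the ACQUIRE in the
Rx burst path was judged unnecessary for this case.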

> ---
> v4:
> Remove the unnecessary ACQUIRE barrier in rx burst path. (Honnappa)
> 
> v3:
> Split from the patchset:
> http://patchwork.dpdk.org/cover/68159/
> 
>  drivers/net/mlx5/mlx5_rxq.c  |  2 +-
>  drivers/net/mlx5/mlx5_rxtx.c | 16 +++++++++-------
>  drivers/net/mlx5/mlx5_rxtx.h |  2 +-
>  3 files changed, 11 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> index 79eb8f8..40e0239 100644
> --- a/drivers/net/mlx5/mlx5_rxq.c
> +++ b/drivers/net/mlx5/mlx5_rxq.c
> @@ -2012,7 +2012,7 @@ mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
> 
>       memset(_m, 0, sizeof(*buf));
>       buf->mp = mp;
> -     rte_atomic16_set(&buf->refcnt, 1);
> +     __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
>       for (j = 0; j != strd_n; ++j) {
>               shinfo = &buf->shinfos[j];
>               shinfo->free_cb = mlx5_mprq_buf_free_cb;
> diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
> index 1b71e94..549477b 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.c
> +++ b/drivers/net/mlx5/mlx5_rxtx.c
> @@ -1626,10 +1626,11 @@ mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
>  {
>       struct mlx5_mprq_buf *buf = opaque;
> 
> -     if (rte_atomic16_read(&buf->refcnt) == 1) {
> +     if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
>               rte_mempool_put(buf->mp, buf);
> -     } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
> -             rte_atomic16_set(&buf->refcnt, 1);
> +     } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
> +                                            __ATOMIC_RELAXED) == 0)) {
> +             __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
>               rte_mempool_put(buf->mp, buf);
>       }
>  }
> @@ -1709,7 +1710,8 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
> 
>               if (consumed_strd == strd_n) {
>                       /* Replace WQE only if the buffer is still in use. */
> -                     if (rte_atomic16_read(&buf->refcnt) > 1) {
> +                     if (__atomic_load_n(&buf->refcnt,
> +                                         __ATOMIC_RELAXED) > 1) {
>                               mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
>                               /* Release the old buffer. */
>                               mlx5_mprq_buf_free(buf);
> @@ -1821,9 +1823,9 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
>                       void *buf_addr;
> 
>                       /* Increment the refcnt of the whole chunk. */
> -                     rte_atomic16_add_return(&buf->refcnt, 1);
> -                     MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
> -                                 strd_n + 1);
> +                     __atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
> +                     MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
> +                                 __ATOMIC_RELAXED) <= strd_n + 1);
>                       buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
>                       /*
>                        * MLX5 device doesn't use iova but it is necessary in a
> diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
> index c02a007..467f31d 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.h
> +++ b/drivers/net/mlx5/mlx5_rxtx.h
> @@ -68,7 +68,7 @@ struct rxq_zip {
>  /* Multi-Packet RQ buffer header. */
>  struct mlx5_mprq_buf {
>       struct rte_mempool *mp;
> -     rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
> +     uint16_t refcnt; /* Atomically accessed refcnt. */
>       uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
>       struct rte_mbuf_ext_shared_info shinfos[];
>       /*
> --
> 2.7.4
