Hi Phil Yang, we noticed that this patch causes a 10% performance
degradation on ARM.
x86 seems to be unaffected, though. Do you know what may be the reason for
this behavior?

Regards,
Alex

> -----Original Message-----
> From: dev <dev-boun...@dpdk.org> On Behalf Of Phil Yang
> Sent: Sunday, July 12, 2020 23:02
> To: Matan Azrad <ma...@mellanox.com>; Shahaf Shuler
> <shah...@mellanox.com>; Slava Ovsiienko <viachesl...@mellanox.com>
> Cc: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>;
> d...@linux.vnet.ibm.com; nd <n...@arm.com>; Phil Yang <phil.y...@arm.com>;
> dev@dpdk.org; nd <n...@arm.com>
> Subject: Re: [dpdk-dev] [PATCH v3] net/mlx5: relaxed ordering for multi-packet
> RQ buffer refcnt
> 
> Hi,
> 
> We are also doing C11 atomics converting for other components.
> Your insight would be much appreciated.
> 
> Thanks,
> Phil Yang
> 
> > -----Original Message-----
> > From: dev <dev-boun...@dpdk.org> On Behalf Of Phil Yang
> > Sent: Tuesday, June 23, 2020 4:27 PM
> > To: dev@dpdk.org
> > Cc: ma...@mellanox.com; shah...@mellanox.com;
> > viachesl...@mellanox.com; Honnappa Nagarahalli
> > <honnappa.nagaraha...@arm.com>; d...@linux.vnet.ibm.com; nd
> > <n...@arm.com>
> > Subject: [dpdk-dev] [PATCH v3] net/mlx5: relaxed ordering for
> > multi-packet RQ buffer refcnt
> >
> > Use c11 atomics with explicit ordering instead of the rte_atomic ops
> > which enforce unnecessary barriers on aarch64.
> >
> > Signed-off-by: Phil Yang <phil.y...@arm.com>
> > ---
> > v3:
> > Split from the patchset:
> > http://patchwork.dpdk.org/cover/68159/
> >
> >  drivers/net/mlx5/mlx5_rxq.c  |  2 +-
> >  drivers/net/mlx5/mlx5_rxtx.c | 16 +++++++++-------
> > drivers/net/mlx5/mlx5_rxtx.h |  2 +-
> >  3 files changed, 11 insertions(+), 9 deletions(-)
> >
> > diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> > index dda0073..7f487f1 100644
> > --- a/drivers/net/mlx5/mlx5_rxq.c
> > +++ b/drivers/net/mlx5/mlx5_rxq.c
> > @@ -1545,7 +1545,7 @@ mlx5_mprq_buf_init(struct rte_mempool *mp, void
> > *opaque_arg,
> >
> >     memset(_m, 0, sizeof(*buf));
> >     buf->mp = mp;
> > -   rte_atomic16_set(&buf->refcnt, 1);
> > +   __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
> >     for (j = 0; j != strd_n; ++j) {
> >             shinfo = &buf->shinfos[j];
> >             shinfo->free_cb = mlx5_mprq_buf_free_cb; diff --git
> > a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index
> > e4106bf..f0eda88 100644
> > --- a/drivers/net/mlx5/mlx5_rxtx.c
> > +++ b/drivers/net/mlx5/mlx5_rxtx.c
> > @@ -1595,10 +1595,11 @@ mlx5_mprq_buf_free_cb(void *addr
> __rte_unused,
> > void *opaque)  {
> >     struct mlx5_mprq_buf *buf = opaque;
> >
> > -   if (rte_atomic16_read(&buf->refcnt) == 1) {
> > +   if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
> >             rte_mempool_put(buf->mp, buf);
> > -   } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
> > -           rte_atomic16_set(&buf->refcnt, 1);
> > +   } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
> > +                                          __ATOMIC_RELAXED) == 0)) {
> > +           __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
> >             rte_mempool_put(buf->mp, buf);
> >     }
> >  }
> > @@ -1678,7 +1679,8 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct
> > rte_mbuf **pkts, uint16_t pkts_n)
> >
> >             if (consumed_strd == strd_n) {
> >                     /* Replace WQE only if the buffer is still in use. */
> > -                   if (rte_atomic16_read(&buf->refcnt) > 1) {
> > +                   if (__atomic_load_n(&buf->refcnt,
> > +                                       __ATOMIC_RELAXED) > 1) {
> >                             mprq_buf_replace(rxq, rq_ci & wq_mask,
> strd_n);
> >                             /* Release the old buffer. */
> >                             mlx5_mprq_buf_free(buf);
> > @@ -1790,9 +1792,9 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct
> > rte_mbuf **pkts, uint16_t pkts_n)
> >                     void *buf_addr;
> >
> >                     /* Increment the refcnt of the whole chunk. */
> > -                   rte_atomic16_add_return(&buf->refcnt, 1);
> > -                   MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf-
> > >refcnt) <=
> > -                               strd_n + 1);
> > +                   __atomic_add_fetch(&buf->refcnt, 1,
> > __ATOMIC_ACQUIRE);
> > +                   MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
> > +                               __ATOMIC_RELAXED) <= strd_n + 1);
> >                     buf_addr = RTE_PTR_SUB(addr,
> > RTE_PKTMBUF_HEADROOM);
> >                     /*
> >                      * MLX5 device doesn't use iova but it is necessary in a
> diff
> > --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
> > index 26621ff..0fc15f3 100644
> > --- a/drivers/net/mlx5/mlx5_rxtx.h
> > +++ b/drivers/net/mlx5/mlx5_rxtx.h
> > @@ -78,7 +78,7 @@ struct rxq_zip {
> >  /* Multi-Packet RQ buffer header. */
> >  struct mlx5_mprq_buf {
> >     struct rte_mempool *mp;
> > -   rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
> > +   uint16_t refcnt; /* Atomically accessed refcnt. */
> >     uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first
> packet.
> > */
> >     struct rte_mbuf_ext_shared_info shinfos[];
> >     /*
> > --
> > 2.7.4

Reply via email to