Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
Acked-by: Stephen Hemminger <step...@networkplumber.org>
---
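For reference, below is a minimal standalone sketch of the two conversion
patterns this patch applies: the relaxed memzone-id counter and the
acquire fence that orders descriptor word loads. It assumes the DPDK
rte_stdatomic.h / rte_atomic.h headers are on the include path; the
demo_* names are illustrative only and are not part of the driver.

    #include <stdint.h>
    #include <rte_atomic.h>
    #include <rte_stdatomic.h>

    /* RTE_ATOMIC() marks the object as atomic so the optional C11
     * stdatomic build type-checks it; the _explicit fetch-add is the
     * drop-in for __atomic_fetch_add(..., __ATOMIC_RELAXED).
     */
    static RTE_ATOMIC(uint64_t) demo_memzone_id;

    static uint64_t
    demo_next_id(void)
    {
            return rte_atomic_fetch_add_explicit(&demo_memzone_id, 1,
                            rte_memory_order_relaxed);
    }

    /* The acquire fence keeps the load of qword1 (which carries the DD
     * bit) ordered before the loads of the remaining descriptor words;
     * only the memory order token changes, from __ATOMIC_ACQUIRE to
     * rte_memory_order_acquire.
     */
    static void
    demo_read_desc(const volatile uint64_t *rxdp, uint64_t out[2])
    {
            out[1] = rxdp[1];       /* qword1, holds the DD bit */
            rte_atomic_thread_fence(rte_memory_order_acquire);
            out[0] = rxdp[0];       /* ordered after the qword1 load */
    }
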
 drivers/net/i40e/i40e_ethdev.c        | 4 ++--
 drivers/net/i40e/i40e_rxtx.c          | 6 +++---
 drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 380ce1a..801cc95 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -4687,7 +4687,7 @@ enum i40e_status_code
                        u64 size,
                        u32 alignment)
 {
-       static uint64_t i40e_dma_memzone_id;
+       static RTE_ATOMIC(uint64_t) i40e_dma_memzone_id;
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
 
@@ -4695,7 +4695,7 @@ enum i40e_status_code
                return I40E_ERR_PARAM;
 
        snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
-               __atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
+               rte_atomic_fetch_add_explicit(&i40e_dma_memzone_id, 1, rte_memory_order_relaxed));
        mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
                        RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
        if (!mz)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 5d25ab4..155f243 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -486,7 +486,7 @@
                }
 
                /* This barrier is to order loads of different words in the descriptor */
-               rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+               rte_atomic_thread_fence(rte_memory_order_acquire);
 
                /* Compute how many status bits were set */
                for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) {
@@ -745,7 +745,7 @@
                 * Use acquire fence to ensure that qword1 which includes DD
                 * bit is loaded before loading of other descriptor words.
                 */
-               rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+               rte_atomic_thread_fence(rte_memory_order_acquire);
 
                rxd = *rxdp;
                nb_hold++;
@@ -867,7 +867,7 @@
                 * Use acquire fence to ensure that qword1 which includes DD
                 * bit is loaded before loading of other descriptor words.
                 */
-               rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+               rte_atomic_thread_fence(rte_memory_order_acquire);
 
                rxd = *rxdp;
                nb_hold++;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index d873e30..3a99137 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -425,7 +425,7 @@
                descs[0] =  vld1q_u64((uint64_t *)(rxdp));
 
                /* Use acquire fence to order loads of descriptor qwords */
-               rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+               rte_atomic_thread_fence(rte_memory_order_acquire);
                /* A.2 reload qword0 to make it ordered after qword1 load */
                descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
                descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
-- 
1.8.3.1
