Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
Acked-by: Stephen Hemminger <step...@networkplumber.org>
---
 drivers/net/hns3/hns3_cmd.c       | 18 ++++++------
 drivers/net/hns3/hns3_dcb.c       |  2 +-
 drivers/net/hns3/hns3_ethdev.c    | 36 +++++++++++------------
 drivers/net/hns3/hns3_ethdev.h    | 32 ++++++++++-----------
 drivers/net/hns3/hns3_ethdev_vf.c | 60 +++++++++++++++++++--------------------
 drivers/net/hns3/hns3_intr.c      | 36 +++++++++++------------
 drivers/net/hns3/hns3_intr.h      |  4 +--
 drivers/net/hns3/hns3_mbx.c       |  6 ++--
 drivers/net/hns3/hns3_mp.c        |  6 ++--
 drivers/net/hns3/hns3_rxtx.c      | 10 +++----
 drivers/net/hns3/hns3_tm.c        |  4 +--
 11 files changed, 107 insertions(+), 107 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 001ff49..3c5fdbe 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -44,12 +44,12 @@
 hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
                      uint64_t size, uint32_t alignment)
 {
-       static uint64_t hns3_dma_memzone_id;
+       static RTE_ATOMIC(uint64_t) hns3_dma_memzone_id;
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
 
        snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
-               __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
+               rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1, rte_memory_order_relaxed));
        mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
                                         RTE_MEMZONE_IOVA_CONTIG, alignment,
                                         RTE_PGSIZE_2M);
@@ -198,8 +198,8 @@
                hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
                         csq->next_to_use, csq->next_to_clean);
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-                       __atomic_store_n(&hw->reset.disable_cmd, 1,
-                                        __ATOMIC_RELAXED);
+                       rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+                                        rte_memory_order_relaxed);
                        hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
                }
 
@@ -313,7 +313,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
                if (hns3_cmd_csq_done(hw))
                        return 0;
 
-               if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+               if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
                        hns3_err(hw,
                                 "Don't wait for reply because of disable_cmd");
                        return -EBUSY;
@@ -360,7 +360,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
        int retval;
        uint32_t ntc;
 
-       if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+       if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
                return -EBUSY;
 
        rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -747,7 +747,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
                ret = -EBUSY;
                goto err_cmd_init;
        }
-       __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
 
        ret = hns3_cmd_query_firmware_version_and_capability(hw);
        if (ret) {
@@ -790,7 +790,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
        return 0;
 
 err_cmd_init:
-       __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
        return ret;
 }
 
@@ -819,7 +819,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
        if (!hns->is_vf)
                (void)hns3_firmware_compat_config(hw, false);
 
-       __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 
        /*
         * A delay is added to ensure that the register cleanup operations
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 915e4eb..2f917fe 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -648,7 +648,7 @@
         * and configured directly to the hardware in the RESET_STAGE_RESTORE
         * stage of the reset process.
         */
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
                for (i = 0; i < hw->rss_ind_tbl_size; i++)
                        rss_cfg->rss_indirection_tbl[i] =
                                                        i % hw->alloc_rss_size;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 9730b9a..327f6fe 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -99,7 +99,7 @@ struct hns3_intr_state {
 };
 
 static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
-                                                uint64_t *levels);
+                                                RTE_ATOMIC(uint64_t) *levels);
 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
                                    int on);
@@ -134,7 +134,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 {
        struct hns3_hw *hw = &hns->hw;
 
-       __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
        hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
        *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
        hw->reset.stats.imp_cnt++;
@@ -148,7 +148,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 {
        struct hns3_hw *hw = &hns->hw;
 
-       __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
        hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
        *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
        hw->reset.stats.global_cnt++;
@@ -1151,7 +1151,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
         * ensure that the hardware configuration remains unchanged before and
         * after reset.
         */
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
                hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
                hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
        }
@@ -1175,7 +1175,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
         * we will restore configurations to hardware in hns3_restore_vlan_table
         * and hns3_restore_vlan_conf later.
         */
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
                ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
                if (ret) {
                        hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
@@ -5059,7 +5059,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
        int ret;
 
        PMD_INIT_FUNC_TRACE();
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
                return -EBUSY;
 
        rte_spinlock_lock(&hw->lock);
@@ -5150,7 +5150,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
         * during reset and is required to be released after the reset is
         * completed.
         */
-       if (__atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED) == 0)
+       if (rte_atomic_load_explicit(&hw->reset.resetting,  rte_memory_order_relaxed) == 0)
                hns3_dev_release_mbufs(hns);
 
        ret = hns3_cfg_mac_mode(hw, false);
@@ -5158,7 +5158,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
                return ret;
        hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
-       if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+       if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
                hns3_configure_all_mac_addr(hns, true);
                ret = hns3_reset_all_tqps(hns);
                if (ret) {
@@ -5184,7 +5184,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
        hns3_stop_rxtx_datapath(dev);
 
        rte_spinlock_lock(&hw->lock);
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
                hns3_tm_dev_stop_proc(hw);
                hns3_config_mac_tnl_int(hw, false);
                hns3_stop_tqps(hw);
@@ -5577,7 +5577,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 
        last_req = hns3_get_reset_level(hns, &hw->reset.pending);
        if (last_req == HNS3_NONE_RESET || last_req < new_req) {
-               __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
                hns3_schedule_delayed_reset(hns);
                hns3_warn(hw, "High level reset detected, delay do reset");
                return true;
@@ -5677,7 +5677,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 }
 
 static enum hns3_reset_level
-hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+hns3_get_reset_level(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
 {
        struct hns3_hw *hw = &hns->hw;
        enum hns3_reset_level reset_level = HNS3_NONE_RESET;
@@ -5737,7 +5737,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
                 * any mailbox handling or command to firmware is only valid
                 * after hns3_cmd_init is called.
                 */
-               __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
                hw->reset.stats.request_cnt++;
                break;
        case HNS3_IMP_RESET:
@@ -5792,7 +5792,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
         * from table space. Hence, for function reset software intervention is
         * required to delete the entries
         */
-       if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+       if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
                hns3_configure_all_mc_mac_addr(hns, true);
        rte_spinlock_unlock(&hw->lock);
 
@@ -5913,10 +5913,10 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
         * The interrupt may have been lost. It is necessary to handle
         * the interrupt to recover from the error.
         */
-       if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+       if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
                            SCHEDULE_DEFERRED) {
-               __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-                                 __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+                                 rte_memory_order_relaxed);
                hns3_err(hw, "Handling interrupts in delayed tasks");
                hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
                reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
@@ -5925,7 +5925,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
                        hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
                }
        }
-       __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
 
        /*
         * Check if there is any ongoing reset in the hardware. This status can
@@ -6576,7 +6576,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 
        hw->adapter_state = HNS3_NIC_INITIALIZED;
 
-       if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+       if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
                            SCHEDULE_PENDING) {
                hns3_err(hw, "Reschedule reset service after dev_init");
                hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index e70c5ff..4c0f076 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -401,17 +401,17 @@ enum hns3_schedule {
 
 struct hns3_reset_data {
        enum hns3_reset_stage stage;
-       uint16_t schedule;
+       RTE_ATOMIC(uint16_t) schedule;
        /* Reset flag, covering the entire reset process */
-       uint16_t resetting;
+       RTE_ATOMIC(uint16_t) resetting;
        /* Used to disable sending cmds during reset */
-       uint16_t disable_cmd;
+       RTE_ATOMIC(uint16_t) disable_cmd;
        /* The reset level being processed */
        enum hns3_reset_level level;
        /* Reset level set, each bit represents a reset level */
-       uint64_t pending;
+       RTE_ATOMIC(uint64_t) pending;
        /* Request reset level set, from interrupt or mailbox */
-       uint64_t request;
+       RTE_ATOMIC(uint64_t) request;
        int attempts; /* Reset failure retry */
        int retries;  /* Timeout failure retry in reset_post */
        /*
@@ -499,7 +499,7 @@ struct hns3_hw {
         * by dev_set_link_up() or dev_start().
         */
        bool set_link_down;
-       unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+       RTE_ATOMIC(unsigned int) secondary_cnt; /* Number of secondary processes init'd. */
        struct hns3_tqp_stats tqp_stats;
        /* Include Mac stats | Rx stats | Tx stats */
        struct hns3_mac_stats mac_stats;
@@ -844,7 +844,7 @@ struct hns3_vf {
        struct hns3_adapter *adapter;
 
        /* Whether PF support push link status change to VF */
-       uint16_t pf_push_lsc_cap;
+       RTE_ATOMIC(uint16_t) pf_push_lsc_cap;
 
        /*
         * If PF support push link status change, VF still need send request to
@@ -853,7 +853,7 @@ struct hns3_vf {
         */
        uint16_t req_link_info_cnt;
 
-       uint16_t poll_job_started; /* whether poll job is started */
+       RTE_ATOMIC(uint16_t) poll_job_started; /* whether poll job is started */
 };
 
 struct hns3_adapter {
@@ -997,32 +997,32 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
        hns3_read_reg((a)->io_base, (reg))
 
 static inline uint64_t
-hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_test_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
 {
        uint64_t res;
 
-       res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
+       res = (rte_atomic_load_explicit(addr, rte_memory_order_relaxed) & (1UL << nr)) != 0;
        return res;
 }
 
 static inline void
-hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_set_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
 {
-       __atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
+       rte_atomic_fetch_or_explicit(addr, (1UL << nr), rte_memory_order_relaxed);
 }
 
 static inline void
-hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
 {
-       __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
+       rte_atomic_fetch_and_explicit(addr, ~(1UL << nr), rte_memory_order_relaxed);
 }
 
 static inline uint64_t
-hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
 {
        uint64_t mask = (1UL << nr);
 
-       return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
+       return rte_atomic_fetch_and_explicit(addr, ~mask, rte_memory_order_relaxed) & mask;
 }
 
 int
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 4eeb46a..b83d5b9 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -37,7 +37,7 @@ enum hns3vf_evt_cause {
 };
 
 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
-                                                   uint64_t *levels);
+                                                   RTE_ATOMIC(uint64_t) *levels);
 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
 
@@ -484,7 +484,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
         * MTU value issued by hns3 VF PMD must be less than or equal to
         * PF's MTU.
         */
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
                hns3_err(hw, "Failed to set mtu during resetting");
                return -EIO;
        }
@@ -565,7 +565,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
                rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
                hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
                hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
-               __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
                val = hns3_read_dev(hw, HNS3_VF_RST_ING);
                hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
                val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
@@ -634,8 +634,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
        struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
 
        if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
-               __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
-                                         __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+               rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+                                         rte_memory_order_acquire, rte_memory_order_acquire);
 }
 
 static void
@@ -650,8 +650,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
        struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
        struct hns3_vf_to_pf_msg req;
 
-       __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
-                        __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
+                        rte_memory_order_release);
 
        hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
        (void)hns3vf_mbx_send(hw, &req, false, NULL, 0);
@@ -666,7 +666,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
                 * mailbox from PF driver to get this capability.
                 */
                hns3vf_handle_mbx_msg(hw);
-               if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+               if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) !=
                        HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
                        break;
                remain_ms--;
@@ -677,10 +677,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
         * state: unknown (means pf not ack), not_supported, supported.
         * Here config it as 'not_supported' when it's 'unknown' state.
         */
-       __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
-                                 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+       rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+                                 rte_memory_order_acquire, rte_memory_order_acquire);
 
-       if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
+       if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) ==
                HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
                hns3_info(hw, "detect PF support push link status change!");
        } else {
@@ -920,7 +920,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
        bool send_req;
        int ret;
 
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
                return;
 
        send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
@@ -956,7 +956,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
         * sending request to PF kernel driver, then could update link status by
         * process PF kernel driver's link status mailbox message.
         */
-       if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
+       if (!rte_atomic_load_explicit(&vf->poll_job_started, rte_memory_order_relaxed))
                return;
 
        if (hw->adapter_state != HNS3_NIC_STARTED)
@@ -994,7 +994,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
        struct hns3_hw *hw = &hns->hw;
        int ret;
 
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
                hns3_err(hw,
                         "vf set vlan id failed during resetting, vlan_id =%u",
                         vlan_id);
@@ -1059,7 +1059,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
        unsigned int tmp_mask;
        int ret = 0;
 
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
                hns3_err(hw, "vf set vlan offload failed during resetting, mask = 0x%x",
                         mask);
                return -EIO;
@@ -1252,7 +1252,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
        if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
                vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
 
-       __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&vf->poll_job_started, 1, rte_memory_order_relaxed);
 
        hns3vf_service_handler(dev);
 }
@@ -1264,7 +1264,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 
        rte_eal_alarm_cancel(hns3vf_service_handler, dev);
 
-       __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&vf->poll_job_started, 0, rte_memory_order_relaxed);
 }
 
 static int
@@ -1500,10 +1500,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
         * during reset and is required to be released after the reset is
         * completed.
         */
-       if (__atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED) == 0)
+       if (rte_atomic_load_explicit(&hw->reset.resetting,  rte_memory_order_relaxed) == 0)
                hns3_dev_release_mbufs(hns);
 
-       if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+       if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
                hns3_configure_all_mac_addr(hns, true);
                ret = hns3_reset_all_tqps(hns);
                if (ret) {
@@ -1528,7 +1528,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
        hns3_stop_rxtx_datapath(dev);
 
        rte_spinlock_lock(&hw->lock);
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
                hns3_stop_tqps(hw);
                hns3vf_do_stop(hns);
                hns3_unmap_rx_interrupt(dev);
@@ -1643,7 +1643,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
        int ret;
 
        PMD_INIT_FUNC_TRACE();
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
                return -EBUSY;
 
        rte_spinlock_lock(&hw->lock);
@@ -1773,7 +1773,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 
        last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
        if (last_req == HNS3_NONE_RESET || last_req < new_req) {
-               __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
                hns3_schedule_delayed_reset(hns);
                hns3_warn(hw, "High level reset detected, delay do reset");
                return true;
@@ -1847,7 +1847,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
                if (ret)
                        return ret;
        }
-       __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 
        return 0;
 }
@@ -1888,7 +1888,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
         * from table space. Hence, for function reset software intervention is
         * required to delete the entries.
         */
-       if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+       if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
                hns3_configure_all_mc_mac_addr(hns, true);
        rte_spinlock_unlock(&hw->lock);
 
@@ -2030,7 +2030,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 }
 
 static enum hns3_reset_level
-hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3vf_get_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
 {
        enum hns3_reset_level reset_level;
 
@@ -2070,10 +2070,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
         * The interrupt may have been lost. It is necessary to handle
         * the interrupt to recover from the error.
         */
-       if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+       if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
                            SCHEDULE_DEFERRED) {
-               __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-                                __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+                                rte_memory_order_relaxed);
                hns3_err(hw, "Handling interrupts in delayed tasks");
                hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
                reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
@@ -2082,7 +2082,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
                        hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
                }
        }
-       __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
 
        /*
         * Hardware reset has been notified, we now have to poll & check if
@@ -2278,7 +2278,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 
        hw->adapter_state = HNS3_NIC_INITIALIZED;
 
-       if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+       if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
                            SCHEDULE_PENDING) {
                hns3_err(hw, "Reschedule reset service after dev_init");
                hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 916bf30..26fa2eb 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -2033,7 +2033,7 @@ enum hns3_hw_err_report_type {
 
 static int
 hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
-                    int num, uint64_t *levels,
+                    int num, RTE_ATOMIC(uint64_t) *levels,
                     enum hns3_hw_err_report_type err_type)
 {
        const struct hns3_hw_error_desc *err = pf_ras_err_tbl;
@@ -2104,7 +2104,7 @@ enum hns3_hw_err_report_type {
 }
 
 void
-hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
 {
        uint32_t mpf_bd_num, pf_bd_num, bd_num;
        struct hns3_hw *hw = &hns->hw;
@@ -2151,7 +2151,7 @@ enum hns3_hw_err_report_type {
 }
 
 void
-hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
 {
        uint32_t mpf_bd_num, pf_bd_num, bd_num;
        struct hns3_hw *hw = &hns->hw;
@@ -2402,7 +2402,7 @@ enum hns3_hw_err_report_type {
        hw->reset.request = 0;
        hw->reset.pending = 0;
        hw->reset.resetting = 0;
-       __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
        hw->reset.wait_data = rte_zmalloc("wait_data",
                                          sizeof(struct hns3_wait_data), 0);
        if (!hw->reset.wait_data) {
@@ -2419,8 +2419,8 @@ enum hns3_hw_err_report_type {
 
        /* Reschedule the reset process after successful initialization */
        if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
-               __atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
-                                __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_PENDING,
+                                rte_memory_order_relaxed);
                return;
        }
 
@@ -2428,15 +2428,15 @@ enum hns3_hw_err_report_type {
                return;
 
        /* Schedule restart alarm if it is not scheduled yet */
-       if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+       if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
                        SCHEDULE_REQUESTED)
                return;
-       if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+       if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
                            SCHEDULE_DEFERRED)
                rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
 
-       __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-                                __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+                                rte_memory_order_relaxed);
 
        rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
 }
@@ -2453,11 +2453,11 @@ enum hns3_hw_err_report_type {
                return;
        }
 
-       if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
+       if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) !=
                            SCHEDULE_NONE)
                return;
-       __atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
-                        __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_DEFERRED,
+                        rte_memory_order_relaxed);
        rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
 }
 
@@ -2537,7 +2537,7 @@ enum hns3_hw_err_report_type {
 }
 
 static void
-hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3_clear_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
 {
        uint64_t merge_cnt = hw->reset.stats.merge_cnt;
        uint64_t tmp;
@@ -2633,7 +2633,7 @@ enum hns3_hw_err_report_type {
         * Regardless of whether the execution is successful or not, the
         * flow after execution must be continued.
         */
-       if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+       if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
                (void)hns3_cmd_init(hw);
 reset_fail:
        hw->reset.attempts = 0;
@@ -2661,7 +2661,7 @@ enum hns3_hw_err_report_type {
        int ret;
 
        if (hw->reset.stage == RESET_STAGE_NONE) {
-               __atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&hns->hw.reset.resetting, 1, rte_memory_order_relaxed);
                hw->reset.stage = RESET_STAGE_DOWN;
                hns3_report_reset_begin(hw);
                ret = hw->reset.ops->stop_service(hns);
@@ -2750,7 +2750,7 @@ enum hns3_hw_err_report_type {
                hns3_notify_reset_ready(hw, false);
                hns3_clear_reset_level(hw, &hw->reset.pending);
                hns3_clear_reset_status(hw);
-               __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
                hw->reset.attempts = 0;
                hw->reset.stats.success_cnt++;
                hw->reset.stage = RESET_STAGE_NONE;
@@ -2812,7 +2812,7 @@ enum hns3_hw_err_report_type {
                hw->reset.mbuf_deferred_free = false;
        }
        rte_spinlock_unlock(&hw->lock);
-       __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
        hw->reset.stage = RESET_STAGE_NONE;
        hns3_clock_gettime(&tv);
        timersub(&tv, &hw->reset.start_time, &tv_delta);
diff --git a/drivers/net/hns3/hns3_intr.h b/drivers/net/hns3/hns3_intr.h
index aca1c07..1edb07d 100644
--- a/drivers/net/hns3/hns3_intr.h
+++ b/drivers/net/hns3/hns3_intr.h
@@ -171,8 +171,8 @@ struct hns3_hw_error_desc {
 };
 
 int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en);
-void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
-void hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels);
+void hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
+void hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
 void hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en);
 void hns3_handle_error(struct hns3_adapter *hns);
 
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 9cdbc16..10c6e3b 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -65,7 +65,7 @@
 
        mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
        while (wait_time < mbx_time_limit) {
-               if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+               if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
                        hns3_err(hw, "Don't wait for mbx response because of "
                                 "disable_cmd");
                        return -EBUSY;
@@ -382,7 +382,7 @@
        rte_spinlock_lock(&hw->cmq.crq.lock);
 
        while (!hns3_cmd_crq_empty(hw)) {
-               if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+               if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
                        rte_spinlock_unlock(&hw->cmq.crq.lock);
                        return;
                }
@@ -457,7 +457,7 @@
        }
 
        while (!hns3_cmd_crq_empty(hw)) {
-               if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+               if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
                        rte_spinlock_unlock(&hw->cmq.crq.lock);
                        return;
                }
diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c
index 556f194..ba8f8ec 100644
--- a/drivers/net/hns3/hns3_mp.c
+++ b/drivers/net/hns3/hns3_mp.c
@@ -151,7 +151,7 @@
        int i;
 
        if (rte_eal_process_type() == RTE_PROC_SECONDARY ||
-               __atomic_load_n(&hw->secondary_cnt, __ATOMIC_RELAXED) == 0)
+               rte_atomic_load_explicit(&hw->secondary_cnt, rte_memory_order_relaxed) == 0)
                return;
 
        if (!mp_req_type_is_valid(type)) {
@@ -277,7 +277,7 @@ void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev)
                                     ret);
                        return ret;
                }
-               __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
        } else {
                ret = hns3_mp_init_primary();
                if (ret) {
@@ -297,7 +297,7 @@ void hns3_mp_uninit(struct rte_eth_dev *dev)
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-               __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_sub_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
 
        process_data.eth_dev_cnt--;
        if (process_data.eth_dev_cnt == 0) {
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 7e636a0..73a388b 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4464,7 +4464,7 @@
        struct hns3_adapter *hns = eth_dev->data->dev_private;
 
        if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
-           __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
+           rte_atomic_load_explicit(&hns->hw.reset.resetting, rte_memory_order_relaxed) == 0) {
                eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
                eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
                eth_dev->tx_pkt_burst = hw->set_link_down ?
@@ -4530,7 +4530,7 @@
 
        rte_spinlock_lock(&hw->lock);
 
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
                hns3_err(hw, "fail to start Rx queue during resetting.");
                rte_spinlock_unlock(&hw->lock);
                return -EIO;
@@ -4586,7 +4586,7 @@
 
        rte_spinlock_lock(&hw->lock);
 
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
                hns3_err(hw, "fail to stop Rx queue during resetting.");
                rte_spinlock_unlock(&hw->lock);
                return -EIO;
@@ -4615,7 +4615,7 @@
 
        rte_spinlock_lock(&hw->lock);
 
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
                hns3_err(hw, "fail to start Tx queue during resetting.");
                rte_spinlock_unlock(&hw->lock);
                return -EIO;
@@ -4648,7 +4648,7 @@
 
        rte_spinlock_lock(&hw->lock);
 
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
                hns3_err(hw, "fail to stop Tx queue during resetting.");
                rte_spinlock_unlock(&hw->lock);
                return -EIO;
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index d969164..92a6685 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1051,7 +1051,7 @@
        if (error == NULL)
                return -EINVAL;
 
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "device is resetting";
                /* don't goto fail_clear, user may try later */
@@ -1141,7 +1141,7 @@
        if (error == NULL)
                return -EINVAL;
 
-       if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+       if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "device is resetting";
                return -EBUSY;
-- 
1.8.3.1


Reply via email to