Removed odp_sync_stores() implementation and replaced usage with
odp_mb_full(), to minimize functional changes. Added minimal
validation test function for the new memory barriers.

Signed-off-by: Petri Savolainen <[email protected]>
---
 platform/linux-generic/include/odp/sync.h     |  5 ---
 platform/linux-generic/odp_queue.c            |  4 +--
 test/performance/odp_l2fwd.c                  |  8 ++---
 test/validation/synchronizers/synchronizers.c | 51 +++++++++++++++++++--------
 test/validation/synchronizers/synchronizers.h |  1 +
 5 files changed, 44 insertions(+), 25 deletions(-)

diff --git a/platform/linux-generic/include/odp/sync.h 
b/platform/linux-generic/include/odp/sync.h
index bfe67ee..b2995e1 100644
--- a/platform/linux-generic/include/odp/sync.h
+++ b/platform/linux-generic/include/odp/sync.h
@@ -36,11 +36,6 @@ static inline void odp_mb_full(void)
        __atomic_thread_fence(__ATOMIC_SEQ_CST);
 }
 
-static inline void odp_sync_stores(void)
-{
-       __atomic_thread_fence(__ATOMIC_RELEASE);
-}
-
 /**
  * @}
  */
diff --git a/platform/linux-generic/odp_queue.c 
b/platform/linux-generic/odp_queue.c
index 8b3671d..a2fd4ec 100644
--- a/platform/linux-generic/odp_queue.c
+++ b/platform/linux-generic/odp_queue.c
@@ -357,9 +357,9 @@ int odp_queue_context_set(odp_queue_t handle, void *context)
 {
        queue_entry_t *queue;
        queue = queue_to_qentry(handle);
-       odp_sync_stores();
+       odp_mb_full();
        queue->s.param.context = context;
-       odp_sync_stores();
+       odp_mb_full();
        return 0;
 }
 
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index 67a23ed..c2dfeec 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -237,8 +237,8 @@ static void *pktio_queue_thread(void *arg)
                stats->s.packets += pkts;
        }
 
-       /* Make sure that the last stats write is visible to readers */
-       odp_sync_stores();
+       /* Make sure that latest stat writes are visible to other threads */
+       odp_mb_full();
 
        return NULL;
 }
@@ -365,8 +365,8 @@ static void *pktio_direct_recv_thread(void *arg)
 
        }
 
-       /* Make sure that the last stats write is visible to readers */
-       odp_sync_stores();
+       /* Make sure that latest stat writes are visible to other threads */
+       odp_mb_full();
 
        return NULL;
 }
diff --git a/test/validation/synchronizers/synchronizers.c 
b/test/validation/synchronizers/synchronizers.c
index cebe0d2..0302069 100644
--- a/test/validation/synchronizers/synchronizers.c
+++ b/test/validation/synchronizers/synchronizers.c
@@ -36,6 +36,8 @@
 static odp_atomic_u32_t a32u;
 static odp_atomic_u64_t a64u;
 
+static volatile int temp_result;
+
 typedef __volatile uint32_t volatile_u32_t;
 typedef __volatile uint64_t volatile_u64_t;
 
@@ -224,12 +226,12 @@ static uint32_t barrier_test(per_thread_mem_t 
*per_thread_mem,
                        odp_barrier_wait(&global_mem->test_barriers[cnt]);
 
                global_mem->barrier_cnt1 = cnt + 1;
-               odp_sync_stores();
+               odp_mb_full();
 
                if (i_am_slow_thread) {
                        global_mem->slow_thread_num = next_slow_thread;
                        global_mem->barrier_cnt2 = cnt + 1;
-                       odp_sync_stores();
+                       odp_mb_full();
                } else {
                        while (global_mem->barrier_cnt2 != (cnt + 1))
                                thread_delay(per_thread_mem, BASE_DELAY);
@@ -501,7 +503,7 @@ static void *no_lock_functional_test(void *arg UNUSED)
 
        for (cnt = 1; cnt <= iterations; cnt++) {
                global_mem->global_lock_owner = thread_num;
-               odp_sync_stores();
+               odp_mb_full();
                thread_delay(per_thread_mem, lock_owner_delay);
 
                if (global_mem->global_lock_owner != thread_num) {
@@ -510,7 +512,7 @@ static void *no_lock_functional_test(void *arg UNUSED)
                }
 
                global_mem->global_lock_owner = 0;
-               odp_sync_stores();
+               odp_mb_full();
                thread_delay(per_thread_mem, MIN_DELAY);
 
                if (global_mem->global_lock_owner == thread_num) {
@@ -591,7 +593,7 @@ static void *spinlock_functional_test(void *arg UNUSED)
                * global_lock_owner to be themselves
                */
                global_mem->global_lock_owner = thread_num;
-               odp_sync_stores();
+               odp_mb_full();
                thread_delay(per_thread_mem, lock_owner_delay);
                if (global_mem->global_lock_owner != thread_num) {
                        current_errs++;
@@ -600,7 +602,7 @@ static void *spinlock_functional_test(void *arg UNUSED)
 
                /* Release shared lock, and make sure we no longer have it */
                global_mem->global_lock_owner = 0;
-               odp_sync_stores();
+               odp_mb_full();
                odp_spinlock_unlock(&global_mem->global_spinlock);
                if (global_mem->global_lock_owner == thread_num) {
                        current_errs++;
@@ -679,7 +681,7 @@ static void *spinlock_recursive_functional_test(void *arg 
UNUSED)
                * global_lock_owner to be themselves
                */
                global_mem->global_lock_owner = thread_num;
-               odp_sync_stores();
+               odp_mb_full();
                thread_delay(per_thread_mem, lock_owner_delay);
                if (global_mem->global_lock_owner != thread_num) {
                        current_errs++;
@@ -705,7 +707,7 @@ static void *spinlock_recursive_functional_test(void *arg 
UNUSED)
 
                /* Release shared lock, and make sure we no longer have it */
                global_mem->global_lock_owner = 0;
-               odp_sync_stores();
+               odp_mb_full();
                odp_spinlock_recursive_unlock(
                        &global_mem->global_recursive_spinlock);
                if (global_mem->global_lock_owner == thread_num) {
@@ -787,7 +789,7 @@ static void *ticketlock_functional_test(void *arg UNUSED)
                * global_lock_owner to be themselves
                */
                global_mem->global_lock_owner = thread_num;
-               odp_sync_stores();
+               odp_mb_full();
                thread_delay(per_thread_mem, lock_owner_delay);
                if (global_mem->global_lock_owner != thread_num) {
                        current_errs++;
@@ -796,7 +798,7 @@ static void *ticketlock_functional_test(void *arg UNUSED)
 
                /* Release shared lock, and make sure we no longer have it */
                global_mem->global_lock_owner = 0;
-               odp_sync_stores();
+               odp_mb_full();
                odp_ticketlock_unlock(&global_mem->global_ticketlock);
                if (global_mem->global_lock_owner == thread_num) {
                        current_errs++;
@@ -881,7 +883,7 @@ static void *rwlock_functional_test(void *arg UNUSED)
                * global_lock_owner to be themselves
                */
                global_mem->global_lock_owner = thread_num;
-               odp_sync_stores();
+               odp_mb_full();
                thread_delay(per_thread_mem, lock_owner_delay);
                if (global_mem->global_lock_owner != thread_num) {
                        current_errs++;
@@ -890,7 +892,7 @@ static void *rwlock_functional_test(void *arg UNUSED)
 
                /* Release shared lock, and make sure we no longer have it */
                global_mem->global_lock_owner = 0;
-               odp_sync_stores();
+               odp_mb_full();
                odp_rwlock_write_unlock(&global_mem->global_rwlock);
                if (global_mem->global_lock_owner == thread_num) {
                        current_errs++;
@@ -989,7 +991,7 @@ static void *rwlock_recursive_functional_test(void *arg 
UNUSED)
                * global_lock_owner to be themselves
                */
                global_mem->global_lock_owner = thread_num;
-               odp_sync_stores();
+               odp_mb_full();
                thread_delay(per_thread_mem, lock_owner_delay);
                if (global_mem->global_lock_owner != thread_num) {
                        current_errs++;
@@ -1016,7 +1018,7 @@ static void *rwlock_recursive_functional_test(void *arg 
UNUSED)
 
                /* Release shared lock, and make sure we no longer have it */
                global_mem->global_lock_owner = 0;
-               odp_sync_stores();
+               odp_mb_full();
                odp_rwlock_recursive_write_unlock(
                        &global_mem->global_recursive_rwlock);
                if (global_mem->global_lock_owner == thread_num) {
@@ -1267,6 +1269,26 @@ static void test_atomic_validate(void)
 }
 
 /* Barrier tests */
+void synchronizers_test_memory_barrier(void)
+{
+       volatile int a = 0;
+       volatile int b = 0;
+       volatile int c = 0;
+       volatile int d = 0;
+
+       /* Call all memory barriers to verify that they are implemented */
+       a = 1;
+       odp_mb_release();
+       b = 1;
+       odp_mb_acquire();
+       c = 1;
+       odp_mb_full();
+       d = 1;
+
+       /* Avoid "variable set but not used" warning */
+       temp_result = a + b + c + d;
+}
+
 void synchronizers_test_no_barrier_functional(void)
 {
        pthrd_arg arg;
@@ -1288,6 +1310,7 @@ void synchronizers_test_barrier_functional(void)
 }
 
 odp_testinfo_t synchronizers_suite_barrier[] = {
+       ODP_TEST_INFO(synchronizers_test_memory_barrier),
        ODP_TEST_INFO(synchronizers_test_no_barrier_functional),
        ODP_TEST_INFO(synchronizers_test_barrier_functional),
        ODP_TEST_INFO_NULL
diff --git a/test/validation/synchronizers/synchronizers.h 
b/test/validation/synchronizers/synchronizers.h
index 9725996..ad8db0b 100644
--- a/test/validation/synchronizers/synchronizers.h
+++ b/test/validation/synchronizers/synchronizers.h
@@ -10,6 +10,7 @@
 #include <odp_cunit_common.h>
 
 /* test functions: */
+void synchronizers_test_memory_barrier(void);
 void synchronizers_test_no_barrier_functional(void);
 void synchronizers_test_barrier_functional(void);
 void synchronizers_test_no_lock_functional(void);
-- 
2.6.3

_______________________________________________
lng-odp mailing list
[email protected]
https://lists.linaro.org/mailman/listinfo/lng-odp

Reply via email to