+ global_mem = odp_shm_addr(global_shm);
+
+ per_thread_mem->global_mem = global_mem;
+
+ return per_thread_mem;
+}
+
+/* Free per-thread memory */
+static void thread_finalize(per_thread_mem_t *per_thread_mem)
+{
+ free(per_thread_mem);
+}
+
+/* Custom barrier used to validate ODP barrier */
+static void custom_barrier_init(custom_barrier_t *custom_barrier,
+ uint32_t num_threads)
+{
+ odp_atomic_store_u32(&custom_barrier->wait_cnt, num_threads);
+}
+
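+/*
+ * Decrement the shared arrival count, then busy-wait (burning cycles in a
+ * small dummy loop) until every participating thread has decremented it
+ * to zero.
+ */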
+static void custom_barrier_wait(custom_barrier_t *custom_barrier)
+{
+ volatile_u64_t counter = 1;
+ uint32_t delay_cnt, wait_cnt;
+
+ odp_atomic_sub_u32(&custom_barrier->wait_cnt, 1);
+
+ wait_cnt = 1;
+ while (wait_cnt != 0) {
+ for (delay_cnt = 1; delay_cnt <= 10; delay_cnt++)
+ counter++;
+
+ wait_cnt = odp_atomic_load_u32(&custom_barrier->wait_cnt);
+ }
+}
+
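+/*
+ * Core barrier validation loop. When no_barrier_test is non-zero the
+ * odp_barrier_wait() call is skipped, so the unsynchronized threads are
+ * expected to accumulate barrier_errs; with the barrier in place the
+ * returned error count should be zero.
+ */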
+static uint32_t barrier_test(per_thread_mem_t *per_thread_mem,
+ uint8_t no_barrier_test)
+{
+ global_shared_mem_t *global_mem;
+ uint32_t barrier_errs, iterations, cnt, i_am_slow_thread;
+ uint32_t thread_num, slow_thread_num, next_slow_thread, num_threads;
+ uint32_t lock_owner_delay, barrier_cnt1, barrier_cnt2;
+
+ thread_num = odp_thread_core() + 1;
+ global_mem = per_thread_mem->global_mem;
+ num_threads = global_mem->g_num_threads;
+ iterations = BARRIER_ITERATIONS;
+
+ barrier_errs = 0;
+ lock_owner_delay = SLOW_BARRIER_DELAY;
+
+ for (cnt = 1; cnt < iterations; cnt++) {
+ /* Wait here until all of the threads reach this point */
+ custom_barrier_wait(&global_mem->custom_barrier1);
+
+ barrier_cnt1 = global_mem->barrier_cnt1;
+ barrier_cnt2 = global_mem->barrier_cnt2;
+
+ if ((barrier_cnt1 != cnt) || (barrier_cnt2 != cnt)) {
+ printf("thread_num=%u got bad barrier_cnts of \
+ %u %u cnt=%u\n",
+ thread_num, barrier_cnt1, barrier_cnt2, cnt);
+ barrier_errs++;
+ }
+
+ /* Wait here until all of the threads reach this point */
+ custom_barrier_wait(&global_mem->custom_barrier2);
+
+ slow_thread_num = global_mem->slow_thread_num;
+ i_am_slow_thread = thread_num == slow_thread_num;
+ next_slow_thread = slow_thread_num + 1;
+ if (num_threads < next_slow_thread)
+ next_slow_thread = 1;
+
+ /*
+ * Now run the test, which involves having all but one thread
+ * immediately call odp_barrier_wait(), while one thread waits
+ * a moderate amount of time before calling odp_barrier_wait().
+ * The test fails if any thread in the first group did not wait
+ * for the "slow" thread. The "slow" thread is responsible for
+ * re-initializing the barrier for the next trial.
+ */
+ if (i_am_slow_thread) {
+ thread_delay(per_thread_mem, lock_owner_delay);
+ lock_owner_delay += BASE_DELAY;
+ if ((global_mem->barrier_cnt1 != cnt) ||
+ (global_mem->barrier_cnt2 != cnt) ||
+ (global_mem->slow_thread_num
+ != slow_thread_num))
+ barrier_errs++;
+ }
+
+ if (no_barrier_test == 0)
+ odp_barrier_wait(&global_mem->test_barriers[cnt]);
+
+ global_mem->barrier_cnt1 = cnt + 1;
+ odp_sync_stores();
+
+ if (i_am_slow_thread) {
+ custom_barrier_init(&global_mem->custom_barrier1,
+ num_threads);
+ custom_barrier_init(&global_mem->custom_barrier2,
+ num_threads);
+ global_mem->slow_thread_num = next_slow_thread;
+ global_mem->barrier_cnt2 = cnt + 1;
+ odp_sync_stores();
+ } else {
+ while (global_mem->barrier_cnt2 != (cnt + 1))
+ thread_delay(per_thread_mem, BASE_DELAY);
+ }
+ }
+
+ if ((global_mem->g_verbose) && (barrier_errs != 0))
+ printf("\nThread %u (id=%d core=%d) had %u barrier_errs in \
+ %u iterations\n", thread_num,
+ per_thread_mem->thread_id,
+ per_thread_mem->thread_core, barrier_errs, iterations);
+
+ return barrier_errs;
+}
+
+static void *no_barrier_functional_test(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+ uint32_t barrier_errs;
+
+ per_thread_mem = thread_init();
+ barrier_errs = barrier_test(per_thread_mem, 1);
+
+ /*
+ * Note that the following CU_ASSERT MAY appear incorrect, but for the
+ * no_barrier test it should see barrier_errs or else there is something
+ * wrong with the test methodology or the ODP thread implementation.
+ * So this test PASSES only if it sees barrier_errs!
+ */
+ CU_ASSERT(barrier_errs != 0);
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *barrier_functional_test(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+ uint32_t barrier_errs;
+
+ per_thread_mem = thread_init();
+ barrier_errs = barrier_test(per_thread_mem, 0);
+
+ CU_ASSERT(barrier_errs == 0);
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
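+/* Uncontended exercise of the spinlock API: lock, unlock and trylock
+ * transitions are verified through odp_spinlock_is_locked(). */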
+static void spinlock_api_test(odp_spinlock_t *spinlock)
+{
+ odp_spinlock_init(spinlock);
+ CU_ASSERT(odp_spinlock_is_locked(spinlock) == 0);
+
+ odp_spinlock_lock(spinlock);
+ CU_ASSERT(odp_spinlock_is_locked(spinlock) == 1);
+
+ odp_spinlock_unlock(spinlock);
+ CU_ASSERT(odp_spinlock_is_locked(spinlock) == 0);
+
+ CU_ASSERT(odp_spinlock_trylock(spinlock) == 1);
+
+ CU_ASSERT(odp_spinlock_is_locked(spinlock) == 1);
+
+ odp_spinlock_unlock(spinlock);
+ CU_ASSERT(odp_spinlock_is_locked(spinlock) == 0);
+}
+
+static void *spinlock_api_tests(void *arg UNUSED)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_spinlock_t local_spin_lock;
+
+ per_thread_mem = thread_init();
+ global_mem = per_thread_mem->global_mem;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ spinlock_api_test(&local_spin_lock);
+ spinlock_api_test(&per_thread_mem->per_thread_spinlock);
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
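+/* Uncontended exercise of the ticketlock API, including a trylock that
+ * must fail while the lock is already held. */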
+static void ticketlock_api_test(odp_ticketlock_t *ticketlock)
+{
+ odp_ticketlock_init(ticketlock);
+ CU_ASSERT(odp_ticketlock_is_locked(ticketlock) == 0);
+
+ odp_ticketlock_lock(ticketlock);
+ CU_ASSERT(odp_ticketlock_is_locked(ticketlock) == 1);
+
+ odp_ticketlock_unlock(ticketlock);
+ CU_ASSERT(odp_ticketlock_is_locked(ticketlock) == 0);
+
+ CU_ASSERT(odp_ticketlock_trylock(ticketlock) == 1);
+ CU_ASSERT(odp_ticketlock_trylock(ticketlock) == 0);
+ CU_ASSERT(odp_ticketlock_is_locked(ticketlock) == 1);
+
+ odp_ticketlock_unlock(ticketlock);
+ CU_ASSERT(odp_ticketlock_is_locked(ticketlock) == 0);
+}
+
+static void *ticketlock_api_tests(void *arg UNUSED)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_ticketlock_t local_ticket_lock;
+
+ per_thread_mem = thread_init();
+ global_mem = per_thread_mem->global_mem;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ ticketlock_api_test(&local_ticket_lock);
+ ticketlock_api_test(&per_thread_mem->per_thread_ticketlock);
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
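+/* Exercise the rwlock read and write lock/unlock paths; the lock-state
+ * checks are left commented out. */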
+static void rwlock_api_test(odp_rwlock_t *rw_lock)
+{
+ odp_rwlock_init(rw_lock);
+ /* CU_ASSERT(odp_rwlock_is_locked(rw_lock) == 0); */
+
+ odp_rwlock_read_lock(rw_lock);
+ odp_rwlock_read_unlock(rw_lock);
+
+ odp_rwlock_write_lock(rw_lock);
+ /* CU_ASSERT(odp_rwlock_is_locked(rw_lock) == 1); */
+
+ odp_rwlock_write_unlock(rw_lock);
+ /* CU_ASSERT(odp_rwlock_is_locked(rw_lock) == 0); */
+}
+
+static void *rwlock_api_tests(void *arg UNUSED)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_rwlock_t local_rwlock;
+
+ per_thread_mem = thread_init();
+ global_mem = per_thread_mem->global_mem;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ rwlock_api_test(&local_rwlock);
+ rwlock_api_test(&per_thread_mem->per_thread_rwlock);
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
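+/*
+ * Deliberately race on global_lock_owner with no lock held at all; the
+ * resulting sync_failures demonstrate that the methodology can detect
+ * missing mutual exclusion (see the CU_ASSERT note below).
+ */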
+static void *no_lock_functional_test(void *arg UNUSED)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ uint32_t thread_num, resync_cnt, resync_idx, iterations, cnt;
+ uint32_t sync_failures, current_errs, lock_owner_delay;
+
+ thread_num = odp_thread_core() + 1;
+ per_thread_mem = thread_init();
+ global_mem = per_thread_mem->global_mem;
+ iterations = global_mem->g_iterations;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ sync_failures = 0;
+ current_errs = 0;
+ resync_idx = 0;
+ resync_cnt = iterations / NUM_RESYNC_BARRIERS;
+ lock_owner_delay = BASE_DELAY;
+
+ for (cnt = 1; cnt <= iterations; cnt++) {
+ global_mem->global_lock_owner = thread_num;
+ odp_sync_stores();
+ thread_delay(per_thread_mem, lock_owner_delay);
+
+ if (global_mem->global_lock_owner != thread_num) {
+ current_errs++;
+ sync_failures++;
+ }
+
+ global_mem->global_lock_owner = 0;
+ odp_sync_stores();
+ thread_delay(per_thread_mem, MIN_DELAY);
+
+ if (global_mem->global_lock_owner == thread_num) {
+ current_errs++;
+ sync_failures++;
+ }
+
+ if (current_errs == 0)
+ lock_owner_delay++;
+
+ /* Wait a small amount of time and rerun the test */
+ thread_delay(per_thread_mem, BASE_DELAY);
+
+ /* Try to resync all of the threads to increase contention */
+ if ((resync_idx < NUM_RESYNC_BARRIERS) &&
+ ((cnt % resync_cnt) == (resync_cnt - 1)))
+ odp_barrier_wait(&global_mem->barrier_array[resync_idx++]);
+ }
+
+ if (global_mem->g_verbose)
+ printf("\nThread %u (id=%d core=%d) had %u sync_failures in \
+ %u iterations\n", thread_num,
+ per_thread_mem->thread_id,
+ per_thread_mem->thread_core,
+ sync_failures, iterations);
+
+ /* Note that the following CU_ASSERT MAY appear incorrect, but for the
+ * no_lock test it should see sync_failures or else there is something
+ * wrong with the test methodology or the ODP thread implementation.
+ * So this test PASSES only if it sees sync_failures
+ */
+ CU_ASSERT(sync_failures != 0);
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
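+/* Same global_lock_owner hand-off as the no_lock test, but protected by
+ * the global spinlock; sync_failures or is_locked_errs indicate a broken
+ * lock implementation. */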
+static void *spinlock_functional_test(void *arg UNUSED)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ uint32_t thread_num, resync_cnt, resync_idx, iterations, cnt;
+ uint32_t sync_failures, is_locked_errs, current_errs;
+ uint32_t lock_owner_delay;
+
+ thread_num = odp_thread_core() + 1;
+ per_thread_mem = thread_init();
+ global_mem = per_thread_mem->global_mem;
+ iterations = global_mem->g_iterations;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ sync_failures = 0;
+ is_locked_errs = 0;
+ current_errs = 0;
+ resync_idx = 0;
+ resync_cnt = iterations / NUM_RESYNC_BARRIERS;
+ lock_owner_delay = BASE_DELAY;
+
+ for (cnt = 1; cnt <= iterations; cnt++) {
+ /* Acquire the shared global lock */
+ odp_spinlock_lock(&global_mem->global_spinlock);
+
+ /* Make sure we have the lock AND didn't previously own it */
+ if (odp_spinlock_is_locked(&global_mem->global_spinlock) != 1)
+ is_locked_errs++;
+
+ if (global_mem->global_lock_owner != 0) {
+ current_errs++;
+ sync_failures++;
+ }
+
+ /* Now set the global_lock_owner to be us, wait a while, and
+ * then we see if anyone else has snuck in and changed the
+ * global_lock_owner to be themselves
+ */
+ global_mem->global_lock_owner = thread_num;
+ odp_sync_stores();
+ thread_delay(per_thread_mem, lock_owner_delay);
+ if (global_mem->global_lock_owner != thread_num) {
+ current_errs++;
+ sync_failures++;
+ }
+
+ /* Release shared lock, and make sure we no longer have it */
+ global_mem->global_lock_owner = 0;
+ odp_sync_stores();
+ odp_spinlock_unlock(&global_mem->global_spinlock);
+ if (global_mem->global_lock_owner == thread_num) {
+ current_errs++;
+ sync_failures++;
+ }
+
+ if (current_errs == 0)
+ lock_owner_delay++;
+
+ /* Wait a small amount of time and rerun the test */
+ thread_delay(per_thread_mem, BASE_DELAY);
+
+ /* Try to resync all of the threads to increase contention */
+ if ((resync_idx < NUM_RESYNC_BARRIERS) &&
+ ((cnt % resync_cnt) == (resync_cnt - 1)))
+ odp_barrier_wait(&global_mem->barrier_array[resync_idx++]);
+ }
+
+ if ((global_mem->g_verbose) &&
+ ((sync_failures != 0) || (is_locked_errs != 0)))
+ printf("\nThread %u (id=%d core=%d) had %u sync_failures and \
+ %u is_locked_errs in %u iterations\n", thread_num,
+ per_thread_mem->thread_id, per_thread_mem->thread_core,
+ sync_failures, is_locked_errs, iterations);
+
+ CU_ASSERT(sync_failures == 0);
+ CU_ASSERT(is_locked_errs == 0);
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
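+/* Ticketlock version of the ownership test: the critical section is
+ * guarded by the global ticketlock and must show no sync_failures. */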
+static void *ticketlock_functional_test(void *arg UNUSED)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ uint32_t thread_num, resync_cnt, resync_idx, iterations, cnt;
+ uint32_t sync_failures, is_locked_errs, current_errs;
+ uint32_t lock_owner_delay;
+
+ thread_num = odp_thread_core() + 1;
+ per_thread_mem = thread_init();
+ global_mem = per_thread_mem->global_mem;
+ iterations = global_mem->g_iterations;
+
+ /* Wait here until all of the threads have also reached this point */
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ sync_failures = 0;
+ is_locked_errs = 0;
+ current_errs = 0;
+ resync_idx = 0;
+ resync_cnt = iterations / NUM_RESYNC_BARRIERS;
+ lock_owner_delay = BASE_DELAY;
+
+ for (cnt = 1; cnt <= iterations; cnt++) {
+ /* Acquire the shared global lock */
+ odp_ticketlock_lock(&global_mem->global_ticketlock);
+
+ /* Make sure we have the lock AND didn't previously own it */
+ if (odp_ticketlock_is_locked(&global_mem->global_ticketlock)
+ != 1)
+ is_locked_errs++;
+
+ if (global_mem->global_lock_owner != 0) {
+ current_errs++;
+ sync_failures++;
+ }
+
+ /* Now set the global_lock_owner to be us, wait a while, and
+ * then we see if anyone else has snuck in and changed the
+ * global_lock_owner to be themselves
+ */
+ global_mem->global_lock_owner = thread_num;
+ odp_sync_stores();
+ thread_delay(per_thread_mem, lock_owner_delay);
+ if (global_mem->global_lock_owner != thread_num) {
+ current_errs++;
+ sync_failures++;
+ }
+
+ /* Release shared lock, and make sure we no longer have it */
+ global_mem->global_lock_owner = 0;
+ odp_sync_stores();
+ odp_ticketlock_unlock(&global_mem->global_ticketlock);
+ if (global_mem->global_lock_owner == thread_num) {
+ current_errs++;
+ sync_failures++;
+ }
+
+ if (current_errs == 0)
+ lock_owner_delay++;
+
+ /* Wait a small amount of time and then rerun the test */
+ thread_delay(per_thread_mem, BASE_DELAY);
+
+ /* Try to resync all of the threads to increase contention */
+ if ((resync_idx < NUM_RESYNC_BARRIERS) &&
+ ((cnt % resync_cnt) == (resync_cnt - 1)))
+ odp_barrier_wait(&global_mem->barrier_array[resync_idx++]);
+ }
+
+ if ((global_mem->g_verbose) &&
+ ((sync_failures != 0) || (is_locked_errs != 0)))
+ printf("\nThread %u (id=%d core=%d) had %u sync_failures and \
+ %u is_locked_errs in %u iterations\n", thread_num,
+ per_thread_mem->thread_id, per_thread_mem->thread_core,
+ sync_failures, is_locked_errs, iterations);
+
+ CU_ASSERT(sync_failures == 0);
+ CU_ASSERT(is_locked_errs == 0);
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
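+/* Rwlock version of the ownership test: the critical section is guarded
+ * by the global rwlock taken in write mode. */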
+static void *rwlock_functional_test(void *arg UNUSED)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ uint32_t thread_num, resync_cnt, resync_idx, iterations, cnt;
+ uint32_t sync_failures, current_errs, lock_owner_delay;
+
+ thread_num = odp_thread_core() + 1;
+ per_thread_mem = thread_init();
+ global_mem = per_thread_mem->global_mem;
+ iterations = global_mem->g_iterations;
+
+ /* Wait here until all of the threads have also reached this point */
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ sync_failures = 0;
+ current_errs = 0;
+ resync_idx = 0;
+ resync_cnt = iterations / NUM_RESYNC_BARRIERS;
+ lock_owner_delay = BASE_DELAY;
+
+ for (cnt = 1; cnt <= iterations; cnt++) {
+ /* Acquire the shared global lock */
+ odp_rwlock_write_lock(&global_mem->global_rwlock);
+
+ /* Make sure we have lock now AND didn't previously own it */
+ if (global_mem->global_lock_owner != 0) {
+ current_errs++;
+ sync_failures++;
+ }
+
+ /* Now set the global_lock_owner to be us, wait a while, and
+ * then we see if anyone else has snuck in and changed the
+ * global_lock_owner to be themselves
+ */
+ global_mem->global_lock_owner = thread_num;
+ odp_sync_stores();
+ thread_delay(per_thread_mem, lock_owner_delay);
+ if (global_mem->global_lock_owner != thread_num) {
+ current_errs++;
+ sync_failures++;
+ }
+
+ /* Release shared lock, and make sure we no longer have it */
+ global_mem->global_lock_owner = 0;
+ odp_sync_stores();
+ odp_rwlock_write_unlock(&global_mem->global_rwlock);
+ if (global_mem->global_lock_owner == thread_num) {
+ current_errs++;
+ sync_failures++;
+ }
+
+ if (current_errs == 0)
+ lock_owner_delay++;
+
+ /* Wait a small amount of time and then rerun the test */
+ thread_delay(per_thread_mem, BASE_DELAY);
+
+ /* Try to resync all of the threads to increase contention */
+ if ((resync_idx < NUM_RESYNC_BARRIERS) &&
+ ((cnt % resync_cnt) == (resync_cnt - 1)))
+ odp_barrier_wait(&global_mem->barrier_array[resync_idx++]);
+ }
+
+ if ((global_mem->g_verbose) && (sync_failures != 0))
+ printf("\nThread %u (id=%d core=%d) had %u sync_failures in \
+ %u iterations\n", thread_num,
+ per_thread_mem->thread_id,
+ per_thread_mem->thread_core,
+ sync_failures, iterations);
+
+ CU_ASSERT(sync_failures == 0);
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
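+/* Reset the ODP test barriers, both custom barriers and the shared
+ * counters before each barrier test run. */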
+static void barrier_test_init(void)
+{
+ uint32_t num_threads, idx;
+
+ num_threads = global_mem->g_num_threads;
+
+ for (idx = 0; idx < NUM_TEST_BARRIERS; idx++)
+ odp_barrier_init(&global_mem->test_barriers[idx], num_threads);
+
+ custom_barrier_init(&global_mem->custom_barrier1, num_threads);
+ custom_barrier_init(&global_mem->custom_barrier2, num_threads);
+
+ global_mem->slow_thread_num = 1;
+ global_mem->barrier_cnt1 = 1;
+ global_mem->barrier_cnt2 = 1;
+}
+
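+/* Atomic work loops: each helper applies its operation CNT times to the
+ * shared 32-bit (a32u) or 64-bit (a64u) atomic counter. */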
+static void test_atomic_inc_u32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_inc_u32(&a32u);
+}
+
+static void test_atomic_inc_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_inc_u64(&a64u);
+}
+
+static void test_atomic_dec_u32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_dec_u32(&a32u);
+}
+
+static void test_atomic_dec_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_dec_u64(&a64u);
+}
+
+static void test_atomic_fetch_inc_u32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_inc_u32(&a32u);
+}
+
+static void test_atomic_fetch_inc_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_inc_u64(&a64u);
+}
+
+static void test_atomic_fetch_dec_u32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_dec_u32(&a32u);
+}
+
+static void test_atomic_fetch_dec_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_dec_u64(&a64u);
+}
+
+static void test_atomic_add_u32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_add_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_add_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_add_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_sub_u32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_sub_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_sub_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_sub_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_add_u32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_add_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_add_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_add_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_sub_u32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_sub_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_sub_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_sub_u64(&a64u, ADD_SUB_CNT);
+}
+
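+/* Paired helpers: every increment/add is matched by a decrement/subtract,
+ * so the counters must end up back at their stored initial values. */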
+static void test_atomic_inc_dec_u32(void)
+{
+ test_atomic_inc_u32();
+ test_atomic_dec_u32();
+}
+
+static void test_atomic_inc_dec_64(void)
+{
+ test_atomic_inc_64();
+ test_atomic_dec_64();
+}
+
+static void test_atomic_fetch_inc_dec_u32(void)
+{
+ test_atomic_fetch_inc_u32();
+ test_atomic_fetch_dec_u32();
+}
+
+static void test_atomic_fetch_inc_dec_64(void)
+{
+ test_atomic_fetch_inc_64();
+ test_atomic_fetch_dec_64();
+}
+
+static void test_atomic_add_sub_u32(void)
+{
+ test_atomic_add_u32();
+ test_atomic_sub_u32();
+}
+
+static void test_atomic_add_sub_64(void)
+{
+ test_atomic_add_64();
+ test_atomic_sub_64();
+}
+
+static void test_atomic_fetch_add_sub_u32(void)
+{
+ test_atomic_fetch_add_u32();
+ test_atomic_fetch_sub_u32();
+}
+
+static void test_atomic_fetch_add_sub_64(void)
+{
+ test_atomic_fetch_add_64();
+ test_atomic_fetch_sub_64();
+}
+
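+/* Initialize the shared atomics, seed them with known values and verify
+ * that loads return those values. */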
+static void test_atomic_init(void)
+{
+ odp_atomic_init_u32(&a32u, 0);
+ odp_atomic_init_u64(&a64u, 0);
+}
+
+static void test_atomic_store(void)
+{
+ odp_atomic_store_u32(&a32u, U32_INIT_VAL);
+ odp_atomic_store_u64(&a64u, U64_INIT_VAL);
+}
+
+static void test_atomic_validate(void)
+{
+ CU_ASSERT(U32_INIT_VAL == odp_atomic_load_u32(&a32u));
+ CU_ASSERT(U64_INIT_VAL == odp_atomic_load_u64(&a64u));
+}
+
+/* Barrier tests */
+static void test_no_barrier_functional(void)
+{
+ pthrd_arg arg;
+ arg.numthrds = global_mem->g_num_threads;
+
+ barrier_test_init();
+ odp_cunit_thread_create(no_barrier_functional_test, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+static void test_barrier_functionality(void)
+{
+ pthrd_arg arg;
+ arg.numthrds = global_mem->g_num_threads;
+
+ barrier_test_init();
+ odp_cunit_thread_create(barrier_functional_test, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+static CU_TestInfo test_odp_barrier[] = {
+ {"no_barrier_functional", test_no_barrier_functional},
+ {"odp_barrier_functionality", test_barrier_functionality},
+ CU_TEST_INFO_NULL
+};
+
+/* Thread-unsafe tests */
+static void test_no_lock_functionality(void)
+{
+ pthrd_arg arg;
+ arg.numthrds = global_mem->g_num_threads;
+
+ odp_cunit_thread_create(no_lock_functional_test, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+static CU_TestInfo test_no_locking[] = {
+ {"test_no_lock_functionality", test_no_lock_functionality},
+ CU_TEST_INFO_NULL
+};
+
+/* Spin lock tests */
+static void test_spinlock_api(void)
+{
+ pthrd_arg arg;
+ arg.numthrds = global_mem->g_num_threads;
+
+ odp_cunit_thread_create(spinlock_api_tests, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+static void test_spinlock_functionality(void)
+{
+ pthrd_arg arg;
+ arg.numthrds = global_mem->g_num_threads;
+
+ odp_spinlock_init(&global_mem->global_spinlock);
+ odp_cunit_thread_create(spinlock_functional_test, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+static CU_TestInfo test_odp_spinlock[] = {
+ {"odp_spinlock_api", test_spinlock_api},
+ {"odp_spinlock_functionality", test_spinlock_functionality},
+ CU_TEST_INFO_NULL
+};
+
+/* Ticket lock tests */
+static void test_ticketlock_api(void)
+{
+ pthrd_arg arg;
+ arg.numthrds = global_mem->g_num_threads;
+
+ odp_cunit_thread_create(ticketlock_api_tests, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+static void test_ticketlock_functionality(void)
+{
+ pthrd_arg arg;
+ arg.numthrds = global_mem->g_num_threads;
+ odp_ticketlock_init(&global_mem->global_ticketlock);
+
+ odp_cunit_thread_create(ticketlock_functional_test, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+static CU_TestInfo test_odp_ticketlock[] = {
+ {"odp_ticketlock_api", test_ticketlock_api},
+ {"odp_ticketlock_functionality", test_ticketlock_functionality},
+ CU_TEST_INFO_NULL
+};
+
+/* RW lock tests */
+static void test_rwlock_api(void)
+{
+ pthrd_arg arg;
+ arg.numthrds = global_mem->g_num_threads;
+
+ odp_cunit_thread_create(rwlock_api_tests, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+static void test_rwlock_functionality(void)
+{
+ pthrd_arg arg;
+ arg.numthrds = global_mem->g_num_threads;
+
+ odp_rwlock_init(&global_mem->global_rwlock);
+ odp_cunit_thread_create(rwlock_functional_test, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+static CU_TestInfo test_odp_rwlock[] = {
+ {"odp_rwlock_api", test_rwlock_api},
+ {"odp_rwlock_functionality", test_rwlock_functionality},
+ CU_TEST_INFO_NULL
+};
+
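+/* Suite init: set up the global barrier and the resync barrier array for
+ * the configured number of worker threads. */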
+static int init_locks(void)
+{
+ uint32_t num_threads, idx;
+
+ num_threads = global_mem->g_num_threads;
+ odp_barrier_init(&global_mem->global_barrier, num_threads);
+ for (idx = 0; idx < NUM_RESYNC_BARRIERS; idx++)
+ odp_barrier_init(&global_mem->barrier_array[idx], num_threads);
+
+ return 0;
+}
+
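+/* Reserve and zero the shared memory block, then pick the worker thread
+ * count, capped by the number of available cores. */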
+int tests_global_init(void)
+{
+ uint32_t core_count, max_threads;
+ int ret = 0;
+
+ global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+ sizeof(global_shared_mem_t), 64,
+ ODP_SHM_SW_ONLY | ODP_SHM_PROC);
+ global_mem = odp_shm_addr(global_shm);
+ if (global_mem == NULL) {
+ fprintf(stderr, "Unable to reserve global shared memory\n");
+ return -1;
+ }
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+ global_mem->g_num_threads = MAX_WORKERS / 2;
+ global_mem->g_iterations = MAX_ITERATIONS;
+ global_mem->g_verbose = VERBOSE;
+
+ core_count = odp_sys_core_count();
+
+ max_threads = (core_count >= MAX_WORKERS) ? MAX_WORKERS : core_count;
+
+ if (max_threads < global_mem->g_num_threads) {
+ printf("Requested num of threads is too large\n");
+ printf("reducing from %u to %u\n", global_mem->g_num_threads,
+ max_threads);
+ global_mem->g_num_threads = max_threads;
+ }
+
+ printf("Num of threads used = %u\n", global_mem->g_num_threads);
+
+ return ret;
+}
+
+/* Atomic tests */
+static void test_atomic_basic(void)
+{
+ test_atomic_init();
+ test_atomic_store();
+ test_atomic_validate();
+}
+
+static void test_atomic_inc_dec(void)
+{
+ test_atomic_store();
+ test_atomic_inc_dec_u32();
+ test_atomic_inc_dec_64();
+ test_atomic_validate();
+}
+
+static void test_atomic_add_sub(void)
+{
+ test_atomic_store();
+ test_atomic_add_sub_u32();
+ test_atomic_add_sub_64();
+ test_atomic_validate();
+}
+
+static void test_atomic_fetch_inc_dec(void)
+{
+ test_atomic_store();
+ test_atomic_fetch_inc_dec_u32();
+ test_atomic_fetch_inc_dec_64();
+ test_atomic_validate();
+}
+
+static void test_atomic_fetch_add_sub(void)
+{
+ test_atomic_store();
+ test_atomic_fetch_add_sub_u32();
+ test_atomic_fetch_add_sub_64();
+ test_atomic_validate();
+}
+
+CU_TestInfo test_odp_atomic[] = {
+ {"test_odp_atomic_basic", test_atomic_basic},
+ {"test_odp_atomic_inc_dec", test_atomic_inc_dec},
+ {"test_odp_atomic_add_sub", test_atomic_add_sub},
+ {"test_odp_atomic_fetch_inc_dec", test_atomic_fetch_inc_dec},
+ {"test_odp_atomic_fetch_add_sub", test_atomic_fetch_add_sub},
+ CU_TEST_INFO_NULL,
+};
+
+CU_SuiteInfo odp_testsuites[] = {
+ {"odp_barrier", NULL, NULL, NULL, NULL, test_odp_barrier},
+ {"odp_nolocking", init_locks, NULL, NULL, NULL, test_no_locking},
+ {"odp_spinlock", init_locks, NULL, NULL, NULL, test_odp_spinlock},
+ {"odp_ticketlock", init_locks, NULL, NULL, NULL, test_odp_ticketlock},
+ {"odp_rwlock", init_locks, NULL, NULL, NULL, test_odp_rwlock},
+ {"odp_atomic", NULL, NULL, NULL, NULL, test_odp_atomic},
+ CU_SUITE_INFO_NULL
+};