On 23 March 2015 at 20:39, Bill Fischofer <[email protected]> wrote:

> Not sure why this is needed, but:
>
> Reviewed-by: Bill Fischofer <[email protected]>
>


Bill, is this the same as saying:

Whatevered-by: Bill Fischofer <[email protected]>

Cheers,
Anders
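
For reference, the rename in the quoted patch below addresses ordinary C
shadowing: odp_synchronizers.c keeps a file-scope static variable named
global_mem, and several functions also declare a local named global_mem,
so an unqualified use of the name refers to whichever one is in scope. A
minimal, self-contained sketch of the pattern (the struct bodies below are
simplified stand-ins; only the names come from the patch):

    #include <stdio.h>

    /* Simplified stand-ins; the real global_shared_mem_t and
     * per_thread_mem_t in odp_synchronizers.c carry much more state. */
    typedef struct { int g_num_threads; } global_shared_mem_t;
    typedef struct { global_shared_mem_t *global_mem; } per_thread_mem_t;

    /* File-scope static, modelled here as a pointer. */
    static global_shared_mem_t *global_mem;

    static int local_thread_count(per_thread_mem_t *per_thread_mem)
    {
            /* Pre-patch style: this local shadows the file-scope variable,
             * so every use of global_mem below refers to the local. The
             * patch renames such locals to "mem" to keep the two apart. */
            global_shared_mem_t *global_mem = per_thread_mem->global_mem;

            return global_mem->g_num_threads;
    }

    int main(void)
    {
            global_shared_mem_t shared = { .g_num_threads = 4 };
            per_thread_mem_t per_thread = { .global_mem = &shared };

            global_mem = &shared;   /* sets the file-scope pointer */
            printf("%d\n", local_thread_count(&per_thread));
            return 0;
    }

The sketch builds and runs on its own and prints 4; it only illustrates the
shadowing that the commit message describes, it is not code from the patch.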


>
> On Mon, Mar 23, 2015 at 8:04 AM, Maxim Uvarov <[email protected]> wrote:
>
>> global_mem is a global static variable; using a variable with the
>> same name inside a function is confusing. Rename the local variables
>> from global_mem to mem.
>>
>> Signed-off-by: Maxim Uvarov <[email protected]>
>> ---
>>  test/validation/odp_synchronizers.c | 158 ++++++++++++++++++------------------
>>  1 file changed, 79 insertions(+), 79 deletions(-)
>>
>> diff --git a/test/validation/odp_synchronizers.c b/test/validation/odp_synchronizers.c
>> index ab9164f..b8f4e6a 100644
>> --- a/test/validation/odp_synchronizers.c
>> +++ b/test/validation/odp_synchronizers.c
>> @@ -107,7 +107,7 @@ static void thread_delay(per_thread_mem_t *per_thread_mem, uint32_t iterations)
>>  /* Initialise per-thread memory */
>>  static per_thread_mem_t *thread_init(void)
>>  {
>> -       global_shared_mem_t *global_mem;
>> +       global_shared_mem_t *mem;
>>         per_thread_mem_t *per_thread_mem;
>>         odp_shm_t global_shm;
>>         uint32_t per_thread_mem_len;
>> @@ -122,10 +122,10 @@ static per_thread_mem_t *thread_init(void)
>>         per_thread_mem->thread_core = odp_cpu_id();
>>
>>         global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
>> -       global_mem = odp_shm_addr(global_shm);
>> +       mem = odp_shm_addr(global_shm);
>>         CU_ASSERT(global_mem != NULL);
>>
>> -       per_thread_mem->global_mem = global_mem;
>> +       per_thread_mem->global_mem = mem;
>>
>>         return per_thread_mem;
>>  }
>> @@ -160,13 +160,13 @@ static void custom_barrier_wait(custom_barrier_t *custom_barrier)
>>  static uint32_t barrier_test(per_thread_mem_t *per_thread_mem,
>>                              odp_bool_t no_barrier_test)
>>  {
>> -       global_shared_mem_t *global_mem;
>> +       global_shared_mem_t *mem;
>>         uint32_t barrier_errs, iterations, cnt, i_am_slow_thread;
>>         uint32_t thread_num, slow_thread_num, next_slow_thread, num_threads;
>>         uint32_t lock_owner_delay, barrier_cnt1, barrier_cnt2;
>>
>>         thread_num = odp_cpu_id() + 1;
>> -       global_mem = per_thread_mem->global_mem;
>> +       mem = per_thread_mem->global_mem;
>>         num_threads = global_mem->g_num_threads;
>>         iterations = BARRIER_ITERATIONS;
>>
>> @@ -175,10 +175,10 @@ static uint32_t barrier_test(per_thread_mem_t *per_thread_mem,
>>
>>         for (cnt = 1; cnt < iterations; cnt++) {
>>                 /* Wait here until all of the threads reach this point */
>> -               custom_barrier_wait(&global_mem->custom_barrier1[cnt]);
>> +               custom_barrier_wait(&mem->custom_barrier1[cnt]);
>>
>> -               barrier_cnt1 = global_mem->barrier_cnt1;
>> -               barrier_cnt2 = global_mem->barrier_cnt2;
>> +               barrier_cnt1 = mem->barrier_cnt1;
>> +               barrier_cnt2 = mem->barrier_cnt2;
>>
>>                 if ((barrier_cnt1 != cnt) || (barrier_cnt2 != cnt)) {
>>                         printf("thread_num=%u barrier_cnts of %u %u cnt=%u\n",
>> @@ -187,9 +187,9 @@ static uint32_t barrier_test(per_thread_mem_t *per_thread_mem,
>>                 }
>>
>>                 /* Wait here until all of the threads reach this point */
>> -               custom_barrier_wait(&global_mem->custom_barrier2[cnt]);
>> +               custom_barrier_wait(&mem->custom_barrier2[cnt]);
>>
>> -               slow_thread_num = global_mem->slow_thread_num;
>> +               slow_thread_num = mem->slow_thread_num;
>>                 i_am_slow_thread = thread_num == slow_thread_num;
>>                 next_slow_thread = slow_thread_num + 1;
>>                 if (num_threads < next_slow_thread)
>> @@ -206,30 +206,30 @@ static uint32_t barrier_test(per_thread_mem_t *per_thread_mem,
>>                 if (i_am_slow_thread) {
>>                         thread_delay(per_thread_mem, lock_owner_delay);
>>                         lock_owner_delay += BASE_DELAY;
>> -                       if ((global_mem->barrier_cnt1 != cnt) ||
>> -                           (global_mem->barrier_cnt2 != cnt) ||
>> -                           (global_mem->slow_thread_num
>> +                       if ((mem->barrier_cnt1 != cnt) ||
>> +                           (mem->barrier_cnt2 != cnt) ||
>> +                           (mem->slow_thread_num
>>                                         != slow_thread_num))
>>                                 barrier_errs++;
>>                 }
>>
>>                 if (no_barrier_test == 0)
>> -                       odp_barrier_wait(&global_mem->test_barriers[cnt]);
>> +                       odp_barrier_wait(&mem->test_barriers[cnt]);
>>
>> -               global_mem->barrier_cnt1 = cnt + 1;
>> +               mem->barrier_cnt1 = cnt + 1;
>>                 odp_sync_stores();
>>
>>                 if (i_am_slow_thread) {
>> -                       global_mem->slow_thread_num = next_slow_thread;
>> -                       global_mem->barrier_cnt2 = cnt + 1;
>> +                       mem->slow_thread_num = next_slow_thread;
>> +                       mem->barrier_cnt2 = cnt + 1;
>>                         odp_sync_stores();
>>                 } else {
>> -                       while (global_mem->barrier_cnt2 != (cnt + 1))
>> +                       while (mem->barrier_cnt2 != (cnt + 1))
>>                                 thread_delay(per_thread_mem, BASE_DELAY);
>>                 }
>>         }
>>
>> -       if ((global_mem->g_verbose) && (barrier_errs != 0))
>> +       if ((mem->g_verbose) && (barrier_errs != 0))
>>                 printf("\nThread %u (id=%d core=%d) had %u barrier_errs"
>>                        " in %u iterations\n", thread_num,
>>                         per_thread_mem->thread_id,
>> @@ -293,14 +293,14 @@ static void spinlock_api_test(odp_spinlock_t *spinlock)
>>
>>  static void *spinlock_api_tests(void *arg UNUSED)
>>  {
>> -       global_shared_mem_t *global_mem;
>> +       global_shared_mem_t *mem;
>>         per_thread_mem_t *per_thread_mem;
>>         odp_spinlock_t local_spin_lock;
>>
>>         per_thread_mem = thread_init();
>> -       global_mem = per_thread_mem->global_mem;
>> +       mem = per_thread_mem->global_mem;
>>
>> -       odp_barrier_wait(&global_mem->global_barrier);
>> +       odp_barrier_wait(&mem->global_barrier);
>>
>>         spinlock_api_test(&local_spin_lock);
>>         spinlock_api_test(&per_thread_mem->per_thread_spinlock);
>> @@ -331,14 +331,14 @@ static void ticketlock_api_test(odp_ticketlock_t *ticketlock)
>>
>>  static void *ticketlock_api_tests(void *arg UNUSED)
>>  {
>> -       global_shared_mem_t *global_mem;
>> +       global_shared_mem_t *mem;
>>         per_thread_mem_t *per_thread_mem;
>>         odp_ticketlock_t local_ticket_lock;
>>
>>         per_thread_mem = thread_init();
>> -       global_mem = per_thread_mem->global_mem;
>> +       mem = per_thread_mem->global_mem;
>>
>> -       odp_barrier_wait(&global_mem->global_barrier);
>> +       odp_barrier_wait(&mem->global_barrier);
>>
>>         ticketlock_api_test(&local_ticket_lock);
>>         ticketlock_api_test(&per_thread_mem->per_thread_ticketlock);
>> @@ -365,14 +365,14 @@ static void rwlock_api_test(odp_rwlock_t *rw_lock)
>>
>>  static void *rwlock_api_tests(void *arg UNUSED)
>>  {
>> -       global_shared_mem_t *global_mem;
>> +       global_shared_mem_t *mem;
>>         per_thread_mem_t *per_thread_mem;
>>         odp_rwlock_t local_rwlock;
>>
>>         per_thread_mem = thread_init();
>> -       global_mem = per_thread_mem->global_mem;
>> +       mem = per_thread_mem->global_mem;
>>
>> -       odp_barrier_wait(&global_mem->global_barrier);
>> +       odp_barrier_wait(&mem->global_barrier);
>>
>>         rwlock_api_test(&local_rwlock);
>>         rwlock_api_test(&per_thread_mem->per_thread_rwlock);
>> @@ -384,17 +384,17 @@ static void *rwlock_api_tests(void *arg UNUSED)
>>
>>  static void *no_lock_functional_test(void *arg UNUSED)
>>  {
>> -       global_shared_mem_t *global_mem;
>> +       global_shared_mem_t *mem;
>>         per_thread_mem_t *per_thread_mem;
>>         uint32_t thread_num, resync_cnt, rs_idx, iterations, cnt;
>>         uint32_t sync_failures, current_errs, lock_owner_delay;
>>
>>         thread_num = odp_cpu_id() + 1;
>>         per_thread_mem = thread_init();
>> -       global_mem = per_thread_mem->global_mem;
>> -       iterations = global_mem->g_iterations;
>> +       mem = per_thread_mem->global_mem;
>> +       iterations = mem->g_iterations;
>>
>> -       odp_barrier_wait(&global_mem->global_barrier);
>> +       odp_barrier_wait(&mem->global_barrier);
>>
>>         sync_failures = 0;
>>         current_errs = 0;
>> @@ -403,20 +403,20 @@ static void *no_lock_functional_test(void *arg UNUSED)
>>         lock_owner_delay = BASE_DELAY;
>>
>>         for (cnt = 1; cnt <= iterations; cnt++) {
>> -               global_mem->global_lock_owner = thread_num;
>> +               mem->global_lock_owner = thread_num;
>>                 odp_sync_stores();
>>                 thread_delay(per_thread_mem, lock_owner_delay);
>>
>> -               if (global_mem->global_lock_owner != thread_num) {
>> +               if (mem->global_lock_owner != thread_num) {
>>                         current_errs++;
>>                         sync_failures++;
>>                 }
>>
>> -               global_mem->global_lock_owner = 0;
>> +               mem->global_lock_owner = 0;
>>                 odp_sync_stores();
>>                 thread_delay(per_thread_mem, MIN_DELAY);
>>
>> -               if (global_mem->global_lock_owner == thread_num) {
>> +               if (mem->global_lock_owner == thread_num) {
>>                         current_errs++;
>>                         sync_failures++;
>>                 }
>> @@ -430,10 +430,10 @@ static void *no_lock_functional_test(void *arg UNUSED)
>>                 /* Try to resync all of the threads to increase contention */
>>                 if ((rs_idx < NUM_RESYNC_BARRIERS) &&
>>                     ((cnt % resync_cnt) == (resync_cnt - 1)))
>> -                       odp_barrier_wait(&global_mem->barrier_array[rs_idx++]);
>> +                       odp_barrier_wait(&mem->barrier_array[rs_idx++]);
>>         }
>>
>> -       if (global_mem->g_verbose)
>> +       if (mem->g_verbose)
>>                 printf("\nThread %u (id=%d core=%d) had %u sync_failures"
>>                        " in %u iterations\n", thread_num,
>>                        per_thread_mem->thread_id,
>> @@ -454,7 +454,7 @@ static void *no_lock_functional_test(void *arg UNUSED)
>>
>>  static void *spinlock_functional_test(void *arg UNUSED)
>>  {
>> -       global_shared_mem_t *global_mem;
>> +       global_shared_mem_t *mem;
>>         per_thread_mem_t *per_thread_mem;
>>         uint32_t thread_num, resync_cnt, rs_idx, iterations, cnt;
>>         uint32_t sync_failures, is_locked_errs, current_errs;
>> @@ -462,10 +462,10 @@ static void *spinlock_functional_test(void *arg UNUSED)
>>
>>         thread_num = odp_cpu_id() + 1;
>>         per_thread_mem = thread_init();
>> -       global_mem = per_thread_mem->global_mem;
>> -       iterations = global_mem->g_iterations;
>> +       mem = per_thread_mem->global_mem;
>> +       iterations = mem->g_iterations;
>>
>> -       odp_barrier_wait(&global_mem->global_barrier);
>> +       odp_barrier_wait(&mem->global_barrier);
>>
>>         sync_failures = 0;
>>         is_locked_errs = 0;
>> @@ -476,13 +476,13 @@ static void *spinlock_functional_test(void *arg UNUSED)
>>
>>         for (cnt = 1; cnt <= iterations; cnt++) {
>>                 /* Acquire the shared global lock */
>> -               odp_spinlock_lock(&global_mem->global_spinlock);
>> +               odp_spinlock_lock(&mem->global_spinlock);
>>
>>                 /* Make sure we have the lock AND didn't previously own it */
>> -               if (odp_spinlock_is_locked(&global_mem->global_spinlock) != 1)
>> +               if (odp_spinlock_is_locked(&mem->global_spinlock) != 1)
>>                         is_locked_errs++;
>>
>> -               if (global_mem->global_lock_owner != 0) {
>> +               if (mem->global_lock_owner != 0) {
>>                         current_errs++;
>>                         sync_failures++;
>>                 }
>> @@ -491,19 +491,19 @@ static void *spinlock_functional_test(void *arg UNUSED)
>>                 * then we see if anyone else has snuck in and changed the
>>                 * global_lock_owner to be themselves
>>                 */
>> -               global_mem->global_lock_owner = thread_num;
>> +               mem->global_lock_owner = thread_num;
>>                 odp_sync_stores();
>>                 thread_delay(per_thread_mem, lock_owner_delay);
>> -               if (global_mem->global_lock_owner != thread_num) {
>> +               if (mem->global_lock_owner != thread_num) {
>>                         current_errs++;
>>                         sync_failures++;
>>                 }
>>
>>                 /* Release shared lock, and make sure we no longer have it */
>> -               global_mem->global_lock_owner = 0;
>> +               mem->global_lock_owner = 0;
>>                 odp_sync_stores();
>> -               odp_spinlock_unlock(&global_mem->global_spinlock);
>> -               if (global_mem->global_lock_owner == thread_num) {
>> +               odp_spinlock_unlock(&mem->global_spinlock);
>> +               if (mem->global_lock_owner == thread_num) {
>>                         current_errs++;
>>                         sync_failures++;
>>                 }
>> @@ -517,10 +517,10 @@ static void *spinlock_functional_test(void *arg UNUSED)
>>                 /* Try to resync all of the threads to increase contention */
>>                 if ((rs_idx < NUM_RESYNC_BARRIERS) &&
>>                     ((cnt % resync_cnt) == (resync_cnt - 1)))
>> -                       odp_barrier_wait(&global_mem->barrier_array[rs_idx++]);
>> +                       odp_barrier_wait(&mem->barrier_array[rs_idx++]);
>>         }
>>
>> -       if ((global_mem->g_verbose) &&
>> +       if ((mem->g_verbose) &&
>>             ((sync_failures != 0) || (is_locked_errs != 0)))
>>                 printf("\nThread %u (id=%d core=%d) had %u sync_failures"
>>                        " and %u is_locked_errs in %u iterations\n", thread_num,
>> @@ -537,7 +537,7 @@ static void *spinlock_functional_test(void *arg UNUSED)
>>
>>  static void *ticketlock_functional_test(void *arg UNUSED)
>>  {
>> -       global_shared_mem_t *global_mem;
>> +       global_shared_mem_t *mem;
>>         per_thread_mem_t *per_thread_mem;
>>         uint32_t thread_num, resync_cnt, rs_idx, iterations, cnt;
>>         uint32_t sync_failures, is_locked_errs, current_errs;
>> @@ -545,11 +545,11 @@ static void *ticketlock_functional_test(void *arg UNUSED)
>>
>>         thread_num = odp_cpu_id() + 1;
>>         per_thread_mem = thread_init();
>> -       global_mem = per_thread_mem->global_mem;
>> -       iterations = global_mem->g_iterations;
>> +       mem = per_thread_mem->global_mem;
>> +       iterations = mem->g_iterations;
>>
>>         /* Wait here until all of the threads have also reached this point */
>> -       odp_barrier_wait(&global_mem->global_barrier);
>> +       odp_barrier_wait(&mem->global_barrier);
>>
>>         sync_failures = 0;
>>         is_locked_errs = 0;
>> @@ -560,14 +560,14 @@ static void *ticketlock_functional_test(void *arg UNUSED)
>>
>>         for (cnt = 1; cnt <= iterations; cnt++) {
>>                 /* Acquire the shared global lock */
>> -               odp_ticketlock_lock(&global_mem->global_ticketlock);
>> +               odp_ticketlock_lock(&mem->global_ticketlock);
>>
>>                 /* Make sure we have the lock AND didn't previously own it */
>> -               if (odp_ticketlock_is_locked(&global_mem->global_ticketlock)
>> +               if (odp_ticketlock_is_locked(&mem->global_ticketlock)
>>                                 != 1)
>>                         is_locked_errs++;
>>
>> -               if (global_mem->global_lock_owner != 0) {
>> +               if (mem->global_lock_owner != 0) {
>>                         current_errs++;
>>                         sync_failures++;
>>                 }
>> @@ -576,19 +576,19 @@ static void *ticketlock_functional_test(void *arg UNUSED)
>>                 * then we see if anyone else has snuck in and changed the
>>                 * global_lock_owner to be themselves
>>                 */
>> -               global_mem->global_lock_owner = thread_num;
>> +               mem->global_lock_owner = thread_num;
>>                 odp_sync_stores();
>>                 thread_delay(per_thread_mem, lock_owner_delay);
>> -               if (global_mem->global_lock_owner != thread_num) {
>> +               if (mem->global_lock_owner != thread_num) {
>>                         current_errs++;
>>                         sync_failures++;
>>                 }
>>
>>                 /* Release shared lock, and make sure we no longer have it */
>> -               global_mem->global_lock_owner = 0;
>> +               mem->global_lock_owner = 0;
>>                 odp_sync_stores();
>> -               odp_ticketlock_unlock(&global_mem->global_ticketlock);
>> -               if (global_mem->global_lock_owner == thread_num) {
>> +               odp_ticketlock_unlock(&mem->global_ticketlock);
>> +               if (mem->global_lock_owner == thread_num) {
>>                         current_errs++;
>>                         sync_failures++;
>>                 }
>> @@ -602,10 +602,10 @@ static void *ticketlock_functional_test(void *arg UNUSED)
>>                 /* Try to resync all of the threads to increase contention */
>>                 if ((rs_idx < NUM_RESYNC_BARRIERS) &&
>>                     ((cnt % resync_cnt) == (resync_cnt - 1)))
>> -                       odp_barrier_wait(&global_mem->barrier_array[rs_idx++]);
>> +                       odp_barrier_wait(&mem->barrier_array[rs_idx++]);
>>         }
>>
>> -       if ((global_mem->g_verbose) &&
>> +       if ((mem->g_verbose) &&
>>             ((sync_failures != 0) || (is_locked_errs != 0)))
>>                 printf("\nThread %u (id=%d core=%d) had %u sync_failures"
>>                        " and %u is_locked_errs in %u iterations\n", thread_num,
>> @@ -622,18 +622,18 @@ static void *ticketlock_functional_test(void *arg UNUSED)
>>
>>  static void *rwlock_functional_test(void *arg UNUSED)
>>  {
>> -       global_shared_mem_t *global_mem;
>> +       global_shared_mem_t *mem;
>>         per_thread_mem_t *per_thread_mem;
>>         uint32_t thread_num, resync_cnt, rs_idx, iterations, cnt;
>>         uint32_t sync_failures, current_errs, lock_owner_delay;
>>
>>         thread_num = odp_cpu_id() + 1;
>>         per_thread_mem = thread_init();
>> -       global_mem = per_thread_mem->global_mem;
>> -       iterations = global_mem->g_iterations;
>> +       mem = per_thread_mem->global_mem;
>> +       iterations = mem->g_iterations;
>>
>>         /* Wait here until all of the threads have also reached this point */
>> -       odp_barrier_wait(&global_mem->global_barrier);
>> +       odp_barrier_wait(&mem->global_barrier);
>>
>>         sync_failures = 0;
>>         current_errs = 0;
>> @@ -643,10 +643,10 @@ static void *rwlock_functional_test(void *arg UNUSED)
>>
>>         for (cnt = 1; cnt <= iterations; cnt++) {
>>                 /* Acquire the shared global lock */
>> -               odp_rwlock_write_lock(&global_mem->global_rwlock);
>> +               odp_rwlock_write_lock(&mem->global_rwlock);
>>
>>                 /* Make sure we have lock now AND didn't previously own it */
>> -               if (global_mem->global_lock_owner != 0) {
>> +               if (mem->global_lock_owner != 0) {
>>                         current_errs++;
>>                         sync_failures++;
>>                 }
>> @@ -655,19 +655,19 @@ static void *rwlock_functional_test(void *arg UNUSED)
>>                 * then we see if anyone else has snuck in and changed the
>>                 * global_lock_owner to be themselves
>>                 */
>> -               global_mem->global_lock_owner = thread_num;
>> +               mem->global_lock_owner = thread_num;
>>                 odp_sync_stores();
>>                 thread_delay(per_thread_mem, lock_owner_delay);
>> -               if (global_mem->global_lock_owner != thread_num) {
>> +               if (mem->global_lock_owner != thread_num) {
>>                         current_errs++;
>>                         sync_failures++;
>>                 }
>>
>>                 /* Release shared lock, and make sure we no longer have it */
>> -               global_mem->global_lock_owner = 0;
>> +               mem->global_lock_owner = 0;
>>                 odp_sync_stores();
>> -               odp_rwlock_write_unlock(&global_mem->global_rwlock);
>> -               if (global_mem->global_lock_owner == thread_num) {
>> +               odp_rwlock_write_unlock(&mem->global_rwlock);
>> +               if (mem->global_lock_owner == thread_num) {
>>                         current_errs++;
>>                         sync_failures++;
>>                 }
>> @@ -681,10 +681,10 @@ static void *rwlock_functional_test(void *arg UNUSED)
>>                 /* Try to resync all of the threads to increase contention */
>>                 if ((rs_idx < NUM_RESYNC_BARRIERS) &&
>>                     ((cnt % resync_cnt) == (resync_cnt - 1)))
>> -                       odp_barrier_wait(&global_mem->barrier_array[rs_idx++]);
>> +                       odp_barrier_wait(&mem->barrier_array[rs_idx++]);
>>         }
>>
>> -       if ((global_mem->g_verbose) && (sync_failures != 0))
>> +       if ((mem->g_verbose) && (sync_failures != 0))
>>                 printf("\nThread %u (id=%d core=%d) had %u sync_failures"
>>                        " in %u iterations\n", thread_num,
>>                        per_thread_mem->thread_id,
>> --
>> 1.9.1
>>
>>
>
>
>
_______________________________________________
lng-odp mailing list
[email protected]
http://lists.linaro.org/mailman/listinfo/lng-odp
