On 26/01/16 16:17, Ola Liljedahl wrote:


On 26 January 2016 at 14:32, Zoltan Kiss <[email protected]> wrote:

    Hi Ola,

    Would you mind taking a look at this?

    Zoli

    On 22/01/16 18:24, Zoltan Kiss wrote:

        As per-thread caches might retain some elements, no particular
        thread should assume that a certain number of elements is
        available at any time.
        Also, we can't reliably check the high watermark anymore.

        Signed-off-by: Zoltan Kiss <[email protected]>
        ---
           test/validation/timer/timer.c | 27 +++++++++++++++++----------
           1 file changed, 17 insertions(+), 10 deletions(-)

        diff --git a/test/validation/timer/timer.c b/test/validation/timer/timer.c
        index 5d89700..e9f2a90 100644
        --- a/test/validation/timer/timer.c
        +++ b/test/validation/timer/timer.c
        @@ -274,7 +274,7 @@ static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
           static void *worker_entrypoint(void *arg TEST_UNUSED)
           {
                 int thr = odp_thread_id();
        -       uint32_t i;
        +       uint32_t i, allocated;
                 unsigned seed = thr;
                 int rc;

        @@ -291,20 +291,28 @@ static void *worker_entrypoint(void *arg TEST_UNUSED)
                 /* Prepare all timers */
                 for (i = 0; i < NTIMERS; i++) {
                         tt[i].tim = odp_timer_alloc(tp, queue, &tt[i]);
        -               if (tt[i].tim == ODP_TIMER_INVALID)
        -                       CU_FAIL_FATAL("Failed to allocate timer");
        +               if (tt[i].tim == ODP_TIMER_INVALID) {
        +                       LOG_DBG("Failed to allocate timer (%d/%d)\n",
        +                               i, NTIMERS);
        +                       break;
        +               }
                         tt[i].ev = odp_timeout_to_event(odp_timeout_alloc(tbp));
        -               if (tt[i].ev == ODP_EVENT_INVALID)
        -                       CU_FAIL_FATAL("Failed to allocate timeout");
        +               if (tt[i].ev == ODP_EVENT_INVALID) {
        +                       LOG_DBG("Failed to allocate timeout (%d/%d)\n",
        +                               i, NTIMERS);
        +                       odp_timer_free(tt[i].tim);
        +                       break;
        +               }
                         tt[i].ev2 = tt[i].ev;
                         tt[i].tick = TICK_INVALID;
                 }
        +       allocated = i;

                 odp_barrier_wait(&test_barrier);

                 /* Initial set all timers with a random expiration time */
                 uint32_t nset = 0;
        -       for (i = 0; i < NTIMERS; i++) {
        +       for (i = 0; i < allocated; i++) {
                        uint64_t tck = odp_timer_current_tick(tp) + 1 +
                                       odp_timer_ns_to_tick(tp,
                                                            (rand_r(&seed) % RANGE_MS)
        @@ -336,7 +344,7 @@ static void *worker_entrypoint(void *arg TEST_UNUSED)
                                 nrcv++;
                         }
                         prev_tick = odp_timer_current_tick(tp);
        -               i = rand_r(&seed) % NTIMERS;
        +               i = rand_r(&seed) % allocated;
                         if (tt[i].ev == ODP_EVENT_INVALID &&
                             (rand_r(&seed) % 2 == 0)) {
                                 /* Timer active, cancel it */
        @@ -384,7 +392,7 @@ static void *worker_entrypoint(void *arg TEST_UNUSED)

                 /* Cancel and free all timers */
                 uint32_t nstale = 0;
        -       for (i = 0; i < NTIMERS; i++) {
        +       for (i = 0; i < allocated; i++) {
                         (void)odp_timer_cancel(tt[i].tim, &tt[i].ev);
                         tt[i].tick = TICK_INVALID;
                         if (tt[i].ev == ODP_EVENT_INVALID)
        @@ -428,7 +436,7 @@ static void *worker_entrypoint(void *arg TEST_UNUSED)

                 rc = odp_queue_destroy(queue);
                 CU_ASSERT(rc == 0);
        -       for (i = 0; i < NTIMERS; i++) {
        +       for (i = 0; i < allocated; i++) {
                         if (tt[i].ev != ODP_EVENT_INVALID)
                                 odp_event_free(tt[i].ev);
                 }
        @@ -520,7 +528,6 @@ void timer_test_odp_timer_all(void)
                         CU_FAIL("odp_timer_pool_info");
                 CU_ASSERT(tpinfo.param.num_timers == (unsigned)num_workers * NTIMERS);
                 CU_ASSERT(tpinfo.cur_timers == 0);
        -       CU_ASSERT(tpinfo.hwm_timers == (unsigned)num_workers * NTIMERS);

It is a bit sad that this check is removed. Now there will be no test
that 'hwm_timers' (high watermark) is correct.
Couldn't you accumulate all the allocated timers in the different
threads and test against that value?
Each thread could atomically add its 'allocated' variable to a shared
'ALLOCATED' variable which is used instead of "(unsigned)num_workers *
NTIMERS".

I've sent a new patch with that, although it wasn't as easy as it seemed: I had to change the allocation order so that no timer is released in the first phase, since releasing timers there would make the high watermark unpredictable. I've also changed the title.

Zoli



                 /* Destroy timer pool, all timers must have been freed */
                 odp_timer_pool_destroy(tp);


_______________________________________________
lng-odp mailing list
[email protected]
https://lists.linaro.org/mailman/listinfo/lng-odp
