Perhaps it suffices to say, as done here: https://www.kernel.org/doc/Documentation/SubmittingPatches
that one should "Solve only one problem per patch." I think it is difficult
to argue what the benefits would be of breaking this (perhaps trivial)
rule ;-)

Best regards,
Ola D

On Wed Jan 14 2015 at 6:37:38 PM Bill Fischofer <[email protected]> wrote:

> I think the question of how patches are modularized depends on what the
> purpose of the patch is. We can distinguish between a "Type 1" patch,
> which represents a change that is intended to be accepted or rejected as a
> whole, and a "Type 2" patch, which is designed to present a set of
> selectable features that others may cherry-pick from in building their own
> distribution.
>
> The tension we seem to have is that virtually all ODP patches at this
> stage in its history are Type 1 patches, whereas virtually all of the
> patches associated with things like the Linux kernel are Type 2. While it
> is not incorrect to use a Type 2 structure for a Type 1 purpose, the
> benefits of this level of modularity are diminished. This is because the
> purpose of modularity in a Type 1 patch is simply to facilitate its timely
> review and acceptance. Indeed, overly modular patches may inhibit this
> purpose because a proper review must confirm the implied orthogonality of
> the modules, even if they are never intended to be separated.
>
> So it would seem that Type 1 patches should be sufficiently modular to
> allow for easy review, but we need not be overly concerned with
> orthogonality, as the "parts" are there for review convenience and are not
> there to imply that the author intends them to be separable.
>
> On Wed, Jan 14, 2015 at 10:33 AM, Ola Liljedahl <[email protected]> wrote:
>
>> On 14 January 2015 at 00:27, Anders Roxell <[email protected]> wrote:
>> > On 13 January 2015 at 20:15, Mike Holmes <[email protected]> wrote:
>> >>
>> >> On 12 January 2015 at 16:20, Ola Liljedahl <[email protected]> wrote:
>> >>>
>> >>> Don't report too late timeouts using CU_FAIL as this interferes with
>> >>> the cunit test framework. Just count and report (using LOG_DBG) the
>> >>> number of late timeouts.
>> >>> Use CU_ASSERT_FATAL instead of plain assert so as to work better with
>> >>> the cunit test framework.
>> >>> Don't dereference a pointer after a successful check for NULL, as this
>> >>> makes Coverity complain.
>> >>> Use LOG_DBG instead of printf. Remove some unnecessary printouts.
>> >>> Use nanosleep instead of the deprecated usleep.
>> >>>
>> >>> Signed-off-by: Ola Liljedahl <[email protected]>
>> >>
>> >> Reviewed-and-tested-by: Mike Holmes <[email protected]>
>> >
>> > May I suggest splitting this as a patch set?
>> > one patch for ODP_DBG, one for nanosleep, CU_ASSERT_FAIL to name a few.
>> Much work for little gain.
>>
>> > Cheers,
>> > Anders
>> >
>> >>
>> >>>
>> >>> ---
>> >>> (This document/code contribution attached is provided under the terms of
>> >>> agreement LES-LTM-21309)
>> >>>
>> >>>  test/validation/odp_timer.c | 102 +++++++++++++++++++++++++-------------------
>> >>>  1 file changed, 59 insertions(+), 43 deletions(-)
>> >>>
>> >>> diff --git a/test/validation/odp_timer.c b/test/validation/odp_timer.c
>> >>> index 4c93f71..d893646 100644
>> >>> --- a/test/validation/odp_timer.c
>> >>> +++ b/test/validation/odp_timer.c
>> >>> @@ -8,10 +8,13 @@
>> >>>   * @file
>> >>>   */
>> >>>
>> >>> -#include <assert.h>
>> >>> +/* For rand_r and nanosleep */
>> >>> +#define _POSIX_C_SOURCE 200112L
>> >>> +#include <time.h>
>> >>>  #include <unistd.h>
>> >>>  #include <odp.h>
>> >>>  #include "odp_cunit_common.h"
>> >>> +#include "test_debug.h"
>> >>>
>> >>>  /** @private Timeout range in milliseconds (ms) */
>> >>>  #define RANGE_MS 2000
>> >>> @@ -28,6 +31,9 @@ static odp_buffer_pool_t tbp;
>> >>>  /** @private Timer pool handle used by all threads */
>> >>>  static odp_timer_pool_t tp;
>> >>>
>> >>> +/** @private Count of timeouts delivered too late */
>> >>> +static odp_atomic_u32_t ndelivtoolate;
>> >>> +
>> >>>  /** @private min() function */
>> >>>  static int min(int a, int b)
>> >>>  {
>> >>> @@ -47,8 +53,7 @@ struct test_timer {
>> >>>  /* @private Handle a received (timeout) buffer */
>> >>>  static void handle_tmo(odp_buffer_t buf, bool stale, uint64_t prev_tick)
>> >>>  {
>> >>> -        /* Use assert() for internal correctness checks of test program */
>> >>> -        assert(buf != ODP_BUFFER_INVALID);
>> >>> +        CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID); /* Internal error */
>> >>>          if (odp_buffer_type(buf) != ODP_BUFFER_TYPE_TIMEOUT) {
>> >>>                  /* Not a timeout buffer */
>> >>>                  CU_FAIL("Unexpected buffer type received");
>> >>> @@ -65,38 +70,41 @@ static void handle_tmo(odp_buffer_t buf, bool stale, uint64_t prev_tick)
>> >>>          if (ttp == NULL)
>> >>>                  CU_FAIL("odp_timeout_user_ptr() null user ptr");
>> >>>
>> >>> -        if (ttp->buf2 != buf)
>> >>> +        if (ttp != NULL && ttp->buf2 != buf)
>> >>>                  CU_FAIL("odp_timeout_user_ptr() wrong user ptr");
>> >>> -        if (ttp->tim != tim)
>> >>> +        if (ttp != NULL && ttp->tim != tim)
>> >>>                  CU_FAIL("odp_timeout_timer() wrong timer");
>> >>>          if (stale) {
>> >>>                  if (odp_timeout_fresh(tmo))
>> >>>                          CU_FAIL("Wrong status (fresh) for stale timeout");
>> >>>                  /* Stale timeout => local timer must have invalid tick */
>> >>> -                if (ttp->tick != TICK_INVALID)
>> >>> +                if (ttp != NULL && ttp->tick != TICK_INVALID)
>> >>>                          CU_FAIL("Stale timeout for active timer");
>> >>>          } else {
>> >>>                  if (!odp_timeout_fresh(tmo))
>> >>>                          CU_FAIL("Wrong status (stale) for fresh timeout");
>> >>>                  /* Fresh timeout => local timer must have matching tick */
>> >>> -                if (ttp->tick != tick) {
>> >>> -                        printf("Wrong tick: expected %"PRIu64" actual %"PRIu64"\n",
>> >>> -                               ttp->tick, tick);
>> >>> +                if (ttp != NULL && ttp->tick != tick) {
>> >>> +                        LOG_DBG("Wrong tick: expected %"PRIu64" actual %"PRIu64"\n",
>> >>> +                                ttp->tick, tick);
>> >>>                          CU_FAIL("odp_timeout_tick() wrong tick");
>> >>>                  }
>> >>>                  /* Check that timeout was delivered 'timely' */
>> >>>                  if (tick > odp_timer_current_tick(tp))
>> >>>                          CU_FAIL("Timeout delivered early");
>> >>>                  if (tick < prev_tick) {
>> >>> -                        printf("Too late tick: %"PRIu64" prev_tick %"PRIu64"\n",
>> >>> -                               tick, prev_tick);
>> >>> -                        CU_FAIL("Timeout delivered late");
>> >>> +                        LOG_DBG("Too late tick: %"PRIu64" prev_tick %"PRIu64"\n",
>> >>> +                                tick, prev_tick);
>> >>> +                        /* We don't report late timeouts using CU_FAIL */
>> >>> +                        odp_atomic_inc_u32(&ndelivtoolate);
>> >>>                  }
>> >>>          }
>> >>>
>> >>> -        /* Use assert() for correctness check of test program itself */
>> >>> -        assert(ttp->buf == ODP_BUFFER_INVALID);
>> >>> -        ttp->buf = buf;
>> >>> +        if (ttp != NULL) {
>> >>> +                /* Internal error */
>> >>> +                CU_ASSERT_FATAL(ttp->buf == ODP_BUFFER_INVALID);
>> >>> +                ttp->buf = buf;
>> >>> +        }
>> >>>  }
>> >>>
>> >>>  /* @private Worker thread entrypoint which performs timer alloc/set/cancel/free
>> >>> @@ -203,8 +211,11 @@ static void *worker_entrypoint(void *arg)
>> >>>                          /* Save expected expiration tick */
>> >>>                          tt[i].tick = cur_tick + tck;
>> >>>                  }
>> >>> -                if (usleep(1000/*1ms*/) < 0)
>> >>> -                        perror("usleep"), abort();
>> >>> +                struct timespec ts;
>> >>> +                ts.tv_sec = 0;
>> >>> +                ts.tv_nsec = 1000000; /* 1ms */
>> >>> +                if (nanosleep(&ts, NULL) < 0)
>> >>> +                        perror("nanosleep"), abort();
>> >>>          }
>> >>>
>> >>>          /* Cancel and free all timers */
>> >>> @@ -220,18 +231,22 @@ static void *worker_entrypoint(void *arg)
>> >>>                          CU_FAIL("odp_timer_free");
>> >>>          }
>> >>>
>> >>> -        printf("Thread %u: %u timers set\n", thr, nset);
>> >>> -        printf("Thread %u: %u timers reset\n", thr, nreset);
>> >>> -        printf("Thread %u: %u timers cancelled\n", thr, ncancel);
>> >>> -        printf("Thread %u: %u timers reset/cancelled too late\n",
>> >>> -               thr, ntoolate);
>> >>> -        printf("Thread %u: %u timeouts received\n", thr, nrcv);
>> >>> -        printf("Thread %u: %u stale timeout(s) after odp_timer_free()\n",
>> >>> -               thr, nstale);
>> >>> +        LOG_DBG("Thread %u: %u timers set\n", thr, nset);
>> >>> +        LOG_DBG("Thread %u: %u timers reset\n", thr, nreset);
>> >>> +        LOG_DBG("Thread %u: %u timers cancelled\n", thr, ncancel);
>> >>> +        LOG_DBG("Thread %u: %u timers reset/cancelled too late\n",
>> >>> +                thr, ntoolate);
>> >>> +        LOG_DBG("Thread %u: %u timeouts received\n", thr, nrcv);
>> >>> +        LOG_DBG("Thread %u: %u stale timeout(s) after odp_timer_free()\n",
>> >>> +                thr, nstale);
>> >>>
>> >>>          /* Delay some more to ensure timeouts for expired timers can be
>> >>>           * received */
>> >>> -        usleep(1000/*1ms*/);
>> >>> +        struct timespec ts;
>> >>> +        ts.tv_sec = 0;
>> >>> +        ts.tv_nsec = 1000000; /* 1ms */
>> >>> +        if (nanosleep(&ts, NULL) < 0)
>> >>> +                perror("nanosleep"), abort();
>> >>>          while (nstale != 0) {
>> >>>                  odp_buffer_t buf = odp_queue_deq(queue);
>> >>>                  if (buf != ODP_BUFFER_INVALID) {
>> >>> @@ -247,7 +262,7 @@ static void *worker_entrypoint(void *arg)
>> >>>          if (buf != ODP_BUFFER_INVALID)
>> >>>                  CU_FAIL("Unexpected buffer received");
>> >>>
>> >>> -        printf("Thread %u: exiting\n", thr);
>> >>> +        LOG_DBG("Thread %u: exiting\n", thr);
>> >>>          return NULL;
>> >>>  }
>> >>>
>> >>> @@ -256,9 +271,13 @@ static void test_odp_timer_all(void)
>> >>>  {
>> >>>          odp_buffer_pool_param_t params;
>> >>>          odp_timer_pool_param_t tparam;
>> >>> -        /* This is a stressfull test - need to reserve some cpu cycles
>> >>> -         * @TODO move to test/performance */
>> >>> -        int num_workers = min(odp_sys_cpu_count()-1, MAX_WORKERS);
>> >>> +        /* Reserve at least one core for running other processes so the timer
>> >>> +         * test hopefully can run undisturbed and thus get better timing
>> >>> +         * results. */
>> >>> +        int num_workers = min(odp_sys_cpu_count() - 1, MAX_WORKERS);
>> >>> +        /* On a single-CPU machine run at least one thread */
>> >>> +        if (num_workers < 1)
>> >>> +                num_workers = 1;
>> >>>
>> >>>          /* Create timeout buffer pools */
>> >>>          params.buf_size = 0;
>> >>> @@ -294,19 +313,11 @@ static void test_odp_timer_all(void)
>> >>>          CU_ASSERT(tpinfo.param.res_ns == RES);
>> >>>          CU_ASSERT(tpinfo.param.min_tmo == MIN);
>> >>>          CU_ASSERT(tpinfo.param.max_tmo == MAX);
>> >>> -        printf("Timer pool\n");
>> >>> -        printf("----------\n");
>> >>> -        printf(" name: %s\n", tpinfo.name);
>> >>> -        printf(" resolution: %"PRIu64" ns (%"PRIu64" us)\n",
>> >>> -               tpinfo.param.res_ns, tpinfo.param.res_ns / 1000);
>> >>> -        printf(" min tmo: %"PRIu64" ns\n", tpinfo.param.min_tmo);
>> >>> -        printf(" max tmo: %"PRIu64" ns\n", tpinfo.param.max_tmo);
>> >>> -        printf("\n");
>> >>> -
>> >>> -        printf("#timers..: %u\n", NTIMERS);
>> >>> -        printf("Tmo range: %u ms (%"PRIu64" ticks)\n", RANGE_MS,
>> >>> -               odp_timer_ns_to_tick(tp, 1000000ULL * RANGE_MS));
>> >>> -        printf("\n");
>> >>> +        CU_ASSERT(strcmp(tpinfo.name, NAME) == 0);
>> >>> +
>> >>> +        LOG_DBG("#timers..: %u\n", NTIMERS);
>> >>> +        LOG_DBG("Tmo range: %u ms (%"PRIu64" ticks)\n", RANGE_MS,
>> >>> +                odp_timer_ns_to_tick(tp, 1000000ULL * RANGE_MS));
>> >>>
>> >>>          uint64_t tick;
>> >>>          for (tick = 0; tick < 1000000000000ULL; tick += 1000000ULL) {
>> >>> @@ -319,6 +330,9 @@ static void test_odp_timer_all(void)
>> >>>          /* Initialize barrier used by worker threads for synchronization */
>> >>>          odp_barrier_init(&test_barrier, num_workers);
>> >>>
>> >>> +        /* Initialize the shared timeout counter */
>> >>> +        odp_atomic_init_u32(&ndelivtoolate, 0);
>> >>> +
>> >>>          /* Create and start worker threads */
>> >>>          pthrd_arg thrdarg;
>> >>>          thrdarg.testcase = 0;
>> >>> @@ -327,6 +341,8 @@ static void test_odp_timer_all(void)
>> >>>
>> >>>          /* Wait for worker threads to exit */
>> >>>          odp_cunit_thread_exit(&thrdarg);
>> >>> +        LOG_DBG("Number of timeouts delivered/received too late: %u\n",
>> >>> +                odp_atomic_load_u32(&ndelivtoolate));
>> >>>
>> >>>          /* Check some statistics after the test */
>> >>>          if (odp_timer_pool_info(tp, &tpinfo) != 0)
>> >>> --
>> >>> 1.9.1
>> >>>
>> >>
>> >> --
>> >> Mike Holmes
>> >> Linaro Sr Technical Manager
>> >> LNG - ODP
>> >>
>>
>
_______________________________________________
lng-odp mailing list
[email protected]
http://lists.linaro.org/mailman/listinfo/lng-odp
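For readers who have not used it, below is a minimal standalone sketch of the
nanosleep() pattern that the patch adopts in place of the deprecated usleep().
The sleep_1ms() helper and the main() wrapper are illustrative only and are not
taken from the patch; the key points, as in the patch, are that _POSIX_C_SOURCE
must be defined before the first include so that <time.h> declares nanosleep()
(the patch also needs it for rand_r()), and that the delay is expressed as a
struct timespec in seconds plus nanoseconds.

/* Minimal sketch: a ~1 ms delay using nanosleep() instead of usleep() */
#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Hypothetical helper, not part of the patch */
static void sleep_1ms(void)
{
        struct timespec ts;
        ts.tv_sec  = 0;
        ts.tv_nsec = 1000000; /* 1 ms expressed in nanoseconds */
        if (nanosleep(&ts, NULL) < 0) {
                /* Mirror the patch's error handling: report and abort */
                perror("nanosleep");
                abort();
        }
}

int main(void)
{
        sleep_1ms();
        puts("slept ~1 ms");
        return 0;
}

nanosleep() returns -1 on failure and can report any unslept remainder through
its second argument; passing NULL there matches the patch, which simply aborts
on error.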
