On Wed, Jul 13, 2016 at 11:54 AM, Mike Holmes <[email protected]>
wrote:

>
> On 13 July 2016 at 12:47, Bill Fischofer <[email protected]>
> wrote:
>
>> On Wed, Jul 13, 2016 at 9:23 AM, Maxim Uvarov <[email protected]>
>> wrote:
>>
>> > On 07/13/16 17:08, Bill Fischofer wrote:
>> >
>> >> From: Barry Spinney <[email protected]>
>> >>
>> >> Resolved a valgrind issue by adding pthread_join and
>> >> pthread_attr_destroy calls when destroying a tm_system.
>> >>
>> >> Also resolved a todo by removing some code that was being #if'd out.
>> >>
>> >> Signed-off-by: Barry Spinney <[email protected]>
>> >> Signed-off-by: Bill Fischofer <[email protected]>
>> >> ---
>> >>   .../include/odp_traffic_mngr_internal.h            |  3 ++
>> >>   platform/linux-generic/odp_traffic_mngr.c          | 37 ++++++++--------------
>> >>   2 files changed, 17 insertions(+), 23 deletions(-)
>> >>
>> >> diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h
>> >> index 85a31e9..15451ac 100644
>> >> --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h
>> >> +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h
>> >> @@ -19,6 +19,7 @@
>> >>   extern "C" {
>> >>   #endif
>> >>
>> >> +#include <pthread.h>
>> >>   #include <odp/api/traffic_mngr.h>
>> >>   #include <odp/api/packet_io.h>
>> >>   #include <odp_name_table_internal.h>
>> >> @@ -352,6 +353,8 @@ typedef struct {
>> >>         odp_barrier_t    tm_system_destroy_barrier;
>> >>         odp_atomic_u64_t destroying;
>> >>         _odp_int_name_t  name_tbl_id;
>> >> +       pthread_t        thread;
>> >> +       pthread_attr_t   attr;
>> >>         void               *trace_buffer;
>> >>         uint32_t            next_queue_num;
>> >> diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
>> >> index aa14b6b..c6a69e7 100644
>> >> --- a/platform/linux-generic/odp_traffic_mngr.c
>> >> +++ b/platform/linux-generic/odp_traffic_mngr.c
>> >> @@ -2580,19 +2580,19 @@ static uint32_t tm_thread_cpu_select(void)
>> >>
>> >>   static int tm_thread_create(tm_system_t *tm_system)
>> >>   {
>> >> -       pthread_attr_t attr;
>> >> -       pthread_t      thread;
>> >>         cpu_set_t      cpu_set;
>> >>         uint32_t       cpu_num;
>> >>         int            rc;
>> >>
>> >> -       pthread_attr_init(&attr);
>> >> +       pthread_attr_init(&tm_system->attr);
>> >>         cpu_num = tm_thread_cpu_select();
>> >>         CPU_ZERO(&cpu_set);
>> >>         CPU_SET(cpu_num, &cpu_set);
>> >> -       pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpu_set);
>> >> +       pthread_attr_setaffinity_np(&tm_system->attr, sizeof(cpu_set_t),
>> >> +                                   &cpu_set);
>> >>
>> >> -       rc = pthread_create(&thread, &attr, tm_system_thread, tm_system);
>> >> +       rc = pthread_create(&tm_system->thread, &tm_system->attr,
>> >> +                           tm_system_thread, tm_system);
>> >>         if (rc != 0)
>> >>                 ODP_DBG("Failed to start thread on cpu num=%u\n", cpu_num);
>> >>
>> >> @@ -2748,16 +2748,22 @@ int odp_tm_capability(odp_tm_t odp_tm, odp_tm_capabilities_t *capabilities)
>> >>   int odp_tm_destroy(odp_tm_t odp_tm)
>> >>   {
>> >>         tm_system_t *tm_system;
>> >> +       int rc;
>> >>         tm_system = GET_TM_SYSTEM(odp_tm);
>> >>
>> >> -       /* First mark the tm_system as being in the destroying state so that
>> >> -       * all new pkts are prevented from coming in.
>> >> -       */
>> >> +       /* First mark the tm_system as being in the destroying state so that
>> >> +        * all new pkts are prevented from coming in.
>> >> +        */
>> >>         odp_barrier_init(&tm_system->tm_system_destroy_barrier, 2);
>> >>         odp_atomic_inc_u64(&tm_system->destroying);
>> >>         odp_barrier_wait(&tm_system->tm_system_destroy_barrier);
>> >>
>> >> +       /* Next wait for the thread to exit. */
>> >> +       rc = pthread_join(tm_system->thread, NULL);
>> >> +       if (rc == 0)
>> >> +               pthread_attr_destroy(&tm_system->attr);
>> >> +
>> >>
>> >
>> > The logic here is a little bit unclear. Why destroy the attrs only
>> > if join returned 0?
>>
>>
>> The destroy call is only made if the join was successful. If you
>> can't join, it's a serious error and a memory leak is probably the
>> least of our concerns.
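>>
>> For illustration, a minimal sketch of how logging the failure could
>> look (the else branch with ODP_DBG is my illustration, not part of
>> the patch):
>>
>>         /* Wait for the tm_system thread to exit, then release the
>>          * pthread attr object that was set up for it. */
>>         rc = pthread_join(tm_system->thread, NULL);
>>         if (rc == 0)
>>                 pthread_attr_destroy(&tm_system->attr);
>>         else
>>                 ODP_DBG("pthread_join failed: %d\n", rc);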
>>
>
> We have not previously focused on error paths.
>
> Should this print an ODP_ERR on failure, or call ODP_ABORT to stop
> things in their tracks because it has seriously gone off the rails,
> and then produce a trace?
>
>
How about an ODP_ASSERT()? I think that's the preferred way to handle an
abnormal condition that shouldn't happen, no?
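
A minimal sketch of how that could look in odp_tm_destroy(), assuming
the single-argument ODP_ASSERT() from odp_debug_internal.h (which only
checks its condition when ODP_DEBUG is enabled):

        /* Next wait for the thread to exit. */
        rc = pthread_join(tm_system->thread, NULL);
        ODP_ASSERT(rc == 0);
        if (rc == 0)
                pthread_attr_destroy(&tm_system->attr);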


>
>
>>
>> >
>> >
>> > Maxim.
>> >
>> >
>> >>         input_work_queue_destroy(tm_system->input_work_queue);
>> >>         _odp_sorted_pool_destroy(tm_system->_odp_int_sorted_pool);
>> >>         _odp_queue_pool_destroy(tm_system->_odp_int_queue_pool);
>> >> @@ -4104,21 +4110,6 @@ int odp_tm_enq_with_cnt(odp_tm_queue_t tm_queue, odp_packet_t pkt)
>> >>         return pkt_cnt;
>> >>   }
>> >>
>> >> -#ifdef NOT_USED /* @todo use or delete */
>> >> -static uint32_t odp_tm_input_work_queue_fullness(odp_tm_t odp_tm ODP_UNUSED)
>> >> -{
>> >> -       input_work_queue_t *input_work_queue;
>> >> -       tm_system_t *tm_system;
>> >> -       uint32_t queue_cnt, fullness;
>> >> -
>> >> -       tm_system = GET_TM_SYSTEM(odp_tm);
>> >> -       input_work_queue = tm_system->input_work_queue;
>> >> -       queue_cnt = odp_atomic_load_u64(&input_work_queue->queue_cnt);
>> >> -       fullness = (100 * queue_cnt) / INPUT_WORK_RING_SIZE;
>> >> -       return fullness;
>> >> -}
>> >> -#endif
>> >> -
>> >>   int odp_tm_node_info(odp_tm_node_t tm_node, odp_tm_node_info_t *info)
>> >>   {
>> >>         tm_queue_thresholds_t *threshold_params;
>> >>
>> >
>> >
>>
>
>
>
> --
> Mike Holmes
> Technical Manager - Linaro Networking Group
> Linaro.org <http://www.linaro.org/> │ Open source software for ARM SoCs
> "Work should be fun and collaborative, the rest follows"
>
>
>
