Re: [lng-odp] [PATCH] linux-generic: move tm system barrier to tm group

2016-12-09 Thread Maxim Uvarov
Merged,
Maxim.

On 12/08/16 05:21, Bala Manoharan wrote:
> Reviewed-by: Balasubramanian Manoharan 
> 
> On 2 December 2016 at 13:41,   wrote:
>> From: Xuelin Shi 
>>
>> Since the tm thread services the whole tm group, move the thread-based
>> barrier to the tm group. Otherwise, a packet cannot get into the
>> second tm system in the same group.
>>
>> Signed-off-by: Xuelin Shi 
>> ---
>>  platform/linux-generic/include/odp_traffic_mngr_internal.h |  3 ++-
>>  platform/linux-generic/odp_traffic_mngr.c  | 12 +++-
>>  2 files changed, 9 insertions(+), 6 deletions(-)
>>
>> diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h
>> index 858183b..9f821fe 100644
>> --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h
>> +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h
>> @@ -367,7 +367,6 @@ struct tm_system_s {
>> _odp_tm_group_t odp_tm_group;
>>
>> odp_ticketlock_t tm_system_lock;
>> -   odp_barrier_t    tm_system_barrier;
>> odp_barrier_t    tm_system_destroy_barrier;
>> odp_atomic_u64_t destroying;
>> _odp_int_name_t  name_tbl_id;
>> @@ -416,8 +415,10 @@ struct tm_system_group_s {
>> tm_system_group_t *prev;
>> tm_system_group_t *next;
>>
>> +   odp_barrier_t  tm_group_barrier;
>> tm_system_t   *first_tm_system;
>> uint32_t   num_tm_systems;
>> +   uint32_t   first_enq;
>> pthread_t  thread;
>> pthread_attr_t attr;
>>  };
>> diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
>> index a1f990f..62e5c63 100644
>> --- a/platform/linux-generic/odp_traffic_mngr.c
>> +++ b/platform/linux-generic/odp_traffic_mngr.c
>> @@ -1854,6 +1854,7 @@ static int tm_enqueue(tm_system_t *tm_system,
>>   tm_queue_obj_t *tm_queue_obj,
>>   odp_packet_t pkt)
>>  {
>> +   tm_system_group_t *tm_group;
>> input_work_item_t work_item;
>> odp_packet_color_t pkt_color;
>> tm_wred_node_t *initial_tm_wred_node;
>> @@ -1868,9 +1869,10 @@ static int tm_enqueue(tm_system_t *tm_system,
>> if (queue_tm_reorder(&tm_queue_obj->tm_qentry, &pkt_hdr->buf_hdr))
>> return 0;
>>
>> -   if (tm_system->first_enq == 0) {
>> -   odp_barrier_wait(&tm_system->tm_system_barrier);
>> -   tm_system->first_enq = 1;
>> +   tm_group = GET_TM_GROUP(tm_system->odp_tm_group);
>> +   if (tm_group->first_enq == 0) {
>> +   odp_barrier_wait(&tm_group->tm_group_barrier);
>> +   tm_group->first_enq = 1;
>> }
>>
>> pkt_color = odp_packet_color(pkt);
>> @@ -2327,7 +2329,7 @@ static void *tm_system_thread(void *arg)
>> input_work_queue = tm_system->input_work_queue;
>>
>> /* Wait here until we have seen the first enqueue operation. */
>> -   odp_barrier_wait(&tm_system->tm_system_barrier);
>> +   odp_barrier_wait(&tm_group->tm_group_barrier);
>> main_loop_running = true;
>>
>> destroying = odp_atomic_load_u64(&tm_system->destroying);
>> @@ -2625,6 +2627,7 @@ static _odp_tm_group_t _odp_tm_group_create(const char *name ODP_UNUSED)
>>
>> tm_group = malloc(sizeof(tm_system_group_t));
>> memset(tm_group, 0, sizeof(tm_system_group_t));
>> +   odp_barrier_init(&tm_group->tm_group_barrier, 2);
>>
>> /* Add this group to the tm_group_list linked list. */
>> if (tm_group_list == NULL) {
>> @@ -2868,7 +2871,6 @@ odp_tm_t odp_tm_create(const char *name,
>> tm_system->_odp_int_timer_wheel = _ODP_INT_TIMER_WHEEL_INVALID;
>>
>> odp_ticketlock_init(&tm_system->tm_system_lock);
>> -   odp_barrier_init(&tm_system->tm_system_barrier, 2);
>> odp_atomic_init_u64(&tm_system->destroying, 0);
>>
>> tm_system->_odp_int_sorted_pool = _odp_sorted_pool_create(
>> --
>> 1.8.3.1
>>



Re: [lng-odp] [PATCH] linux-generic: move tm system barrier to tm group

2016-12-07 Thread Bala Manoharan
Reviewed-by: Balasubramanian Manoharan 

On 2 December 2016 at 13:41,   wrote:
> From: Xuelin Shi 
>
> Since the tm thread services the whole tm group, move the thread-based
> barrier to the tm group. Otherwise, a packet cannot get into the
> second tm system in the same group.
>
> Signed-off-by: Xuelin Shi 
> ---
>  platform/linux-generic/include/odp_traffic_mngr_internal.h |  3 ++-
>  platform/linux-generic/odp_traffic_mngr.c  | 12 +++-
>  2 files changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h
> index 858183b..9f821fe 100644
> --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h
> +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h
> @@ -367,7 +367,6 @@ struct tm_system_s {
> _odp_tm_group_t odp_tm_group;
>
> odp_ticketlock_t tm_system_lock;
> -   odp_barrier_t    tm_system_barrier;
> odp_barrier_t    tm_system_destroy_barrier;
> odp_atomic_u64_t destroying;
> _odp_int_name_t  name_tbl_id;
> @@ -416,8 +415,10 @@ struct tm_system_group_s {
> tm_system_group_t *prev;
> tm_system_group_t *next;
>
> +   odp_barrier_t  tm_group_barrier;
> tm_system_t   *first_tm_system;
> uint32_t   num_tm_systems;
> +   uint32_t   first_enq;
> pthread_t  thread;
> pthread_attr_t attr;
>  };
> diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
> index a1f990f..62e5c63 100644
> --- a/platform/linux-generic/odp_traffic_mngr.c
> +++ b/platform/linux-generic/odp_traffic_mngr.c
> @@ -1854,6 +1854,7 @@ static int tm_enqueue(tm_system_t *tm_system,
>   tm_queue_obj_t *tm_queue_obj,
>   odp_packet_t pkt)
>  {
> +   tm_system_group_t *tm_group;
> input_work_item_t work_item;
> odp_packet_color_t pkt_color;
> tm_wred_node_t *initial_tm_wred_node;
> @@ -1868,9 +1869,10 @@ static int tm_enqueue(tm_system_t *tm_system,
> if (queue_tm_reorder(&tm_queue_obj->tm_qentry, &pkt_hdr->buf_hdr))
> return 0;
>
> -   if (tm_system->first_enq == 0) {
> -   odp_barrier_wait(&tm_system->tm_system_barrier);
> -   tm_system->first_enq = 1;
> +   tm_group = GET_TM_GROUP(tm_system->odp_tm_group);
> +   if (tm_group->first_enq == 0) {
> +   odp_barrier_wait(&tm_group->tm_group_barrier);
> +   tm_group->first_enq = 1;
> }
>
> pkt_color = odp_packet_color(pkt);
> @@ -2327,7 +2329,7 @@ static void *tm_system_thread(void *arg)
> input_work_queue = tm_system->input_work_queue;
>
> /* Wait here until we have seen the first enqueue operation. */
> -   odp_barrier_wait(&tm_system->tm_system_barrier);
> +   odp_barrier_wait(&tm_group->tm_group_barrier);
> main_loop_running = true;
>
> destroying = odp_atomic_load_u64(&tm_system->destroying);
> @@ -2625,6 +2627,7 @@ static _odp_tm_group_t _odp_tm_group_create(const char *name ODP_UNUSED)
>
> tm_group = malloc(sizeof(tm_system_group_t));
> memset(tm_group, 0, sizeof(tm_system_group_t));
> +   odp_barrier_init(&tm_group->tm_group_barrier, 2);
>
> /* Add this group to the tm_group_list linked list. */
> if (tm_group_list == NULL) {
> @@ -2868,7 +2871,6 @@ odp_tm_t odp_tm_create(const char *name,
> tm_system->_odp_int_timer_wheel = _ODP_INT_TIMER_WHEEL_INVALID;
>
> odp_ticketlock_init(&tm_system->tm_system_lock);
> -   odp_barrier_init(&tm_system->tm_system_barrier, 2);
> odp_atomic_init_u64(&tm_system->destroying, 0);
>
> tm_system->_odp_int_sorted_pool = _odp_sorted_pool_create(
> --
> 1.8.3.1
>


Re: [lng-odp] [PATCH] linux-generic: move tm system barrier to tm group

2016-12-06 Thread Forrest Shi
Hi Bala,

I'm testing example/traffic_mgmt with 20 Gbps connections and have not seen
any issues here.

Thanks,
Forrest

On 6 December 2016 at 21:43, Bala Manoharan 
wrote:

> On 6 December 2016 at 18:30, Forrest Shi  wrote:
> > Hi Bala,
> >
> > For each pktio, I'm trying to create one odp_tm_t and also one tm thread.
> >
> > The TM thread is bound to an odp_tm_group_t, which means I will create
> > one odp_tm_group_t for each pktio.
> >
> > Currently, group creation is only available on big systems with more
> > than 24 cores, so I also reduced that number.
> >
> > As for the traffic_mgmt example, I associate the company profile with
> > each pktio, as if it were a per-pktio profile.
> >
> > The whole picture: each pktio will be associated with one tm thread,
> > and that tm thread will have one odp_tm_t tree to handle.
>
> Understood. Have you done any tests to see if there is any performance
> issue after this change?
> If there is no performance degradation then I am fine with this change.
>
> Regards,
> Bala
>
> >
> >
> > Thanks,
> > Forrest
> >
> > On 6 December 2016 at 20:17, Bala Manoharan 
> > wrote:
> >>
> >> Can you please elaborate on the use-case for this change?
> >>
> >> Regards,
> >> Bala
> >>
> >>
> >> On 2 December 2016 at 13:41,   wrote:
> >> > From: Xuelin Shi 
> >> >
> >> > Since the tm thread services the whole tm group, move the thread-based
> >> > barrier to the tm group. Otherwise, a packet cannot get into the
> >> > second tm system in the same group.
> >> >
> >> > Signed-off-by: Xuelin Shi 
> >> > ---
> >> >  platform/linux-generic/include/odp_traffic_mngr_internal.h |  3 ++-
> >> >  platform/linux-generic/odp_traffic_mngr.c  | 12 +++-
> >> >  2 files changed, 9 insertions(+), 6 deletions(-)
> >> >
> >> > diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h
> >> > index 858183b..9f821fe 100644
> >> > --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h
> >> > +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h
> >> > @@ -367,7 +367,6 @@ struct tm_system_s {
> >> > _odp_tm_group_t odp_tm_group;
> >> >
> >> > odp_ticketlock_t tm_system_lock;
> >> > -   odp_barrier_t    tm_system_barrier;
> >> > odp_barrier_t    tm_system_destroy_barrier;
> >> > odp_atomic_u64_t destroying;
> >> > _odp_int_name_t  name_tbl_id;
> >> > @@ -416,8 +415,10 @@ struct tm_system_group_s {
> >> > tm_system_group_t *prev;
> >> > tm_system_group_t *next;
> >> >
> >> > +   odp_barrier_t  tm_group_barrier;
> >> > tm_system_t   *first_tm_system;
> >> > uint32_t   num_tm_systems;
> >> > +   uint32_t   first_enq;
> >> > pthread_t  thread;
> >> > pthread_attr_t attr;
> >> >  };
> >> > diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
> >> > index a1f990f..62e5c63 100644
> >> > --- a/platform/linux-generic/odp_traffic_mngr.c
> >> > +++ b/platform/linux-generic/odp_traffic_mngr.c
> >> > @@ -1854,6 +1854,7 @@ static int tm_enqueue(tm_system_t *tm_system,
> >> >   tm_queue_obj_t *tm_queue_obj,
> >> >   odp_packet_t pkt)
> >> >  {
> >> > +   tm_system_group_t *tm_group;
> >> > input_work_item_t work_item;
> >> > odp_packet_color_t pkt_color;
> >> > tm_wred_node_t *initial_tm_wred_node;
> >> > @@ -1868,9 +1869,10 @@ static int tm_enqueue(tm_system_t *tm_system,
> >> > if (queue_tm_reorder(&tm_queue_obj->tm_qentry, &pkt_hdr->buf_hdr))
> >> > return 0;
> >> >
> >> > -   if (tm_system->first_enq == 0) {
> >> > -   odp_barrier_wait(&tm_system->tm_system_barrier);
> >> > -   tm_system->first_enq = 1;
> >> > +   tm_group = GET_TM_GROUP(tm_system->odp_tm_group);
> >> > +   if (tm_group->first_enq == 0) {
> >> > +   odp_barrier_wait(&tm_group->tm_group_barrier);
> >> > +   tm_group->first_enq = 1;
> >> > }
> >> >
> >> > pkt_color = odp_packet_color(pkt);
> >> > @@ -2327,7 +2329,7 @@ static void *tm_system_thread(void *arg)
> >> > input_work_queue = tm_system->input_work_queue;
> >> >
> >> > /* Wait here until we have seen the first enqueue operation. */
> >> > -   odp_barrier_wait(&tm_system->tm_system_barrier);
> >> > +   odp_barrier_wait(&tm_group->tm_group_barrier);
> >> > main_loop_running = true;
> >> >
> >> > destroying = odp_atomic_load_u64(&tm_system->destroying);
> >> > @@ -2625,6 +2627,7 @@ static _odp_tm_group_t _odp_tm_group_create(const char *name ODP_UNUSED)
> >> >
> >> > tm_group = malloc(sizeof(tm_system_group_t));
> >> > memset(tm_group, 0, sizeof(tm_system_group_t));
> >> > +   odp_barrier_init(&tm_group->tm_group_barrier, 2);
> >> >
> >> > /* Add this group to the tm_group_list linked list.

Re: [lng-odp] [PATCH] linux-generic: move tm system barrier to tm group

2016-12-06 Thread Bala Manoharan
On 6 December 2016 at 18:30, Forrest Shi  wrote:
> Hi Bala,
>
> For each pktio, I'm trying to create one odp_tm_t and also one tm thread.
>
> The TM thread is bound to an odp_tm_group_t, which means I will create
> one odp_tm_group_t for each pktio.
>
> Currently, group creation is only available on big systems with more than
> 24 cores, so I also reduced that number.
>
> As for the traffic_mgmt example, I associate the company profile with each
> pktio, as if it were a per-pktio profile.
>
> The whole picture: each pktio will be associated with one tm thread, and
> that tm thread will have one odp_tm_t tree to handle.

Understood. Have you done any tests to see if there is any performance
issue after this change?
If there is no performance degradation then I am fine with this change.

Regards,
Bala

>
>
> Thanks,
> Forrest
>
> On 6 December 2016 at 20:17, Bala Manoharan 
> wrote:
>>
>> Can you please elaborate on the use-case for this change?
>>
>> Regards,
>> Bala
>>
>>
>> On 2 December 2016 at 13:41,   wrote:
>> > From: Xuelin Shi 
>> >
>> > Since the tm thread services the whole tm group, move the thread-based
>> > barrier to the tm group. Otherwise, a packet cannot get into the
>> > second tm system in the same group.
>> >
>> > Signed-off-by: Xuelin Shi 
>> > ---
>> >  platform/linux-generic/include/odp_traffic_mngr_internal.h |  3 ++-
>> >  platform/linux-generic/odp_traffic_mngr.c  | 12 +++-
>> >  2 files changed, 9 insertions(+), 6 deletions(-)
>> >
>> > diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h
>> > index 858183b..9f821fe 100644
>> > --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h
>> > +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h
>> > @@ -367,7 +367,6 @@ struct tm_system_s {
>> > _odp_tm_group_t odp_tm_group;
>> >
>> > odp_ticketlock_t tm_system_lock;
>> > -   odp_barrier_t    tm_system_barrier;
>> > odp_barrier_t    tm_system_destroy_barrier;
>> > odp_atomic_u64_t destroying;
>> > _odp_int_name_t  name_tbl_id;
>> > @@ -416,8 +415,10 @@ struct tm_system_group_s {
>> > tm_system_group_t *prev;
>> > tm_system_group_t *next;
>> >
>> > +   odp_barrier_t  tm_group_barrier;
>> > tm_system_t   *first_tm_system;
>> > uint32_t   num_tm_systems;
>> > +   uint32_t   first_enq;
>> > pthread_t  thread;
>> > pthread_attr_t attr;
>> >  };
>> > diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
>> > index a1f990f..62e5c63 100644
>> > --- a/platform/linux-generic/odp_traffic_mngr.c
>> > +++ b/platform/linux-generic/odp_traffic_mngr.c
>> > @@ -1854,6 +1854,7 @@ static int tm_enqueue(tm_system_t *tm_system,
>> >   tm_queue_obj_t *tm_queue_obj,
>> >   odp_packet_t pkt)
>> >  {
>> > +   tm_system_group_t *tm_group;
>> > input_work_item_t work_item;
>> > odp_packet_color_t pkt_color;
>> > tm_wred_node_t *initial_tm_wred_node;
>> > @@ -1868,9 +1869,10 @@ static int tm_enqueue(tm_system_t *tm_system,
>> > if (queue_tm_reorder(&tm_queue_obj->tm_qentry, &pkt_hdr->buf_hdr))
>> > return 0;
>> >
>> > -   if (tm_system->first_enq == 0) {
>> > -   odp_barrier_wait(&tm_system->tm_system_barrier);
>> > -   tm_system->first_enq = 1;
>> > +   tm_group = GET_TM_GROUP(tm_system->odp_tm_group);
>> > +   if (tm_group->first_enq == 0) {
>> > +   odp_barrier_wait(&tm_group->tm_group_barrier);
>> > +   tm_group->first_enq = 1;
>> > }
>> >
>> > pkt_color = odp_packet_color(pkt);
>> > @@ -2327,7 +2329,7 @@ static void *tm_system_thread(void *arg)
>> > input_work_queue = tm_system->input_work_queue;
>> >
>> > /* Wait here until we have seen the first enqueue operation. */
>> > -   odp_barrier_wait(&tm_system->tm_system_barrier);
>> > +   odp_barrier_wait(&tm_group->tm_group_barrier);
>> > main_loop_running = true;
>> >
>> > destroying = odp_atomic_load_u64(&tm_system->destroying);
>> > @@ -2625,6 +2627,7 @@ static _odp_tm_group_t _odp_tm_group_create(const char *name ODP_UNUSED)
>> >
>> > tm_group = malloc(sizeof(tm_system_group_t));
>> > memset(tm_group, 0, sizeof(tm_system_group_t));
>> > +   odp_barrier_init(&tm_group->tm_group_barrier, 2);
>> >
>> > /* Add this group to the tm_group_list linked list. */
>> > if (tm_group_list == NULL) {
>> > @@ -2868,7 +2871,6 @@ odp_tm_t odp_tm_create(const char *name,
>> > tm_system->_odp_int_timer_wheel = _ODP_INT_TIMER_WHEEL_INVALID;
>> >
>> > odp_ticketlock_init(&tm_system->tm_system_lock);
>> > -   odp_barrier_init(&tm_system->tm_system_barrier, 2);
>> > odp_atomic_init_u64(&tm_system->destroying, 0);
>> >
>> > tm_sys

Re: [lng-odp] [PATCH] linux-generic: move tm system barrier to tm group

2016-12-06 Thread Forrest Shi
Hi Bala,

For each pktio, I'm trying to create one odp_tm_t and also one tm thread.

The TM thread is bound to an odp_tm_group_t, which means I will create one
odp_tm_group_t for each pktio.

Currently, group creation is only available on big systems with more than
24 cores, so I also reduced that number.

As for the traffic_mgmt example, I associate the company profile with each
pktio, as if it were a per-pktio profile.

The whole picture: each pktio will be associated with one tm thread, and
that tm thread will have one odp_tm_t tree to handle.
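
Roughly, the per-pktio setup looks like this (just a sketch, error handling
omitted; it assumes the standard odp_tm_* API with an ODP_TM_EGRESS_PKT_IO
egress, and the helper name is made up):

#include <odp_api.h>

static odp_tm_t tm_for_pktio(const char *name, odp_pktio_t pktio)
{
        odp_tm_requirements_t req;
        odp_tm_egress_t egress;

        odp_tm_requirements_init(&req);
        /* ... fill in real level/shaper/scheduler requirements here ... */

        odp_tm_egress_init(&egress);
        egress.egress_kind = ODP_TM_EGRESS_PKT_IO;
        egress.pktio = pktio;

        /* With one tm group per pktio, as described above, this odp_tm_t
         * is serviced by that group's own tm thread. */
        return odp_tm_create(name, &req, &egress);
}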


Thanks,
Forrest

On 6 December 2016 at 20:17, Bala Manoharan 
wrote:

> Can you please elaborate on the use-case for this change?
>
> Regards,
> Bala
>
>
> On 2 December 2016 at 13:41,   wrote:
> > From: Xuelin Shi 
> >
> > Since the tm thread services the whole tm group, move the thread-based
> > barrier to the tm group. Otherwise, a packet cannot get into the
> > second tm system in the same group.
> >
> > Signed-off-by: Xuelin Shi 
> > ---
> >  platform/linux-generic/include/odp_traffic_mngr_internal.h |  3 ++-
> >  platform/linux-generic/odp_traffic_mngr.c  | 12 +++-
> >  2 files changed, 9 insertions(+), 6 deletions(-)
> >
> > diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h
> > index 858183b..9f821fe 100644
> > --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h
> > +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h
> > @@ -367,7 +367,6 @@ struct tm_system_s {
> > _odp_tm_group_t odp_tm_group;
> >
> > odp_ticketlock_t tm_system_lock;
> > -   odp_barrier_t    tm_system_barrier;
> > odp_barrier_t    tm_system_destroy_barrier;
> > odp_atomic_u64_t destroying;
> > _odp_int_name_t  name_tbl_id;
> > @@ -416,8 +415,10 @@ struct tm_system_group_s {
> > tm_system_group_t *prev;
> > tm_system_group_t *next;
> >
> > +   odp_barrier_t  tm_group_barrier;
> > tm_system_t   *first_tm_system;
> > uint32_t   num_tm_systems;
> > +   uint32_t   first_enq;
> > pthread_t  thread;
> > pthread_attr_t attr;
> >  };
> > diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
> > index a1f990f..62e5c63 100644
> > --- a/platform/linux-generic/odp_traffic_mngr.c
> > +++ b/platform/linux-generic/odp_traffic_mngr.c
> > @@ -1854,6 +1854,7 @@ static int tm_enqueue(tm_system_t *tm_system,
> >   tm_queue_obj_t *tm_queue_obj,
> >   odp_packet_t pkt)
> >  {
> > +   tm_system_group_t *tm_group;
> > input_work_item_t work_item;
> > odp_packet_color_t pkt_color;
> > tm_wred_node_t *initial_tm_wred_node;
> > @@ -1868,9 +1869,10 @@ static int tm_enqueue(tm_system_t *tm_system,
> > if (queue_tm_reorder(&tm_queue_obj->tm_qentry, &pkt_hdr->buf_hdr))
> > return 0;
> >
> > -   if (tm_system->first_enq == 0) {
> > -   odp_barrier_wait(&tm_system->tm_system_barrier);
> > -   tm_system->first_enq = 1;
> > +   tm_group = GET_TM_GROUP(tm_system->odp_tm_group);
> > +   if (tm_group->first_enq == 0) {
> > +   odp_barrier_wait(&tm_group->tm_group_barrier);
> > +   tm_group->first_enq = 1;
> > }
> >
> > pkt_color = odp_packet_color(pkt);
> > @@ -2327,7 +2329,7 @@ static void *tm_system_thread(void *arg)
> > input_work_queue = tm_system->input_work_queue;
> >
> > /* Wait here until we have seen the first enqueue operation. */
> > -   odp_barrier_wait(&tm_system->tm_system_barrier);
> > +   odp_barrier_wait(&tm_group->tm_group_barrier);
> > main_loop_running = true;
> >
> > destroying = odp_atomic_load_u64(&tm_system->destroying);
> > @@ -2625,6 +2627,7 @@ static _odp_tm_group_t _odp_tm_group_create(const char *name ODP_UNUSED)
> >
> > tm_group = malloc(sizeof(tm_system_group_t));
> > memset(tm_group, 0, sizeof(tm_system_group_t));
> > +   odp_barrier_init(&tm_group->tm_group_barrier, 2);
> >
> > /* Add this group to the tm_group_list linked list. */
> > if (tm_group_list == NULL) {
> > @@ -2868,7 +2871,6 @@ odp_tm_t odp_tm_create(const char *name,
> > tm_system->_odp_int_timer_wheel = _ODP_INT_TIMER_WHEEL_INVALID;
> >
> > odp_ticketlock_init(&tm_system->tm_system_lock);
> > -   odp_barrier_init(&tm_system->tm_system_barrier, 2);
> > odp_atomic_init_u64(&tm_system->destroying, 0);
> >
> > tm_system->_odp_int_sorted_pool = _odp_sorted_pool_create(
> > --
> > 1.8.3.1
> >
>


Re: [lng-odp] [PATCH] linux-generic: move tm system barrier to tm group

2016-12-06 Thread Bala Manoharan
Can you please elaborate on the use-case for this change?

Regards,
Bala


On 2 December 2016 at 13:41,   wrote:
> From: Xuelin Shi 
>
> Since the tm thread services the whole tm group, move the thread-based
> barrier to the tm group. Otherwise, a packet cannot get into the
> second tm system in the same group.
>
> Signed-off-by: Xuelin Shi 
> ---
>  platform/linux-generic/include/odp_traffic_mngr_internal.h |  3 ++-
>  platform/linux-generic/odp_traffic_mngr.c  | 12 +++-
>  2 files changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h
> index 858183b..9f821fe 100644
> --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h
> +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h
> @@ -367,7 +367,6 @@ struct tm_system_s {
> _odp_tm_group_t odp_tm_group;
>
> odp_ticketlock_t tm_system_lock;
> -   odp_barrier_t    tm_system_barrier;
> odp_barrier_t    tm_system_destroy_barrier;
> odp_atomic_u64_t destroying;
> _odp_int_name_t  name_tbl_id;
> @@ -416,8 +415,10 @@ struct tm_system_group_s {
> tm_system_group_t *prev;
> tm_system_group_t *next;
>
> +   odp_barrier_t  tm_group_barrier;
> tm_system_t   *first_tm_system;
> uint32_t   num_tm_systems;
> +   uint32_t   first_enq;
> pthread_t  thread;
> pthread_attr_t attr;
>  };
> diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
> index a1f990f..62e5c63 100644
> --- a/platform/linux-generic/odp_traffic_mngr.c
> +++ b/platform/linux-generic/odp_traffic_mngr.c
> @@ -1854,6 +1854,7 @@ static int tm_enqueue(tm_system_t *tm_system,
>   tm_queue_obj_t *tm_queue_obj,
>   odp_packet_t pkt)
>  {
> +   tm_system_group_t *tm_group;
> input_work_item_t work_item;
> odp_packet_color_t pkt_color;
> tm_wred_node_t *initial_tm_wred_node;
> @@ -1868,9 +1869,10 @@ static int tm_enqueue(tm_system_t *tm_system,
> if (queue_tm_reorder(&tm_queue_obj->tm_qentry, &pkt_hdr->buf_hdr))
> return 0;
>
> -   if (tm_system->first_enq == 0) {
> -   odp_barrier_wait(&tm_system->tm_system_barrier);
> -   tm_system->first_enq = 1;
> +   tm_group = GET_TM_GROUP(tm_system->odp_tm_group);
> +   if (tm_group->first_enq == 0) {
> +   odp_barrier_wait(&tm_group->tm_group_barrier);
> +   tm_group->first_enq = 1;
> }
>
> pkt_color = odp_packet_color(pkt);
> @@ -2327,7 +2329,7 @@ static void *tm_system_thread(void *arg)
> input_work_queue = tm_system->input_work_queue;
>
> /* Wait here until we have seen the first enqueue operation. */
> -   odp_barrier_wait(&tm_system->tm_system_barrier);
> +   odp_barrier_wait(&tm_group->tm_group_barrier);
> main_loop_running = true;
>
> destroying = odp_atomic_load_u64(&tm_system->destroying);
> @@ -2625,6 +2627,7 @@ static _odp_tm_group_t _odp_tm_group_create(const char *name ODP_UNUSED)
>
> tm_group = malloc(sizeof(tm_system_group_t));
> memset(tm_group, 0, sizeof(tm_system_group_t));
> +   odp_barrier_init(&tm_group->tm_group_barrier, 2);
>
> /* Add this group to the tm_group_list linked list. */
> if (tm_group_list == NULL) {
> @@ -2868,7 +2871,6 @@ odp_tm_t odp_tm_create(const char *name,
> tm_system->_odp_int_timer_wheel = _ODP_INT_TIMER_WHEEL_INVALID;
>
> odp_ticketlock_init(&tm_system->tm_system_lock);
> -   odp_barrier_init(&tm_system->tm_system_barrier, 2);
> odp_atomic_init_u64(&tm_system->destroying, 0);
>
> tm_system->_odp_int_sorted_pool = _odp_sorted_pool_create(
> --
> 1.8.3.1
>


[lng-odp] [PATCH] linux-generic: move tm system barrier to tm group

2016-12-02 Thread forrest.shi
From: Xuelin Shi 

Since the tm thread services the whole tm group, move the thread-based
barrier to the tm group. Otherwise, a packet cannot get into the
second tm system in the same group.
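
To illustrate the fix (a simplified sketch of the resulting handshake,
reusing the internal types and the GET_TM_GROUP macro from the diff below;
the function names here are illustrative): odp_barrier_init(..., 2) means
exactly two waiters must arrive. With a per-system barrier, the group's
single service thread parks on the first tm system's barrier, so the first
enqueue to the second tm system waits on a barrier that nobody else ever
reaches. At group scope, the thread and the first enqueuer anywhere in the
group meet exactly once:

/* one service thread per tm group */
static void *group_thread_sketch(void *arg)
{
        tm_system_group_t *tm_group = arg;

        /* Parks until the first enqueue anywhere in the group. */
        odp_barrier_wait(&tm_group->tm_group_barrier);
        /* ... main loop servicing every tm system in the group ... */
        return NULL;
}

static int enqueue_sketch(tm_system_t *tm_system, odp_packet_t pkt)
{
        tm_system_group_t *tm_group = GET_TM_GROUP(tm_system->odp_tm_group);

        if (tm_group->first_enq == 0) {
                /* Barrier count is 2: this enqueuer plus the group thread. */
                odp_barrier_wait(&tm_group->tm_group_barrier);
                tm_group->first_enq = 1;
        }
        /* ... hand pkt to the group's input work queue ... */
        (void)pkt;
        return 0;
}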

Signed-off-by: Xuelin Shi 
---
 platform/linux-generic/include/odp_traffic_mngr_internal.h |  3 ++-
 platform/linux-generic/odp_traffic_mngr.c  | 12 +++-
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h
index 858183b..9f821fe 100644
--- a/platform/linux-generic/include/odp_traffic_mngr_internal.h
+++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h
@@ -367,7 +367,6 @@ struct tm_system_s {
_odp_tm_group_t odp_tm_group;
 
odp_ticketlock_t tm_system_lock;
-   odp_barrier_t    tm_system_barrier;
odp_barrier_t    tm_system_destroy_barrier;
odp_atomic_u64_t destroying;
_odp_int_name_t  name_tbl_id;
@@ -416,8 +415,10 @@ struct tm_system_group_s {
tm_system_group_t *prev;
tm_system_group_t *next;
 
+   odp_barrier_t  tm_group_barrier;
tm_system_t   *first_tm_system;
uint32_t   num_tm_systems;
+   uint32_t   first_enq;
pthread_t  thread;
pthread_attr_t attr;
 };
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
index a1f990f..62e5c63 100644
--- a/platform/linux-generic/odp_traffic_mngr.c
+++ b/platform/linux-generic/odp_traffic_mngr.c
@@ -1854,6 +1854,7 @@ static int tm_enqueue(tm_system_t *tm_system,
  tm_queue_obj_t *tm_queue_obj,
  odp_packet_t pkt)
 {
+   tm_system_group_t *tm_group;
input_work_item_t work_item;
odp_packet_color_t pkt_color;
tm_wred_node_t *initial_tm_wred_node;
@@ -1868,9 +1869,10 @@ static int tm_enqueue(tm_system_t *tm_system,
if (queue_tm_reorder(&tm_queue_obj->tm_qentry, &pkt_hdr->buf_hdr))
return 0;
 
-   if (tm_system->first_enq == 0) {
-   odp_barrier_wait(&tm_system->tm_system_barrier);
-   tm_system->first_enq = 1;
+   tm_group = GET_TM_GROUP(tm_system->odp_tm_group);
+   if (tm_group->first_enq == 0) {
+   odp_barrier_wait(&tm_group->tm_group_barrier);
+   tm_group->first_enq = 1;
}
 
pkt_color = odp_packet_color(pkt);
@@ -2327,7 +2329,7 @@ static void *tm_system_thread(void *arg)
input_work_queue = tm_system->input_work_queue;
 
/* Wait here until we have seen the first enqueue operation. */
-   odp_barrier_wait(&tm_system->tm_system_barrier);
+   odp_barrier_wait(&tm_group->tm_group_barrier);
main_loop_running = true;
 
destroying = odp_atomic_load_u64(&tm_system->destroying);
@@ -2625,6 +2627,7 @@ static _odp_tm_group_t _odp_tm_group_create(const char *name ODP_UNUSED)
 
tm_group = malloc(sizeof(tm_system_group_t));
memset(tm_group, 0, sizeof(tm_system_group_t));
+   odp_barrier_init(&tm_group->tm_group_barrier, 2);
 
/* Add this group to the tm_group_list linked list. */
if (tm_group_list == NULL) {
@@ -2868,7 +2871,6 @@ odp_tm_t odp_tm_create(const char *name,
tm_system->_odp_int_timer_wheel = _ODP_INT_TIMER_WHEEL_INVALID;
 
odp_ticketlock_init(&tm_system->tm_system_lock);
-   odp_barrier_init(&tm_system->tm_system_barrier, 2);
odp_atomic_init_u64(&tm_system->destroying, 0);
 
tm_system->_odp_int_sorted_pool = _odp_sorted_pool_create(
-- 
1.8.3.1