Reviewed-by: Balasubramanian Manoharan <bala.manoha...@linaro.org>

On 2 December 2016 at 13:41,  <forrest....@linaro.org> wrote:
> From: Xuelin Shi <forrest....@linaro.org>
>
> Since the tm thread services a whole tm group, move the thread-based
> barrier from the tm system to the tm group. Otherwise packets cannot
> reach the second tm system in the same group.
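
For anyone following along, my reading of the handshake being moved here is
roughly the sketch below. Only tm_group_barrier and first_enq are the fields
added by this patch; the surrounding structure is my paraphrase, not the
actual tm code paths:

    /* Group creation: two parties meet at the barrier, the group's
     * service thread and whichever thread enqueues first. */
    odp_barrier_init(&tm_group->tm_group_barrier, 2);

    /* Group service thread: block until the first enqueue anywhere
     * in the group has happened. */
    odp_barrier_wait(&tm_group->tm_group_barrier);

    /* Enqueue path (any tm system in the group): release the service
     * thread exactly once. */
    if (tm_group->first_enq == 0) {
            odp_barrier_wait(&tm_group->tm_group_barrier);
            tm_group->first_enq = 1;
    }

With the barrier owned by each tm_system, an enqueue to the second tm system
waited on a barrier that the shared group thread never joins, which is how I
read the "packet cannot reach the second tm system" symptom.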
>
> Signed-off-by: Xuelin Shi <forrest....@linaro.org>
> ---
>  platform/linux-generic/include/odp_traffic_mngr_internal.h |  3 ++-
>  platform/linux-generic/odp_traffic_mngr.c                  | 12 +++++++-----
>  2 files changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h
> index 858183b..9f821fe 100644
> --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h
> +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h
> @@ -367,7 +367,6 @@ struct tm_system_s {
>         _odp_tm_group_t odp_tm_group;
>
>         odp_ticketlock_t tm_system_lock;
> -       odp_barrier_t    tm_system_barrier;
>         odp_barrier_t    tm_system_destroy_barrier;
>         odp_atomic_u64_t destroying;
>         _odp_int_name_t  name_tbl_id;
> @@ -416,8 +415,10 @@ struct tm_system_group_s {
>         tm_system_group_t *prev;
>         tm_system_group_t *next;
>
> +       odp_barrier_t  tm_group_barrier;
>         tm_system_t   *first_tm_system;
>         uint32_t       num_tm_systems;
> +       uint32_t       first_enq;
>         pthread_t      thread;
>         pthread_attr_t attr;
>  };
> diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
> index a1f990f..62e5c63 100644
> --- a/platform/linux-generic/odp_traffic_mngr.c
> +++ b/platform/linux-generic/odp_traffic_mngr.c
> @@ -1854,6 +1854,7 @@ static int tm_enqueue(tm_system_t *tm_system,
>                       tm_queue_obj_t *tm_queue_obj,
>                       odp_packet_t pkt)
>  {
> +       tm_system_group_t *tm_group;
>         input_work_item_t work_item;
>         odp_packet_color_t pkt_color;
>         tm_wred_node_t *initial_tm_wred_node;
> @@ -1868,9 +1869,10 @@ static int tm_enqueue(tm_system_t *tm_system,
>         if (queue_tm_reorder(&tm_queue_obj->tm_qentry, &pkt_hdr->buf_hdr))
>                 return 0;
>
> -       if (tm_system->first_enq == 0) {
> -               odp_barrier_wait(&tm_system->tm_system_barrier);
> -               tm_system->first_enq = 1;
> +       tm_group = GET_TM_GROUP(tm_system->odp_tm_group);
> +       if (tm_group->first_enq == 0) {
> +               odp_barrier_wait(&tm_group->tm_group_barrier);
> +               tm_group->first_enq = 1;
>         }
>
>         pkt_color = odp_packet_color(pkt);
> @@ -2327,7 +2329,7 @@ static void *tm_system_thread(void *arg)
>         input_work_queue = tm_system->input_work_queue;
>
>         /* Wait here until we have seen the first enqueue operation. */
> -       odp_barrier_wait(&tm_system->tm_system_barrier);
> +       odp_barrier_wait(&tm_group->tm_group_barrier);
>         main_loop_running = true;
>
>         destroying = odp_atomic_load_u64(&tm_system->destroying);
> @@ -2625,6 +2627,7 @@ static _odp_tm_group_t _odp_tm_group_create(const char *name ODP_UNUSED)
>
>         tm_group = malloc(sizeof(tm_system_group_t));
>         memset(tm_group, 0, sizeof(tm_system_group_t));
> +       odp_barrier_init(&tm_group->tm_group_barrier, 2);
>
>         /* Add this group to the tm_group_list linked list. */
>         if (tm_group_list == NULL) {
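
Just to spell out the count of 2 for other readers: the two parties are the
group's service thread and whichever application thread performs the very
first enqueue for the whole group (later enqueues skip the barrier via
first_enq). A rough usage sketch, assuming the platform places both created
tm systems in the same group; the names and argument values here are
hypothetical, only to show that either tm system can now start traffic:

    odp_tm_t tm_a = odp_tm_create("tm_a", &requirements, &egress);
    odp_tm_t tm_b = odp_tm_create("tm_b", &requirements, &egress);

    /* Before this patch only the tm system whose barrier the group thread
     * happened to wait on would start; with the group-level barrier, the
     * first odp_tm_enq() on a queue of either tm_a or tm_b releases the
     * shared thread. */
    odp_tm_enq(tm_queue_b, pkt);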
> @@ -2868,7 +2871,6 @@ odp_tm_t odp_tm_create(const char            *name,
>         tm_system->_odp_int_timer_wheel = _ODP_INT_TIMER_WHEEL_INVALID;
>
>         odp_ticketlock_init(&tm_system->tm_system_lock);
> -       odp_barrier_init(&tm_system->tm_system_barrier, 2);
>         odp_atomic_init_u64(&tm_system->destroying, 0);
>
>         tm_system->_odp_int_sorted_pool = _odp_sorted_pool_create(
> --
> 1.8.3.1
>
