On Tue, Jun 02, 2015 at 03:08:55PM +0200, Nicolas Morey-Chaisemartin wrote:
> Signed-off-by: Nicolas Morey-Chaisemartin <[email protected]>
> ---
> v2:
> * Change default value of rx_batch_len to the same as tx_batch_len
> * Use odp_schedule_multi when Rx batching is enabled
> 
>  test/performance/odp_pktio_perf.c | 77 ++++++++++++++++++++++++++++++---------
>  1 file changed, 59 insertions(+), 18 deletions(-)
> 
> diff --git a/test/performance/odp_pktio_perf.c b/test/performance/odp_pktio_perf.c
> index fbe27a0..3014b47 100644
> --- a/test/performance/odp_pktio_perf.c
> +++ b/test/performance/odp_pktio_perf.c
> @@ -75,6 +75,8 @@ typedef struct {
>                                  batch */
>       int      schedule;      /* 1: receive packets via scheduler
>                                  0: receive packets via direct deq */
> +     uint32_t rx_batch_len;  /* Number of packets to receive in a single
> +                                batch */
>       uint64_t pps;           /* Attempted packet rate */
>       int      verbose;       /* Print verbose information, such as per
>                                  thread statistics */
> @@ -358,12 +360,46 @@ static void *run_thread_tx(void *arg)
>       return NULL;
>  }
>  
> -static void *run_thread_rx(void *arg TEST_UNUSED)
> +static int receive_packets(odp_queue_t pollq,
> +                        odp_event_t *event_tbl, unsigned num_pkts)
> +{
> +     int n_ev = 0;
> +
> +     if (num_pkts == 0)
> +             return 0;
> +
> +     if (pollq != ODP_QUEUE_INVALID) {
> +             if (num_pkts == 1) {
> +                     event_tbl[0] = odp_queue_deq(pollq);
> +                     n_ev = event_tbl[0] != ODP_EVENT_INVALID;
> +             } else {
> +                     n_ev = odp_queue_deq_multi(pollq, event_tbl, num_pkts);
> +             }
> +     } else {
> +             if (num_pkts == 1) {
> +                     event_tbl[0] = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
> +                     n_ev = event_tbl[0] != ODP_EVENT_INVALID;
> +             } else {
> +                     n_ev = odp_schedule_multi(NULL, ODP_SCHED_NO_WAIT,
> +                                               event_tbl, num_pkts);
> +             }
> +     }
> +     return n_ev;
> +}
> +
> +static void *run_thread_rx(void *arg)
>  {
>       test_globals_t *globals;
> -     int thr_id;
> +     int thr_id, batch_len;
>       odp_queue_t pollq = ODP_QUEUE_INVALID;
>  
> +     thread_args_t *targs = arg;
> +
> +     batch_len = targs->batch_len;
> +
> +     if (batch_len > BATCH_LEN_MAX)
> +             batch_len = BATCH_LEN_MAX;
> +
>       thr_id = odp_thread_id();
>  
>       globals = odp_shm_addr(odp_shm_lookup("test_globals"));
> @@ -377,28 +413,24 @@ static void *run_thread_rx(void *arg TEST_UNUSED)
>       }
>  
>       odp_barrier_wait(&globals->rx_barrier);
> -
>       while (1) {
> -             odp_event_t ev;
> +             odp_event_t ev[BATCH_LEN_MAX];
> +             int i, n_ev;
>  
> -             if (pollq != ODP_QUEUE_INVALID)
> -                     ev = odp_queue_deq(pollq);
> -             else
> -                     ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
> +             n_ev = receive_packets(pollq, ev, batch_len);
>  
> -             if (ev != ODP_EVENT_INVALID) {
> -                     if (odp_event_type(ev) == ODP_EVENT_PACKET) {
> -                             odp_packet_t pkt = odp_packet_from_event(ev);
> +             for (i = 0; i < n_ev; ++i) {
> +                     if (odp_event_type(ev[i]) == ODP_EVENT_PACKET) {
> +                             odp_packet_t pkt = odp_packet_from_event(ev[i]);
>                               if (pktio_pkt_has_magic(pkt))
>                                       stats->s.rx_cnt++;
>                               else
>                                       stats->s.rx_ignore++;
>                       }
> -
> -                     odp_buffer_free(odp_buffer_from_event(ev));
> -             } else if (odp_atomic_load_u32(&shutdown)) {
> -                     break;
> +                     odp_buffer_free(odp_buffer_from_event(ev[i]));
>               }
> +             if (n_ev == 0 && odp_atomic_load_u32(&shutdown))
> +                     break;
>       }
>  
>       return NULL;
> @@ -556,7 +588,7 @@ static int run_test_single(odp_cpumask_t *thd_mask_tx,
>                          test_status_t *status)
>  {
>       odph_linux_pthread_t thd_tbl[MAX_WORKERS];
> -     thread_args_t args_tx;
> +     thread_args_t args_tx, args_rx;
>       uint64_t expected_tx_cnt;
>       int num_tx_workers, num_rx_workers;
>  
> @@ -569,8 +601,9 @@ static int run_test_single(odp_cpumask_t *thd_mask_tx,
>       expected_tx_cnt = status->pps_curr * gbl_args->args.duration;
>  
>       /* start receiver threads first */
> +     args_rx.batch_len = gbl_args->args.rx_batch_len;
>       odph_linux_pthread_create(&thd_tbl[0], thd_mask_rx,
> -                               run_thread_rx, NULL);
> +                               run_thread_rx, &args_rx);
>       odp_barrier_wait(&gbl_args->rx_barrier);
>  
>       /* then start transmitters */
> @@ -618,6 +651,7 @@ static int run_test(void)
>       printf("\tReceive workers:      \t%d\n", odp_cpumask_count(&rxmask));
>       printf("\tDuration (seconds):   \t%d\n", gbl_args->args.duration);
>       printf("\tTransmit batch length:\t%d\n", gbl_args->args.tx_batch_len);
> +     printf("\tReceive batch length: \t%d\n", gbl_args->args.rx_batch_len);
>       printf("\tPacket receive method:\t%s\n",
>              gbl_args->args.schedule ? "schedule" : "poll");
>       printf("\tInterface(s):         \t");
> @@ -800,6 +834,8 @@ static void usage(void)
>       printf("                         default: %d\n", BATCH_LEN_MAX);
>       printf("  -p, --poll             Poll input queue for packet RX\n");
>       printf("                         default: disabled (use scheduler)\n");
> +     printf("  -R, --rxbatch <length> Number of packets per RX batch (poll mode only)\n");

The "(poll mode only)" comment is no longer needed: since v2 uses
odp_schedule_multi() when Rx batching is enabled, batching now works in
scheduler mode as well.
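
Something like this should do instead (a sketch only; the "default" line
assumes rx_batch_len now defaults to BATCH_LEN_MAX, matching tx_batch_len
as noted in the v2 changelog):

	printf("  -R, --rxbatch <length> Number of packets per RX batch\n");
	printf("                         default: %d\n", BATCH_LEN_MAX);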

-- 
Stuart.