On 06/02/15 11:52, Nicolas Morey-Chaisemartin wrote:
Because I wanted to keep the default behavior the same as it is now.
Using the same value for both the rx and tx batch will definitely give the same
performance, but the results will be biased.
Under normal conditions you can't expect packets to be sent and received in
exactly the same burst size all the time. Because of the loop interface (and the
associated pktio lock), an rx will always fetch the full tx batch.
So good performance, but not very realistic, I guess.
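
For example, with the -R option added by this patch you could compare the two
setups explicitly (a hypothetical invocation; the flag values and the loop
interface name are just for illustration):

    # "biased" run: rx batch matches tx batch, each poll drains a full tx burst
    ./odp_pktio_perf -i loop -p -b 32 -R 32

    # default: rx one packet at a time, closer to real traffic
    ./odp_pktio_perf -i loop -p -b 32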

Anyway, if you feel changing the default value is the best solution, I'll 
submit a new patch.
Yes, it might not be realistic, but it depends on what exactly we measure. If we want to get the theoretical maximum speed, then we need to use bursts for both rx and tx. If we want to be closer to real life, then we need to send a random number of packets, like I did in my [PATCHv7 5/5] ipc: example app patch. But I think it's better to show the theoretical maximum here.
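
As a minimal sketch of that random-burst idea (hypothetical code, not what the
ipc example actually does; it assumes BATCH_LEN_MAX from the test is in scope):

    #include <stdlib.h>

    /* Pick a random tx burst length between 1 and BATCH_LEN_MAX,
     * to approximate real traffic instead of fixed-size bursts. */
    static int random_batch_len(void)
    {
        return (rand() % BATCH_LEN_MAX) + 1;
    }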
Let's wait for feedback from Stuart before a new version.

Thanks,
Maxim.

On 06/02/2015 10:47 AM, Maxim Uvarov wrote:
Looks good. But why is the default 1 and not BATCH_LEN_MAX? The bigger the batch, the faster, right?

Stuart, it's your test. Do you also want to review it?

Thank you,
Maxim.

On 06/01/15 19:44, Nicolas Morey-Chaisemartin wrote:
Signed-off-by: Nicolas Morey-Chaisemartin <[email protected]>
---
   test/performance/odp_pktio_perf.c | 72 +++++++++++++++++++++++++++++----------
   1 file changed, 54 insertions(+), 18 deletions(-)

diff --git a/test/performance/odp_pktio_perf.c b/test/performance/odp_pktio_perf.c
index fbe27a0..bb557b0 100644
--- a/test/performance/odp_pktio_perf.c
+++ b/test/performance/odp_pktio_perf.c
@@ -75,6 +75,8 @@ typedef struct {
                      batch */
       int      schedule;    /* 1: receive packets via scheduler
                      0: receive packets via direct deq */
+    uint32_t rx_batch_len;    /* Number of packets to receive in a single
+                   batch */
       uint64_t pps;        /* Attempted packet rate */
       int      verbose;    /* Print verbose information, such as per
                      thread statistics */
@@ -358,12 +360,41 @@ static void *run_thread_tx(void *arg)
       return NULL;
   }

-static void *run_thread_rx(void *arg TEST_UNUSED)
+static int receive_packets(odp_queue_t pollq,
+               odp_event_t *event_tbl, unsigned num_pkts)
+{
+    int n_ev = 0;
+
+    if (num_pkts == 0)
+        return 0;
+
+    if (pollq != ODP_QUEUE_INVALID) {
+        if (num_pkts == 1) {
+            event_tbl[0] = odp_queue_deq(pollq);
+            n_ev = event_tbl[0] != ODP_EVENT_INVALID;
+        } else {
+            n_ev = odp_queue_deq_multi(pollq, event_tbl, num_pkts);
+        }
+    } else {
+        event_tbl[0] = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+        n_ev = event_tbl[0] != ODP_EVENT_INVALID;
+    }
+    return n_ev;
+}
+
+static void *run_thread_rx(void *arg)
   {
       test_globals_t *globals;
-    int thr_id;
+    int thr_id, batch_len;
       odp_queue_t pollq = ODP_QUEUE_INVALID;

+    thread_args_t *targs = arg;
+
+    batch_len = targs->batch_len;
+
+    if (batch_len > BATCH_LEN_MAX)
+        batch_len = BATCH_LEN_MAX;
+
       thr_id = odp_thread_id();

      globals = odp_shm_addr(odp_shm_lookup("test_globals"));
@@ -377,28 +408,24 @@ static void *run_thread_rx(void *arg TEST_UNUSED)
       }
         odp_barrier_wait(&globals->rx_barrier);
-
       while (1) {
-        odp_event_t ev;
+        odp_event_t ev[BATCH_LEN_MAX];
+        int i, n_ev;

-        if (pollq != ODP_QUEUE_INVALID)
-            ev = odp_queue_deq(pollq);
-        else
-            ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+        n_ev = receive_packets(pollq, ev, batch_len);

-        if (ev != ODP_EVENT_INVALID) {
-            if (odp_event_type(ev) == ODP_EVENT_PACKET) {
-                odp_packet_t pkt = odp_packet_from_event(ev);
+        for (i = 0; i < n_ev; ++i) {
+            if (odp_event_type(ev[i]) == ODP_EVENT_PACKET) {
+                odp_packet_t pkt = odp_packet_from_event(ev[i]);
                   if (pktio_pkt_has_magic(pkt))
                       stats->s.rx_cnt++;
                   else
                       stats->s.rx_ignore++;
               }
-
-            odp_buffer_free(odp_buffer_from_event(ev));
-        } else if (odp_atomic_load_u32(&shutdown)) {
-            break;
+            odp_buffer_free(odp_buffer_from_event(ev[i]));
           }
+        if (n_ev == 0 && odp_atomic_load_u32(&shutdown))
+            break;
       }

      return NULL;
}
@@ -556,7 +583,7 @@ static int run_test_single(odp_cpumask_t *thd_mask_tx,
                  test_status_t *status)
   {
       odph_linux_pthread_t thd_tbl[MAX_WORKERS];
-    thread_args_t args_tx;
+    thread_args_t args_tx, args_rx;
       uint64_t expected_tx_cnt;
       int num_tx_workers, num_rx_workers;

@@ -569,8 +596,9 @@ static int run_test_single(odp_cpumask_t *thd_mask_tx,
       expected_tx_cnt = status->pps_curr * gbl_args->args.duration;

      /* start receiver threads first */
+    args_rx.batch_len = gbl_args->args.rx_batch_len;
       odph_linux_pthread_create(&thd_tbl[0], thd_mask_rx,
-                  run_thread_rx, NULL);
+                  run_thread_rx, &args_rx);
       odp_barrier_wait(&gbl_args->rx_barrier);

      /* then start transmitters */
@@ -618,6 +646,7 @@ static int run_test(void)
       printf("\tReceive workers:      \t%d\n", odp_cpumask_count(&rxmask));
       printf("\tDuration (seconds):   \t%d\n", gbl_args->args.duration);
       printf("\tTransmit batch length:\t%d\n", gbl_args->args.tx_batch_len);
+    printf("\tReceive batch length: \t%d\n", gbl_args->args.rx_batch_len);
       printf("\tPacket receive method:\t%s\n",
              gbl_args->args.schedule ? "schedule" : "poll");
       printf("\tInterface(s):         \t");
@@ -800,6 +829,8 @@ static void usage(void)
       printf("                         default: %d\n", BATCH_LEN_MAX);
       printf("  -p, --poll             Poll input queue for packet RX\n");
       printf("                         default: disabled (use scheduler)\n");
+    printf("  -R, --rxbatch <length> Number of packets per RX batch (pool mode 
only)\n");
+    printf("                         default: %d\n", 1);
       printf("  -l, --length <length>  Additional payload length in bytes\n");
       printf("                         default: 0\n");
       printf("  -r, --rate <number>    Attempted packet rate in PPS\n");
@@ -820,6 +851,7 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
           {"txcount",   required_argument, NULL, 't'},
           {"txbatch",   required_argument, NULL, 'b'},
           {"poll",      no_argument,       NULL, 'p'},
+        {"rxbatch",   required_argument, NULL, 'R'},
           {"length",    required_argument, NULL, 'l'},
           {"rate",      required_argument, NULL, 'r'},
           {"interface", required_argument, NULL, 'i'},
@@ -832,6 +864,7 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
       args->cpu_count      = 0; /* all CPUs */
       args->num_tx_workers = 0; /* defaults to cpu_count+1/2 */
       args->tx_batch_len   = BATCH_LEN_MAX;
+    args->rx_batch_len   = 1;
       args->duration       = 1;
       args->pps            = RATE_SEARCH_INITIAL_PPS;
       args->search         = 1;
@@ -839,7 +872,7 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
       args->verbose        = 0;

      while (1) {
-        opt = getopt_long(argc, argv, "+c:t:b:pl:r:i:d:vh",
+        opt = getopt_long(argc, argv, "+c:t:b:pR:l:r:i:d:vh",
                     longopts, &long_index);

          if (opt == -1)
@@ -886,6 +919,9 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
           case 'b':
               args->tx_batch_len = atoi(optarg);
               break;
+        case 'R':
+            args->rx_batch_len = atoi(optarg);
+            break;
           case 'v':
               args->verbose = 1;
               break;
_______________________________________________
lng-odp mailing list
[email protected]
https://lists.linaro.org/mailman/listinfo/lng-odp