Signed-off-by: Sunil Kumar Kori <sk...@marvell.com>
---
 examples/l3fwd/Makefile         |   2 +-
 examples/l3fwd/l3fwd.h          |  21 +-
 examples/l3fwd/l3fwd_common.h   |  10 +
 examples/l3fwd/l3fwd_em.c       |  69 +++++
 examples/l3fwd/l3fwd_eventdev.c | 593 ++++++++++++++++++++++++++++++++++++++++
 examples/l3fwd/l3fwd_eventdev.h |  85 ++++++
 examples/l3fwd/l3fwd_lpm.c      |  71 +++++
 examples/l3fwd/main.c           |  46 +++-
 8 files changed, 887 insertions(+), 10 deletions(-)
 create mode 100644 examples/l3fwd/l3fwd_eventdev.c
 create mode 100644 examples/l3fwd/l3fwd_eventdev.h

diff --git a/examples/l3fwd/Makefile b/examples/l3fwd/Makefile
index c55f5c2..4d20f37 100644
--- a/examples/l3fwd/Makefile
+++ b/examples/l3fwd/Makefile
@@ -5,7 +5,7 @@
 APP = l3fwd
 
 # all source are stored in SRCS-y
-SRCS-y := main.c l3fwd_lpm.c l3fwd_em.c
+SRCS-y := main.c l3fwd_lpm.c l3fwd_em.c l3fwd_eventdev.c
 
 # Build using pkg-config variables if possible
 ifeq ($(shell pkg-config --exists libdpdk && echo 0),0)
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index 293fb1f..95b0197 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -5,8 +5,11 @@
 #ifndef __L3_FWD_H__
 #define __L3_FWD_H__
 
+#include <rte_eventdev.h>
 #include <rte_vect.h>
 
+#include "l3fwd_eventdev.h"
+
 #define DO_RFC_1812_CHECKS
 
 #define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1
@@ -64,13 +67,15 @@ struct lcore_conf {
        uint16_t n_tx_port;
        uint16_t tx_port_id[RTE_MAX_ETHPORTS];
        uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
-       struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
+       struct mbuf_table tx_mbufs[RTE_MAX_LCORE];
        void *ipv4_lookup_struct;
        void *ipv6_lookup_struct;
 } __rte_cache_aligned;
 
 extern volatile bool force_quit;
 
+extern struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
+
 /* ethernet addresses of ports */
 extern uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
 extern struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
@@ -86,6 +91,12 @@ struct lcore_conf {
 
 extern struct lcore_conf lcore_conf[RTE_MAX_LCORE];
 
+int init_mem(uint16_t portid, unsigned int nb_mbuf);
+
+void setup_l3fwd_lookup_tables(void);
+
+void print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr);
+
 /* Send burst of packets on an output interface */
 static inline int
 send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
@@ -114,6 +125,14 @@ struct lcore_conf {
 {
        uint16_t len;
 
+       if (pkt_transfer_mode == PACKET_TRANSFER_MODE_EVENTDEV) {
+               m->port = port;
+               port = qconf->tx_port_id[0];
+               eventdev_rsrc.send_burst_eventdev(&m, 1, port);
+               return 0;
+       }
+
        len = qconf->tx_mbufs[port].len;
        qconf->tx_mbufs[port].m_table[len] = m;
        len++;
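
Note on the hunk above: send_burst_eventdev is a function pointer held in
struct eventdev_resources; rx_tx_adapter_setup() (added later in this patch)
points it at either the Tx-adapter enqueue or the generic event enqueue,
depending on whether the Tx adapter reports the internal-port capability.
A minimal sketch of that dispatch, using only names defined in this patch:

	/* chosen once at setup time ... */
	eventdev_rsrc.send_burst_eventdev = send_burst_eventdev_adapter;
	/* ... then called from the fast path; m->port already carries the
	 * destination ethdev port, which the Tx adapter reads back.
	 */
	eventdev_rsrc.send_burst_eventdev(&m, 1, qconf->tx_port_id[0]);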
diff --git a/examples/l3fwd/l3fwd_common.h b/examples/l3fwd/l3fwd_common.h
index 7d83ff6..254158a 100644
--- a/examples/l3fwd/l3fwd_common.h
+++ b/examples/l3fwd/l3fwd_common.h
@@ -183,6 +183,16 @@
 {
        uint32_t len, j, n;
 
+       /* Check whether packets are to be transmitted via eventdev */
+       if (pkt_transfer_mode == PACKET_TRANSFER_MODE_EVENTDEV) {
+               for (j = 0; j < num; j++)
+                       m[j]->port = port;
+
+               port = qconf->tx_port_id[0];
+               eventdev_rsrc.send_burst_eventdev(m, num, port);
+               return;
+       }
+
        len = qconf->tx_mbufs[port].len;
 
        /*
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 74a7c8f..bc77972 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -699,6 +699,75 @@ struct ipv6_l3fwd_em_route {
        return 0;
 }
 
+/* main eventdev processing loop */
+int em_main_loop_eventdev(__attribute__((unused)) void *dummy)
+{
+       struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+       struct rte_event events[MAX_PKT_BURST];
+       struct lcore_conf *lconf;
+       int32_t i, j = 0, nb_rx;
+       uint16_t event_d_id;
+       int32_t event_p_id;
+       uint32_t lcore_id;
+       uint16_t deq_len;
+       uint32_t portid;
+
+       /* Assign dedicated event port for enqueue/dequeue operation */
+       deq_len = event_d_conf.nb_event_port_dequeue_depth;
+       event_d_id = eventdev_rsrc.event_d_id;
+       event_p_id = get_free_event_port();
+       if (event_p_id < 0)
+               rte_exit(EXIT_FAILURE, "No free event port for lcore %u\n",
+                        rte_lcore_id());
+       lcore_id = rte_lcore_id();
+
+       lconf = &lcore_conf[lcore_id];
+       lconf->n_rx_queue = 1;
+       lconf->rx_queue_list[0].port_id = event_p_id;
+       lconf->n_tx_port = 1;
+       lconf->tx_port_id[0] = event_p_id;
+
+       RTE_LOG(INFO, L3FWD, "entering eventdev main loop on lcore %u\n",
+               lcore_id);
+
+       while (!force_quit) {
+               /* Read packet from RX queues */
+               nb_rx = rte_event_dequeue_burst(event_d_id, event_p_id,
+                                               events, deq_len, 0);
+               if (nb_rx == 0) {
+                       rte_pause();
+                       continue;
+               }
+
+               for (i = 0; i < nb_rx; i++)
+                       pkts_burst[i] = events[i].mbuf;
+
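+               /* Group consecutive mbufs that arrived on the same
+                * ethdev port so each group can be looked up and
+                * forwarded in a single burst.
+                */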
+               j = 0;
+               portid = pkts_burst[0]->port;
+               for (i = 1; i < nb_rx; i++) {
+                       if (portid != pkts_burst[i]->port) {
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON
+                               l3fwd_em_send_packets(i - j, &pkts_burst[j],
+                                                     portid, lconf);
+#else
+                               l3fwd_em_no_opt_send_packets(i - j,
+                                                            &pkts_burst[j],
+                                                            portid, lconf);
+#endif
+                               j = i;
+                               portid = pkts_burst[i]->port;
+                       }
+               }
+
+               /* Send remaining packets */
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON
+               l3fwd_em_send_packets(i - j, &pkts_burst[j], portid, lconf);
+#else
+               l3fwd_em_no_opt_send_packets(i - j, &pkts_burst[j], portid,
+                                            lconf);
+#endif
+       }
+
+       return 0;
+}
+
 /*
  * Initialize exact match (hash) parameters.
  */
diff --git a/examples/l3fwd/l3fwd_eventdev.c b/examples/l3fwd/l3fwd_eventdev.c
new file mode 100644
index 0000000..1e1ed6e
--- /dev/null
+++ b/examples/l3fwd/l3fwd_eventdev.c
@@ -0,0 +1,593 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <getopt.h>
+
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
+
+#include "l3fwd.h"
+
+enum {
+       CMD_LINE_OPT_MODE_NUM = 265,
+       CMD_LINE_OPT_EVENTQ_SYNC_NUM,
+};
+
+static const struct option eventdev_lgopts[] = {
+       {CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM},
+       {CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM},
+       {NULL, 0, 0, 0}
+};
+
+/* Eventdev command line options */
+int evd_argc;
+char *evd_argv[3];
+
+/* Default configurations */
+int pkt_transfer_mode = PACKET_TRANSFER_MODE_POLL;
+int eventq_sync_mode = RTE_SCHED_TYPE_ATOMIC;
+uint32_t num_workers = RTE_MAX_LCORE;
+struct eventdev_resources eventdev_rsrc;
+
+static struct rte_eth_conf port_config = {
+       .rxmode = {
+               .mq_mode = ETH_MQ_RX_RSS,
+               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+               .split_hdr_size = 0,
+               .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+       },
+       .rx_adv_conf = {
+               .rss_conf = {
+                       .rss_key = NULL,
+                       .rss_hf = ETH_RSS_IP,
+               },
+       },
+       .txmode = {
+               .mq_mode = ETH_MQ_TX_NONE,
+       },
+};
+
+struct rte_event_dev_config event_d_conf = {
+       .nb_event_queues = 1,
+       .nb_event_ports = RTE_MAX_LCORE,
+       .nb_events_limit  = 4096,
+       .nb_event_queue_flows = 1024,
+       .nb_event_port_dequeue_depth = 128,
+       .nb_event_port_enqueue_depth = 128,
+};
+
+static struct rte_event_port_conf event_p_conf = {
+       .dequeue_depth = 32,
+       .enqueue_depth = 32,
+       .new_event_threshold = 4096,
+};
+
+static struct rte_event_queue_conf event_q_conf = {
+       .nb_atomic_flows = 1024,
+       .nb_atomic_order_sequences = 1024,
+       .event_queue_cfg = 0,
+       .schedule_type = RTE_SCHED_TYPE_ATOMIC,
+       .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
+};
+
+static struct rte_event_eth_rx_adapter_queue_conf eth_q_conf = {
+       .rx_queue_flags = 0,
+       .servicing_weight = 1,
+       .ev = {
+               .queue_id = 0,
+               .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
+               .sched_type = RTE_SCHED_TYPE_ATOMIC,
+       },
+};
+
+static void parse_mode(const char *optarg)
+{
+       if (!strncmp(optarg, "poll", 4))
+               pkt_transfer_mode = PACKET_TRANSFER_MODE_POLL;
+       else if (!strncmp(optarg, "eventdev", 8))
+               pkt_transfer_mode = PACKET_TRANSFER_MODE_EVENTDEV;
+}
+
+static void parse_eventq_sync(const char *optarg)
+{
+       if (!strncmp(optarg, "ordered", 7))
+               eventq_sync_mode = RTE_SCHED_TYPE_ORDERED;
+       else if (!strncmp(optarg, "atomic", 6))
+               eventq_sync_mode = RTE_SCHED_TYPE_ATOMIC;
+}
+
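+/* Example: "l3fwd ... -- -p 0x3 --mode=eventdev --eventq-sync=ordered"
+ * selects eventdev I/O with ordered event queue scheduling; the port
+ * mask value is illustrative.
+ */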
+static int parse_eventdev_args(int argc, char **argv)
+{
+       char **argvopt = argv;
+       int32_t opt, ret = -1;
+       int32_t option_index;
+
+       while ((opt = getopt_long(argc, argvopt, "", eventdev_lgopts,
+                                 &option_index)) != EOF) {
+               switch (opt) {
+               case CMD_LINE_OPT_MODE_NUM:
+                       parse_mode(optarg);
+                       break;
+
+               case CMD_LINE_OPT_EVENTQ_SYNC_NUM:
+                       parse_eventq_sync(optarg);
+                       break;
+
+               case '?':
+                       /* skip other parameters except eventdev specific */
+                       break;
+
+               default:
+                       printf("Invalid eventdev parameter\n");
+                       return -1;
+               }
+       }
+
+       if (pkt_transfer_mode == PACKET_TRANSFER_MODE_EVENTDEV)
+               ret = EVENT_DEV_PARAM_PRESENT;
+
+       return ret;
+}
+
+/* Enqueue a burst of packets as plain events; used when the Tx adapter
+ * lacks the internal-port capability and instead drains an event queue
+ * through its service core.
+ */
+static inline int send_burst_eventdev_generic(struct rte_mbuf *m[], uint16_t n,
+                                             uint16_t port)
+{
+       struct rte_event events[MAX_PKT_BURST];
+       uint8_t event_d_id;
+       int ret, i;
+
+       event_d_id = eventdev_rsrc.event_d_id;
+
+       for (i = 0; i < n; i++) {
+               events[i].queue_id = 0;
+               events[i].op = RTE_EVENT_OP_FORWARD;
+               events[i].mbuf = m[i];
+       }
+
+       ret = rte_event_enqueue_burst(event_d_id, port, events, n);
+       if (unlikely(ret < n)) {
+               do {
+                       rte_pktmbuf_free(m[ret]);
+               } while (++ret < n);
+       }
+
+       return 0;
+}
+
+/* Enqueue a burst of packets through the Tx adapter's internal port */
+static inline int send_burst_eventdev_adapter(struct rte_mbuf *m[], uint16_t n,
+                                             uint16_t port)
+{
+       struct rte_event events[MAX_PKT_BURST];
+       uint8_t event_d_id;
+       int32_t ret, i;
+
+       event_d_id = eventdev_rsrc.event_d_id;
+
+       for (i = 0; i < n; i++) {
+               events[i].queue_id = 0;
+               events[i].op = RTE_EVENT_OP_FORWARD;
+               events[i].mbuf = m[i];
+               rte_event_eth_tx_adapter_txq_set(events[i].mbuf, 0);
+       }
+
+       ret = rte_event_eth_tx_adapter_enqueue(event_d_id, port, events, n);
+       if (unlikely(ret < n)) {
+               do {
+                       rte_pktmbuf_free(m[ret]);
+               } while (++ret < n);
+       }
+
+       return 0;
+}
+
+static uint32_t event_dev_setup(uint16_t ethdev_count)
+{
+       struct rte_event_dev_info dev_info;
+       const uint8_t event_d_id = 0; /* Always use first event device only */
+       uint32_t event_queue_cfg = 0;
+       int ret;
+
+       /* Event device configuration */
+       rte_event_dev_info_get(event_d_id, &dev_info);
+
+       if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
+               event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
+       event_d_conf.nb_event_queues = ethdev_count;
+       if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
+               event_d_conf.nb_event_queues = dev_info.max_event_queues;
+
+       if (dev_info.max_num_events < event_d_conf.nb_events_limit)
+               event_d_conf.nb_events_limit = dev_info.max_num_events;
+
+       if (dev_info.max_event_port_dequeue_depth <
+                               event_d_conf.nb_event_port_dequeue_depth)
+               event_d_conf.nb_event_port_dequeue_depth =
+                               dev_info.max_event_port_dequeue_depth;
+
+       if (dev_info.max_event_port_enqueue_depth <
+                               event_d_conf.nb_event_port_enqueue_depth)
+               event_d_conf.nb_event_port_enqueue_depth =
+                               dev_info.max_event_port_enqueue_depth;
+
+       num_workers = rte_lcore_count();
+       if (dev_info.max_event_ports < num_workers)
+               num_workers = dev_info.max_event_ports;
+
+       event_d_conf.nb_event_ports = num_workers;
+
+       ret = rte_event_dev_configure(event_d_id, &event_d_conf);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Error in configuring event device");
+
+       eventdev_rsrc.event_d_id = event_d_id;
+       return event_queue_cfg;
+}
+
+static void event_port_setup(void)
+{
+       uint8_t event_d_id = eventdev_rsrc.event_d_id;
+       struct rte_event_port_conf evp_conf;
+       uint8_t event_p_id;
+       int32_t ret;
+
+       eventdev_rsrc.evp.nb_ports = num_workers;
+       eventdev_rsrc.evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
+                                       eventdev_rsrc.evp.nb_ports);
+       if (!eventdev_rsrc.evp.event_p_id)
+               rte_exit(EXIT_FAILURE, " No space is available");
+
+       /* init spinlock */
+       rte_spinlock_init(&eventdev_rsrc.evp.lock);
+
+       for (event_p_id = 0; event_p_id < num_workers; event_p_id++) {
+               rte_event_port_default_conf_get(event_d_id, event_p_id,
+                                               &evp_conf);
+
+               if (evp_conf.new_event_threshold <
+                                       event_p_conf.new_event_threshold)
+                       event_p_conf.new_event_threshold =
+                                       evp_conf.new_event_threshold;
+
+               if (evp_conf.dequeue_depth < event_p_conf.dequeue_depth)
+                       event_p_conf.dequeue_depth = evp_conf.dequeue_depth;
+
+               if (evp_conf.enqueue_depth < event_p_conf.enqueue_depth)
+                       event_p_conf.enqueue_depth = evp_conf.enqueue_depth;
+
+               ret = rte_event_port_setup(event_d_id, event_p_id,
+                                          &event_p_conf);
+               if (ret < 0) {
+                       rte_exit(EXIT_FAILURE,
+                                "Error in configuring event port %d\n",
+                                event_p_id);
+               }
+
+               ret = rte_event_port_link(event_d_id, event_p_id, NULL,
+                                         NULL, 0);
+               if (ret < 0) {
+                       rte_exit(EXIT_FAILURE, "Error in linking event port %d "
+                                "to event queue", event_p_id);
+               }
+               eventdev_rsrc.evp.event_p_id[event_p_id] = event_p_id;
+       }
+}
+
+static void event_queue_setup(uint16_t ethdev_count, uint32_t event_queue_cfg)
+{
+       uint8_t event_d_id = eventdev_rsrc.event_d_id;
+       struct rte_event_queue_conf evq_conf;
+       uint8_t event_q_id = 0;
+       int32_t ret;
+
+       rte_event_queue_default_conf_get(event_d_id, event_q_id, &evq_conf);
+
+       if (evq_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
+               event_q_conf.nb_atomic_flows = evq_conf.nb_atomic_flows;
+
+       if (evq_conf.nb_atomic_order_sequences <
+                                       event_q_conf.nb_atomic_order_sequences)
+               event_q_conf.nb_atomic_order_sequences =
+                                       evq_conf.nb_atomic_order_sequences;
+
+       event_q_conf.event_queue_cfg = event_queue_cfg;
+       event_q_conf.schedule_type = eventq_sync_mode;
+       eventdev_rsrc.evq.nb_queues = ethdev_count;
+       eventdev_rsrc.evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
+                                       eventdev_rsrc.evq.nb_queues);
+       if (!eventdev_rsrc.evq.event_q_id)
+               rte_exit(EXIT_FAILURE, "Memory allocation failure");
+
+       for (event_q_id = 0; event_q_id < ethdev_count; event_q_id++) {
+               ret = rte_event_queue_setup(event_d_id, event_q_id,
+                                           &event_q_conf);
+               if (ret < 0) {
+                       rte_exit(EXIT_FAILURE,
+                                "Error in configuring event queue");
+               }
+               eventdev_rsrc.evq.event_q_id[event_q_id] = event_q_id;
+       }
+}
+
+static void rx_tx_adapter_setup(uint16_t ethdev_count)
+{
+       uint8_t event_d_id = eventdev_rsrc.event_d_id;
+       uint32_t service_id;
+       uint32_t cap = 0;
+       int32_t ret, i;
+
+       eventdev_rsrc.rx_adptr.nb_rx_adptr = ethdev_count;
+       eventdev_rsrc.rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+                                       eventdev_rsrc.rx_adptr.nb_rx_adptr);
+       if (!eventdev_rsrc.rx_adptr.rx_adptr) {
+               free(eventdev_rsrc.evp.event_p_id);
+               free(eventdev_rsrc.evq.event_q_id);
+               rte_exit(EXIT_FAILURE,
+                        "failed to allocate memery for Rx adapter");
+       }
+
+       for (i = 0; i < ethdev_count; i++) {
+               ret = rte_event_eth_rx_adapter_create(i, event_d_id,
+                                                     &event_p_conf);
+               if (ret)
+                       rte_exit(EXIT_FAILURE,
+                                "failed to create rx adapter[%d]", i);
+
+               ret = rte_event_eth_rx_adapter_caps_get(event_d_id, i, &cap);
+               if (ret)
+                       rte_exit(EXIT_FAILURE,
+                                "failed to get event rx adapter capabilities");
+
+               /* Configure user requested sync mode */
+               eth_q_conf.ev.queue_id = eventdev_rsrc.evq.event_q_id[i];
+               eth_q_conf.ev.sched_type = eventq_sync_mode;
+               ret = rte_event_eth_rx_adapter_queue_add(i, i, -1, &eth_q_conf);
+               if (ret)
+                       rte_exit(EXIT_FAILURE,
+                                "Failed to add queues to Rx adapter");
+
+               if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
+                       ret = rte_event_eth_rx_adapter_service_id_get(i,
+                                                               &service_id);
+                       if (ret != -ESRCH && ret != 0) {
+                               rte_exit(EXIT_FAILURE,
+                               "Error getting the service ID for rx adptr\n");
+                       }
+
+                       rte_service_runstate_set(service_id, 1);
+                       rte_service_set_runstate_mapped_check(service_id, 1);
+               }
+
+               ret = rte_event_eth_rx_adapter_start(i);
+               if (ret)
+                       rte_exit(EXIT_FAILURE,
+                                "Rx adapter[%d] start failed", i);
+
+               eventdev_rsrc.rx_adptr.rx_adptr[i] = i;
+       }
+
+       eventdev_rsrc.tx_adptr.nb_tx_adptr = ethdev_count;
+       eventdev_rsrc.tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+                                       eventdev_rsrc.tx_adptr.nb_tx_adptr);
+       if (!eventdev_rsrc.tx_adptr.tx_adptr) {
+               free(eventdev_rsrc.rx_adptr.rx_adptr);
+               free(eventdev_rsrc.evp.event_p_id);
+               free(eventdev_rsrc.evq.event_q_id);
+               rte_exit(EXIT_FAILURE,
+                        "failed to allocate memery for Rx adapter");
+       }
+
+       eventdev_rsrc.send_burst_eventdev = send_burst_eventdev_adapter;
+       for (i = 0; i < ethdev_count; i++) {
+               ret = rte_event_eth_tx_adapter_create(i, event_d_id,
+                                                     &event_p_conf);
+               if (ret)
+                       rte_exit(EXIT_FAILURE,
+                                "failed to create tx adapter[%d]", i);
+
+               ret = rte_event_eth_tx_adapter_caps_get(event_d_id, i, &cap);
+               if (ret)
+                       rte_exit(EXIT_FAILURE,
+                                "Failed to get event tx adapter capabilities");
+
+               if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
+                       ret = rte_event_eth_tx_adapter_service_id_get(i,
+                                                                  &service_id);
+                       if (ret != -ESRCH && ret != 0) {
+                               rte_exit(EXIT_FAILURE,
+                                        "Failed to get Tx adapter service ID");
+                       }
+
+                       rte_service_runstate_set(service_id, 1);
+                       rte_service_set_runstate_mapped_check(service_id, 1);
+                       eventdev_rsrc.send_burst_eventdev =
+                                               send_burst_eventdev_generic;
+               }
+
+               ret = rte_event_eth_tx_adapter_queue_add(i, i, -1);
+               if (ret)
+                       rte_exit(EXIT_FAILURE,
+                                "failed to add queues to Tx adapter");
+
+               ret = rte_event_eth_tx_adapter_start(i);
+               if (ret)
+                       rte_exit(EXIT_FAILURE,
+                                "Tx adapter[%d] start failed", i);
+
+               eventdev_rsrc.tx_adptr.tx_adptr[i] = i;
+       }
+}
+
+static void eth_dev_port_setup(uint16_t ethdev_count)
+{
+       struct rte_eth_conf local_port_conf = port_config;
+       struct rte_eth_dev_info dev_info;
+       struct rte_eth_txconf txconf;
+       struct rte_eth_rxconf rxconf;
+       uint16_t nb_rx_queue = 1;
+       uint16_t n_tx_queue = 1;
+       uint16_t nb_rxd = 1024;
+       uint16_t nb_txd = 1024;
+       uint32_t nb_lcores;
+       uint16_t portid;
+       int32_t ret;
+
+       nb_lcores = rte_lcore_count();
+
+       /* initialize all ports */
+       RTE_ETH_FOREACH_DEV(portid) {
+               /* skip ports that are not enabled */
+               if ((enabled_port_mask & (1 << portid)) == 0) {
+                       printf("\nSkipping disabled port %d\n", portid);
+                       continue;
+               }
+
+               /* init port */
+               printf("Initializing port %d ... ", portid);
+               fflush(stdout);
+               printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
+                       nb_rx_queue, n_tx_queue);
+
+               rte_eth_dev_info_get(portid, &dev_info);
+               if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+                       local_port_conf.txmode.offloads |=
+                                               DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+               local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+                                               dev_info.flow_type_rss_offloads;
+               if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+                               port_config.rx_adv_conf.rss_conf.rss_hf) {
+                       printf("Port %u modified RSS hash function "
+                              "based on hardware support,"
+                              "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+                              portid,
+                              port_config.rx_adv_conf.rss_conf.rss_hf,
+                              local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+               }
+
+               ret = rte_eth_dev_configure(portid, nb_rx_queue, n_tx_queue,
+                                           &local_port_conf);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE,
+                                "Cannot configure device: err=%d, port=%d\n",
+                                ret, portid);
+
+               ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+                                                      &nb_txd);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE,
+                                "Cannot adjust number of descriptors: err=%d, "
+                                "port=%d\n", ret, portid);
+
+               rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
+               print_ethaddr(" Address:", &ports_eth_addr[portid]);
+               printf(", ");
+               print_ethaddr("Destination:",
+                       (const struct rte_ether_addr *)&dest_eth_addr[portid]);
+               printf(", ");
+
+               /* prepare source MAC for each port. */
+               rte_ether_addr_copy(&ports_eth_addr[portid],
+                       (struct rte_ether_addr *)(val_eth + portid) + 1);
+
+               /* init memory */
+               ret = init_mem(portid, NUM_MBUF(ethdev_count));
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "init_mem failed\n");
+
+               /* init one Rx queue per port */
+               rxconf = dev_info.default_rxconf;
+               rxconf.offloads = local_port_conf.rxmode.offloads;
+               ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, 0, &rxconf,
+                                            pktmbuf_pool[portid][0]);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE,
+                                "rte_eth_rx_queue_setup: err=%d, "
+                                "port=%d\n", ret, portid);
+
+               /* init one Tx queue per port */
+               txconf = dev_info.default_txconf;
+               txconf.offloads = local_port_conf.txmode.offloads;
+               ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, 0, &txconf);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE,
+                                "rte_eth_tx_queue_setup: err=%d, "
+                                "port=%d\n", ret, portid);
+       }
+}
+
+int get_free_event_port(void)
+{
+       static int port_index;
+       int port_id;
+
+       rte_spinlock_lock(&eventdev_rsrc.evp.lock);
+       if (port_index >= eventdev_rsrc.evp.nb_ports) {
+               printf("No free event port is available\n");
+               rte_spinlock_unlock(&eventdev_rsrc.evp.lock);
+               return -1;
+       }
+
+       port_id = eventdev_rsrc.evp.event_p_id[port_index];
+       port_index++;
+       rte_spinlock_unlock(&eventdev_rsrc.evp.lock);
+
+       return port_id;
+}
+
+int eventdev_resource_setup(int argc, char **argv)
+{
+       uint16_t ethdev_count = rte_eth_dev_count_avail();
+       uint32_t event_queue_cfg = 0;
+       uint32_t service_id;
+       int32_t ret;
+
+       /* Parse eventdev command line options */
+       ret = parse_eventdev_args(argc, argv);
+       if (ret < 0)
+               return ret;
+
+       if (rte_event_dev_count() < 1)
+               rte_exit(EXIT_FAILURE, "No Eventdev found");
+
+       /* Setup function pointers for lookup method */
+       setup_l3fwd_lookup_tables();
+
+       /* Ethernet device configuration */
+       eth_dev_port_setup(ethdev_count);
+
+       /* Event device configuration */
+       event_queue_cfg = event_dev_setup(ethdev_count);
+
+       /* Event queue configuration */
+       event_queue_setup(ethdev_count, event_queue_cfg);
+
+       /* Event port configuration */
+       event_port_setup();
+
+       /* Rx/Tx adapters configuration */
+       rx_tx_adapter_setup(ethdev_count);
+
+       /* Start event device service */
+       ret = rte_event_dev_service_id_get(eventdev_rsrc.event_d_id,
+                                          &service_id);
+       if (ret != -ESRCH && ret != 0)
+               rte_exit(EXIT_FAILURE, "Error getting eventdev service ID");
+
+       /* -ESRCH means the PMD schedules internally and needs no service */
+       if (ret == 0) {
+               rte_service_runstate_set(service_id, 1);
+               rte_service_set_runstate_mapped_check(service_id, 1);
+       }
+
+       /* Start event device */
+       ret = rte_event_dev_start(eventdev_rsrc.event_d_id);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Error in starting eventdev");
+
+       return EVENT_DEV_PARAM_PRESENT;
+}
diff --git a/examples/l3fwd/l3fwd_eventdev.h b/examples/l3fwd/l3fwd_eventdev.h
new file mode 100644
index 0000000..a0ffc5c
--- /dev/null
+++ b/examples/l3fwd/l3fwd_eventdev.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __L3FWD_EVENTDEV_H__
+#define __L3FWD_EVENTDEV_H__
+
+#include <rte_common.h>
+#include <rte_spinlock.h>
+
+/*
+ * This expression is used to calculate the number of mbufs needed,
+ * depending on user input, taking into account memory for the Rx and
+ * Tx hardware rings, the per-lcore cache and the mtable per port per
+ * lcore. RTE_MAX ensures that NUM_MBUF never goes below a minimum
+ * value of 8192.
+ */
+#define NUM_MBUF(nports) RTE_MAX(              \
+       ((nports)*nb_rx_queue*nb_rxd +          \
+       (nports)*nb_lcores*MAX_PKT_BURST +      \
+       (nports)*n_tx_queue*nb_txd +            \
+       nb_lcores*256),                         \
+       (unsigned int)8192)
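+/* Note: the expansion uses nb_rx_queue, nb_rxd, nb_lcores, n_tx_queue and
+ * nb_txd from the caller's scope (see eth_dev_port_setup()).
+ */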
+
+#define EVENT_DEV_PARAM_PRESENT        0x8000  /* Arbitrary sentinel value */
+
+/* Packet transfer mode of the application */
+#define PACKET_TRANSFER_MODE_POLL  1
+#define PACKET_TRANSFER_MODE_EVENTDEV  2
+
+#define CMD_LINE_OPT_MODE "mode"
+#define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sync"
+
+typedef int (*tx_eventdev_t)(struct rte_mbuf *m[], uint16_t n, uint16_t port);
+
+struct eventdev_queues {
+       uint8_t *event_q_id;
+       uint8_t nb_queues;
+};
+
+struct eventdev_ports {
+       uint8_t *event_p_id;
+       uint8_t nb_ports;
+       rte_spinlock_t lock;
+};
+
+struct eventdev_rx_adptr {
+       uint8_t nb_rx_adptr;
+       uint8_t *rx_adptr;
+};
+
+struct eventdev_tx_adptr {
+       uint8_t nb_tx_adptr;
+       uint8_t *tx_adptr;
+};
+
+struct eventdev_resources {
+       tx_eventdev_t   send_burst_eventdev;
+       struct eventdev_rx_adptr rx_adptr;
+       struct eventdev_tx_adptr tx_adptr;
+       struct eventdev_queues evq;
+       struct eventdev_ports evp;
+       uint8_t event_d_id;
+};
+
+extern struct rte_event_dev_config event_d_conf;
+extern struct eventdev_resources eventdev_rsrc;
+extern int pkt_transfer_mode;
+extern int eventq_sync_mode;
+extern char *evd_argv[3];
+extern int evd_argc;
+
+/* Event device and required resource setup function */
+int eventdev_resource_setup(int argc, char **argv);
+
+/* Returns next available event port */
+int get_free_event_port(void);
+
+/* Event processing function with exact match algorithm */
+int em_main_loop_eventdev(__attribute__((unused)) void *dummy);
+
+/* Event processing function with longest prefix match algorithm */
+int lpm_main_loop_eventdev(__attribute__((unused)) void *dummy);
+
+#endif /* __L3FWD_EVENTDEV_H__ */
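
For context, the wiring from main() (see the main.c hunks below) reduces to
roughly the following sequence; a sketch with error handling elided, not part
of the patch itself (the remote-launch call is unchanged upstream code):

	/* Parse eventdev options collected during regular option parsing */
	ret = eventdev_resource_setup(evd_argc, evd_argv);
	if (ret == EVENT_DEV_PARAM_PRESENT) {
		/* Eventdev mode: per-port Rx/Tx queue and lcore mapping
		 * setup is skipped; each worker grabs a free event port in
		 * its main loop via get_free_event_port() and runs
		 * em_main_loop_eventdev()/lpm_main_loop_eventdev().
		 */
	}
	rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MASTER);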
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index 4143683..2839982 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -254,6 +254,77 @@ struct ipv6_l3fwd_lpm_route {
        return 0;
 }
 
+/* main eventdev processing loop */
+int lpm_main_loop_eventdev(__attribute__((unused)) void *dummy)
+{
+       struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+       struct rte_event events[MAX_PKT_BURST];
+       struct lcore_conf *lconf;
+       int32_t i, j = 0, nb_rx;
+       uint16_t event_d_id;
+       int32_t event_p_id;
+       uint32_t lcore_id;
+       uint16_t deq_len;
+       uint32_t portid;
+
+       /* Assign dedicated event port for enqueue/dequeue operation */
+       deq_len = event_d_conf.nb_event_port_dequeue_depth;
+       event_d_id = eventdev_rsrc.event_d_id;
+       event_p_id = get_free_event_port();
+       if (event_p_id < 0)
+               rte_exit(EXIT_FAILURE, "No free event port for lcore %u\n",
+                        rte_lcore_id());
+       lcore_id = rte_lcore_id();
+
+       lconf = &lcore_conf[lcore_id];
+       lconf->n_rx_queue = 1;
+       lconf->rx_queue_list[0].port_id = event_p_id;
+       lconf->n_tx_port = 1;
+       lconf->tx_port_id[0] = event_p_id;
+
+       RTE_LOG(INFO, L3FWD, "entering eventdev main loop on lcore %u\n",
+               lcore_id);
+
+       while (!force_quit) {
+               /* Read packet from RX queues */
+               nb_rx = rte_event_dequeue_burst(event_d_id, event_p_id,
+                                               events, deq_len, 0);
+               if (nb_rx == 0) {
+                       rte_pause();
+                       continue;
+               }
+
+               for (i = 0; i < nb_rx; i++)
+                       pkts_burst[i] = events[i].mbuf;
+
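+               /* Group consecutive mbufs that arrived on the same
+                * ethdev port so each group can be looked up and
+                * forwarded in a single burst.
+                */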
+               j = 0;
+               portid = pkts_burst[0]->port;
+               for (i = 1; i < nb_rx; i++) {
+                       if (portid != pkts_burst[i]->port) {
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON \
+                        || defined RTE_ARCH_PPC_64
+                               l3fwd_lpm_send_packets(i - j, &pkts_burst[j],
+                                                      portid, lconf);
+#else
+                               l3fwd_lpm_no_opt_send_packets(i - j,
+                                                             &pkts_burst[j],
+                                                             portid, lconf);
+#endif /* X86, NEON or PPC_64 */
+                               j = i;
+                               portid = pkts_burst[i]->port;
+                       }
+               }
+
+               /* Send remaining packets */
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON \
+                        || defined RTE_ARCH_PPC_64
+               l3fwd_lpm_send_packets(i - j, &pkts_burst[j], portid, lconf);
+#else
+               l3fwd_lpm_no_opt_send_packets(i - j, &pkts_burst[j], portid,
+                                             lconf);
+#endif /* X86, NEON or PPC_64 */
+       }
+
+       return 0;
+}
+
 void
 setup_lpm(const int socketid)
 {
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 3800bad..408fdc6 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -41,11 +41,13 @@
 #include <rte_udp.h>
 #include <rte_string_fns.h>
 #include <rte_cpuflags.h>
+#include <rte_eventdev.h>
 
 #include <cmdline_parse.h>
 #include <cmdline_parse_etheraddr.h>
 
 #include "l3fwd.h"
+#include "l3fwd_eventdev.h"
 
 /*
  * Configurable number of RX/TX ring descriptors
@@ -135,7 +137,7 @@ struct lcore_params {
        },
 };
 
-static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
+struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
 static uint8_t lkp_per_socket[NB_SOCKETS];
 
 struct l3fwd_lkp_mode {
@@ -172,15 +174,20 @@ struct l3fwd_lkp_mode {
  * Currently exact-match and longest-prefix-match
  * are supported ones.
  */
-static void
+void
 setup_l3fwd_lookup_tables(void)
 {
        /* Setup HASH lookup functions. */
-       if (l3fwd_em_on)
+       if (l3fwd_em_on) {
+               if (pkt_transfer_mode == PACKET_TRANSFER_MODE_EVENTDEV)
+                       l3fwd_em_lkp.main_loop = em_main_loop_eventdev;
                l3fwd_lkp = l3fwd_em_lkp;
        /* Setup LPM lookup functions. */
-       else
+       } else {
+               if (pkt_transfer_mode == PACKET_TRANSFER_MODE_EVENTDEV)
+                       l3fwd_lpm_lkp.main_loop = lpm_main_loop_eventdev;
                l3fwd_lkp = l3fwd_lpm_lkp;
+       }
 }
 
 static int
@@ -289,7 +296,9 @@ struct l3fwd_lkp_mode {
                " [--hash-entry-num]"
                " [--ipv6]"
                " [--parse-ptype]"
-               " [--per-port-pool]\n\n"
+               " [--per-port-pool]"
+               " [--mode]"
+               " [--eventq-sync]\n\n"
 
                "  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
                "  -P : Enable promiscuous mode\n"
@@ -304,7 +313,12 @@ struct l3fwd_lkp_mode {
                "  --hash-entry-num: Specify the hash entry number in 
hexadecimal to be setup\n"
                "  --ipv6: Set if running ipv6 packets\n"
                "  --parse-ptype: Set to use software to analyze packet type\n"
-               "  --per-port-pool: Use separate buffer pool per port\n\n",
+               "  --per-port-pool: Use separate buffer pool per port\n"
+               "  --mode: Packet transfer mode for I/O, poll or eventdev\n"
+               "          Default mode = poll\n"
+               "  --eventq-sync:Event queue synchronization method,\n"
+               "                ordered or atomic. \nDefault: atomic\n"
+               "                Valid only if --mode=eventdev\n\n",
                prgname);
 }
 
@@ -509,6 +523,8 @@ enum {
        int option_index;
        char *prgname = argv[0];
 
+       evd_argv[0] = argv[0];
+       evd_argc++;
        argvopt = argv;
 
        /* Error or normal output strings. */
@@ -538,6 +554,14 @@ enum {
                        l3fwd_lpm_on = 1;
                        break;
 
+               case '?':
+                       /* Possibly an eventdev-specific option; save it
+                        * for later processing by parse_eventdev_args().
+                        */
+                       if (evd_argc < 3) {
+                               evd_argv[evd_argc] = argv[optind - 1];
+                               evd_argc++;
+                       }
+                       break;
+
                /* long options */
                case CMD_LINE_OPT_CONFIG_NUM:
                        ret = parse_config(optarg);
@@ -646,7 +670,7 @@ enum {
        return ret;
 }
 
-static void
+void
 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
 {
        char buf[RTE_ETHER_ADDR_FMT_SIZE];
@@ -654,7 +678,7 @@ enum {
        printf("%s%s", name, buf);
 }
 
-static int
+int
 init_mem(uint16_t portid, unsigned int nb_mbuf)
 {
        struct lcore_conf *qconf;
@@ -836,6 +860,11 @@ enum {
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
 
+       /* Configure eventdev parameters if the user has requested them */
+       ret = eventdev_resource_setup(evd_argc, evd_argv);
+       if (ret == EVENT_DEV_PARAM_PRESENT)
+               goto skip_port_config;
+
        if (check_lcore_params() < 0)
                rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
 
@@ -1005,6 +1034,7 @@ enum {
                }
        }
 
+skip_port_config:
        printf("\n");
 
        /* start ports */
-- 
1.8.3.1
