On 17/12/15 08:09, Elo, Matias (Nokia - FI/Espoo) wrote:
-----Original Message-----
From: EXT Zoltan Kiss [mailto:[email protected]]
Sent: Wednesday, December 16, 2015 6:42 PM
To: Savolainen, Petri (Nokia - FI/Espoo) <[email protected]>; [email protected]; Elo, Matias (Nokia - FI/Espoo) <[email protected]>
Subject: Re: [lng-odp] [API-NEXT PATCH 05/19] linux-generic: netmap: add initial multi queue support



On 16/12/15 13:45, Petri Savolainen wrote:
From: Matias Elo <[email protected]>

Added multi queue support to netmap pktio.

Signed-off-by: Matias Elo <[email protected]>
---
   platform/linux-generic/include/odp_packet_netmap.h |  34 +-
   platform/linux-generic/pktio/netmap.c              | 422 ++++++++++++++++++---
   2 files changed, 403 insertions(+), 53 deletions(-)

diff --git a/platform/linux-generic/include/odp_packet_netmap.h b/platform/linux-generic/include/odp_packet_netmap.h
index a088d23..bede176 100644
--- a/platform/linux-generic/include/odp_packet_netmap.h
+++ b/platform/linux-generic/include/odp_packet_netmap.h
@@ -7,23 +7,53 @@
   #ifndef ODP_PACKET_NETMAP_H
   #define ODP_PACKET_NETMAP_H

+#include <odp/align.h>
+#include <odp/debug.h>
   #include <odp/packet_io.h>
   #include <odp/pool.h>
+#include <odp/ticketlock.h>
+#include <odp_align_internal.h>

   #include <linux/if_ether.h>
   #include <net/if.h>

+#define NM_MAX_DESC 32
+
+/** Ring for mapping pktin/pktout queues to netmap descriptors */
+struct netmap_ring_t {
+       unsigned first; /**< Index of first netmap descriptor */
+       unsigned last;  /**< Index of last netmap descriptor */
+       unsigned num;   /**< Number of netmap descriptors */
+       /** Netmap metadata for the device */
+       struct nm_desc *desc[NM_MAX_DESC];
+       unsigned cur;           /**< Index of current netmap descriptor */
+       odp_ticketlock_t lock;  /**< Queue lock */
+};
+
+typedef union {
+       struct netmap_ring_t s;
+       uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct netmap_ring_t))];
+} netmap_ring_t ODP_ALIGNED_CACHE;
+
   /** Packet socket using netmap mmaped rings for both Rx and Tx */
   typedef struct {
        odp_pool_t pool;                /**< pool to alloc packets from */
        size_t max_frame_len;           /**< buf_size - sizeof(pkt_hdr) */
-       struct nm_desc *rx_desc;        /**< netmap meta-data for the device */
-       struct nm_desc *tx_desc;        /**< netmap meta-data for the device */
        uint32_t if_flags;              /**< interface flags */
        int sockfd;                     /**< control socket */
        unsigned char if_mac[ETH_ALEN]; /**< eth mac address */
        char nm_name[IF_NAMESIZE + 7];  /**< netmap:<ifname> */
        odp_pktio_capability_t  capa;   /**< interface capabilities */
+       unsigned num_rx_queues;         /**< number of pktin queues */
+       unsigned num_tx_queues;         /**< number of pktout queues */
+       uint32_t num_rx_rings;          /**< number of nm rx rings */
+       uint32_t num_tx_rings;          /**< number of nm tx rings */
+       odp_bool_t lockless_rx;         /**< no locking for rx */
+       odp_bool_t lockless_tx;         /**< no locking for tx */
+       /** mapping of pktin queues to netmap rx descriptors */
+       netmap_ring_t rx_desc_ring[PKTIO_MAX_QUEUES];
+       /** mapping of pktout queues to netmap tx descriptors */
+       netmap_ring_t tx_desc_ring[PKTIO_MAX_QUEUES];
   } pkt_netmap_t;

   #endif
diff --git a/platform/linux-generic/pktio/netmap.c b/platform/linux-generic/pktio/netmap.c
index 38cdd1f..38a7d46 100644
--- a/platform/linux-generic/pktio/netmap.c
+++ b/platform/linux-generic/pktio/netmap.c
@@ -10,9 +10,9 @@
   #define _GNU_SOURCE
   #endif

+#include <odp_packet_io_internal.h>
   #include <odp_packet_netmap.h>
   #include <odp_packet_socket.h>
-#include <odp_packet_io_internal.h>
   #include <odp_debug_internal.h>
   #include <odp/helper/eth.h>

@@ -78,14 +78,178 @@ done:
        return err;
   }

+/**
+ * Map netmap rings to pktin/pktout queues
+ *
+ * @param rings          Array of netmap descriptor rings
+ * @param num_queues     Number of pktin/pktout queues
+ * @param num_rings      Number of matching netmap rings
+ */
+static inline void map_netmap_rings(netmap_ring_t *rings,
+                                   unsigned num_queues, unsigned num_rings)
+{
+       struct netmap_ring_t *desc_ring;
+       unsigned rings_per_queue;
+       unsigned remainder;
+       unsigned mapped_rings;
+       unsigned i;
+       unsigned desc_id = 0;
+
+       rings_per_queue = num_rings / num_queues;
+       remainder = num_rings % num_queues;
+
+       if (remainder)
+               ODP_DBG("WARNING: Netmap rings mapped unevenly to queues\n");
+
+       for (i = 0; i < num_queues; i++) {
+               desc_ring = &rings[i].s;
+               if (i < remainder)
+                       mapped_rings = rings_per_queue + 1;
+               else
+                       mapped_rings = rings_per_queue;
+
+               desc_ring->first = desc_id;
+               desc_ring->cur = desc_id;
+               desc_ring->last = desc_ring->first + mapped_rings - 1;
+               desc_ring->num = mapped_rings;
+
+               desc_id = desc_ring->last + 1;
+       }
+}
+
+/**
+ * Close pktio queues
+ *
+ * @param pktio_entry    Packet IO handle
+ */
+static inline void netmap_close_queues(pktio_entry_t *pktio_entry)
+{
+       int i;
+       struct pktio_entry *pktio = &pktio_entry->s;
+       odp_pktio_input_mode_t mode = pktio_entry->s.param.in_mode;
+
+       for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
+               if (mode != ODP_PKTIN_MODE_POLL &&
+                   mode != ODP_PKTIN_MODE_SCHED)
+                       continue;
+
+               if (pktio->in_queue[i].queue != ODP_QUEUE_INVALID) {
+                       odp_queue_destroy(pktio->in_queue[i].queue);
+                       pktio->in_queue[i].queue = ODP_QUEUE_INVALID;
+               }
+       }
+}
+
+static int netmap_input_queues_config(pktio_entry_t *pktio_entry,
+                                     const odp_pktio_input_queue_param_t *p)
+{
+       struct pktio_entry *pktio = &pktio_entry->s;
+       pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+       odp_pktio_input_mode_t mode = pktio_entry->s.param.in_mode;
+       odp_queue_t queue;
+       unsigned num_queues = p->num_queues;
+       unsigned i;
+
+       if (mode == ODP_PKTIN_MODE_DISABLED)
+               return -1;
+
+       if (num_queues <= 0 || num_queues > pkt_nm->capa.max_input_queues) {
+               ODP_ERR("Invalid input queue count: %u\n", num_queues);
+               return -1;
+       }
+
+       if (p->hash_enable && num_queues > 1) {
+               if (rss_conf_set_fd(pktio_entry->s.pkt_nm.sockfd,
+                                   pktio_entry->s.name, &p->hash_proto)) {
+                       ODP_ERR("Failed to configure input hash\n");
+                       return -1;
+               }
+       }
+
+       netmap_close_queues(pktio_entry);
+
+       for (i = 0; i < num_queues; i++) {
+               if (mode == ODP_PKTIN_MODE_POLL ||
+                   mode == ODP_PKTIN_MODE_SCHED) {
+                       odp_queue_type_t type = ODP_QUEUE_TYPE_POLL;
+
+                       if (mode == ODP_PKTIN_MODE_SCHED)
+                               type = ODP_QUEUE_TYPE_SCHED;
+
+                       /* Ugly cast to uintptr_t is needed since queue_param
+                        * is not defined as const in odp_queue_create() */
+                       queue = odp_queue_create("pktio_in", type,
+                                                (odp_queue_param_t *)
+                                                (uintptr_t)&p->queue_param);
+                       if (queue == ODP_QUEUE_INVALID) {
+                               netmap_close_queues(pktio_entry);
+                               return -1;
+                       }
+                       pktio->in_queue[i].queue = queue;

If you call odp_queue_deq(), how does it get the packet from the
hardware input queue? I've tried to look at the other patches, but I
couldn't find the place where that happens.
Also, I tried to figure out what happens when odp_queue_enq() is
called, regarding my question in the other mail thread, but no luck
there.

odp_queue_deq()/odp_queue_enq() still use the old-style API, where you
first have to manually get/set the default queues with
odp_pktio_outq_getdef() and odp_pktio_inq_setdef(). The standard
odp_pktio_send() and odp_pktio_recv() functions are used internally. In
the case of linux-generic the relevant functions are pktout_enqueue()
and pktin_dequeue().
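
Roughly, that old-style path looks like this (a minimal sketch, assuming
the pre-MQ odp_pktio_inq_setdef()/odp_pktio_outq_getdef() entry points;
exact signatures may differ between ODP versions):

#include <odp.h>

/* Forward one packet using the old-style default queues (sketch) */
static int fwd_one(odp_pktio_t pktio, odp_queue_t in_queue)
{
	odp_event_t ev;

	/* Attach a default input queue; internally pktin_dequeue()
	 * services it via odp_pktio_recv() */
	if (odp_pktio_inq_setdef(pktio, in_queue))
		return -1;

	/* Dequeue one packet event from the default input queue */
	ev = odp_queue_deq(in_queue);
	if (ev == ODP_EVENT_INVALID)
		return 0;

	/* Enqueue to the default output queue; this ends up in
	 * pktout_enqueue() -> odp_pktio_send() */
	return odp_queue_enq(odp_pktio_outq_getdef(pktio), ev);
}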
The queues you create here have the ODP_QUEUE_TYPE_POLL/SCHED type,
whereas pktout_enqueue() and pktin_dequeue() are used for the _PKTIN
type.


Currently only RECV and SCHED modes are implemented in the new MQ API,
and odp_l2fwd is the only application that uses them. In the next phase
we plan to remove the old API and implement POLL mode.
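
As a concrete example, requesting hashed input queues from this patch
would look roughly like the sketch below. It assumes the api-next
odp_pktio_input_queues_config() entry point that dispatches to
netmap_input_queues_config() above; the hash_proto bit name is an
assumption about the odp_pktin_hash_proto_t layout.

#include <string.h>
#include <odp.h>

/* Sketch: request 4 hashed input queues on an opened pktio */
static int config_input(odp_pktio_t pktio)
{
	odp_pktio_input_queue_param_t param;

	memset(&param, 0, sizeof(param));
	param.num_queues  = 4; /* checked against capa.max_input_queues */
	param.hash_enable = 1; /* with >1 queue this calls rss_conf_set_fd() */
	param.hash_proto.proto.ipv4_udp = 1; /* assumed bitfield name */
	param.single_user = 0; /* 0: rx rings stay ticketlock-protected */

	return odp_pktio_input_queues_config(pktio, &param);
}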

-Matias



+               } else {
+                       pktio->in_queue[i].queue = ODP_QUEUE_INVALID;
+                       pktio->in_queue[i].pktin.index = i;
+                       pktio->in_queue[i].pktin.pktio = pktio_entry->s.handle;
+               }
+       }
+       /* Map pktin queues to netmap rings */
+       map_netmap_rings(pkt_nm->rx_desc_ring, num_queues,
+                        pkt_nm->num_rx_rings);
+
+       pkt_nm->lockless_rx = p->single_user;
+       pkt_nm->num_rx_queues = num_queues;
+       return 0;
+}
+
+static int netmap_output_queues_config(pktio_entry_t *pktio_entry,
+                                      const odp_pktio_output_queue_param_t *p)
+{
+       struct pktio_entry *pktio = &pktio_entry->s;
+       pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+       odp_pktio_output_mode_t mode = pktio_entry->s.param.out_mode;
+       unsigned num_queues = p->num_queues;
+       unsigned i;
+
+       if (mode == ODP_PKTOUT_MODE_DISABLED)
+               return -1;
+
+       if (num_queues <= 0 || num_queues > pkt_nm->capa.max_output_queues) {
+               ODP_ERR("Invalid output queue count: %u\n", num_queues);
+               return -1;
+       }
+
+       /* Enough to map only one netmap tx ring per pktout queue */
+       map_netmap_rings(pkt_nm->tx_desc_ring, num_queues, num_queues);
+
+       for (i = 0; i < num_queues; i++) {
+               pktio->out_queue[i].pktout.index = i;
+               pktio->out_queue[i].pktout.pktio = pktio_entry->s.handle;
+       }
+       pkt_nm->lockless_tx = p->single_user;
+       pkt_nm->num_tx_queues = num_queues;
+       return 0;
+}
+
   static int netmap_close(pktio_entry_t *pktio_entry)
   {
+       int i, j;
        pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;

-       if (pkt_nm->rx_desc != NULL)
-               nm_close(pkt_nm->rx_desc);
-       if (pkt_nm->tx_desc != NULL)
-               nm_close(pkt_nm->tx_desc);
+       for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
+               for (j = 0; j < NM_MAX_DESC; j++) {
+                       if (pkt_nm->rx_desc_ring[i].s.desc[j] != NULL) {
+                               nm_close(pkt_nm->rx_desc_ring[i].s.desc[j]);
+                               pkt_nm->rx_desc_ring[i].s.desc[j] = NULL;
+                       }
+               }
+               for (j = 0; j < NM_MAX_DESC; j++) {
+                       if (pkt_nm->tx_desc_ring[i].s.desc[j] != NULL) {
+                               nm_close(pkt_nm->tx_desc_ring[i].s.desc[j]);
+                               pkt_nm->tx_desc_ring[i].s.desc[j] = NULL;
+                       }
+               }
+       }
+
+       netmap_close_queues(pktio_entry);

        if (pkt_nm->sockfd != -1 && close(pkt_nm->sockfd) != 0) {
                __odp_errno = errno;
@@ -98,10 +262,12 @@ static int netmap_close(pktio_entry_t *pktio_entry)
   static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
                       const char *netdev, odp_pool_t pool)
   {
+       int i;
        int err;
        int sockfd;
        pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
        struct nm_desc *desc;
+       odp_pktin_hash_proto_t hash_proto;

        if (getenv("ODP_PKTIO_DISABLE_NETMAP"))
                return -1;
@@ -124,16 +290,29 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
        snprintf(pkt_nm->nm_name, sizeof(pkt_nm->nm_name), "netmap:%s",
                 netdev);

-       /* Dummy open here to check if netmap module is available */
+       /* Dummy open here to check if netmap module is available and to read
+        * capability info. */
        desc = nm_open(pkt_nm->nm_name, NULL, 0, NULL);
        if (desc == NULL) {
                ODP_ERR("nm_open(%s) failed\n", pkt_nm->nm_name);
                goto error;
        }
+       if (desc->nifp->ni_rx_rings > NM_MAX_DESC) {
+               ODP_ERR("Unable to store all rx rings\n");
+               nm_close(desc);
+               goto error;
+       }
+       pkt_nm->num_rx_rings = desc->nifp->ni_rx_rings;
        pkt_nm->capa.max_input_queues = PKTIO_MAX_QUEUES;
        if (desc->nifp->ni_rx_rings < PKTIO_MAX_QUEUES)
                pkt_nm->capa.max_input_queues = desc->nifp->ni_rx_rings;

+       if (desc->nifp->ni_tx_rings > NM_MAX_DESC) {
+               ODP_ERR("Unable to store all tx rings\n");
+               nm_close(desc);
+               goto error;
+       }
+       pkt_nm->num_tx_rings = desc->nifp->ni_tx_rings;
        pkt_nm->capa.max_output_queues = PKTIO_MAX_QUEUES;
        if (desc->nifp->ni_tx_rings < PKTIO_MAX_QUEUES)
                pkt_nm->capa.max_output_queues = desc->nifp->ni_tx_rings;
@@ -147,6 +326,12 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
        }
        pkt_nm->sockfd = sockfd;

+       /* Check if RSS is supported. If not, set 'max_input_queues' to 1. */
+       if (rss_conf_get_supported_fd(sockfd, netdev, &hash_proto) == 0) {
+               ODP_DBG("RSS not supported\n");
+               pkt_nm->capa.max_input_queues = 1;
+       }
+
        err = netmap_do_ioctl(pktio_entry, SIOCGIFFLAGS, 0);
        if (err)
                goto error;
@@ -157,6 +342,11 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
        if (err)
                goto error;

+       for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
+               odp_ticketlock_init(&pkt_nm->rx_desc_ring[i].s.lock);
+               odp_ticketlock_init(&pkt_nm->tx_desc_ring[i].s.lock);
+       }
+
        return 0;

   error:
@@ -167,19 +357,82 @@ error:
   static int netmap_start(pktio_entry_t *pktio_entry)
   {
        pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+       netmap_ring_t *desc_ring;
+       struct nm_desc base_desc;
        int err;
        unsigned i;
-       const char *ifname = pkt_nm->nm_name;
+       unsigned j;
+       uint64_t flags;
+       odp_pktio_input_mode_t in_mode = pktio_entry->s.param.in_mode;
+       odp_pktio_output_mode_t out_mode = pktio_entry->s.param.out_mode;

-       pkt_nm->rx_desc = nm_open(ifname, NULL, NETMAP_NO_TX_POLL, NULL);
-       pkt_nm->tx_desc = nm_open(ifname, NULL, NM_OPEN_NO_MMAP,
-                                 pkt_nm->rx_desc);
+       /* If no pktin/pktout queues have been configured, configure one
+        * for each direction. */
+       if (!pkt_nm->num_rx_queues && in_mode != ODP_PKTIN_MODE_DISABLED) {
+               odp_pktio_input_queue_param_t param;

-       if (pkt_nm->rx_desc == NULL || pkt_nm->tx_desc == NULL) {
-               ODP_ERR("nm_open(%s) failed\n", ifname);
+               memset(&param, 0, sizeof(odp_pktio_input_queue_param_t));
+               param.num_queues = 1;
+               if (netmap_input_queues_config(pktio_entry, &param))
+                       return -1;
+       }
+       if (!pkt_nm->num_tx_queues && out_mode == ODP_PKTOUT_MODE_SEND) {
+               odp_pktio_output_queue_param_t param;
+
+               memset(&param, 0, sizeof(odp_pktio_output_queue_param_t));
+               param.num_queues = 1;
+               if (netmap_output_queues_config(pktio_entry, &param))
+                       return -1;
+       }
+
+       base_desc.self = &base_desc;
+       base_desc.mem = NULL;
+       memcpy(base_desc.req.nr_name, pktio_entry->s.name,
+              sizeof(pktio_entry->s.name));
+       base_desc.req.nr_flags &= ~NR_REG_MASK;
+       base_desc.req.nr_flags |= NR_REG_ONE_NIC;
+       base_desc.req.nr_ringid = 0;
+
+       /* Only the first rx descriptor does mmap */
+       desc_ring = pkt_nm->rx_desc_ring;
+       flags = NM_OPEN_IFNAME | NETMAP_NO_TX_POLL;
+       desc_ring[0].s.desc[0] = nm_open(pkt_nm->nm_name, NULL, flags,
+                                        &base_desc);
+       if (desc_ring[0].s.desc[0] == NULL) {
+               ODP_ERR("nm_start(%s) failed\n", pkt_nm->nm_name);
                goto error;
        }
-
+       /* Open rest of the rx descriptors (one per netmap ring) */
+       flags = NM_OPEN_IFNAME | NETMAP_NO_TX_POLL | NM_OPEN_NO_MMAP;
+       for (i = 0; i < pkt_nm->num_rx_queues; i++) {
+               for (j = desc_ring[i].s.first; j <= desc_ring[i].s.last; j++) {
+                       if (i == 0 && j == 0) /* First already opened */
+                               continue;
+                       base_desc.req.nr_ringid = j;
+                       desc_ring[i].s.desc[j] = nm_open(pkt_nm->nm_name, NULL,
+                                                        flags, &base_desc);
+                       if (desc_ring[i].s.desc[j] == NULL) {
+                               ODP_ERR("nm_start(%s) failed\n",
+                                       pkt_nm->nm_name);
+                               goto error;
+                       }
+               }
+       }
+       /* Open tx descriptors */
+       desc_ring = pkt_nm->tx_desc_ring;
+       flags = NM_OPEN_IFNAME | NM_OPEN_NO_MMAP;
+       for (i = 0; i < pkt_nm->num_tx_queues; i++) {
+               for (j = desc_ring[i].s.first; j <= desc_ring[i].s.last; j++) {
+                       base_desc.req.nr_ringid = j;
+                       desc_ring[i].s.desc[j] = nm_open(pkt_nm->nm_name, NULL,
+                                                        flags, &base_desc);
+                       if (desc_ring[i].s.desc[j] == NULL) {
+                               ODP_ERR("nm_start(%s) failed\n",
+                                       pkt_nm->nm_name);
+                               goto error;
+                       }
+               }
+       }
        /* Wait for the link to come up */
        for (i = 0; i < NM_OPEN_RETRIES; i++) {
                err = netmap_do_ioctl(pktio_entry, SIOCETHTOOL,
                                      ETHTOOL_GLINK);
@@ -251,35 +504,45 @@ static inline int netmap_pkt_to_odp(pktio_entry_t *pktio_entry,
                }

                packet_parse_l2(pkt_hdr);
+
+               pkt_hdr->input = pktio_entry->s.handle;
+
                *pkt_out = pkt;
        }

        return 0;
   }

-static int netmap_recv(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
-                      unsigned num)
+static int netmap_recv_queue(pktio_entry_t *pktio_entry, int index,
+                            odp_packet_t pkt_table[], int num)
   {
+       char *buf;
        struct netmap_ring *ring;
-       struct nm_desc *desc = pktio_entry->s.pkt_nm.rx_desc;
+       struct nm_desc *desc;
        struct pollfd polld;
-       char *buf;
+       pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+       unsigned first_desc_id = pkt_nm->rx_desc_ring[index].s.first;
+       unsigned last_desc_id = pkt_nm->rx_desc_ring[index].s.last;
+       unsigned desc_id;
+       int num_desc = pkt_nm->rx_desc_ring[index].s.num;
        int i;
-       int num_rings = desc->last_rx_ring - desc->first_rx_ring + 1;
-       int ring_id = desc->cur_rx_ring;
-       unsigned num_rx = 0;
+       int num_rx = 0;
        uint32_t slot_id;

-       polld.fd = desc->fd;
-       polld.events = POLLIN;
+       if (odp_unlikely(pktio_entry->s.state == STATE_STOP))
+               return 0;

-       for (i = 0; i < num_rings && num_rx != num; i++) {
-               ring_id = desc->cur_rx_ring + i;
+       if (!pkt_nm->lockless_rx)
+               odp_ticketlock_lock(&pkt_nm->rx_desc_ring[index].s.lock);

-               if (ring_id > desc->last_rx_ring)
-                       ring_id = desc->first_rx_ring;
+       desc_id = pkt_nm->rx_desc_ring[index].s.cur;

-               ring = NETMAP_RXRING(desc->nifp, ring_id);
+       for (i = 0; i < num_desc && num_rx != num; i++) {
+               if (desc_id > last_desc_id)
+                       desc_id = first_desc_id;
+
+               desc = pkt_nm->rx_desc_ring[index].s.desc[desc_id];
+               ring = NETMAP_RXRING(desc->nifp, desc->cur_rx_ring);

                while (!nm_ring_empty(ring) && num_rx != num) {
                        slot_id = ring->cur;
@@ -294,51 +557,108 @@ static int netmap_recv(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
                        ring->cur = nm_ring_next(ring, slot_id);
                        ring->head = ring->cur;
                }
-       }
-       desc->cur_rx_ring = ring_id;

-       if (num_rx == 0) {
-               if (odp_unlikely(poll(&polld, 1, 0) < 0))
-                       ODP_ERR("RX: poll error\n");
+               if (num_rx != num) {
+                       polld.fd = desc->fd;
+                       polld.events = POLLIN;
+                       if (odp_unlikely(poll(&polld, 1, 0) < 0))
+                               ODP_ERR("RX: poll error\n");
+               }
+               desc_id++;
        }
+       pkt_nm->rx_desc_ring[index].s.cur = desc_id;
+
+       if (!pkt_nm->lockless_rx)
+               odp_ticketlock_unlock(&pkt_nm->rx_desc_ring[index].s.lock);
+
        return num_rx;
   }

-static int netmap_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+static int netmap_recv(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
                       unsigned num)
   {
+       return netmap_recv_queue(pktio_entry, 0, pkt_table, num);
+}
+
+static int netmap_send_queue(pktio_entry_t *pktio_entry, int index,
+                            odp_packet_t pkt_table[], int num)
+{
+       pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
        struct pollfd polld;
-       struct nm_desc *nm_desc = pktio_entry->s.pkt_nm.tx_desc;
-       unsigned i, nb_tx;
-       uint8_t *frame;
-       uint32_t frame_len;
+       struct nm_desc *desc;
+       struct netmap_ring *ring;
+       int i;
+       int nb_tx;
+       int desc_id;
+       odp_packet_t pkt;
+       uint32_t pkt_len;
+       unsigned slot_id;
+       char *buf;

-       polld.fd = nm_desc->fd;
+       if (odp_unlikely(pktio_entry->s.state == STATE_STOP))
+               return 0;
+
+       /* Only one netmap tx ring per pktout queue */
+       desc_id = pkt_nm->tx_desc_ring[index].s.cur;
+       desc = pkt_nm->tx_desc_ring[index].s.desc[desc_id];
+       ring = NETMAP_TXRING(desc->nifp, desc->cur_tx_ring);
+
+       if (!pkt_nm->lockless_tx)
+               odp_ticketlock_lock(&pkt_nm->tx_desc_ring[index].s.lock);
+
+       polld.fd = desc->fd;
        polld.events = POLLOUT;

        for (nb_tx = 0; nb_tx < num; nb_tx++) {
-               frame_len = 0;
-               frame = odp_packet_l2_ptr(pkt_table[nb_tx], &frame_len);
+               pkt = pkt_table[nb_tx];
+               pkt_len = odp_packet_len(pkt);
+
+               if (pkt_len > ring->nr_buf_size) {
+                       if (nb_tx == 0)
+                               __odp_errno = EMSGSIZE;
+                       break;
+               }
                for (i = 0; i < NM_INJECT_RETRIES; i++) {
-                       if (nm_inject(nm_desc, frame, frame_len) == 0)
+                       if (nm_ring_empty(ring)) {
                                poll(&polld, 1, 0);
-                       else
+                               continue;
+                       }
+                       slot_id = ring->cur;
+                       ring->slot[slot_id].flags = 0;
+                       ring->slot[slot_id].len = pkt_len;
+
+                       buf = NETMAP_BUF(ring, ring->slot[slot_id].buf_idx);
+
+                       if (odp_packet_copydata_out(pkt, 0, pkt_len, buf)) {
+                               i = NM_INJECT_RETRIES;
                                break;
-               }
-               if (odp_unlikely(i == NM_INJECT_RETRIES)) {
-                       ioctl(nm_desc->fd, NIOCTXSYNC, NULL);
+                       }
+                       ring->cur = nm_ring_next(ring, slot_id);
+                       ring->head = ring->cur;
                        break;
                }
+               if (i == NM_INJECT_RETRIES)
+                       break;
+               odp_packet_free(pkt);
        }
        /* Send pending packets */
        poll(&polld, 1, 0);

-       for (i = 0; i < nb_tx; i++)
-               odp_packet_free(pkt_table[i]);
+       if (!pkt_nm->lockless_tx)
+               odp_ticketlock_unlock(&pkt_nm->tx_desc_ring[index].s.lock);
+
+       if (odp_unlikely(nb_tx == 0 && __odp_errno != 0))
+               return -1;

        return nb_tx;
   }

+static int netmap_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+                      unsigned num)
+{
+       return netmap_send_queue(pktio_entry, 0, pkt_table, num);
+}
+
   static int netmap_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr)
   {
        memcpy(mac_addr, pktio_entry->s.pkt_nm.if_mac, ETH_ALEN);
@@ -385,13 +705,13 @@ const pktio_if_ops_t netmap_pktio_ops = {
        .promisc_mode_get = netmap_promisc_mode_get,
        .mac_get = netmap_mac_addr_get,
        .capability = netmap_capability,
-       .input_queues_config = NULL,
-       .output_queues_config = NULL,
+       .input_queues_config = netmap_input_queues_config,
+       .output_queues_config = netmap_output_queues_config,
        .in_queues = NULL,
        .pktin_queues = NULL,
        .pktout_queues = NULL,
-       .recv_queue = NULL,
-       .send_queue = NULL
+       .recv_queue = netmap_recv_queue,
+       .send_queue = netmap_send_queue
   };

   #endif /* ODP_NETMAP */
