+ } else {
+ pktio->in_queue[i].queue = ODP_QUEUE_INVALID;
+ pktio->in_queue[i].pktin.index = i;
+ pktio->in_queue[i].pktin.pktio = pktio_entry->s.handle;
+ }
+ }
+ /* Map pktin queues to netmap rings */
+ map_netmap_rings(pkt_nm->rx_desc_ring, num_queues,
+ pkt_nm->num_rx_rings);
+
+ pkt_nm->lockless_rx = p->single_user;
+ pkt_nm->num_rx_queues = num_queues;
+ return 0;
+}
+
+static int netmap_output_queues_config(pktio_entry_t *pktio_entry,
+ const odp_pktio_output_queue_param_t *p)
+{
+ struct pktio_entry *pktio = &pktio_entry->s;
+ pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+ odp_pktio_output_mode_t mode = pktio_entry->s.param.out_mode;
+ unsigned num_queues = p->num_queues;
+ unsigned i;
+
+ if (mode == ODP_PKTOUT_MODE_DISABLED)
+ return -1;
+
+ if (num_queues <= 0 || num_queues > pkt_nm->capa.max_output_queues) {
+ ODP_ERR("Invalid output queue count: %u\n", num_queues);
+ return -1;
+ }
+
+ /* Enough to map only one netmap tx ring per pktout queue */
+ map_netmap_rings(pkt_nm->tx_desc_ring, num_queues, num_queues);
+
+ for (i = 0; i < num_queues; i++) {
+ pktio->out_queue[i].pktout.index = i;
+ pktio->out_queue[i].pktout.pktio = pktio_entry->s.handle;
+ }
+ pkt_nm->lockless_tx = p->single_user;
+ pkt_nm->num_tx_queues = num_queues;
+ return 0;
+}
+
static int netmap_close(pktio_entry_t *pktio_entry)
{
+ int i, j;
pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
- if (pkt_nm->rx_desc != NULL)
- nm_close(pkt_nm->rx_desc);
- if (pkt_nm->tx_desc != NULL)
- nm_close(pkt_nm->tx_desc);
+ for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
+ for (j = 0; j < NM_MAX_DESC; j++) {
+ if (pkt_nm->rx_desc_ring[i].s.desc[j] != NULL) {
+ nm_close(pkt_nm->rx_desc_ring[i].s.desc[j]);
+ pkt_nm->rx_desc_ring[i].s.desc[j] = NULL;
+ }
+ }
+ for (j = 0; j < NM_MAX_DESC; j++) {
+ if (pkt_nm->tx_desc_ring[i].s.desc[j] != NULL) {
+ nm_close(pkt_nm->tx_desc_ring[i].s.desc[j]);
+ pkt_nm->tx_desc_ring[i].s.desc[j] = NULL;
+ }
+ }
+ }
+
+ netmap_close_queues(pktio_entry);
if (pkt_nm->sockfd != -1 && close(pkt_nm->sockfd) != 0) {
__odp_errno = errno;
@@ -98,10 +262,12 @@ static int netmap_close(pktio_entry_t *pktio_entry)
static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
const char *netdev, odp_pool_t pool)
{
+ int i;
int err;
int sockfd;
pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
struct nm_desc *desc;
+ odp_pktin_hash_proto_t hash_proto;
if (getenv("ODP_PKTIO_DISABLE_NETMAP"))
return -1;
@@ -124,16 +290,29 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
snprintf(pkt_nm->nm_name, sizeof(pkt_nm->nm_name), "netmap:%s",
netdev);
- /* Dummy open here to check if netmap module is available */
+ /* Dummy open here to check if netmap module is available and to read
+ * capability info. */
desc = nm_open(pkt_nm->nm_name, NULL, 0, NULL);
if (desc == NULL) {
ODP_ERR("nm_open(%s) failed\n", pkt_nm->nm_name);
goto error;
}
+ if (desc->nifp->ni_rx_rings > NM_MAX_DESC) {
+ ODP_ERR("Unable to store all rx rings\n");
+ nm_close(desc);
+ goto error;
+ }
+ pkt_nm->num_rx_rings = desc->nifp->ni_rx_rings;
pkt_nm->capa.max_input_queues = PKTIO_MAX_QUEUES;
if (desc->nifp->ni_rx_rings < PKTIO_MAX_QUEUES)
pkt_nm->capa.max_input_queues = desc->nifp->ni_rx_rings;
+ if (desc->nifp->ni_tx_rings > NM_MAX_DESC) {
+ ODP_ERR("Unable to store all tx rings\n");
+ nm_close(desc);
+ goto error;
+ }
+ pkt_nm->num_tx_rings = desc->nifp->ni_tx_rings;
pkt_nm->capa.max_output_queues = PKTIO_MAX_QUEUES;
if (desc->nifp->ni_tx_rings < PKTIO_MAX_QUEUES)
pkt_nm->capa.max_output_queues = desc->nifp->ni_tx_rings;
@@ -147,6 +326,12 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
}
pkt_nm->sockfd = sockfd;
+ /* Check if RSS is supported. If not, set 'max_input_queues' to 1. */
+ if (rss_conf_get_supported_fd(sockfd, netdev, &hash_proto) == 0) {
+ ODP_DBG("RSS not supported\n");
+ pkt_nm->capa.max_input_queues = 1;
+ }
+
err = netmap_do_ioctl(pktio_entry, SIOCGIFFLAGS, 0);
if (err)
goto error;
@@ -157,6 +342,11 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
if (err)
goto error;
+ for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
+ odp_ticketlock_init(&pkt_nm->rx_desc_ring[i].s.lock);
+ odp_ticketlock_init(&pkt_nm->tx_desc_ring[i].s.lock);
+ }
+
return 0;
error:
@@ -167,19 +357,82 @@ error:
static int netmap_start(pktio_entry_t *pktio_entry)
{
pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+ netmap_ring_t *desc_ring;
+ struct nm_desc base_desc;
int err;
unsigned i;
- const char *ifname = pkt_nm->nm_name;
+ unsigned j;
+ uint64_t flags;
+ odp_pktio_input_mode_t in_mode = pktio_entry->s.param.in_mode;
+ odp_pktio_output_mode_t out_mode = pktio_entry->s.param.out_mode;
- pkt_nm->rx_desc = nm_open(ifname, NULL, NETMAP_NO_TX_POLL, NULL);
- pkt_nm->tx_desc = nm_open(ifname, NULL, NM_OPEN_NO_MMAP,
- pkt_nm->rx_desc);
+ /* If no pktin/pktout queues have been configured. Configure one
+ * for each direction. */
+ if (!pkt_nm->num_rx_queues && in_mode != ODP_PKTIN_MODE_DISABLED) {
+ odp_pktio_input_queue_param_t param;
- if (pkt_nm->rx_desc == NULL || pkt_nm->tx_desc == NULL) {
- ODP_ERR("nm_open(%s) failed\n", ifname);
+ memset(&param, 0, sizeof(odp_pktio_input_queue_param_t));
+ param.num_queues = 1;
+ if (netmap_input_queues_config(pktio_entry, &param))
+ return -1;
+ }
+ if (!pkt_nm->num_tx_queues && out_mode == ODP_PKTOUT_MODE_SEND) {
+ odp_pktio_output_queue_param_t param;
+
+ memset(&param, 0, sizeof(odp_pktio_output_queue_param_t));
+ param.num_queues = 1;
+ if (netmap_output_queues_config(pktio_entry, &param))
+ return -1;
+ }
+
+ base_desc.self = &base_desc;
+ base_desc.mem = NULL;
+ memcpy(base_desc.req.nr_name, pktio_entry->s.name,
+ sizeof(pktio_entry->s.name));
+ base_desc.req.nr_flags &= ~NR_REG_MASK;
+ base_desc.req.nr_flags |= NR_REG_ONE_NIC;
+ base_desc.req.nr_ringid = 0;
+
+ /* Only the first rx descriptor does mmap */
+ desc_ring = pkt_nm->rx_desc_ring;
+ flags = NM_OPEN_IFNAME | NETMAP_NO_TX_POLL;
+ desc_ring[0].s.desc[0] = nm_open(pkt_nm->nm_name, NULL, flags,
+ &base_desc);
+ if (desc_ring[0].s.desc[0] == NULL) {
+ ODP_ERR("nm_start(%s) failed\n", pkt_nm->nm_name);
goto error;
}
-
+ /* Open rest of the rx descriptors (one per netmap ring) */
+ flags = NM_OPEN_IFNAME | NETMAP_NO_TX_POLL | NM_OPEN_NO_MMAP;
+ for (i = 0; i < pkt_nm->num_rx_queues; i++) {
+ for (j = desc_ring[i].s.first; j <= desc_ring[i].s.last; j++) {
+ if (i == 0 && j == 0) /* First already opened */
+ continue;
+ base_desc.req.nr_ringid = j;
+ desc_ring[i].s.desc[j] = nm_open(pkt_nm->nm_name, NULL,
+ flags, &base_desc);
+ if (desc_ring[i].s.desc[j] == NULL) {
+ ODP_ERR("nm_start(%s) failed\n",
+ pkt_nm->nm_name);
+ goto error;
+ }
+ }
+ }
+ /* Open tx descriptors */
+ desc_ring = pkt_nm->tx_desc_ring;
+ flags = NM_OPEN_IFNAME | NM_OPEN_NO_MMAP;
+ for (i = 0; i < pkt_nm->num_tx_queues; i++) {
+ for (j = desc_ring[i].s.first; j <= desc_ring[i].s.last; j++) {
+ base_desc.req.nr_ringid = j;
+ desc_ring[i].s.desc[j] = nm_open(pkt_nm->nm_name, NULL,
+ flags, &base_desc);
+ if (desc_ring[i].s.desc[j] == NULL) {
+ ODP_ERR("nm_start(%s) failed\n",
+ pkt_nm->nm_name);
+ goto error;
+ }
+ }
+ }
/* Wait for the link to come up */
for (i = 0; i < NM_OPEN_RETRIES; i++) {
err = netmap_do_ioctl(pktio_entry, SIOCETHTOOL, ETHTOOL_GLINK);
@@ -251,35 +504,45 @@ static inline int netmap_pkt_to_odp(pktio_entry_t *pktio_entry,
}
packet_parse_l2(pkt_hdr);
+
+ pkt_hdr->input = pktio_entry->s.handle;
+
*pkt_out = pkt;
}
return 0;
}
-static int netmap_recv(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
- unsigned num)
+static int netmap_recv_queue(pktio_entry_t *pktio_entry, int index,
+ odp_packet_t pkt_table[], int num)
{
+ char *buf;
struct netmap_ring *ring;
- struct nm_desc *desc = pktio_entry->s.pkt_nm.rx_desc;
+ struct nm_desc *desc;
struct pollfd polld;
- char *buf;
+ pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+ unsigned first_desc_id = pkt_nm->rx_desc_ring[index].s.first;
+ unsigned last_desc_id = pkt_nm->rx_desc_ring[index].s.last;
+ unsigned desc_id;
+ int num_desc = pkt_nm->rx_desc_ring[index].s.num;
int i;
- int num_rings = desc->last_rx_ring - desc->first_rx_ring + 1;
- int ring_id = desc->cur_rx_ring;
- unsigned num_rx = 0;
+ int num_rx = 0;
uint32_t slot_id;
- polld.fd = desc->fd;
- polld.events = POLLIN;
+ if (odp_unlikely(pktio_entry->s.state == STATE_STOP))
+ return 0;
- for (i = 0; i < num_rings && num_rx != num; i++) {
- ring_id = desc->cur_rx_ring + i;
+ if (!pkt_nm->lockless_rx)
+ odp_ticketlock_lock(&pkt_nm->rx_desc_ring[index].s.lock);
- if (ring_id > desc->last_rx_ring)
- ring_id = desc->first_rx_ring;
+ desc_id = pkt_nm->rx_desc_ring[index].s.cur;
- ring = NETMAP_RXRING(desc->nifp, ring_id);
+ for (i = 0; i < num_desc && num_rx != num; i++) {
+ if (desc_id > last_desc_id)
+ desc_id = first_desc_id;
+
+ desc = pkt_nm->rx_desc_ring[index].s.desc[desc_id];
+ ring = NETMAP_RXRING(desc->nifp, desc->cur_rx_ring);
while (!nm_ring_empty(ring) && num_rx != num) {
slot_id = ring->cur;
@@ -294,51 +557,108 @@ static int netmap_recv(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
ring->cur = nm_ring_next(ring, slot_id);
ring->head = ring->cur;
}
- }
- desc->cur_rx_ring = ring_id;
- if (num_rx == 0) {
- if (odp_unlikely(poll(&polld, 1, 0) < 0))
- ODP_ERR("RX: poll error\n");
+ if (num_rx != num) {
+ polld.fd = desc->fd;
+ polld.events = POLLIN;
+ if (odp_unlikely(poll(&polld, 1, 0) < 0))
+ ODP_ERR("RX: poll error\n");
+ }
+ desc_id++;
}
+ pkt_nm->rx_desc_ring[index].s.cur = desc_id;
+
+ if (!pkt_nm->lockless_rx)
+ odp_ticketlock_unlock(&pkt_nm->rx_desc_ring[index].s.lock);
+
return num_rx;
}
-static int netmap_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+static int netmap_recv(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
unsigned num)
{
+ return netmap_recv_queue(pktio_entry, 0, pkt_table, num);
+}
+
+static int netmap_send_queue(pktio_entry_t *pktio_entry, int index,
+ odp_packet_t pkt_table[], int num)
+{
+ pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
struct pollfd polld;
- struct nm_desc *nm_desc = pktio_entry->s.pkt_nm.tx_desc;
- unsigned i, nb_tx;
- uint8_t *frame;
- uint32_t frame_len;
+ struct nm_desc *desc;
+ struct netmap_ring *ring;
+ int i;
+ int nb_tx;
+ int desc_id;
+ odp_packet_t pkt;
+ uint32_t pkt_len;
+ unsigned slot_id;
+ char *buf;
- polld.fd = nm_desc->fd;
+ if (odp_unlikely(pktio_entry->s.state == STATE_STOP))
+ return 0;
+
+ /* Only one netmap tx ring per pktout queue */
+ desc_id = pkt_nm->tx_desc_ring[index].s.cur;
+ desc = pkt_nm->tx_desc_ring[index].s.desc[desc_id];
+ ring = NETMAP_TXRING(desc->nifp, desc->cur_tx_ring);
+
+ if (!pkt_nm->lockless_tx)
+ odp_ticketlock_lock(&pkt_nm->tx_desc_ring[index].s.lock);
+
+ polld.fd = desc->fd;
polld.events = POLLOUT;
for (nb_tx = 0; nb_tx < num; nb_tx++) {
- frame_len = 0;
- frame = odp_packet_l2_ptr(pkt_table[nb_tx], &frame_len);
+ pkt = pkt_table[nb_tx];
+ pkt_len = odp_packet_len(pkt);
+
+ if (pkt_len > ring->nr_buf_size) {
+ if (nb_tx == 0)
+ __odp_errno = EMSGSIZE;
+ break;
+ }
for (i = 0; i < NM_INJECT_RETRIES; i++) {
- if (nm_inject(nm_desc, frame, frame_len) == 0)
+ if (nm_ring_empty(ring)) {
poll(&polld, 1, 0);
- else
+ continue;
+ }
+ slot_id = ring->cur;
+ ring->slot[slot_id].flags = 0;
+ ring->slot[slot_id].len = pkt_len;
+
+ buf = NETMAP_BUF(ring, ring->slot[slot_id].buf_idx);
+
+ if (odp_packet_copydata_out(pkt, 0, pkt_len, buf)) {
+ i = NM_INJECT_RETRIES;
break;
- }
- if (odp_unlikely(i == NM_INJECT_RETRIES)) {
- ioctl(nm_desc->fd, NIOCTXSYNC, NULL);
+ }
+ ring->cur = nm_ring_next(ring, slot_id);
+ ring->head = ring->cur;
break;
}
+ if (i == NM_INJECT_RETRIES)
+ break;
+ odp_packet_free(pkt);
}
/* Send pending packets */
poll(&polld, 1, 0);
- for (i = 0; i < nb_tx; i++)
- odp_packet_free(pkt_table[i]);
+ if (!pkt_nm->lockless_tx)
+ odp_ticketlock_unlock(&pkt_nm->tx_desc_ring[index].s.lock);
+
+ if (odp_unlikely(nb_tx == 0 && __odp_errno != 0))
+ return -1;
return nb_tx;
}
+static int netmap_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+ unsigned num)
+{
+ return netmap_send_queue(pktio_entry, 0, pkt_table, num);
+}
+
static int netmap_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr)
{
memcpy(mac_addr, pktio_entry->s.pkt_nm.if_mac, ETH_ALEN);
@@ -385,13 +705,13 @@ const pktio_if_ops_t netmap_pktio_ops = {
.promisc_mode_get = netmap_promisc_mode_get,
.mac_get = netmap_mac_addr_get,
.capability = netmap_capability,
- .input_queues_config = NULL,
- .output_queues_config = NULL,
+ .input_queues_config = netmap_input_queues_config,
+ .output_queues_config = netmap_output_queues_config,
.in_queues = NULL,
.pktin_queues = NULL,
.pktout_queues = NULL,
- .recv_queue = NULL,
- .send_queue = NULL
+ .recv_queue = netmap_recv_queue,
+ .send_queue = netmap_send_queue
};
#endif /* ODP_NETMAP */