New netdev types such as DPDK can support multi-queue I/O. The following patch adds support for this.
Signed-off-by: Pravin B Shelar <[email protected]> --- lib/dpif-netdev.c | 73 +++++++++++++++++++++++++++++++++---------------- lib/dpif-netdev.h | 1 + lib/netdev-provider.h | 2 ++ lib/netdev.c | 16 ++++++++++- lib/netdev.h | 3 +- 5 files changed, 69 insertions(+), 26 deletions(-) diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index 8ffbaa1..bf7549d 100644 --- a/lib/dpif-netdev.c +++ b/lib/dpif-netdev.c @@ -171,7 +171,7 @@ static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp, /* A port in a netdev-based datapath. */ struct dp_netdev_port { struct pkt_metadata md; - struct netdev_rxq *rxq; + struct netdev_rxq **rxq; struct netdev *netdev; odp_port_t port_no; struct netdev_saved_flags *sf; @@ -643,6 +643,7 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type, enum netdev_flags flags; const char *open_type; int error; + int i; /* XXX reject devices already in some dp_netdev. */ @@ -665,19 +666,24 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type, port->port_no = port_no; port->md = PKT_METADATA_INITIALIZER(port->port_no); port->netdev = netdev; + port->rxq = xmalloc(sizeof *port->rxq * netdev_nr_rx(netdev)); port->type = xstrdup(type); - error = netdev_rxq_open(netdev, &port->rxq); - if (error - && !(error == EOPNOTSUPP && dpif_netdev_class_is_dummy(dp->class))) { - VLOG_ERR("%s: cannot receive packets on this network device (%s)", - devname, ovs_strerror(errno)); - netdev_close(netdev); - return error; + for (i = 0; i < netdev_nr_rx(netdev); i++) { + error = netdev_rxq_open(netdev, &port->rxq[i], i); + if (error + && !(error == EOPNOTSUPP && dpif_netdev_class_is_dummy(dp->class))) { + VLOG_ERR("%s: cannot receive packets on this network device (%s)", + devname, ovs_strerror(errno)); + netdev_close(netdev); + return error; + } } error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf); if (error) { - netdev_rxq_close(port->rxq); + for (i = 0; i < netdev_nr_rx(netdev); i++) { + 
netdev_rxq_close(port->rxq[i]); + } netdev_close(netdev); free(port->rxq); free(port); @@ -786,8 +792,11 @@ static void port_unref(struct dp_netdev_port *port) { if (port && ovs_refcount_unref(&port->ref_cnt) == 1) { + int i; - netdev_rxq_close(port->rxq); + for (i = 0; i < netdev_nr_rx(port->netdev); i++) { + netdev_rxq_close(port->rxq[i]); + } netdev_restore_flags(port->sf); netdev_close(port->netdev); @@ -1659,8 +1668,12 @@ dpif_netdev_run(struct dpif *dpif) ovs_rwlock_rdlock(&dp->port_rwlock); HMAP_FOR_EACH (port, node, &dp->ports) { - if (port->rxq && !netdev_is_pmd(port->netdev)) { - dp_netdev_process_rxq_port(dp, port, port->rxq); + if (!netdev_is_pmd(port->netdev)) { + int i; + + for (i = 0; i < netdev_nr_rx(port->netdev); i++) { + dp_netdev_process_rxq_port(dp, port, port->rxq[i]); + } } } @@ -1676,8 +1689,12 @@ dpif_netdev_wait(struct dpif *dpif) ovs_rwlock_rdlock(&dp->port_rwlock); HMAP_FOR_EACH (port, node, &dp->ports) { - if (port->rxq && !netdev_is_pmd(port->netdev)) { - netdev_rxq_wait(port->rxq); + if (!netdev_is_pmd(port->netdev)) { + int i; + + for (i = 0; i < netdev_nr_rx(port->netdev); i++) { + netdev_rxq_wait(port->rxq[i]); + } } } ovs_rwlock_unlock(&dp->port_rwlock); @@ -1685,6 +1702,7 @@ dpif_netdev_wait(struct dpif *dpif) struct rxq_poll { struct dp_netdev_port *port; + struct netdev_rxq *rx; }; static int @@ -1710,11 +1728,14 @@ pmd_load_queues(struct pmd_thread *f, HMAP_FOR_EACH (port, node, &f->dp->ports) { if (netdev_is_pmd(port->netdev)) { - if ((index % dp->n_pmd_threads) == id) { - port_ref(port); - poll_cnt++; + for (i = 0; i < netdev_nr_rx(port->netdev); i++) { + + if ((index % dp->n_pmd_threads) == id) { + port_ref(port); + poll_cnt++; + } + index++; } - index++; } } @@ -1724,12 +1745,16 @@ pmd_load_queues(struct pmd_thread *f, HMAP_FOR_EACH (port, node, &f->dp->ports) { if (netdev_is_pmd(port->netdev)) { - if ((index % dp->n_pmd_threads) == id) { - poll_list[poll_cnt].port = port; - poll_cnt++; - VLOG_INFO("poll_cnt %d port = %d i 
= %d",poll_cnt,port->port_no, i); + for (i = 0; i < netdev_nr_rx(port->netdev); i++) { + + if ((index % dp->n_pmd_threads) == id) { + poll_list[poll_cnt].port = port; + poll_list[poll_cnt].rx = port->rxq[i]; + poll_cnt++; + VLOG_INFO("poll_cnt %d port = %d i = %d",poll_cnt,port->port_no, i); + } + index++; } - index++; } } @@ -1762,7 +1787,7 @@ reload: int i; for (i = 0; i < poll_cnt; i++) { - dp_netdev_process_rxq_port(dp, poll_list[i].port, poll_list[i].port->rxq); + dp_netdev_process_rxq_port(dp, poll_list[i].port, poll_list[i].rx); } if (lc++ > (64 * 1024 * 1024)) { diff --git a/lib/dpif-netdev.h b/lib/dpif-netdev.h index 76f261b..12a7169 100644 --- a/lib/dpif-netdev.h +++ b/lib/dpif-netdev.h @@ -33,6 +33,7 @@ enum { DP_NETDEV_HEADROOM = 2 + VLAN_HEADER_LEN }; enum { MAX_RX_BATCH = 256 }; /* Maximum number of flows in flow table. */ +#define NR_QUEUE 1 #define NR_THREADS 1 #ifdef __cplusplus diff --git a/lib/netdev-provider.h b/lib/netdev-provider.h index b60d2fb..7871cf0 100644 --- a/lib/netdev-provider.h +++ b/lib/netdev-provider.h @@ -39,6 +39,7 @@ struct netdev { this device. */ /* The following are protected by 'netdev_mutex' (internal to netdev.c). */ + int nr_rx; int ref_cnt; /* Times this devices was opened. */ struct shash_node *node; /* Pointer to element in global map. */ struct list saved_flags_list; /* Contains "struct netdev_saved_flags". */ @@ -59,6 +60,7 @@ void netdev_get_devices(const struct netdev_class *, * None of these members change during the lifetime of a struct netdev_rxq. */ struct netdev_rxq { struct netdev *netdev; /* Owns a reference to the netdev. 
*/ + int queue_id; }; struct netdev *netdev_rxq_get_netdev(const struct netdev_rxq *); diff --git a/lib/netdev.c b/lib/netdev.c index 3ba8308..378b391 100644 --- a/lib/netdev.c +++ b/lib/netdev.c @@ -90,6 +90,12 @@ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20); static void restore_all_flags(void *aux OVS_UNUSED); void update_device_args(struct netdev *, const struct shash *args); +int +netdev_nr_rx(const struct netdev *netdev) +{ + return netdev->nr_rx; +} + bool netdev_is_pmd(const struct netdev *netdev) { @@ -333,6 +339,13 @@ netdev_open(const char *name, const char *type, struct netdev **netdevp) netdev->netdev_class = rc->class; netdev->name = xstrdup(name); netdev->node = shash_add(&netdev_shash, name, netdev); + + /* By default enable one rx queue per netdev. */ + if (netdev->netdev_class->rxq_alloc) { + netdev->nr_rx = 1; + } else { + netdev->nr_rx = 0; + } list_init(&netdev->saved_flags_list); error = rc->class->construct(netdev); @@ -514,7 +527,7 @@ netdev_parse_name(const char *netdev_name_, char **name, char **type) * Some kinds of network devices might not support receiving packets. 
This * function returns EOPNOTSUPP in that case.*/ int -netdev_rxq_open(struct netdev *netdev, struct netdev_rxq **rxp) +netdev_rxq_open(struct netdev *netdev, struct netdev_rxq **rxp, int id) OVS_EXCLUDED(netdev_mutex) { int error; @@ -523,6 +536,7 @@ netdev_rxq_open(struct netdev *netdev, struct netdev_rxq **rxp) struct netdev_rxq *rx = netdev->netdev_class->rxq_alloc(); if (rx) { rx->netdev = netdev; + rx->queue_id = id; error = netdev->netdev_class->rxq_construct(rx); if (!error) { ovs_mutex_lock(&netdev_mutex); diff --git a/lib/netdev.h b/lib/netdev.h index 08c58ad..e50503e 100644 --- a/lib/netdev.h +++ b/lib/netdev.h @@ -134,6 +134,7 @@ void netdev_wait(void); void netdev_enumerate_types(struct sset *types); bool netdev_is_reserved_name(const char *name); +int netdev_nr_rx(const struct netdev *netdev); bool netdev_is_pmd(const struct netdev *netdev); /* Open and close. */ @@ -159,7 +160,7 @@ int netdev_set_mtu(const struct netdev *, int mtu); int netdev_get_ifindex(const struct netdev *); /* Packet reception. */ -int netdev_rxq_open(struct netdev *, struct netdev_rxq **); +int netdev_rxq_open(struct netdev *, struct netdev_rxq **, int id); void netdev_rxq_close(struct netdev_rxq *); const char *netdev_rxq_get_name(const struct netdev_rxq *); -- 1.7.9.5 _______________________________________________ dev mailing list [email protected] http://openvswitch.org/mailman/listinfo/dev
