Hello Ilya,
On Tue, Apr 9, 2019 at 1:42 PM Ilya Maximets <[email protected]> wrote:
>
> What do you think about something like this (not even compile tested):
> ---
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index bd9718824..0cf492a3b 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -591,6 +591,8 @@ struct polled_queue {
>      struct dp_netdev_rxq *rxq;
>      odp_port_t port_no;
>      bool emc_enabled;
> +    bool enabled;
> +    uint64_t change_seq;
>  };
>
> /* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
> @@ -5375,6 +5377,9 @@ pmd_load_queues_and_ports(struct dp_netdev_pmd_thread *pmd,
>          poll_list[i].rxq = poll->rxq;
>          poll_list[i].port_no = poll->rxq->port->port_no;
>          poll_list[i].emc_enabled = poll->rxq->port->emc_enabled;
> +        poll_list[i].enabled = netdev_rxq_enabled(poll->rxq);
> +        poll_list[i].change_seq =
> +                     netdev_get_change_seq(poll->rxq->port->netdev);
>          i++;
>      }
>
> @@ -5440,6 +5445,10 @@ reload:
>
>          for (i = 0; i < poll_cnt; i++) {
>
> +            if (!poll_list[i].enabled) {
> +                continue;
> +            }
> +
>              if (poll_list[i].emc_enabled) {
>                  atomic_read_relaxed(&pmd->dp->emc_insert_min,
>                                      &pmd->ctx.emc_insert_min);
> @@ -5476,6 +5485,15 @@ reload:
>              if (reload) {
>                  break;
>              }
> +
> +            for (i = 0; i < poll_cnt; i++) {
> +                uint64_t current_seq =
> +                         netdev_get_change_seq(poll_list[i].rxq->port->netdev);
> +                if (poll_list[i].change_seq != current_seq) {
> +                    poll_list[i].change_seq = current_seq;
> +                    poll_list[i].enabled = netdev_rxq_enabled(poll_list[i].rxq);
> +                }
> +            }
>          }
>          pmd_perf_end_iteration(s, rx_packets, tx_packets,
>                                 pmd_perf_metrics_enabled(pmd));
> ---
> + replacing your 'netdev_request_reconfigure(&dev->up)' inside
> 'vring_state_changed' with 'netdev_change_seq_changed(&dev->up)'?
>
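If I understand correctly, on my side that boils down to this one-line
change in 'vring_state_changed' (sketch, not tested):
---
-        netdev_request_reconfigure(&dev->up);
+        netdev_change_seq_changed(&dev->up);
---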
Interesting. I would prefer to have a bitmap of rxqs rather than looping
over all of them and checking their state.
But let me try your approach first.
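For the record, a rough sketch of the bitmap idea (hypothetical code
relying on the existing lib/bitmap.h helpers, not compile tested):
---
/* Keep one bit per polled rxq instead of a per-queue flag. */
unsigned long *enabled_map = bitmap_allocate(poll_cnt);

/* Refresh the map whenever a change_seq bump is noticed. */
for (i = 0; i < poll_cnt; i++) {
    bitmap_set(enabled_map, i, netdev_rxq_enabled(poll_list[i].rxq));
}

/* Poll loop: skip disabled queues with a single bit test. */
for (i = 0; i < poll_cnt; i++) {
    if (!bitmap_is_set(enabled_map, i)) {
        continue;
    }
    /* ... process poll_list[i].rxq as today ... */
}

free(enabled_map);
---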
> This should help avoid reconfiguring/rescheduling everything and avoid
> wasting much time on polling. It will also allow reacting faster to
> queue state changes.
>
I suspect the additional per-queue branch could still have a performance
impact, but I won't know until I try.
> After that we'll be able to fix reconfiguration on F_MQ manipulations
> with something simple like this:
> ---
> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
> index 91af377e8..1937561cc 100644
> --- a/lib/netdev-dpdk.c
> +++ b/lib/netdev-dpdk.c
> @@ -3492,8 +3528,8 @@ new_device(int vid)
>                  newnode = dev->socket_id;
>              }
>
> -            if (dev->requested_n_txq != qp_num
> -                || dev->requested_n_rxq != qp_num
> +            if (dev->requested_n_txq < qp_num
> +                || dev->requested_n_rxq < qp_num
>                  || dev->requested_socket_id != newnode) {
>                  dev->requested_socket_id = newnode;
>                  dev->requested_n_rxq = qp_num;
> ---
> Decreasing 'qp_num' will not cause big issues because we will not poll
> the disabled queues.
>
Yes.
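For reference, 'qp_num' in new_device() is derived from the vring count
reported by the vhost library, i.e. (quoting the existing code, if I read
it right):
---
    uint32_t qp_num = rte_vhost_get_vring_num(vid) / VIRTIO_QNUM;
---
So when the guest negotiates fewer queue pairs, the previously requested
values are kept and the disabled queues are simply no longer polled.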
> And there could be a separate change to drop the requested values when
> the socket connection is closed (I hope the guest is not able to
> control that):
>
I don't think the guest can control this part.
> ---
> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
> index 91af377e8..1937561cc 100644
> --- a/lib/netdev-dpdk.c
> +++ b/lib/netdev-dpdk.c
> @@ -185,13 +185,16 @@ static const struct rte_eth_conf port_conf = {
>   */
>  static int new_device(int vid);
>  static void destroy_device(int vid);
> +static void destroy_connection(int vid);
>  static int vring_state_changed(int vid, uint16_t queue_id, int enable);
>  static const struct vhost_device_ops virtio_net_device_ops =
>  {
>      .new_device = new_device,
>      .destroy_device = destroy_device,
>      .vring_state_changed = vring_state_changed,
> -    .features_changed = NULL
> +    .features_changed = NULL,
> +    .new_connection = NULL,
> +    .destroy_connection = destroy_connection
>  };
>
> enum { DPDK_RING_SIZE = 256 };
> @@ -3462,6 +3465,39 @@ netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
>      free(enabled_queues);
>  }
>
> +/*
> + * vhost-user socket connection is closed.
> + */
> +static void
> +destroy_connection(int vid)
> +{
> +    struct netdev_dpdk *dev;
> +    char ifname[IF_NAME_SZ];
> +
> +    rte_vhost_get_ifname(vid, ifname, sizeof ifname);
> +
> +    ovs_mutex_lock(&dpdk_mutex);
> +    LIST_FOR_EACH(dev, list_node, &dpdk_list) {
> +        ovs_mutex_lock(&dev->mutex);
> +        if (nullable_string_is_equal(ifname, dev->vhost_id)) {
> +            uint32_t qp_num = NR_QUEUE;
> +
> +            /* Restore the number of queue pairs to default. */
> +            if (dev->requested_n_txq != qp_num
> +                || dev->requested_n_rxq != qp_num) {
> +                dev->requested_n_rxq = qp_num;
> +                dev->requested_n_txq = qp_num;
> +                netdev_request_reconfigure(&dev->up);
> +            }
> +            ovs_mutex_unlock(&dev->mutex);
> +            break;
> +        }
> +        ovs_mutex_unlock(&dev->mutex);
> +    }
> +    ovs_mutex_unlock(&dpdk_mutex);
> +}
> +
> +
>  /*
>   * A new virtio-net device is added to a vhost port.
>   */
>
>
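One last note: as far as I can see, 'virtio_net_device_ops' is already
registered per vhost socket via:
---
    err = rte_vhost_driver_callback_register(dev->vhost_id,
                                             &virtio_net_device_ops);
---
so the new destroy_connection callback should be invoked without any
extra plumbing.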
--
David Marchand
_______________________________________________
dev mailing list
[email protected]
https://mail.openvswitch.org/mailman/listinfo/ovs-dev