Ping.
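
To make the change easier to review, here is a minimal, standalone sketch (not the actual OVS datapath code; the struct and the port/queue numbers are made up for illustration) of the round-robin rx-queue reassignment the patch below performs when pmd-cpu-mask is unchanged, i.e. roughly what re-running dp_netdev_add_port_to_pmds() for every port after clearing the poll lists amounts to:

/*
 * Standalone sketch only: models distributing the rx queues of all
 * ports round-robin across a fixed set of PMD threads.  The real code
 * works on dp_netdev_port/dp_netdev_pmd_thread, not these fake types.
 */
#include <stdio.h>

#define N_PMDS 3

struct fake_port {
    const char *name;
    int n_rxq;              /* Number of rx queues on this port. */
};

int
main(void)
{
    struct fake_port ports[] = {
        { "dpdk0", 4 },
        { "dpdk1", 2 },
        { "dpdk2", 3 },
    };
    int n_ports = sizeof ports / sizeof ports[0];
    int next_pmd = 0;

    /* Each rx queue goes to the next PMD in turn, so queues end up
     * spread evenly regardless of how they were assigned before. */
    for (int i = 0; i < n_ports; i++) {
        for (int q = 0; q < ports[i].n_rxq; q++) {
            printf("pmd %d polls %s rxq %d\n",
                   next_pmd, ports[i].name, q);
            next_pmd = (next_pmd + 1) % N_PMDS;
        }
    }
    return 0;
}
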
Best regards, Ilya Maximets.
On 24.02.2016 15:47, Ilya Maximets wrote:
> Since 5f2ccb1c0d3b ("dpif: Allow adding ukeys for same flow by
> different pmds.") it has been possible to reassign queues among pmd
> threads without restarting them and without deleting the megaflow cache.
>
> So, reconfiguration can be performed without destroying PMD threads
> if pmd-cpu-mask is not changed.
>
> All rx queues are reassigned to achieve a fair distribution.
>
> Signed-off-by: Ilya Maximets <[email protected]>
> Acked-by: Flavio Leitner <[email protected]>
> ---
>
> version 2:
> * Reduced indentation level in dpif_netdev_pmd_set().
>
> lib/dpif-netdev.c | 100 +++++++++++++++++++++++++++++++++---------------------
> 1 file changed, 62 insertions(+), 38 deletions(-)
>
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index 500e7cc..8ffff90 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -2411,27 +2411,31 @@ dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
> }
> }
>
> -/* Returns true if the configuration for rx queues or cpu mask
> - * is changed. */
> +/* Returns true if the configuration of rx queues changed. */
> static bool
> -pmd_config_changed(const struct dp_netdev *dp, const char *cmask)
> +pmd_n_rxq_changed(const struct dp_netdev *dp)
> {
> struct dp_netdev_port *port;
>
> CMAP_FOR_EACH (port, node, &dp->ports) {
> - struct netdev *netdev = port->netdev;
> - int requested_n_rxq = netdev_requested_n_rxq(netdev);
> - if (netdev_is_pmd(netdev)
> + int requested_n_rxq = netdev_requested_n_rxq(port->netdev);
> + if (netdev_is_pmd(port->netdev)
> && port->latest_requested_n_rxq != requested_n_rxq) {
> return true;
> }
> }
> + return false;
> +}
>
> +/* Returns true if cpu mask changed. */
> +static bool
> +pmd_cpu_mask_changed(const struct dp_netdev *dp, const char *cmask)
> +{
> if (dp->pmd_cmask != NULL && cmask != NULL) {
> return strcmp(dp->pmd_cmask, cmask);
> - } else {
> - return (dp->pmd_cmask != NULL || cmask != NULL);
> }
> +
> + return (dp->pmd_cmask != NULL || cmask != NULL);
> }
>
> /* Resets pmd threads if the configuration for 'rxq's or cpu mask changes. */
> @@ -2439,44 +2443,58 @@ static int
> dpif_netdev_pmd_set(struct dpif *dpif, const char *cmask)
> {
> struct dp_netdev *dp = get_dp_netdev(dpif);
> + bool cmask_changed = pmd_cpu_mask_changed(dp, cmask);
> + struct dp_netdev_port *port;
>
> - if (pmd_config_changed(dp, cmask)) {
> - struct dp_netdev_port *port;
> + if (!cmask_changed && !pmd_n_rxq_changed(dp)) {
> + return 0;
> + }
>
> + if (cmask_changed) {
> dp_netdev_destroy_all_pmds(dp);
> + } else {
> + struct dp_netdev_pmd_thread *pmd;
>
> - CMAP_FOR_EACH (port, node, &dp->ports) {
> - struct netdev *netdev = port->netdev;
> - int requested_n_rxq = netdev_requested_n_rxq(netdev);
> - if (netdev_is_pmd(port->netdev)
> - && port->latest_requested_n_rxq != requested_n_rxq) {
> - int i, err;
> + CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
> + dp_netdev_pmd_clear_poll_list(pmd);
> + dp_netdev_reload_pmd__(pmd);
> + }
> + }
>
> - /* Closes the existing 'rxq's. */
> - for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
> - netdev_rxq_close(port->rxq[i]);
> - port->rxq[i] = NULL;
> - }
> + CMAP_FOR_EACH (port, node, &dp->ports) {
> + struct netdev *netdev = port->netdev;
> + int requested_n_rxq = netdev_requested_n_rxq(netdev);
> + if (netdev_is_pmd(port->netdev)
> + && port->latest_requested_n_rxq != requested_n_rxq) {
> + int i, err;
>
> - /* Sets the new rx queue config. */
> - err = netdev_set_multiq(port->netdev,
> - ovs_numa_get_n_cores() + 1,
> - requested_n_rxq);
> - if (err && (err != EOPNOTSUPP)) {
> - VLOG_ERR("Failed to set dpdk interface %s rx_queue to:"
> - " %u", netdev_get_name(port->netdev),
> - requested_n_rxq);
> - return err;
> - }
> - port->latest_requested_n_rxq = requested_n_rxq;
> - /* If the set_multiq() above succeeds, reopens the 'rxq's. */
> - port->rxq = xrealloc(port->rxq, sizeof *port->rxq
> - * netdev_n_rxq(port->netdev));
> - for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
> - netdev_rxq_open(port->netdev, &port->rxq[i], i);
> - }
> + /* Closes the existing 'rxq's. */
> + for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
> + netdev_rxq_close(port->rxq[i]);
> + port->rxq[i] = NULL;
> + }
> +
> + /* Sets the new rx queue config. */
> + err = netdev_set_multiq(port->netdev,
> + ovs_numa_get_n_cores() + 1,
> + requested_n_rxq);
> + if (err && (err != EOPNOTSUPP)) {
> + VLOG_ERR("Failed to set dpdk interface %s rx_queue to:"
> + " %u", netdev_get_name(port->netdev),
> + requested_n_rxq);
> + return err;
> + }
> + port->latest_requested_n_rxq = requested_n_rxq;
> + /* If the set_multiq() above succeeds, reopens the 'rxq's. */
> + port->rxq = xrealloc(port->rxq, sizeof *port->rxq
> + * netdev_n_rxq(port->netdev));
> + for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
> + netdev_rxq_open(port->netdev, &port->rxq[i], i);
> }
> }
> + }
> +
> + if (cmask_changed) {
> /* Reconfigures the cpu mask. */
> ovs_numa_set_cpu_mask(cmask);
> free(dp->pmd_cmask);
> @@ -2486,6 +2504,12 @@ dpif_netdev_pmd_set(struct dpif *dpif, const char *cmask)
> dp_netdev_set_nonpmd(dp);
> /* Restores all pmd threads. */
> dp_netdev_reset_pmd_threads(dp);
> + } else {
> + CMAP_FOR_EACH (port, node, &dp->ports) {
> + if (netdev_is_pmd(port->netdev)) {
> + dp_netdev_add_port_to_pmds(dp, port);
> + }
> + }
> }
>
> return 0;
>