On Thu, Apr 13, 2023 at 10:45 AM Lorenzo Bianconi
<[email protected]> wrote:
>
> Rework the OVN QoS implementation so it is configured through the OVS QoS
> table instead of running the tc command directly, bypassing OVS.
>
> https://bugzilla.redhat.com/show_bug.cgi?id=2129742
> Tested-by: Rodolfo Alonso <[email protected]>
> Signed-off-by: Lorenzo Bianconi <[email protected]>
> ---
> controller/binding.c | 421 +++++++++++++++++++-----------------
> controller/ovn-controller.c | 6 +
> tests/system-ovn.at | 67 +++++-
> 3 files changed, 285 insertions(+), 209 deletions(-)
>
> diff --git a/controller/binding.c b/controller/binding.c
> index cde59ba99..1ac44b915 100644
> --- a/controller/binding.c
> +++ b/controller/binding.c
> @@ -111,6 +111,7 @@ binding_wait(void)
>
> struct qos_queue {
> struct hmap_node node;
> + char *network;
> uint32_t queue_id;
> uint32_t min_rate;
> uint32_t max_rate;
> @@ -148,213 +149,248 @@ static void update_lport_tracking(const struct
> sbrec_port_binding *pb,
> bool claimed);
>
> static void
> -get_qos_params(const struct sbrec_port_binding *pb, struct hmap *queue_map)
> +get_qos_params(const struct sbrec_port_binding *pb, struct hmap *queue_map,
> + struct smap *egress_ifaces)
> {
> uint32_t min_rate = smap_get_int(&pb->options, "qos_min_rate", 0);
> uint32_t max_rate = smap_get_int(&pb->options, "qos_max_rate", 0);
> uint32_t burst = smap_get_int(&pb->options, "qos_burst", 0);
> uint32_t queue_id = smap_get_int(&pb->options, "qdisc_queue_id", 0);
> + const char *network = smap_get(egress_ifaces, pb->logical_port);
>
> - if ((!min_rate && !max_rate && !burst) || !queue_id) {
> + if ((!min_rate && !max_rate && !burst) || !queue_id || !network) {
> /* Qos is not configured for this port. */
> return;
> }
>
> struct qos_queue *node = xzalloc(sizeof *node);
> hmap_insert(queue_map, &node->node, hash_int(queue_id, 0));
> + node->network = xstrdup(network);
> node->min_rate = min_rate;
> node->max_rate = max_rate;
> node->burst = burst;
> node->queue_id = queue_id;
> }
>
> -static const struct ovsrec_qos *
> -get_noop_qos(struct ovsdb_idl_txn *ovs_idl_txn,
> - const struct ovsrec_qos_table *qos_table)
> +static const char *
> +get_qos_egress_interface_name(struct shash *bridge_mappings,
> + const char *network)
> {
> - const struct ovsrec_qos *qos;
> - OVSREC_QOS_TABLE_FOR_EACH (qos, qos_table) {
> - if (!strcmp(qos->type, "linux-noop")) {
> - return qos;
> - }
> - }
> -
> - if (!ovs_idl_txn) {
> + struct ovsrec_bridge *br_ln = shash_find_data(bridge_mappings, network);
> + if (!br_ln) {
> return NULL;
> }
> - qos = ovsrec_qos_insert(ovs_idl_txn);
> - ovsrec_qos_set_type(qos, "linux-noop");
> - return qos;
> -}
>
> -static bool
> -set_noop_qos(struct ovsdb_idl_txn *ovs_idl_txn,
> - const struct ovsrec_port_table *port_table,
> - const struct ovsrec_qos_table *qos_table,
> - struct smap *egress_ifaces)
> -{
> - if (!ovs_idl_txn) {
> - return false;
> - }
> + /* Add egress-ifaces from the connected bridge */
> + for (size_t i = 0; i < br_ln->n_ports; i++) {
> + const struct ovsrec_port *port_rec = br_ln->ports[i];
>
> - const struct ovsrec_qos *noop_qos = get_noop_qos(ovs_idl_txn, qos_table);
> - if (!noop_qos) {
> - return false;
> - }
> + for (size_t j = 0; j < port_rec->n_interfaces; j++) {
> + const struct ovsrec_interface *iface_rec;
>
> - const struct ovsrec_port *port;
> - size_t count = 0;
> + iface_rec = port_rec->interfaces[j];
> + if (smap_get(&iface_rec->external_ids, "iface-id")) {
> + continue;
> + }
>
> - OVSREC_PORT_TABLE_FOR_EACH (port, port_table) {
> - struct smap_node *node;
> - SMAP_FOR_EACH (node, egress_ifaces) {
> - if (node->value && !strcmp(node->value, port->name)) {
> - ovsrec_port_set_qos(port, noop_qos);
> - count++;
> + bool is_egress_iface = smap_get_bool(&iface_rec->external_ids,
> + "ovn-egress-iface", false);
> + if (is_egress_iface) {
> + return iface_rec->name;
> }
> }
> - if (smap_count(egress_ifaces) == count) {
> - break;
> - }
> }
> - return true;
> +
> + return NULL;
> }
>
> +/* 34359738360 == (2^32 - 1) * 8. netdev_set_qos() doesn't support
> + * 64-bit rate netlink attributes, so the maximum value is 2^32 - 1
> + * bytes. The 'max-rate' config option is in bits, so multiplying by 8.
> + * Without setting max-rate the reported link speed will be used, which
> + * can be unrecognized for certain NICs or reported too low for virtual
> + * interfaces. */
> +#define OVN_QOS_MAX_RATE 34359738360
> static void
> -set_qos_type(struct netdev *netdev, const char *type)
> +add_ovs_qos_table_entry(struct ovsdb_idl_txn *ovs_idl_txn,
> + const struct ovsrec_port_table *port_table,
> + struct shash *bridge_mappings,
> + struct qos_queue *q)
> {
> - /* 34359738360 == (2^32 - 1) * 8. netdev_set_qos() doesn't support
> - * 64-bit rate netlink attributes, so the maximum value is 2^32 - 1
> bytes.
> - * The 'max-rate' config option is in bits, so multiplying by 8.
> - * Without setting max-rate the reported link speed will be used, which
> - * can be unrecognized for certain NICs or reported too low for virtual
> - * interfaces. */
> - const struct smap conf = SMAP_CONST1(&conf, "max-rate", "34359738360");
> - int error = netdev_set_qos(netdev, type, &conf);
> - if (error) {
> - static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
> - VLOG_WARN_RL(&rl, "%s: could not set qdisc type \"%s\" (%s)",
> - netdev_get_name(netdev), type, ovs_strerror(error));
> + const char *port_name =
> + get_qos_egress_interface_name(bridge_mappings, q->network);
> + if (!port_name) {
> + return;
> }
> -}
>
> -static void
> -setup_qos(const char *egress_iface, struct hmap *queue_map)
> -{
> - static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
> - struct netdev *netdev_phy;
> + const struct ovsrec_port *port = NULL, *iter;
> + OVSREC_PORT_TABLE_FOR_EACH (iter, port_table) {
> + if (!strcmp(iter->name, port_name)) {
> + port = iter;
> + break;
> + }
> + }
I think you can make get_qos_egress_interface_name return a port
record instead of its name and then you won't need to iterate over the
table again.
>
> - if (!egress_iface) {
> - /* Queues cannot be configured. */
> + if (!port) {
> return;
> }
>
> - int error = netdev_open(egress_iface, NULL, &netdev_phy);
> - if (error) {
> - VLOG_WARN_RL(&rl, "%s: could not open netdev (%s)",
> - egress_iface, ovs_strerror(error));
> - return;
> + struct smap other_config = SMAP_INITIALIZER(&other_config);
> + const struct ovsrec_qos *qos = port->qos;
> + if (!qos) {
> + qos = ovsrec_qos_insert(ovs_idl_txn);
> + ovsrec_qos_set_type(qos, OVN_QOS_TYPE);
> + ovsrec_port_set_qos(port, qos);
> + smap_add_format(&other_config, "max-rate", "%ld", OVN_QOS_MAX_RATE);
> + ovsrec_qos_set_other_config(qos, &other_config);
> + smap_clear(&other_config);
> }
>
> - /* Check current qdisc. */
> - const char *qdisc_type;
> - struct smap qdisc_details;
> + struct ovsrec_queue *queue;
> + size_t i;
> + for (i = 0; i < qos->n_queues; i++) {
> + queue = qos->value_queues[i];
> + if (qos->key_queues[i] != q->queue_id) {
> + continue;
> + }
> +
> + uint32_t max_rate = smap_get_int(&queue->other_config, "max-rate",
> 0);
> + uint32_t min_rate = smap_get_int(&queue->other_config, "min-rate",
> 0);
> + uint32_t burst = smap_get_int(&queue->other_config, "burst", 0);
>
> - smap_init(&qdisc_details);
> - if (netdev_get_qos(netdev_phy, &qdisc_type, &qdisc_details) != 0 ||
> - qdisc_type[0] == '\0') {
> - smap_destroy(&qdisc_details);
> - netdev_close(netdev_phy);
> - /* Qos is not supported. */
> + if (max_rate != q->max_rate || min_rate != q->min_rate ||
> + burst != q->burst) {
> + break;
> + }
> + /* queue already configured. */
> return;
> }
> - smap_destroy(&qdisc_details);
>
> - /* If we're not actually being requested to do any QoS:
> - *
> - * - If the current qdisc type is OVN_QOS_TYPE, then we clear the
> qdisc
> - * type to "". Otherwise, it's possible that our own leftover
> qdisc
> - * settings could cause strange behavior on egress. Also, QoS is
> - * expensive and may waste CPU time even if it's not really in use.
> - *
> - * OVN isn't the only software that can configure qdiscs, and
> - * physical interfaces are shared resources, so there is some risk
> in
> - * this strategy: we could disrupt some other program's QoS.
> - * Probably, to entirely avoid this possibility we would need to
> add
> - * a configuration setting.
> - *
> - * - Otherwise leave the qdisc alone. */
> - if (hmap_is_empty(queue_map)) {
> - if (!strcmp(qdisc_type, OVN_QOS_TYPE)) {
> - set_qos_type(netdev_phy, "");
> - }
> - netdev_close(netdev_phy);
> - return;
> + if (i == qos->n_queues) {
> + queue = ovsrec_queue_insert(ovs_idl_txn);
> + ovsrec_qos_update_queues_setkey(qos, q->queue_id, queue);
> }
>
> - /* Configure qdisc. */
> - if (strcmp(qdisc_type, OVN_QOS_TYPE)) {
> - set_qos_type(netdev_phy, OVN_QOS_TYPE);
> - }
> -
> - /* Check and delete if needed. */
> - struct netdev_queue_dump dump;
> - unsigned int queue_id;
> - struct smap queue_details;
> - struct qos_queue *sb_info;
> - struct hmap consistent_queues;
> -
> - smap_init(&queue_details);
> - hmap_init(&consistent_queues);
> - NETDEV_QUEUE_FOR_EACH (&queue_id, &queue_details, &dump, netdev_phy) {
> - bool is_queue_needed = false;
> -
> - HMAP_FOR_EACH_WITH_HASH (sb_info, node, hash_int(queue_id, 0),
> - queue_map) {
> - is_queue_needed = true;
> - if (sb_info->min_rate ==
> - smap_get_int(&queue_details, "min-rate", 0)
> - && sb_info->max_rate ==
> - smap_get_int(&queue_details, "max-rate", 0)
> - && sb_info->burst ==
> - smap_get_int(&queue_details, "burst", 0)) {
> - /* This queue is consistent. */
> - hmap_insert(&consistent_queues, &sb_info->node,
> - hash_int(queue_id, 0));
> + smap_add_format(&other_config, "max-rate", "%d", q->max_rate);
> + smap_add_format(&other_config, "min-rate", "%d", q->min_rate);
> + smap_add_format(&other_config, "burst", "%d", q->burst);
> + ovsrec_queue_verify_other_config(queue);
> + ovsrec_queue_set_other_config(queue, &other_config);
> + smap_destroy(&other_config);
> +}
> +
> +static void
> +remove_stale_ovs_qos_entry(const struct ovsrec_port_table *port_table,
nit: should this be named remove_stale_ovs_qos_entries (plural), since it can remove multiple entries?
> + const struct ovsrec_qos_table *qos_table,
> + const struct sbrec_port_binding_table *pb_table,
> + struct shash *network_mappings,
> + struct smap *egress_ifaces)
> +{
> + const struct ovsrec_qos *qos, *qos_next;
> + OVSREC_QOS_TABLE_FOR_EACH_SAFE (qos, qos_next, qos_table) {
> + const struct ovsrec_port *port = NULL, *iter;
> + OVSREC_PORT_TABLE_FOR_EACH (iter, port_table) {
> + if (iter->qos == qos) {
> + port = iter;
> break;
> }
> }
> + if (!port) {
> + continue;
> + }
> +
> + int n_queue_deleted = 0, n_queues = qos->n_queues;
> + for (size_t i = 0; i < n_queues; i++) {
> + struct ovsrec_queue *queue = qos->value_queues[i];
> + if (!queue) {
> + continue;
> + }
> +
> + bool stale = true;
> + const struct sbrec_port_binding *pb;
> + SBREC_PORT_BINDING_TABLE_FOR_EACH (pb, pb_table) {
> + const char *network = smap_get(
> + egress_ifaces, pb->logical_port);
> + if (!network) {
> + continue;
> + }
> +
> + uint32_t queue_id = smap_get_int(&pb->options,
> + "qdisc_queue_id", 0);
nit: extract queue_id after the potential "continue" statements below, so it is only computed once the network/port-name checks have passed.
> + const char *port_name = shash_find_data(
> + network_mappings, network);
> + if (!port_name) {
> + continue;
> + }
> +
> + if (!strcmp(port->name, port_name) &&
> + queue_id == qos->key_queues[i]) {
> + stale = false;
> + break;
> + }
> + }
> +
> + if (stale) {
> + ovsrec_qos_update_queues_delkey(qos, qos->key_queues[i]);
> + ovsrec_queue_delete(queue);
> + n_queue_deleted++;
> + }
> + }
>
> - if (!is_queue_needed) {
> - error = netdev_delete_queue(netdev_phy, queue_id);
> - if (error) {
> - VLOG_WARN_RL(&rl, "%s: could not delete queue %u (%s)",
> - egress_iface, queue_id, ovs_strerror(error));
> + if (n_queue_deleted && n_queue_deleted == n_queues) {
> + if (port) {
> + ovsrec_port_set_qos(port, NULL);
> }
> + ovsrec_qos_delete(qos);
> }
> }
> +}
>
> - /* Create/Update queues. */
> - HMAP_FOR_EACH (sb_info, node, queue_map) {
> - if (hmap_contains(&consistent_queues, &sb_info->node)) {
> - hmap_remove(&consistent_queues, &sb_info->node);
> +static void
> +configure_ovs_qos(struct hmap *queue_map,
> + struct ovsdb_idl_txn *ovs_idl_txn,
> + const struct ovsrec_port_table *port_table,
> + const struct ovsrec_qos_table *qos_table,
> + const struct sbrec_port_binding_table *pb_table,
> + struct shash *bridge_mappings,
> + struct smap *egress_ifaces)
> +
> +{
> + if (!ovs_idl_txn) {
> + return;
> + }
> +
> + if (smap_is_empty(egress_ifaces)) {
> + return;
> + }
> +
> + struct shash pb_network_mappings =
> SHASH_INITIALIZER(&pb_network_mappings);
> + const struct sbrec_port_binding *pb;
> + SBREC_PORT_BINDING_TABLE_FOR_EACH (pb, pb_table) {
> + const char *network = smap_get(egress_ifaces, pb->logical_port);
> + if (!network) {
> continue;
> }
>
> - smap_clear(&queue_details);
> - smap_add_format(&queue_details, "min-rate", "%d", sb_info->min_rate);
> - smap_add_format(&queue_details, "max-rate", "%d", sb_info->max_rate);
> - smap_add_format(&queue_details, "burst", "%d", sb_info->burst);
> - error = netdev_set_queue(netdev_phy, sb_info->queue_id,
> - &queue_details);
> - if (error) {
> - VLOG_WARN_RL(&rl, "%s: could not configure queue %u (%s)",
> - egress_iface, sb_info->queue_id,
> ovs_strerror(error));
> + const char *port_name =
> + get_qos_egress_interface_name(bridge_mappings, network);
> + if (port_name) {
> + shash_add(&pb_network_mappings, network, port_name);
> }
> }
> - smap_destroy(&queue_details);
> - hmap_destroy(&consistent_queues);
> - netdev_close(netdev_phy);
> +
> + /* Remove stale QoS entries. */
> + remove_stale_ovs_qos_entry(port_table, qos_table, pb_table,
> + &pb_network_mappings, egress_ifaces);
> +
> + struct qos_queue *q;
> + HMAP_FOR_EACH (q, node, queue_map) {
> + /* Add new QoS entries. */
> + add_ovs_qos_table_entry(ovs_idl_txn, port_table,
> + bridge_mappings, q);
> + }
> +
> + shash_destroy(&pb_network_mappings);
> }
>
> static void
> @@ -362,6 +398,7 @@ destroy_qos_map(struct hmap *qos_map)
> {
> struct qos_queue *qos_queue;
> HMAP_FOR_EACH_POP (qos_queue, node, qos_map) {
> + free(qos_queue->network);
> free(qos_queue);
> }
>
> @@ -433,7 +470,7 @@ add_localnet_egress_interface_mappings(
> continue;
> }
> smap_replace(egress_ifaces, port_binding->logical_port,
> - iface_rec->name);
> + network);
> }
> }
> }
> @@ -1460,7 +1497,7 @@ consider_vif_lport_(const struct sbrec_port_binding *pb,
> b_ctx_out->tracked_dp_bindings);
> }
> if (b_lport->lbinding->iface && qos_map &&
> b_ctx_in->ovs_idl_txn) {
> - get_qos_params(pb, qos_map);
> + get_qos_params(pb, qos_map, b_ctx_out->egress_ifaces);
> }
> } else {
> /* We could, but can't claim the lport. */
> @@ -1789,7 +1826,7 @@ consider_localnet_lport(const struct sbrec_port_binding
> *pb,
> update_local_lports(pb->logical_port, b_ctx_out);
>
> if (qos_map && b_ctx_in->ovs_idl_txn) {
> - get_qos_params(pb, qos_map);
> + get_qos_params(pb, qos_map, b_ctx_out->egress_ifaces);
> }
>
> update_related_lport(pb, b_ctx_out);
> @@ -1912,9 +1949,6 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct
> binding_ctx_out *b_ctx_out)
> build_local_bindings(b_ctx_in, b_ctx_out);
> }
>
> - struct hmap *qos_map_ptr =
> - smap_is_empty(b_ctx_out->egress_ifaces) ? NULL : &qos_map;
> -
> struct ovs_list localnet_lports = OVS_LIST_INITIALIZER(&localnet_lports);
> struct ovs_list external_lports = OVS_LIST_INITIALIZER(&external_lports);
> struct ovs_list multichassis_ports = OVS_LIST_INITIALIZER(
> @@ -1951,7 +1985,7 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct
> binding_ctx_out *b_ctx_out)
> break;
>
> case LP_VIF:
> - consider_vif_lport(pb, b_ctx_in, b_ctx_out, NULL, qos_map_ptr);
> + consider_vif_lport(pb, b_ctx_in, b_ctx_out, NULL, &qos_map);
> if (pb->additional_chassis) {
> struct lport *multichassis_lport = xmalloc(
> sizeof *multichassis_lport);
> @@ -1962,11 +1996,11 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct
> binding_ctx_out *b_ctx_out)
> break;
>
> case LP_CONTAINER:
> - consider_container_lport(pb, b_ctx_in, b_ctx_out, qos_map_ptr);
> + consider_container_lport(pb, b_ctx_in, b_ctx_out, &qos_map);
> break;
>
> case LP_VIRTUAL:
> - consider_virtual_lport(pb, b_ctx_in, b_ctx_out, qos_map_ptr);
> + consider_virtual_lport(pb, b_ctx_in, b_ctx_out, &qos_map);
> break;
>
> case LP_L2GATEWAY:
> @@ -2044,18 +2078,14 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct
> binding_ctx_out *b_ctx_out)
> free(multichassis_lport);
> }
>
> - shash_destroy(&bridge_mappings);
> -
> - if (!smap_is_empty(b_ctx_out->egress_ifaces)
> - && set_noop_qos(b_ctx_in->ovs_idl_txn, b_ctx_in->port_table,
> - b_ctx_in->qos_table, b_ctx_out->egress_ifaces)) {
> - const struct smap_node *entry;
> - SMAP_FOR_EACH (entry, b_ctx_out->egress_ifaces) {
> - setup_qos(entry->value, &qos_map);
> - }
> - }
> + configure_ovs_qos(&qos_map, b_ctx_in->ovs_idl_txn,
> + b_ctx_in->port_table, b_ctx_in->qos_table,
> + b_ctx_in->port_binding_table,
> + &bridge_mappings,
> + b_ctx_out->egress_ifaces);
>
> destroy_qos_map(&qos_map);
> + shash_destroy(&bridge_mappings);
>
> cleanup_claimed_port_timestamps();
> }
> @@ -2441,18 +2471,6 @@ binding_handle_ovs_interface_changes(struct
> binding_ctx_in *b_ctx_in,
> break;
> }
>
> - if (smap_get(&iface_rec->external_ids, "ovn-egress-iface")) {
> - handled = false;
> - break;
> - }
> -
> - struct smap_node *node;
> - SMAP_FOR_EACH (node, b_ctx_out->egress_ifaces) {
> - if (node->value && !strcmp(node->value, iface_rec->name)) {
> - return false;
> - }
> - }
> -
> const char *iface_id = smap_get(&iface_rec->external_ids,
> "iface-id");
> const char *old_iface_id = smap_get(b_ctx_out->local_iface_ids,
> iface_rec->name);
> @@ -2495,8 +2513,6 @@ binding_handle_ovs_interface_changes(struct
> binding_ctx_in *b_ctx_in,
> }
>
> struct hmap qos_map = HMAP_INITIALIZER(&qos_map);
> - struct hmap *qos_map_ptr =
> - smap_is_empty(b_ctx_out->egress_ifaces) ? NULL : &qos_map;
>
> /*
> * We consider an OVS interface for claiming if the following
> @@ -2526,21 +2542,23 @@ binding_handle_ovs_interface_changes(struct
> binding_ctx_in *b_ctx_in,
> if (iface_id && ofport > 0 &&
> is_iface_in_int_bridge(iface_rec, b_ctx_in->br_int)) {
> handled = consider_iface_claim(iface_rec, iface_id, b_ctx_in,
> - b_ctx_out, qos_map_ptr);
> + b_ctx_out, &qos_map);
> if (!handled) {
> break;
> }
> }
> }
>
> - if (handled && qos_map_ptr && set_noop_qos(b_ctx_in->ovs_idl_txn,
> - b_ctx_in->port_table,
> - b_ctx_in->qos_table,
> - b_ctx_out->egress_ifaces)) {
> - const struct smap_node *entry;
> - SMAP_FOR_EACH (entry, b_ctx_out->egress_ifaces) {
> - setup_qos(entry->value, &qos_map);
> - }
> + if (handled) {
> + struct shash bridge_mappings = SHASH_INITIALIZER(&bridge_mappings);
> + add_ovs_bridge_mappings(b_ctx_in->ovs_table, b_ctx_in->bridge_table,
> + &bridge_mappings);
> + configure_ovs_qos(&qos_map, b_ctx_in->ovs_idl_txn,
> + b_ctx_in->port_table, b_ctx_in->qos_table,
> + b_ctx_in->port_binding_table,
> + &bridge_mappings,
> + b_ctx_out->egress_ifaces);
> + shash_destroy(&bridge_mappings);
> }
>
> destroy_qos_map(&qos_map);
> @@ -2979,8 +2997,6 @@ delete_done:
> }
>
> struct hmap qos_map = HMAP_INITIALIZER(&qos_map);
> - struct hmap *qos_map_ptr =
> - smap_is_empty(b_ctx_out->egress_ifaces) ? NULL : &qos_map;
>
> SBREC_PORT_BINDING_TABLE_FOR_EACH_TRACKED (pb,
> b_ctx_in->port_binding_table)
> {
> @@ -2993,7 +3009,7 @@ delete_done:
> update_ld_peers(pb, b_ctx_out->local_datapaths);
> }
>
> - handled = handle_updated_port(b_ctx_in, b_ctx_out, pb, qos_map_ptr);
> + handled = handle_updated_port(b_ctx_in, b_ctx_out, pb, &qos_map);
> if (!handled) {
> break;
> }
> @@ -3010,7 +3026,7 @@ delete_done:
> sset_find_and_delete(b_ctx_out->postponed_ports, port_name);
> continue;
> }
> - handled = handle_updated_port(b_ctx_in, b_ctx_out, pb, qos_map_ptr);
> + handled = handle_updated_port(b_ctx_in, b_ctx_out, pb, &qos_map);
> if (!handled) {
> break;
> }
> @@ -3053,17 +3069,14 @@ delete_done:
> }
> sbrec_port_binding_index_destroy_row(target);
> }
> - shash_destroy(&bridge_mappings);
> - }
>
> - if (handled && qos_map_ptr && set_noop_qos(b_ctx_in->ovs_idl_txn,
> - b_ctx_in->port_table,
> - b_ctx_in->qos_table,
> - b_ctx_out->egress_ifaces)) {
> - const struct smap_node *entry;
> - SMAP_FOR_EACH (entry, b_ctx_out->egress_ifaces) {
> - setup_qos(entry->value, &qos_map);
> - }
> + configure_ovs_qos(&qos_map, b_ctx_in->ovs_idl_txn,
> + b_ctx_in->port_table, b_ctx_in->qos_table,
> + b_ctx_in->port_binding_table,
> + &bridge_mappings,
> + b_ctx_out->egress_ifaces);
> +
> + shash_destroy(&bridge_mappings);
> }
>
> destroy_qos_map(&qos_map);
> diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
> index d172a1f4f..4bc1baf93 100644
> --- a/controller/ovn-controller.c
> +++ b/controller/ovn-controller.c
> @@ -1060,6 +1060,11 @@ ctrl_register_ovs_idl(struct ovsdb_idl *ovs_idl)
> ovsdb_idl_add_table(ovs_idl, &ovsrec_table_datapath);
> ovsdb_idl_add_column(ovs_idl, &ovsrec_datapath_col_capabilities);
> ovsdb_idl_add_table(ovs_idl, &ovsrec_table_flow_sample_collector_set);
> + ovsdb_idl_add_table(ovs_idl, &ovsrec_table_qos);
> + ovsdb_idl_add_column(ovs_idl, &ovsrec_qos_col_other_config);
> + ovsdb_idl_add_column(ovs_idl, &ovsrec_qos_col_queues);
> + ovsdb_idl_add_table(ovs_idl, &ovsrec_table_queue);
> + ovsdb_idl_add_column(ovs_idl, &ovsrec_queue_col_other_config);
>
> chassis_register_ovs_idl(ovs_idl);
> encaps_register_ovs_idl(ovs_idl);
> @@ -1074,6 +1079,7 @@ ctrl_register_ovs_idl(struct ovsdb_idl *ovs_idl)
> ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_type);
> ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_options);
> ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_ofport);
> + ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_external_ids);
> ovsdb_idl_track_add_column(ovs_idl, &ovsrec_port_col_name);
> ovsdb_idl_track_add_column(ovs_idl, &ovsrec_port_col_interfaces);
> ovsdb_idl_track_add_column(ovs_idl, &ovsrec_port_col_external_ids);
> diff --git a/tests/system-ovn.at b/tests/system-ovn.at
> index 96d1c295e..3e84b6dcb 100644
> --- a/tests/system-ovn.at
> +++ b/tests/system-ovn.at
> @@ -6533,8 +6533,10 @@ ovn_start
> OVS_TRAFFIC_VSWITCHD_START()
>
> ADD_BR([br-int])
> +ADD_BR([br-public])
> ADD_BR([br-ext])
>
> +ovs-ofctl add-flow br-public action=normal
> ovs-ofctl add-flow br-ext action=normal
> # Set external-ids in br-int needed for ovn-controller
> ovs-vsctl \
> @@ -6554,32 +6556,87 @@ ADD_VETH(sw01, sw01, br-int, "192.168.1.2/24",
> "f0:00:00:01:02:03")
> ovn-nbctl lsp-add sw0 sw01 \
> -- lsp-set-addresses sw01 "f0:00:00:01:02:03 192.168.1.2"
>
> +ovn-nbctl ls-add sw1
> +
> +ADD_NAMESPACES(sw11)
> +ADD_VETH(sw11, sw11, br-int, "192.168.4.2/24", "f0:00:00:01:04:03")
> +ovn-nbctl lsp-add sw1 sw11 \
> + -- lsp-set-addresses sw11 "f0:00:00:01:04:03 192.168.4.2"
> +
> ADD_NAMESPACES(public)
> -ADD_VETH(public, public, br-ext, "192.168.2.2/24", "f0:00:00:01:02:05")
> +ADD_VETH(public, public, br-public, "192.168.2.2/24", "f0:00:00:01:02:05")
> +AT_CHECK([ovs-vsctl remove interface ovs-public external-ids
> iface-id=public])
>
> -AT_CHECK([ovs-vsctl set Open_vSwitch .
> external-ids:ovn-bridge-mappings=phynet:br-ext])
> +ADD_NAMESPACES(ext)
> +ADD_VETH(ext, ext, br-ext, "192.168.3.2/24", "f0:00:00:01:02:06")
> +AT_CHECK([ovs-vsctl remove interface ovs-ext external-ids iface-id=ext])
> +
> +AT_CHECK([ovs-vsctl set Open_vSwitch .
> external-ids:ovn-bridge-mappings=public:br-public,ext:br-ext])
> ovn-nbctl lsp-add sw0 public \
> -- lsp-set-addresses public unknown \
> -- lsp-set-type public localnet \
> - -- lsp-set-options public network_name=phynet
> + -- lsp-set-options public network_name=public
> +
> +ovn-nbctl lsp-add sw1 ext \
> + -- lsp-set-addresses ext unknown \
> + -- lsp-set-type ext localnet \
> + -- lsp-set-options ext network_name=ext
>
> AT_CHECK([ovn-nbctl set Logical_Switch_Port public
> options:qos_min_rate=200000])
> AT_CHECK([ovn-nbctl set Logical_Switch_Port public
> options:qos_max_rate=300000])
> AT_CHECK([ovn-nbctl set Logical_Switch_Port public
> options:qos_burst=3000000])
> AT_CHECK([ovs-vsctl set interface ovs-public
> external-ids:ovn-egress-iface=true])
> +
> +AT_CHECK([ovn-nbctl set Logical_Switch_Port ext options:qos_min_rate=400000])
> +AT_CHECK([ovn-nbctl set Logical_Switch_Port ext options:qos_max_rate=600000])
> +AT_CHECK([ovn-nbctl set Logical_Switch_Port ext options:qos_burst=6000000])
> +AT_CHECK([ovs-vsctl set interface ovs-ext
> external-ids:ovn-egress-iface=true])
> +
> OVS_WAIT_UNTIL([tc qdisc show | grep -q 'htb 1: dev ovs-public'])
> OVS_WAIT_UNTIL([tc class show dev ovs-public | \
> grep -q 'class htb .* rate 200Kbit ceil 300Kbit burst
> 375000b cburst 375000b'])
>
> +OVS_WAIT_UNTIL([tc qdisc show | grep -q 'htb 1: dev ovs-ext'])
> +OVS_WAIT_UNTIL([tc class show dev ovs-ext | \
> + grep -q 'class htb .* rate 400Kbit ceil 600Kbit burst
> 750000b cburst 750000b'])
>
> +AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options
> qos_min_rate=200000])
> AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options
> qos_max_rate=300000])
> +
> OVS_WAIT_UNTIL([tc class show dev ovs-public | \
> - grep -q 'class htb .* rate 200Kbit ceil 34359Mbit burst
> 375000b .*'])
> + grep -q 'class htb .* rate 12Kbit ceil 34359Mbit burst
> 375000b cburst 373662b'])
>
> -AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options
> qos_min_rate=200000])
> AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options
> qos_burst=3000000])
> OVS_WAIT_UNTIL([test "$(tc qdisc show | grep 'htb 1: dev ovs-public')" = ""])
>
> +AT_CHECK([ovn-nbctl set Logical_Switch_Port ext options:qos_max_rate=800000])
> +OVS_WAIT_UNTIL([tc class show dev ovs-ext | \
> + grep -q 'class htb .* rate 400Kbit ceil 800Kbit burst
> 750000b cburst 750000b'])
> +
> +AT_CHECK([ovn-nbctl set Logical_Switch_Port public
> options:qos_min_rate=400000])
> +AT_CHECK([ovn-nbctl set Logical_Switch_Port public
> options:qos_max_rate=800000])
> +AT_CHECK([ovn-nbctl set Logical_Switch_Port public
> options:qos_burst=6000000])
> +
> +OVS_WAIT_UNTIL([tc qdisc show | grep -q 'htb 1: dev ovs-public'])
> +OVS_WAIT_UNTIL([tc class show dev ovs-public | \
> + grep -q 'class htb .* rate 400Kbit ceil 800Kbit burst
> 750000b cburst 750000b'])
> +
> +AT_CHECK([ovn-nbctl remove Logical_Switch_Port ext options
> qos_min_rate=400000])
> +AT_CHECK([ovn-nbctl remove Logical_Switch_Port ext options
> qos_max_rate=800000])
> +AT_CHECK([ovn-nbctl remove Logical_Switch_Port ext options
> qos_burst=6000000])
> +
> +OVS_WAIT_UNTIL([test "$(tc qdisc show | grep 'htb 1: dev ovs-ext')" = ""])
> +
> +OVS_WAIT_UNTIL([tc qdisc show | grep -q 'htb 1: dev ovs-public'])
> +OVS_WAIT_UNTIL([tc class show dev ovs-public | \
> + grep -q 'class htb .* rate 400Kbit ceil 800Kbit burst
> 750000b cburst 750000b'])
> +
> +AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options
> qos_min_rate=400000])
> +AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options
> qos_max_rate=800000])
> +AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options
> qos_burst=6000000])
> +
> +OVS_WAIT_UNTIL([test "$(tc qdisc show | grep 'htb 1: dev ovs-public')" = ""])
> +
> kill $(pidof ovn-controller)
>
> as ovn-sb
> --
> 2.39.2
>
_______________________________________________
dev mailing list
[email protected]
https://mail.openvswitch.org/mailman/listinfo/ovs-dev