Add an option to LS, "dynamic-routing-redistribute", in line with the
existing LR option of the same name. For now the option accepts only
"fdb", which leads to advertisement of workloads connected to the
specified switch. The option has no effect unless the switch also has
the "dynamic-routing-vni" option set.

Currently ovn-controller advertises VIFs, virtual ports, container
ports, DGPs and GW router ports bound to the local chassis. In other
words, only local workloads are advertised, to avoid the potential
confusion of defining the same MAC multiple times when the same EVPN
is connected to more than one chassis.
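
For illustration, enabling this from the CMS side could look roughly
like the following (the switch name and VNI are made up; only the
option names come from this patch):

    # Hypothetical LS "ls0" attached to an EVPN L2 domain with VNI 100.
    ovn-nbctl set logical_switch ls0 other_config:dynamic-routing-vni=100
    ovn-nbctl set logical_switch ls0 \
        other_config:dynamic-routing-redistribute=fdb

    # On the chassis, MACs of locally bound workloads should then show
    # up as static FDB entries on the corresponding per-VNI bridge, e.g.:
    #   bridge fdb show br br-100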

Reported-at: https://issues.redhat.com/browse/FDP-1389
Signed-off-by: Ales Musil <amu...@redhat.com>
---
v2: Rebase on top of latest main.
    Add TODO item for dynamic FDB.
    Add TODO for the L3 EVPN.
    Don't add the datapath external_id if the vni isn't specified.
    Address nits from Dumitru.
---
 NEWS                        |   2 +
 TODO.rst                    |   6 ++
 controller/neighbor.c       | 116 ++++++++++++++++++++++++++++++++++++
 controller/neighbor.h       |  13 ++++
 controller/ovn-controller.c |  92 +++++++++++++++++++++++++++-
 northd/northd.c             |  33 +++++++---
 northd/northd.h             |   3 +-
 ovn-nb.xml                  |  20 +++++++
 tests/system-ovn.at         |  18 ++++++
 9 files changed, 292 insertions(+), 11 deletions(-)

diff --git a/NEWS b/NEWS
index 047bdff94..44d6d9636 100644
--- a/NEWS
+++ b/NEWS
@@ -51,6 +51,8 @@ Post v25.03.0
        for the EVPN traffic egressing through the tunnel.
      * Add the "other_config:dynamic-routing-vni" to Logical Switches. If set
        to valid integer the LS is considered to be connected to EVPN L2 domain.
+     * Add the "other_config:dynamic-routing-redistribute" to Logical
+       Switches. The only accepted value at the moment is "fdb".
 
 OVN v25.03.0 - 07 Mar 2025
 --------------------------
diff --git a/TODO.rst b/TODO.rst
index 0d3c29506..b8b3c98cf 100644
--- a/TODO.rst
+++ b/TODO.rst
@@ -160,3 +160,9 @@ OVN To-do List
 
   * Allow CMS to set FDB priority for EVPN, currently the remote FDB has
    higher priority.
+
+  * Learn FDB entries dynamically from incoming EVPN traffic, the same way
+    we do for other traffic passing through the LS.
+
+  * Add support for EVPN L3, which involves MAC Binding learning and
+    advertisement.
diff --git a/controller/neighbor.c b/controller/neighbor.c
index 81f36af57..0f19d8eff 100644
--- a/controller/neighbor.c
+++ b/controller/neighbor.c
@@ -19,6 +19,7 @@
 #include "lib/packets.h"
 #include "lib/sset.h"
 #include "local_data.h"
+#include "lport.h"
 #include "ovn-sb-idl.h"
 
 #include "neighbor.h"
@@ -38,6 +39,14 @@ static struct neighbor_interface_monitor *
 neighbor_interface_monitor_alloc(enum neighbor_family family,
                                  enum neighbor_interface_type type,
                                  uint32_t vni);
+static void neighbor_collect_mac_to_advertise(
+    const struct neighbor_ctx_in *, struct hmap *neighbors,
+    struct sset *advertised_pbs, const struct sbrec_datapath_binding *);
+static const struct sbrec_port_binding *neighbor_get_relevant_port_binding(
+    struct ovsdb_idl_index *sbrec_pb_by_name,
+    const struct sbrec_port_binding *);
+static void advertise_neigh_add(struct hmap *neighbors, struct eth_addr mac,
+                                struct in6_addr ip);
 
 static uint32_t
 advertise_neigh_hash(const struct eth_addr *eth, const struct in6_addr *ip)
@@ -102,6 +111,15 @@ neighbor_run(struct neighbor_ctx_in *n_ctx_in,
             neighbor_interface_monitor_alloc(NEIGH_AF_INET6,
                                              NEIGH_IFACE_BRIDGE, vni);
         vector_push(n_ctx_out->monitored_interfaces, &br_v6);
+
+        if (!smap_get_bool(&ld->datapath->external_ids,
+                           "dynamic-routing-advertise-fdb", false)) {
+            continue;
+        }
+
+        neighbor_collect_mac_to_advertise(n_ctx_in, &lo->announced_neighbors,
+                                          n_ctx_out->advertised_pbs,
+                                          ld->datapath);
     }
 }
 
@@ -115,6 +133,26 @@ neighbor_cleanup(struct vector *monitored_interfaces)
     vector_clear(monitored_interfaces);
 }
 
+bool
+neighbor_is_relevant_port_updated(struct ovsdb_idl_index *sbrec_pb_by_name,
+                                  const struct sbrec_chassis *chassis,
+                                  struct sset *advertised_pbs,
+                                  const struct tracked_lport *lport)
+{
+    if (lport->tracked_type == TRACKED_RESOURCE_REMOVED &&
+        sset_contains(advertised_pbs, lport->pb->logical_port)) {
+        return true;
+    }
+
+    if (lport->tracked_type != TRACKED_RESOURCE_NEW) {
+        return false;
+    }
+
+    const struct sbrec_port_binding *pb =
+        neighbor_get_relevant_port_binding(sbrec_pb_by_name, lport->pb);
+    return pb && lport_pb_is_chassis_resident(chassis, pb);
+}
+
 static void
 neighbor_interface_monitor_destroy(struct neighbor_interface_monitor *nim)
 {
@@ -157,3 +195,81 @@ neighbor_interface_monitor_alloc(enum neighbor_family family,
              neighbor_interface_prefixes[type], vni);
     return nim;
 }
+
+static void
+neighbor_collect_mac_to_advertise(const struct neighbor_ctx_in *n_ctx_in,
+                                  struct hmap *neighbors,
+                                  struct sset *advertised_pbs,
+                                  const struct sbrec_datapath_binding *dp)
+{
+    struct sbrec_port_binding *target =
+        sbrec_port_binding_index_init_row(n_ctx_in->sbrec_pb_by_dp);
+    sbrec_port_binding_index_set_datapath(target, dp);
+
+    const struct sbrec_port_binding *dp_pb;
+    SBREC_PORT_BINDING_FOR_EACH_EQUAL (dp_pb, target,
+                                       n_ctx_in->sbrec_pb_by_dp) {
+        const struct sbrec_port_binding *pb =
+            neighbor_get_relevant_port_binding(n_ctx_in->sbrec_pb_by_name,
+                                               dp_pb);
+        if (!(pb && lport_pb_is_chassis_resident(n_ctx_in->chassis, pb))) {
+            continue;
+        }
+
+        for (size_t i = 0; i < pb->n_mac; i++) {
+            struct lport_addresses addresses;
+            if (!extract_lsp_addresses(pb->mac[i], &addresses)) {
+                continue;
+            }
+
+            if (!advertise_neigh_find(neighbors, addresses.ea, &in6addr_any)) {
+                advertise_neigh_add(neighbors, addresses.ea, in6addr_any);
+            }
+
+            destroy_lport_addresses(&addresses);
+        }
+
+        sset_add(advertised_pbs, pb->logical_port);
+    }
+
+    sbrec_port_binding_index_destroy_row(target);
+}
+
+static const struct sbrec_port_binding *
+neighbor_get_relevant_port_binding(struct ovsdb_idl_index *sbrec_pb_by_name,
+                                   const struct sbrec_port_binding *pb)
+{
+    enum en_lport_type type = get_lport_type(pb);
+    if (type == LP_VIF || type == LP_VIRTUAL || type == LP_CONTAINER) {
+        return pb;
+    }
+
+    if (type == LP_L3GATEWAY) {
+        return lport_get_peer(pb, sbrec_pb_by_name);
+    }
+
+    if (type == LP_PATCH) {
+        const struct sbrec_port_binding *peer =
+            lport_get_peer(pb, sbrec_pb_by_name);
+        if (!peer) {
+            return NULL;
+        }
+
+        return lport_get_cr_port(sbrec_pb_by_name, peer, NULL);
+    }
+
+    return NULL;
+}
+
+static void
+advertise_neigh_add(struct hmap *neighbors, struct eth_addr mac,
+                    struct in6_addr ip)
+{
+    struct advertise_neighbor_entry *ne = xmalloc(sizeof *ne);
+    *ne = (struct advertise_neighbor_entry) {
+        .lladdr = mac,
+        .addr = ip,
+    };
+
+    hmap_insert(neighbors, &ne->node, advertise_neigh_hash(&mac, &ip));
+}
diff --git a/controller/neighbor.h b/controller/neighbor.h
index 9be7e679e..d9ce9f565 100644
--- a/controller/neighbor.h
+++ b/controller/neighbor.h
@@ -33,6 +33,8 @@
 #endif
 #endif /* __APPLE__ */
 
+struct tracked_lport;
+
 enum neighbor_family {
     NEIGH_AF_INET = AF_INET,
     NEIGH_AF_INET6 = AF_INET6,
@@ -42,11 +44,18 @@ enum neighbor_family {
 struct neighbor_ctx_in {
     /* Contains 'struct local_datapath'. */
     const struct hmap *local_datapaths;
+    /* Index for Port Binding by Datapath. */
+    struct ovsdb_idl_index *sbrec_pb_by_dp;
+    /* Index for Port Binding by name. */
+    struct ovsdb_idl_index *sbrec_pb_by_name;
+    const struct sbrec_chassis *chassis;
 };
 
 struct neighbor_ctx_out {
     /* Contains struct neighbor_interface_monitor pointers. */
     struct vector *monitored_interfaces;
+    /* Contains set of port binding names that are currently advertised. */
+    struct sset *advertised_pbs;
 };
 
 enum neighbor_interface_type {
@@ -79,5 +88,9 @@ struct advertise_neighbor_entry *advertise_neigh_find(
     const struct in6_addr *ip);
 void neighbor_run(struct neighbor_ctx_in *, struct neighbor_ctx_out *);
 void neighbor_cleanup(struct vector *monitored_interfaces);
+bool neighbor_is_relevant_port_updated(
+    struct ovsdb_idl_index *sbrec_pb_by_name,
+    const struct sbrec_chassis *chassis, struct sset *advertised_pbs,
+    const struct tracked_lport *lport);
 
 #endif /* NEIGHBOR_H */
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index d32484e33..c31cc2a00 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -5772,6 +5772,8 @@ en_host_if_monitor_run(struct engine_node *node OVS_UNUSED, void *data)
 struct ed_type_neighbor {
     /* Contains struct neighbor_interface_monitor pointers. */
     struct vector monitored_interfaces;
+    /* Contains set of PB names that are currently advertised. */
+    struct sset advertised_pbs;
 };
 
 static void *
@@ -5783,6 +5785,7 @@ en_neighbor_init(struct engine_node *node OVS_UNUSED,
     *data = (struct ed_type_neighbor) {
         .monitored_interfaces =
             VECTOR_EMPTY_INITIALIZER(struct neighbor_interface_monitor *),
+        .advertised_pbs = SSET_INITIALIZER(&data->advertised_pbs),
     };
     return data;
 }
@@ -5794,6 +5797,7 @@ en_neighbor_cleanup(void *data)
 
     neighbor_cleanup(&ne_data->monitored_interfaces);
     vector_destroy(&ne_data->monitored_interfaces);
+    sset_destroy(&ne_data->advertised_pbs);
 }
 
 static enum engine_node_state
@@ -5802,25 +5806,68 @@ en_neighbor_run(struct engine_node *node OVS_UNUSED, void *data)
     struct ed_type_neighbor *ne_data = data;
     struct ed_type_runtime_data *rt_data =
         engine_get_input_data("runtime_data", node);
+    struct ovsdb_idl_index *sbrec_port_binding_by_datapath =
+        engine_ovsdb_node_get_index(
+            engine_get_input("SB_port_binding", node),
+            "datapath");
+    struct ovsdb_idl_index *sbrec_port_binding_by_name =
+        engine_ovsdb_node_get_index(
+            engine_get_input("SB_port_binding", node),
+            "name");
+    const struct ovsrec_open_vswitch_table *ovs_table =
+        EN_OVSDB_GET(engine_get_input("OVS_open_vswitch", node));
+    struct ovsdb_idl_index *sbrec_chassis_by_name =
+        engine_ovsdb_node_get_index(
+            engine_get_input("SB_chassis", node),
+            "name");
+
+    const char *chassis_id = get_ovs_chassis_id(ovs_table);
+    ovs_assert(chassis_id);
+    const struct sbrec_chassis *chassis =
+        chassis_lookup_by_name(sbrec_chassis_by_name, chassis_id);
+    ovs_assert(chassis);
+
     struct neighbor_ctx_in n_ctx_in = {
         .local_datapaths = &rt_data->local_datapaths,
+        .sbrec_pb_by_dp = sbrec_port_binding_by_datapath,
+        .sbrec_pb_by_name = sbrec_port_binding_by_name,
+        .chassis = chassis,
     };
 
     struct neighbor_ctx_out n_ctx_out = {
         .monitored_interfaces = &ne_data->monitored_interfaces,
+        .advertised_pbs = &ne_data->advertised_pbs,
     };
 
     neighbor_cleanup(&ne_data->monitored_interfaces);
+    sset_clear(&ne_data->advertised_pbs);
     neighbor_run(&n_ctx_in, &n_ctx_out);
 
     return EN_UPDATED;
 }
 
 static enum engine_input_handler_result
-neighbor_runtime_data_handler(struct engine_node *node, void *data OVS_UNUSED)
+neighbor_runtime_data_handler(struct engine_node *node, void *data)
 {
+    struct ed_type_neighbor *ne_data = data;
     struct ed_type_runtime_data *rt_data =
         engine_get_input_data("runtime_data", node);
+    struct ovsdb_idl_index *sbrec_port_binding_by_name =
+        engine_ovsdb_node_get_index(
+            engine_get_input("SB_port_binding", node),
+            "name");
+    const struct ovsrec_open_vswitch_table *ovs_table =
+        EN_OVSDB_GET(engine_get_input("OVS_open_vswitch", node));
+    struct ovsdb_idl_index *sbrec_chassis_by_name =
+        engine_ovsdb_node_get_index(
+            engine_get_input("SB_chassis", node),
+            "name");
+
+    const char *chassis_id = get_ovs_chassis_id(ovs_table);
+    ovs_assert(chassis_id);
+    const struct sbrec_chassis *chassis =
+        chassis_lookup_by_name(sbrec_chassis_by_name, chassis_id);
+    ovs_assert(chassis);
 
     /* There are no tracked data. Fall back to full recompute. */
     if (!rt_data->tracked) {
@@ -5845,6 +5892,21 @@ neighbor_runtime_data_handler(struct engine_node *node, void *data OVS_UNUSED)
             tdp->tracked_type == TRACKED_RESOURCE_REMOVED) {
             return EN_UNHANDLED;
         }
+
+        if (!smap_get_bool(&ld->datapath->external_ids,
+                           "dynamic-routing-advertise-fdb", false)) {
+            continue;
+        }
+
+        const struct shash_node *shash_node;
+        SHASH_FOR_EACH (shash_node, &tdp->lports) {
+            if (neighbor_is_relevant_port_updated(sbrec_port_binding_by_name,
+                                                  chassis,
+                                                  &ne_data->advertised_pbs,
+                                                  shash_node->data)) {
+                return EN_UNHANDLED;
+            }
+        }
     }
 
     return EN_HANDLED_UNCHANGED;
@@ -5882,6 +5944,30 @@ neighbor_sb_datapath_binding_handler(struct engine_node *node,
     return EN_HANDLED_UNCHANGED;
 }
 
+static enum engine_input_handler_result
+neighbor_sb_port_binding_handler(struct engine_node *node, void *data)
+{
+    struct ed_type_neighbor *ne_data = data;
+    const struct sbrec_port_binding_table *pb_table =
+        EN_OVSDB_GET(engine_get_input("SB_port_binding", node));
+
+    const struct sbrec_port_binding *pb;
+    SBREC_PORT_BINDING_TABLE_FOR_EACH_TRACKED (pb, pb_table) {
+        if (sbrec_port_binding_is_new(pb) ||
+            sbrec_port_binding_is_deleted(pb)) {
+            /* The removal and addition are handled via runtime_data. */
+            return EN_HANDLED_UNCHANGED;
+        }
+
+        if (sbrec_port_binding_is_updated(pb, SBREC_PORT_BINDING_COL_MAC) &&
+            sset_contains(&ne_data->advertised_pbs, pb->logical_port)) {
+            return EN_UNHANDLED;
+        }
+    }
+
+    return EN_HANDLED_UNCHANGED;
+}
+
 struct ed_type_neighbor_table_notify {
     /* For incremental processing this could be tracked per interface in
      * the future. */
@@ -6776,10 +6862,14 @@ main(int argc, char *argv[])
     engine_add_input(&en_garp_rarp, &en_runtime_data,
                      garp_rarp_runtime_data_handler);
 
+    engine_add_input(&en_neighbor, &en_ovs_open_vswitch, NULL);
+    engine_add_input(&en_neighbor, &en_sb_chassis, NULL);
     engine_add_input(&en_neighbor, &en_runtime_data,
                      neighbor_runtime_data_handler);
     engine_add_input(&en_neighbor, &en_sb_datapath_binding,
                      neighbor_sb_datapath_binding_handler);
+    engine_add_input(&en_neighbor, &en_sb_port_binding,
+                     neighbor_sb_port_binding_handler);
     engine_add_input(&en_neighbor_exchange, &en_neighbor, NULL);
     engine_add_input(&en_neighbor_exchange, &en_host_if_monitor, NULL);
     engine_add_input(&en_neighbor_exchange, &en_neighbor_table_notify, NULL);
diff --git a/northd/northd.c b/northd/northd.c
index 07ef786d2..1c2ad37f8 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -818,6 +818,10 @@ ovn_datapath_update_external_ids(struct ovn_datapath *od)
             const char *vni = smap_get(&od->nbs->other_config,
                                        "dynamic-routing-vni");
             smap_add(&ids, "dynamic-routing-vni", vni);
+            smap_add(&ids, "dynamic-routing-advertise-fdb",
+                     drr_mode_FDB_is_set(od->dynamic_routing_redistribute)
+                     ? "true"
+                     : "false");
         }
     }
 
@@ -858,7 +862,7 @@ static enum dynamic_routing_redistribute_mode
 parse_dynamic_routing_redistribute(
     const struct smap *options,
     enum dynamic_routing_redistribute_mode default_dynamic_mode,
-    const char *nb_entity_name)
+    const char *nb_entity_name, bool lr)
 {
     char *save_ptr = NULL;
     enum dynamic_routing_redistribute_mode out = DRRM_NONE;
@@ -874,27 +878,31 @@ parse_dynamic_routing_redistribute(
          token != NULL;
          token = strtok_r(NULL, ",", &save_ptr)) {
 
-        if (!strcmp(token, "connected")) {
+        if (lr && !strcmp(token, "connected")) {
             out |= DRRM_CONNECTED;
             continue;
         }
-        if (!strcmp(token, "connected-as-host")) {
+        if (lr && !strcmp(token, "connected-as-host")) {
             /* Setting connected-as-host implies connected. */
             out |= DRRM_CONNECTED | DRRM_CONNECTED_AS_HOST;
             continue;
         }
-        if (!strcmp(token, "static")) {
+        if (lr && !strcmp(token, "static")) {
             out |= DRRM_STATIC;
             continue;
         }
-        if (!strcmp(token, "nat")) {
+        if (lr && !strcmp(token, "nat")) {
             out |= DRRM_NAT;
             continue;
         }
-        if (!strcmp(token, "lb")) {
+        if (lr && !strcmp(token, "lb")) {
             out |= DRRM_LB;
             continue;
         }
+        if (!lr && !strcmp(token, "fdb")) {
+            out |= DRRM_FDB;
+            continue;
+        }
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
         VLOG_WARN_RL(&rl,
                      "unknown dynamic-routing-redistribute option '%s' on %s",
@@ -984,6 +992,11 @@ join_datapaths(const struct nbrec_logical_switch_table *nbrec_ls_table,
         if (ovn_is_valid_vni(vni)) {
             od->has_evpn_vni = true;
         }
+
+        od->dynamic_routing_redistribute =
+            parse_dynamic_routing_redistribute(&od->nbs->other_config,
+                                               DRRM_NONE, od->nbs->name,
+                                               false);
     }
 
     const struct nbrec_logical_router *nbr;
@@ -1021,7 +1034,7 @@ join_datapaths(const struct nbrec_logical_switch_table *nbrec_ls_table,
                                             "dynamic-routing", false);
         od->dynamic_routing_redistribute =
             parse_dynamic_routing_redistribute(&od->nbr->options, DRRM_NONE,
-                                               od->nbr->name);
+                                               od->nbr->name, true);
     }
 }
 
@@ -1904,7 +1917,7 @@ join_logical_ports_lrp(struct hmap *ports,
     op->dynamic_routing_redistribute =
         parse_dynamic_routing_redistribute(&op->nbrp->options,
                                            od->dynamic_routing_redistribute,
-                                           op->nbrp->name);
+                                           op->nbrp->name, true);
 
     for (size_t j = 0; j < op->lrp_networks.n_ipv4_addrs; j++) {
         sset_add(&op->od->router_ips,
@@ -5824,7 +5837,7 @@ build_lswitch_learn_fdb_od(
     struct lflow_ref *lflow_ref)
 {
     ovs_assert(od->nbs);
-    const char *lkp_action =  od->has_evpn_vni
+    const char *lkp_action = od->has_evpn_vni
         ? "outport = get_fdb(eth.dst); "
           "remote_outport = get_remote_fdb(eth.dst); next;"
         : "outport = get_fdb(eth.dst); next;";
@@ -9378,6 +9391,8 @@ build_lswitch_lflows_evpn_l2_unknown(struct ovn_datapath *od,
             lflows, od, S_SWITCH_IN_L2_UNKNOWN, 50, "outport == \"none\" && "
             "remote_outport == \"none\"", "No L2 destination", lflow_ref);
     }
+    /* The following flows ensure that the remote FDB is preferred over the
+     * local one. */
     ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 25,
                   "remote_outport == \"none\"", "output;", lflow_ref);
     ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 0, "1",
diff --git a/northd/northd.h b/northd/northd.h
index 1235f3912..3bafaa36a 100644
--- a/northd/northd.h
+++ b/northd/northd.h
@@ -326,7 +326,8 @@ struct mcast_port_info {
     DRR_MODE(CONNECTED_AS_HOST, 1) \
     DRR_MODE(STATIC,            2) \
     DRR_MODE(NAT,               3) \
-    DRR_MODE(LB,                4)
+    DRR_MODE(LB,                4) \
+    DRR_MODE(FDB,               5)
 
 enum dynamic_routing_redistribute_mode_bits {
 #define DRR_MODE(PROTOCOL, BIT) DRRM_##PROTOCOL##_BIT = BIT,
diff --git a/ovn-nb.xml b/ovn-nb.xml
index 8a98d2f70..e17b62b91 100644
--- a/ovn-nb.xml
+++ b/ovn-nb.xml
@@ -889,6 +889,26 @@
           removal/change in the future.
         </p>
       </column>
+
+      <column name="other_config" key="dynamic-routing-redistribute"
+              type='{"type": "string"}'>
+        <p>
+          Only relevant if <ref column="other_config" key="dynamic-routing-vni"
+          table="Logical_Switch"/> is set to a valid VNI.
+        </p>
+
+        <p>
+          If <code>fdb</code> is specified, ovn-controller advertises
+          all workloads that are local to the chassis. This applies to VIFs,
+          container ports, virtual ports, connected DGPs and connected GW
+          routers.
+        </p>
+
+        <p>
+          NOTE: this feature is experimental and may be subject to
+          removal/change in the future.
+        </p>
+      </column>
     </group>
 
     <group title="IP Multicast Snooping Options">
diff --git a/tests/system-ovn.at b/tests/system-ovn.at
index 1a9d59830..4a6454ca6 100644
--- a/tests/system-ovn.at
+++ b/tests/system-ovn.at
@@ -18455,6 +18455,24 @@ check diff -q bindings_before bindings_after
 check diff -q mc_groups_before mc_groups_after
 check diff -q fdb_before fdb_after
 
+check ovn-nbctl --wait=hv set logical_switch ls-evpn other_config:dynamic-routing-redistribute=fdb
+OVS_WAIT_FOR_OUTPUT([bridge fdb show | grep "lo-10" | grep "f0:00:0f:16:01" | sort], [0], [dnl
+f0:00:0f:16:01:10 dev lo-10 master br-10 static
+f0:00:0f:16:01:10 dev lo-10 vlan 1 master br-10 static
+f0:00:0f:16:01:20 dev lo-10 master br-10 static
+f0:00:0f:16:01:20 dev lo-10 vlan 1 master br-10 static
+])
+
+check ovn-nbctl --wait=hv lsp-del workload2
+OVS_WAIT_FOR_OUTPUT([bridge fdb show | grep "lo-10" | grep "f0:00:0f:16:01" | sort], [0], [dnl
+f0:00:0f:16:01:10 dev lo-10 master br-10 static
+f0:00:0f:16:01:10 dev lo-10 vlan 1 master br-10 static
+])
+
+check ovn-nbctl --wait=hv remove logical_switch ls-evpn other_config dynamic-routing-redistribute
+OVS_WAIT_FOR_OUTPUT([bridge fdb show | grep "lo-10" | grep "f0:00:0f:16:01" | sort], [0], [dnl
+])
+
 OVN_CLEANUP_CONTROLLER([hv1])
 
 as ovn-sb
-- 
2.50.1
