Recheck-request: github-robot-_Build_and_Test

On Thu, May 7, 2026 at 9:10 AM Jacob Tanenbaum <[email protected]> wrote:
> Recheck-request: github-robot-_Build_and_Test > > On Wed, May 6, 2026 at 10:37 AM Jacob Tanenbaum <[email protected]> > wrote: > >> When ovn-monitor-all is set to false ovn-controller sets ovn-installed >> on OVS interfaces too early. ovn-controller needs to wait for the >> response from the southbound database with the updates to the newly >> monitored fields only then can it install flows and label the OVS >> interface as installed. >> >> Reported-at: https://redhat.atlassian.net/browse/FDP-2887 >> Signed-off-by: Jacob Tanenbaum <[email protected]> >> >> --- >> v9->v10 >> * added a bool for if the controller is in monitor-all >> * removed some code leftover from debugging >> * changed argument to if_status_mgr_update to dps_waiting_for_sb >> * removed ovs_assert in favor of NULL checking >> * removed unrelated new lines >> >> v8->v9 >> * added the functionality of waiting for sb to update to the incremental >> processor >> >> v7->v8 >> * removed printing in the system testcases that shouldn't be there and >> caused failure. >> >> v6->v7 >> * added an sset that holds the uuid's of datapaths that are waiting for >> sb updates >> * added back the the state OIF_WAITING_SB_COND to the if_mgr state >> machine. >> * no longer hold if the datapath is updated in the local_datapaths >> struct as that is generated each transaction and cannot be relied >> upon. >> * removed some leftover logic from previous patch versions >> >> v5->v6 >> * simplified the logic as requested, Ales saw that we did not need to >> save the seqno if we checked before the engine run. >> * removing the extra state from the state machine >> * removing some leftover logic that was seen. >> >> v4->v5 >> * corrected a sanitizer error: used bool update_seqno without >> initializing >> >> v3->v4 >> * Added state OIF_WAITING_SB_COND to the state machine that manages >> the adding of interfaces. 
This state waits until the Southbound has >> updated the ovn-controller of relevent information about ports related >> to it >> * Addressed several nits >> >> v2->v3 >> * adding the ld->monitor_updated required the additiona of checking for >> monitor_all in update_sb_monitors. I didn't account for being able to >> toggle on monitor_all >> >> v1->v2 >> * if_status_mgr_run() will run everytime the conditional seqno is >> changed so it should be safe to only skip when the expected_seqno and >> seqno returned from ovn are strictly not equal, that way we do not >> have to deal with overflow in the seqno. Additionally add a boolean to >> the local_datapath in the event that the seqno wraps around at the >> same time the datapath would go back into the state OIF_INSTALL_FLOWS. >> * remove setting the state to itself for OIF_INSTALL_FLOWS in >> if_status_mgr_update() >> * added assert(pb) in if_status_mgr_run() >> * removed a manual loop looking for the local_datapath and replaced with >> get_local_datapath() in if_status_mgr_run >> * remove a few nit spelling errors in the test case >> >> diff --git a/controller/if-status.c b/controller/if-status.c >> index ee9337e63..d12ac5515 100644 >> --- a/controller/if-status.c >> +++ b/controller/if-status.c >> @@ -18,6 +18,7 @@ >> #include "binding.h" >> #include "if-status.h" >> #include "lib/ofctrl-seqno.h" >> +#include "local_data.h" >> #include "ovsport.h" >> #include "simap.h" >> >> @@ -58,6 +59,11 @@ VLOG_DEFINE_THIS_MODULE(if_status); >> enum if_state { >> OIF_CLAIMED, /* Newly claimed interface. pb->chassis update >> not >> yet initiated. */ >> + OIF_WAITING_SB_COND, /* Waiting for the Southbound database to >> update >> + * ovn-controller for a given datapath. We >> should >> + * only be waiting in this state when >> monitor_all >> + * is false AND it is the first time that we >> see >> + * a specific datapath. 
*/ >> OIF_INSTALL_FLOWS, /* Claimed interface with pb->chassis update >> sent to >> * SB (but update notification not confirmed, >> so the >> * update may be resent in any of the following >> @@ -87,6 +93,7 @@ enum if_state { >> >> static const char *if_state_names[] = { >> [OIF_CLAIMED] = "CLAIMED", >> + [OIF_WAITING_SB_COND] = "WAITING_SB_COND", >> [OIF_INSTALL_FLOWS] = "INSTALL_FLOWS", >> [OIF_REM_OLD_OVN_INST] = "REM_OLD_OVN_INST", >> [OIF_MARK_UP] = "MARK_UP", >> @@ -114,7 +121,18 @@ static const char *if_state_names[] = { >> * | | | +--+ >> | | | >> * | | | >> | | | >> * | | | mgr_update(when sb is rw i.e. pb->chassis) >> | | | >> - * | | | has been updated >> | | | >> + * | | V has been updated >> | | | >> + * | | +----------------------+ >> | | | >> + * | | | | >> | | | >> + * | | | WAITING_SB_COND | >> | | | >> + * | | | | >> | | | >> + * | | | | >> | | | >> + * | | +----------------------+ >> | | | >> + * | | | >> | | | >> + * | | | >> | | | >> + * | | | mgr_update(when sb_cond_seqno == expected) >> | | | >> + * | | | - request seqno >> | | | >> + * | | | >> | | | >> * | | release_iface | - request seqno >> | | | >> * | | | >> | | | >> * | | V >> | | | >> @@ -335,6 +353,7 @@ if_status_mgr_claim_iface(struct if_status_mgr *mgr, >> >> switch (iface->state) { >> case OIF_CLAIMED: >> + case OIF_WAITING_SB_COND: >> case OIF_INSTALL_FLOWS: >> case OIF_REM_OLD_OVN_INST: >> case OIF_MARK_UP: >> @@ -383,6 +402,7 @@ if_status_mgr_release_iface(struct if_status_mgr >> *mgr, const char *iface_id) >> >> switch (iface->state) { >> case OIF_CLAIMED: >> + case OIF_WAITING_SB_COND: >> case OIF_INSTALL_FLOWS: >> /* Not yet fully installed interfaces: >> * pb->chassis still need to be deleted. 
>> @@ -424,6 +444,7 @@ if_status_mgr_delete_iface(struct if_status_mgr *mgr, >> const char *iface_id, >> >> switch (iface->state) { >> case OIF_CLAIMED: >> + case OIF_WAITING_SB_COND: >> case OIF_INSTALL_FLOWS: >> /* Not yet fully installed interfaces: >> * pb->chassis still need to be deleted. >> @@ -500,6 +521,8 @@ if_status_mgr_update(struct if_status_mgr *mgr, >> const struct sbrec_chassis *chassis_rec, >> const struct ovsrec_interface_table *iface_table, >> const struct sbrec_port_binding_table *pb_table, >> + const struct hmap *local_datapaths, >> + const struct uuidset *dps_waiting_for_sb, >> bool ovs_readonly, >> bool sb_readonly) >> { >> @@ -510,6 +533,8 @@ if_status_mgr_update(struct if_status_mgr *mgr, >> return; >> } >> >> + ovs_assert(dps_waiting_for_sb); >> + >> struct shash *bindings = &binding_data->bindings; >> struct hmapx_node *node; >> >> @@ -622,9 +647,7 @@ if_status_mgr_update(struct if_status_mgr *mgr, >> * in if_status_handle_claims or if_status_mgr_claim_iface >> */ >> if (iface->is_vif) { >> - ovs_iface_set_state(mgr, iface, OIF_INSTALL_FLOWS); >> - iface->install_seqno = mgr->iface_seqno + 1; >> - new_ifaces = true; >> + ovs_iface_set_state(mgr, iface, OIF_WAITING_SB_COND); >> } else { >> ovs_iface_set_state(mgr, iface, OIF_MARK_UP); >> } >> @@ -639,6 +662,33 @@ if_status_mgr_update(struct if_status_mgr *mgr, >> } >> } >> >> + if (!sb_readonly) { >> + HMAPX_FOR_EACH_SAFE (node, >> + >> &mgr->ifaces_per_state[OIF_WAITING_SB_COND]) { >> + struct ovs_iface *iface = node->data; >> + if (local_datapaths) { >> + const struct sbrec_port_binding *pb = >> + sbrec_port_binding_table_get_for_uuid(pb_table, >> + >> &iface->pb_uuid); >> + if (!pb) { >> + continue; >> + } >> + struct local_datapath *ld = >> + get_local_datapath(local_datapaths, >> + pb->datapath->tunnel_key); >> + if (!ld) { >> + continue; >> + } >> + if (!uuidset_find(dps_waiting_for_sb, >> + &ld->datapath->header_.uuid)) { >> + ovs_iface_set_state(mgr, iface, OIF_INSTALL_FLOWS); >> + 
iface->install_seqno = mgr->iface_seqno + 1; >> + new_ifaces = true; >> + } >> + } >> + } >> + } >> + >> if (!sb_readonly) { >> HMAPX_FOR_EACH_SAFE (node, >> &mgr->ifaces_per_state[OIF_UPDATE_PORT]) { >> struct ovs_iface *iface = node->data; >> diff --git a/controller/if-status.h b/controller/if-status.h >> index d15ca3008..75c7bf71c 100644 >> --- a/controller/if-status.h >> +++ b/controller/if-status.h >> @@ -18,6 +18,7 @@ >> >> #include "openvswitch/shash.h" >> #include "lib/vswitch-idl.h" >> +#include "lib/uuidset.h" >> >> #include "binding.h" >> #include "lport.h" >> @@ -43,6 +44,8 @@ void if_status_mgr_update(struct if_status_mgr *, >> struct local_binding_data *, >> const struct sbrec_chassis *chassis, >> const struct ovsrec_interface_table >> *iface_table, >> const struct sbrec_port_binding_table >> *pb_table, >> + const struct hmap *local_datapaths, >> + const struct uuidset *dps_waiting_for_sb, >> bool ovs_readonly, >> bool sb_readonly); >> void if_status_mgr_run(struct if_status_mgr *mgr, struct >> local_binding_data *, >> diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c >> index 35a5cd0b4..3a8eeb4c0 100644 >> --- a/controller/ovn-controller.c >> +++ b/controller/ovn-controller.c >> @@ -170,6 +170,8 @@ static char *unixctl_path; >> struct controller_engine_ctx { >> struct lflow_cache *lflow_cache; >> struct if_status_mgr *if_mgr; >> + const unsigned int *ovnsb_expected_cond_seqno; >> + const bool *sb_monitor_all; >> }; >> >> /* Pending packet to be injected into connected OVS. */ >> @@ -1145,6 +1147,50 @@ ctrl_register_ovs_idl(struct ovsdb_idl *ovs_idl) >> * track that column which should be addressed in the future. 
*/ >> } >> >> +struct ed_type_datapaths_updated { >> + struct uuidset waiting_sb_cond_update; >> +}; >> + >> +static void * >> +en_datapaths_updated_init(struct engine_node *node OVS_UNUSED, >> + struct engine_arg *arg OVS_UNUSED) >> +{ >> + struct ed_type_datapaths_updated *data = xzalloc(sizeof *data); >> + *data = (struct ed_type_datapaths_updated) { >> + .waiting_sb_cond_update = >> + UUIDSET_INITIALIZER(&data->waiting_sb_cond_update), >> + }; >> + return data; >> +} >> + >> +static void >> +en_datapaths_updated_cleanup(void *data) >> +{ >> + struct ed_type_datapaths_updated *sb_data = data; >> + uuidset_destroy(&sb_data->waiting_sb_cond_update); >> +} >> + >> +static enum engine_node_state >> +en_datapaths_updated_run(struct engine_node *node OVS_UNUSED, >> + void *data) >> +{ >> + struct controller_engine_ctx *ctrl_ctx = >> engine_get_context()->client_ctx; >> + struct ovsdb_idl_txn *ovnsb_idl_txn = >> engine_get_context()->ovnsb_idl_txn; >> + if (!ovnsb_idl_txn) { >> + return EN_UNCHANGED; >> + } >> + struct ovsdb_idl *ovnsb_idl = ovsdb_idl_txn_get_idl(ovnsb_idl_txn); >> + if (*ctrl_ctx->ovnsb_expected_cond_seqno == >> + ovsdb_idl_get_condition_seqno(ovnsb_idl)) { >> + struct ed_type_datapaths_updated *dp_data = data; >> + if (!uuidset_is_empty(&dp_data->waiting_sb_cond_update)) { >> + uuidset_clear(&dp_data->waiting_sb_cond_update); >> + return EN_UPDATED; >> + } >> + } >> + return EN_UNCHANGED; >> +} >> + >> struct ed_type_ofctrl_is_connected { >> bool connected; >> }; >> @@ -1180,6 +1226,41 @@ en_ofctrl_is_connected_run(struct engine_node >> *node OVS_UNUSED, void *data) >> return EN_UNCHANGED; >> } >> >> +struct ed_type_sb_cond_seqno { >> + unsigned int last_sb_cond_seqno; >> +}; >> + >> +static void * >> +en_sb_cond_seqno_init(struct engine_node *node OVS_UNUSED, >> + struct engine_arg *arg OVS_UNUSED) >> +{ >> + struct ed_type_sb_cond_seqno *data = xzalloc(sizeof *data); >> + return data; >> +} >> + >> +static void en_sb_cond_seqno_cleanup(void *data 
OVS_UNUSED) >> +{ >> +} >> + >> +static enum engine_node_state >> +en_sb_cond_seqno_run(struct engine_node *node OVS_UNUSED, void *data) >> +{ >> + struct ed_type_sb_cond_seqno *sb_seqno_data = data; >> + struct ovsdb_idl_txn *ovnsb_idl_txn = >> engine_get_context()->ovnsb_idl_txn; >> + if (!ovnsb_idl_txn) { >> + return EN_UNCHANGED; >> + } >> + >> + unsigned int curr_seqno = >> + >> ovsdb_idl_get_condition_seqno(ovsdb_idl_txn_get_idl(ovnsb_idl_txn)); >> + >> + if (sb_seqno_data->last_sb_cond_seqno != curr_seqno) { >> + sb_seqno_data->last_sb_cond_seqno = curr_seqno; >> + return EN_UPDATED; >> + } >> + return EN_UNCHANGED; >> +} >> + >> struct ed_type_if_status_mgr { >> const struct if_status_mgr *manager; >> const struct ovsrec_interface_table *iface_table; >> @@ -5204,6 +5285,15 @@ controller_output_acl_id_handler(struct >> engine_node *node OVS_UNUSED, >> return EN_HANDLED_UPDATED; >> } >> >> +static enum engine_input_handler_result >> +controlller_output_datapaths_updated_handler( >> + struct engine_node *node OVS_UNUSED, >> + void *data OVS_UNUSED) >> +{ >> + return EN_HANDLED_UPDATED; >> +} >> + >> + >> static enum engine_input_handler_result >> controller_output_route_exchange_handler(struct engine_node *node >> OVS_UNUSED, >> void *data OVS_UNUSED) >> @@ -6745,6 +6835,56 @@ evpn_arp_vtep_binding_handler(struct engine_node >> *node, void *data OVS_UNUSED) >> return EN_UNHANDLED; >> } >> >> +static enum engine_input_handler_result >> +datapaths_updated_runtime_data_handler(struct engine_node *node, >> + void *data) >> +{ >> + enum engine_input_handler_result status = EN_HANDLED_UNCHANGED; >> + struct ed_type_datapaths_updated *dp_data = data; >> + struct ed_type_runtime_data *rt_data = >> + engine_get_input_data("runtime_data", node); >> + struct controller_engine_ctx *ctrl_ctx = >> engine_get_context()->client_ctx; >> + if (*ctrl_ctx->sb_monitor_all) { >> + if (!uuidset_is_empty(&dp_data->waiting_sb_cond_update)) { >> + 
uuidset_clear(&dp_data->waiting_sb_cond_update); >> + status = EN_HANDLED_UPDATED; >> + } >> + return status; >> + } >> + >> + struct tracked_datapath *tdp; >> + HMAP_FOR_EACH_SAFE (tdp, node, &rt_data->tracked_dp_bindings) { >> + if (tdp->tracked_type == TRACKED_RESOURCE_NEW) { >> + uuidset_insert(&dp_data->waiting_sb_cond_update, >> + &tdp->dp->header_.uuid); >> + status = EN_HANDLED_UPDATED; >> + } >> + } >> + return status; >> +} >> + >> +static enum engine_input_handler_result >> +datapaths_update_sb_cond_handler(struct engine_node *node OVS_UNUSED, >> + void *data) >> +{ >> + struct controller_engine_ctx *ctrl_ctx = >> engine_get_context()->client_ctx; >> + struct ovsdb_idl_txn *ovnsb_idl_txn = >> engine_get_context()->ovnsb_idl_txn; >> + if (!ovnsb_idl_txn) { >> + return EN_HANDLED_UNCHANGED; >> + } >> + struct ovsdb_idl *ovnsb_idl = ovsdb_idl_txn_get_idl(ovnsb_idl_txn); >> + if (*ctrl_ctx->ovnsb_expected_cond_seqno == >> + ovsdb_idl_get_condition_seqno(ovnsb_idl)) { >> + struct ed_type_datapaths_updated *dp_data = data; >> + >> + if (!uuidset_is_empty(&dp_data->waiting_sb_cond_update)) { >> + uuidset_clear(&dp_data->waiting_sb_cond_update); >> + return EN_HANDLED_UPDATED; >> + } >> + } >> + return EN_HANDLED_UNCHANGED; >> +} >> + >> /* Define engine node functions for nodes that represent SB tables. 
>> * >> * en_sb_<TABLE_NAME>_run() >> @@ -6867,6 +7007,8 @@ static ENGINE_NODE(neighbor_exchange_status); >> static ENGINE_NODE(evpn_vtep_binding, CLEAR_TRACKED_DATA); >> static ENGINE_NODE(evpn_fdb, CLEAR_TRACKED_DATA); >> static ENGINE_NODE(evpn_arp, CLEAR_TRACKED_DATA); >> +static ENGINE_NODE(datapaths_updated); >> +static ENGINE_NODE(sb_cond_seqno); >> >> static void >> inc_proc_ovn_controller_init( >> @@ -6889,6 +7031,8 @@ inc_proc_ovn_controller_init( >> engine_add_input(&en_template_vars, &en_sb_chassis_template_var, >> template_vars_sb_chassis_template_var_handler); >> >> + engine_add_input(&en_datapaths_updated, &en_sb_cond_seqno, >> + datapaths_update_sb_cond_handler); >> engine_add_input(&en_lb_data, &en_sb_load_balancer, >> lb_data_sb_load_balancer_handler); >> engine_add_input(&en_lb_data, &en_template_vars, >> @@ -6975,6 +7119,9 @@ inc_proc_ovn_controller_init( >> engine_add_input(&en_pflow_output, &en_sb_sb_global, >> pflow_output_debug_handler); >> >> + engine_add_input(&en_datapaths_updated, &en_runtime_data, >> + datapaths_updated_runtime_data_handler); >> + >> engine_add_input(&en_northd_options, &en_sb_sb_global, >> en_northd_options_sb_sb_global_handler); >> >> @@ -7163,6 +7310,8 @@ inc_proc_ovn_controller_init( >> engine_add_input(&en_acl_id, &en_sb_acl_id, NULL); >> engine_add_input(&en_controller_output, &en_acl_id, >> controller_output_acl_id_handler); >> + engine_add_input(&en_controller_output, &en_datapaths_updated, >> + controlller_output_datapaths_updated_handler); >> >> struct engine_arg engine_arg = { >> .sb_idl = sb_idl_loop->idl, >> @@ -7556,6 +7705,8 @@ main(int argc, char *argv[]) >> engine_get_internal_data(&en_evpn_fdb); >> struct ed_type_evpn_arp *earp_data = >> engine_get_internal_data(&en_evpn_arp); >> + struct ed_type_datapaths_updated *dp_updated_data = >> + engine_get_internal_data(&en_datapaths_updated); >> >> ofctrl_init(&lflow_output_data->group_table, >> &lflow_output_data->meter_table); >> @@ -7655,10 +7806,13 @@ 
main(int argc, char *argv[]) >> unsigned int ovs_cond_seqno = UINT_MAX; >> unsigned int ovnsb_cond_seqno = UINT_MAX; >> unsigned int ovnsb_expected_cond_seqno = UINT_MAX; >> + bool sb_monitor_all = false; >> >> struct controller_engine_ctx ctrl_engine_ctx = { >> .lflow_cache = lflow_cache_create(), >> .if_mgr = if_status_mgr_create(), >> + .ovnsb_expected_cond_seqno = &ovnsb_expected_cond_seqno, >> + .sb_monitor_all = &sb_monitor_all, >> }; >> struct if_status_mgr *if_mgr = ctrl_engine_ctx.if_mgr; >> >> @@ -7673,7 +7827,6 @@ main(int argc, char *argv[]) >> /* Main loop. */ >> int ovnsb_txn_status = 1; >> int ovs_txn_status = 1; >> - bool sb_monitor_all = false; >> struct tracked_acl_ids *tracked_acl_ids = NULL; >> while (!exit_args.exiting) { >> ovsrcu_quiesce_end(); >> @@ -8085,11 +8238,17 @@ main(int argc, char *argv[]) >> runtime_data ? &runtime_data->lbinding_data : >> NULL; >> stopwatch_start(IF_STATUS_MGR_UPDATE_STOPWATCH_NAME, >> time_msec()); >> + >> if_status_mgr_update(if_mgr, binding_data, chassis, >> ovsrec_interface_table_get( >> ovs_idl_loop.idl), >> sbrec_port_binding_table_get( >> ovnsb_idl_loop.idl), >> + runtime_data ? 
>> + >> &runtime_data->local_datapaths >> + : NULL, >> + &dp_updated_data-> >> + waiting_sb_cond_update, >> !ovs_idl_txn, >> !ovnsb_idl_txn); >> stopwatch_stop(IF_STATUS_MGR_UPDATE_STOPWATCH_NAME, >> diff --git a/tests/ovn-controller.at b/tests/ovn-controller.at >> index c98de9bc4..f196213f0 100644 >> --- a/tests/ovn-controller.at >> +++ b/tests/ovn-controller.at >> @@ -3944,3 +3944,67 @@ OVN_CLEANUP([hv1], [hv2 >> /already has encap ip.*cannot duplicate on/d]) >> AT_CLEANUP >> ]) >> + >> +OVN_FOR_EACH_NORTHD([ >> +AT_SETUP([ovn-installed]) >> +ovn_start >> + >> +net_add n1 >> +sim_add hv1 >> + >> +as hv1 >> +ovs-vsctl add-br br-phys >> +ovn_attach n1 br-phys 192.168.0.1 >> +ovn-appctl vlog/set dbg >> +ovs-vsctl add-port br-int vif1 -- \ >> + set Interface vif1 external-ids:iface-id=lsp1 >> + >> +check ovn-nbctl ls-add ls1 >> +sleep_controller hv1 >> +check ovn-nbctl --wait=sb lsp-add ls1 lsp1 -- \ >> + lsp-set-addresses lsp1 "f0:00:00:00:00:01 >> 10.0.0.1" >> + >> +sleep_sb >> +wake_up_controller hv1 >> + >> +# Wait for pflow for lsp1 >> +OVS_WAIT_UNTIL([ >> + ofport=$(as hv1 ovs-vsctl --bare --columns ofport find Interface >> name=vif1) >> + echo "vif1 port=$ofport" >> + test -n "$ofport" && test 1 -le $(as hv1 ovs-ofctl dump-flows br-int >> | grep -c in_port=$ofport) >> +]) >> + >> +# If ovn-installed in ovs, all flows should be installed. >> +# In that case, there should be at least one flow with lsp1 address. >> +OVS_WAIT_UNTIL([ >> + ovn_installed=$(as hv1 ovs-vsctl get Interface vif1 >> external_ids:ovn-installed) >> + echo $ovn_installed >> + flow_count=$(as hv1 ovs-ofctl dump-flows br-int | grep -Fc >> "10.0.0.1") >> + # for the monitor-all=true case the flow gets installed because >> ovn-controller is monitoring all >> + # tables in OVN_SOUTHBOUND. 
>> + if test -n "$ovn_installed"; then >> + test $flow_count -ge 1 >> + else >> + true >> + fi >> +]) >> + >> +wake_up_sb >> +# After the southbound db has woken up and can send the update to the >> +# ovn-controller not monitoring all tables in the southbound db it >> +# should be able to install the interface. >> +OVS_WAIT_UNTIL([ >> + ovn_installed=$(as hv1 ovs-vsctl get Interface vif1 >> external_ids:ovn-installed) >> + flow_count=$(as hv1 ovs-ofctl dump-flows br-int | grep -Fc >> "10.0.0.1") >> + echo "installed=$ovn_installed, count=$flow_count" >> + if test -n "$ovn_installed"; then >> + test $flow_count -ge 1 >> + else >> + false >> + fi >> +]) >> +wait_for_ports_up >> + >> +OVN_CLEANUP([hv1]) >> +AT_CLEANUP >> +]) >> diff --git a/tests/ovn-inc-proc-graph-dump.at b/tests/ >> ovn-inc-proc-graph-dump.at >> index 178310978..bd82f0015 100644 >> --- a/tests/ovn-inc-proc-graph-dump.at >> +++ b/tests/ovn-inc-proc-graph-dump.at >> @@ -478,6 +478,10 @@ digraph "Incremental-Processing-Engine" { >> SB_acl_id [[style=filled, shape=box, fillcolor=white, >> label="SB_acl_id"]]; >> acl_id [[style=filled, shape=box, fillcolor=white, >> label="acl_id"]]; >> SB_acl_id -> acl_id [[label=""]]; >> + sb_cond_seqno [[style=filled, shape=box, fillcolor=white, >> label="sb_cond_seqno"]]; >> + datapaths_updated [[style=filled, shape=box, fillcolor=white, >> label="datapaths_updated"]]; >> + sb_cond_seqno -> datapaths_updated >> [[label="datapaths_update_sb_cond_handler"]]; >> + runtime_data -> datapaths_updated >> [[label="datapaths_updated_runtime_data_handler"]]; >> controller_output [[style=filled, shape=box, fillcolor=white, >> label="controller_output"]]; >> dns_cache -> controller_output [[label=""]]; >> lflow_output -> controller_output >> [[label="controller_output_lflow_output_handler"]]; >> @@ -487,6 +491,7 @@ digraph "Incremental-Processing-Engine" { >> route_exchange -> controller_output >> [[label="controller_output_route_exchange_handler"]]; >> garp_rarp -> 
controller_output >> [[label="controller_output_garp_rarp_handler"]]; >> acl_id -> controller_output >> [[label="controller_output_acl_id_handler"]]; >> + datapaths_updated -> controller_output >> [[label="controlller_output_datapaths_updated_handler"]]; >> } >> ]) >> AT_CLEANUP >> -- >> 2.54.0 >> >> _______________________________________________ dev mailing list [email protected] https://mail.openvswitch.org/mailman/listinfo/ovs-dev
