This commit adds support for synchronizing load balancer health checks
across interconnected availability zones via OVN-IC. Key features include:
1. Added a new Service_Monitor table to the ICSB database:
- Tracks service monitors across AZs with source/target zone info
- Supports status propagation between zones
2. Implemented synchronization logic in ovn-ic:
- Pushes local service monitors to remote AZs via ICSB
- Pulls remote monitors into local SBDB as IC-learned
- Handles status updates in both directions
- Maintains consistency during configuration changes
3. Added comprehensive test cases.
4. Extended OVSDB indexes for efficient lookups.
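Example usage (adapted from the added tests; the load balancer name is
illustrative). A health-checked backend that lives in a remote AZ is
declared by extending its ip_port_mappings entry with the remote AZ name;
ovn-ic then pushes the corresponding service monitor to that AZ and pulls
its status back:

    ovn-nbctl set load_balancer lb0 \
        ip_port_mappings:3.3.3.1=lport3_az2:3.3.3.9:remote:az2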
Signed-off-by: Alexandra Rukomoinikova <arukomoinikova@k2.cloud>
---
ic/ovn-ic.c | 503 ++++++++++++++++++++++++++++++++++++++++++++
ovn-ic-sb.ovsschema | 32 ++-
ovn-ic-sb.xml | 56 +++++
tests/ovn-ic.at | 380 +++++++++++++++++++++++++++++++++
4 files changed, 968 insertions(+), 3 deletions(-)
diff --git a/ic/ovn-ic.c b/ic/ovn-ic.c
index 0d36b0bf5..52c4c8435 100644
--- a/ic/ovn-ic.c
+++ b/ic/ovn-ic.c
@@ -70,6 +70,9 @@ struct ic_context {
struct ovsdb_idl_index *nbrec_port_by_name;
struct ovsdb_idl_index *sbrec_chassis_by_name;
struct ovsdb_idl_index *sbrec_port_binding_by_name;
+ struct ovsdb_idl_index *sbrec_service_monitor_by_local_type;
+ struct ovsdb_idl_index *sbrec_service_monitor_by_ic_learned;
+ struct ovsdb_idl_index *sbrec_service_monitor_by_local_type_logical_port;
struct ovsdb_idl_index *icnbrec_transit_switch_by_name;
struct ovsdb_idl_index *icsbrec_port_binding_by_az;
struct ovsdb_idl_index *icsbrec_port_binding_by_ts;
@@ -77,6 +80,9 @@ struct ic_context {
struct ovsdb_idl_index *icsbrec_route_by_az;
struct ovsdb_idl_index *icsbrec_route_by_ts;
struct ovsdb_idl_index *icsbrec_route_by_ts_az;
+ struct ovsdb_idl_index *icsbrec_service_monitor_by_source_az;
+ struct ovsdb_idl_index *icsbrec_service_monitor_by_target_az;
+ struct ovsdb_idl_index *icsbrec_service_monitor_by_target_az_logical_port;
};
struct ic_state {
@@ -95,6 +101,9 @@ static const char *ssl_private_key_file;
static const char *ssl_certificate_file;
static const char *ssl_ca_cert_file;
+static const struct sbrec_port_binding *find_sb_pb_by_name(
+ struct ovsdb_idl_index *sbrec_port_binding_by_name, const char *name);
+
static void
usage(void)
@@ -2182,6 +2191,428 @@ route_run(struct ic_context *ctx)
hmap_destroy(&ic_lrs);
}
+/*
+ * Data structures and functions related to synchronizing
+ * load balancer health checks between availability zones.
+ */
+struct sync_service_monitor_data {
+ /* Map of service monitors to be pushed to other AZs. */
+ struct hmap pushed_svcs_map;
+ /* Map of service monitors synced from other AZs to our. */
+ struct hmap synced_svcs_map;
+ /* Map of local service monitors in the ICSBDB. */
+ struct hmap local_ic_svcs_map;
+ /* Map of local service monitors in SBDB. */
+ struct hmap local_sb_svcs_map;
+ /* MAC address used for service monitor. */
+ char *prpg_svc_monitor_mac;
+};
+
+struct service_monitor_info {
+ struct hmap_node hmap_node;
+ union {
+ const struct sbrec_service_monitor *sb_rec;
+ const struct icsbrec_service_monitor *ic_rec;
+ } db_rec;
+ /* Destination availability zone name. */
+ char *dst_az_name;
+ /* Source availability zone name. */
+ char *src_az_name;
+ /* Chassis name associated with monitor logical port. */
+ char *chassis_name;
+};
+
+static void
+create_service_monitor_info(struct hmap *svc_map,
+ const void *db_rec,
+ const struct uuid *uuid,
+ const char *src_az_name,
+ const char *target_az_name,
+ const char *chassis_name,
+ bool ic_rec)
+{
+ struct service_monitor_info *svc_mon = xzalloc(sizeof(*svc_mon));
+ size_t hash = uuid_hash(uuid);
+
+ if (ic_rec) {
+ svc_mon->db_rec.ic_rec =
+ (const struct icsbrec_service_monitor *) db_rec;
+ } else {
+ svc_mon->db_rec.sb_rec =
+ (const struct sbrec_service_monitor *) db_rec;
+ }
+
+ svc_mon->dst_az_name = target_az_name ? xstrdup(target_az_name) : NULL;
+ svc_mon->chassis_name = chassis_name ? xstrdup(chassis_name) : NULL;
+ svc_mon->src_az_name = xstrdup(src_az_name);
+
+ hmap_insert(svc_map, &svc_mon->hmap_node, hash);
+}
+
+static void
+destroy_service_monitor_info(struct service_monitor_info *svc_mon)
+{
+ free(svc_mon->src_az_name);
+ free(svc_mon->dst_az_name);
+ free(svc_mon->chassis_name);
+ free(svc_mon);
+}
+
+static void
+refresh_sb_record_cache(struct hmap *svc_mon_map,
+ const struct sbrec_service_monitor *lookup_rec)
+{
+ size_t hash = uuid_hash(&lookup_rec->header_.uuid);
+ struct service_monitor_info *svc_mon;
+
+ HMAP_FOR_EACH_WITH_HASH (svc_mon, hmap_node, hash, svc_mon_map) {
+ ovs_assert(svc_mon->db_rec.sb_rec);
+ if (svc_mon->db_rec.sb_rec == lookup_rec) {
+ hmap_remove(svc_mon_map, &svc_mon->hmap_node);
+ destroy_service_monitor_info(svc_mon);
+ return;
+ }
+ }
+}
+
+static void
+refresh_ic_record_cache(struct hmap *svc_mon_map,
+ const struct icsbrec_service_monitor *lookup_rec)
+{
+ size_t hash = uuid_hash(&lookup_rec->header_.uuid);
+ struct service_monitor_info *svc_mon;
+
+ HMAP_FOR_EACH_WITH_HASH (svc_mon, hmap_node, hash, svc_mon_map) {
+ ovs_assert(svc_mon->db_rec.ic_rec);
+ if (svc_mon->db_rec.ic_rec == lookup_rec) {
+ hmap_remove(svc_mon_map, &svc_mon->hmap_node);
+ destroy_service_monitor_info(svc_mon);
+ return;
+ }
+ }
+}
+
+static void
+remove_unused_ic_records(struct hmap *local_ic_svcs_map)
+{
+ struct service_monitor_info *svc_mon;
+ HMAP_FOR_EACH_SAFE (svc_mon, hmap_node, local_ic_svcs_map) {
+ icsbrec_service_monitor_delete(svc_mon->db_rec.ic_rec);
+ destroy_service_monitor_info(svc_mon);
+ }
+
+ hmap_destroy(local_ic_svcs_map);
+}
+
+static void
+remove_unused_sb_records(struct hmap *local_sb_svcs_map)
+{
+ struct service_monitor_info *svc_mon;
+ HMAP_FOR_EACH_SAFE (svc_mon, hmap_node, local_sb_svcs_map) {
+ sbrec_service_monitor_delete(svc_mon->db_rec.sb_rec);
+ destroy_service_monitor_info(svc_mon);
+ }
+
+ hmap_destroy(local_sb_svcs_map);
+}
+
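+/* Collects SBDB service monitors whose backends are not local
+ * (local == false) and whose options name a target AZ; these are the
+ * candidates pushed into the ICSB. */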
+static void
+create_pushed_svcs_mon(struct ic_context *ctx,
+ struct hmap *pushed_svcs_map)
+{
+ struct sbrec_service_monitor *key =
+ sbrec_service_monitor_index_init_row(
+ ctx->sbrec_service_monitor_by_local_type);
+
+ sbrec_service_monitor_index_set_local(key, false);
+
+ const struct sbrec_service_monitor *sb_rec;
+ SBREC_SERVICE_MONITOR_FOR_EACH_EQUAL (sb_rec, key,
+ ctx->sbrec_service_monitor_by_local_type) {
+ const char *target_az_name = smap_get(&sb_rec->options, "az-name");
+ if (!target_az_name || !target_az_name[0]) {
+ continue;
+ }
+ create_service_monitor_info(pushed_svcs_map, sb_rec,
+ &sb_rec->header_.uuid,
+ ctx->runned_az->name, target_az_name,
+ NULL, false);
+ }
+
+ sbrec_service_monitor_index_destroy_row(key);
+}
+
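+/* Collects ICSB service monitors that target this AZ and whose monitored
+ * logical port is bound and up here; these are synced into the local
+ * SBDB. */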
+static void
+create_synced_svcs_mon(struct ic_context *ctx,
+ struct hmap *synced_svcs_map)
+{
+ struct icsbrec_service_monitor *key =
+ icsbrec_service_monitor_index_init_row(
+ ctx->icsbrec_service_monitor_by_target_az);
+
+ icsbrec_service_monitor_index_set_target_availability_zone(
+ key, ctx->runned_az->name);
+
+ const struct icsbrec_service_monitor *ic_rec;
+ ICSBREC_SERVICE_MONITOR_FOR_EACH_EQUAL (ic_rec, key,
+ ctx->icsbrec_service_monitor_by_target_az) {
+
+ const struct sbrec_port_binding *pb =
+ find_sb_pb_by_name(ctx->sbrec_port_binding_by_name,
+ ic_rec->logical_port);
+
+ if (!pb || !pb->up) {
+ continue;
+ }
+
+ const char *chassis_name = pb->chassis ? pb->chassis->name : NULL;
+ create_service_monitor_info(synced_svcs_map, ic_rec,
+ &ic_rec->header_.uuid,
+ ctx->runned_az->name,
+ NULL, chassis_name, true);
+ }
+
+ icsbrec_service_monitor_index_destroy_row(key);
+}
+
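+/* Snapshots the ICSB records created by this AZ; entries still present in
+ * this map after the push phase are stale and get removed. */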
+static void
+create_local_ic_svcs_map(struct ic_context *ctx,
+ struct hmap *owned_svc_map)
+{
+ struct icsbrec_service_monitor *key =
+ icsbrec_service_monitor_index_init_row(
+ ctx->icsbrec_service_monitor_by_source_az);
+
+ icsbrec_service_monitor_index_set_source_availability_zone(
+ key, ctx->runned_az->name);
+
+ const struct icsbrec_service_monitor *ic_rec;
+ ICSBREC_SERVICE_MONITOR_FOR_EACH_EQUAL (ic_rec, key,
+ ctx->icsbrec_service_monitor_by_source_az) {
+ create_service_monitor_info(owned_svc_map, ic_rec,
+ &ic_rec->header_.uuid,
+ ctx->runned_az->name, NULL,
+ NULL, true);
+ }
+
+ icsbrec_service_monitor_index_destroy_row(key);
+}
+
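+/* Snapshots the IC-learned records in the local SBDB; entries still
+ * present in this map after the pull phase are stale and get removed. */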
+static void
+create_local_sb_svcs_map(struct ic_context *ctx,
+ struct hmap *owned_svc_map)
+{
+ struct sbrec_service_monitor *key =
+ sbrec_service_monitor_index_init_row(
+ ctx->sbrec_service_monitor_by_ic_learned);
+
+ sbrec_service_monitor_index_set_ic_learned(
+ key, true);
+
+ const struct sbrec_service_monitor *sb_rec;
+ SBREC_SERVICE_MONITOR_FOR_EACH_EQUAL (sb_rec, key,
+ ctx->sbrec_service_monitor_by_ic_learned) {
+ create_service_monitor_info(owned_svc_map, sb_rec,
+ &sb_rec->header_.uuid,
+ ctx->runned_az->name, NULL,
+ NULL, false);
+ }
+
+ sbrec_service_monitor_index_destroy_row(key);
+}
+
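+/* Finds the local SBDB service monitor matching an ICSB record by
+ * logical_port, ip, port, src_ip and protocol. */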
+static const struct sbrec_service_monitor *
+lookup_sb_svc_rec(struct ic_context *ctx,
+ const struct service_monitor_info *svc_mon)
+{
+ const struct icsbrec_service_monitor *db_rec =
+ svc_mon->db_rec.ic_rec;
+ struct sbrec_service_monitor *key =
+ sbrec_service_monitor_index_init_row(
+ ctx->sbrec_service_monitor_by_local_type_logical_port);
+
+ sbrec_service_monitor_index_set_local(key, true);
+ sbrec_service_monitor_index_set_logical_port(key, db_rec->logical_port);
+
+ const struct sbrec_service_monitor *sb_rec;
+ SBREC_SERVICE_MONITOR_FOR_EACH_EQUAL (sb_rec, key,
+ ctx->sbrec_service_monitor_by_local_type_logical_port) {
+ if (db_rec->port == sb_rec->port &&
+ !strcmp(db_rec->ip, sb_rec->ip) &&
+ !strcmp(db_rec->src_ip, sb_rec->src_ip) &&
+ !strcmp(db_rec->protocol, sb_rec->protocol)) {
+ sbrec_service_monitor_index_destroy_row(key);
+ return sb_rec;
+ }
+ }
+
+ sbrec_service_monitor_index_destroy_row(key);
+
+ return NULL;
+}
+
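+/* Finds the ICSB service monitor in the destination AZ matching a local
+ * SBDB record by logical_port, ip, port, src_ip and protocol. */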
+static const struct icsbrec_service_monitor *
+lookup_icsb_svc_rec(struct ic_context *ctx,
+ const struct service_monitor_info *svc_mon)
+{
+ const struct sbrec_service_monitor *db_rec =
+ svc_mon->db_rec.sb_rec;
+ struct icsbrec_service_monitor *key =
+ icsbrec_service_monitor_index_init_row(
+ ctx->icsbrec_service_monitor_by_target_az_logical_port);
+
+ ovs_assert(svc_mon->dst_az_name);
+ icsbrec_service_monitor_index_set_target_availability_zone(
+ key, svc_mon->dst_az_name);
+
+ icsbrec_service_monitor_index_set_logical_port(
+ key, db_rec->logical_port);
+
+ const struct icsbrec_service_monitor *ic_rec;
+ ICSBREC_SERVICE_MONITOR_FOR_EACH_EQUAL (ic_rec, key,
+ ctx->icsbrec_service_monitor_by_target_az_logical_port) {
+ if (db_rec->port == ic_rec->port &&
+ !strcmp(db_rec->ip, ic_rec->ip) &&
+ !strcmp(db_rec->src_ip, ic_rec->src_ip) &&
+ !strcmp(db_rec->protocol, ic_rec->protocol) &&
+ !strcmp(db_rec->logical_port, ic_rec->logical_port)) {
+ icsbrec_service_monitor_index_destroy_row(key);
+ return ic_rec;
+ }
+ }
+
+ icsbrec_service_monitor_index_destroy_row(key);
+
+ return NULL;
+}
+
+static void
+create_service_monitor_data(struct ic_context *ctx,
+ struct sync_service_monitor_data *sync_data)
+{
+ const struct sbrec_sb_global *sb_global = sbrec_sb_global_first(
+ ctx->ovnsb_idl);
+ const char *svc_monitor_mac = sb_global
+ ? smap_get_def(&sb_global->options, "svc_monitor_mac", "")
+ : "";
+
+ sync_data->prpg_svc_monitor_mac = xstrdup(svc_monitor_mac);
+
+ create_pushed_svcs_mon(ctx, &sync_data->pushed_svcs_map);
+ create_synced_svcs_mon(ctx, &sync_data->synced_svcs_map);
+ create_local_ic_svcs_map(ctx, &sync_data->local_ic_svcs_map);
+ create_local_sb_svcs_map(ctx, &sync_data->local_sb_svcs_map);
+}
+
+static void
+destroy_service_monitor_data(struct sync_service_monitor_data *sync_data)
+{
+ struct service_monitor_info *svc_mon;
+ HMAP_FOR_EACH_SAFE (svc_mon, hmap_node, &sync_data->pushed_svcs_map) {
+ destroy_service_monitor_info(svc_mon);
+ }
+
+ HMAP_FOR_EACH_SAFE (svc_mon, hmap_node, &sync_data->synced_svcs_map) {
+ destroy_service_monitor_info(svc_mon);
+ }
+
+ hmap_destroy(&sync_data->pushed_svcs_map);
+ hmap_destroy(&sync_data->synced_svcs_map);
+ free(sync_data->prpg_svc_monitor_mac);
+}
+
+static void
+sync_service_monitor(struct ic_context *ctx)
+{
+ if (!ctx->ovnisb_txn || !ctx->ovnsb_txn) {
+ return;
+ }
+
+ struct sync_service_monitor_data sync_data;
+ memset(&sync_data, 0, sizeof(sync_data));
+ hmap_init(&sync_data.pushed_svcs_map);
+ hmap_init(&sync_data.synced_svcs_map);
+ hmap_init(&sync_data.local_ic_svcs_map);
+ hmap_init(&sync_data.local_sb_svcs_map);
+
+ create_service_monitor_data(ctx, &sync_data);
+
+ struct service_monitor_info *svc_mon;
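+ /* Push phase: create or update ICSB records for monitors that target
+ * remote AZs, and copy back any status the target AZ has already
+ * reported. */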
+ HMAP_FOR_EACH_SAFE (svc_mon, hmap_node, &sync_data.pushed_svcs_map) {
+ const struct sbrec_service_monitor *db_rec = svc_mon->db_rec.sb_rec;
+ const struct icsbrec_service_monitor *ic_rec =
+ lookup_icsb_svc_rec(ctx, svc_mon);
+
+ if (ic_rec) {
+ sbrec_service_monitor_set_status(db_rec, ic_rec->status);
+ } else {
+ ic_rec = icsbrec_service_monitor_insert(ctx->ovnisb_txn);
+ icsbrec_service_monitor_set_ip(ic_rec, db_rec->ip);
+ icsbrec_service_monitor_set_port(ic_rec, db_rec->port);
+ icsbrec_service_monitor_set_src_ip(ic_rec, db_rec->src_ip);
+ icsbrec_service_monitor_set_src_mac(ic_rec,
+ sync_data.prpg_svc_monitor_mac);
+ icsbrec_service_monitor_set_protocol(ic_rec, db_rec->protocol);
+ icsbrec_service_monitor_set_logical_port(ic_rec,
+ db_rec->logical_port);
+ icsbrec_service_monitor_set_target_availability_zone(ic_rec,
+ svc_mon->dst_az_name);
+ icsbrec_service_monitor_set_source_availability_zone(ic_rec,
+ svc_mon->src_az_name);
+ }
+
+ /* Always update options since they may change via NB configuration. */
+ icsbrec_service_monitor_set_options(ic_rec, &db_rec->options);
+ refresh_ic_record_cache(&sync_data.local_ic_svcs_map, ic_rec);
+ }
+
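+ /* Pull phase: create or update local SBDB records (marked ic_learned)
+ * for monitors that target this AZ, and report their local status back
+ * to the source AZ via the ICSB. */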
+ HMAP_FOR_EACH_SAFE (svc_mon, hmap_node, &sync_data.synced_svcs_map) {
+ const struct icsbrec_service_monitor *db_rec =
+ svc_mon->db_rec.ic_rec;
+ const struct sbrec_service_monitor *sb_rec =
+ lookup_sb_svc_rec(ctx, svc_mon);
+
+ if (sb_rec) {
+ icsbrec_service_monitor_set_status(svc_mon->db_rec.ic_rec,
+ sb_rec->status);
+ } else {
+ sb_rec = sbrec_service_monitor_insert(ctx->ovnsb_txn);
+ sbrec_service_monitor_set_ip(sb_rec, db_rec->ip);
+ sbrec_service_monitor_set_port(sb_rec, db_rec->port);
+ sbrec_service_monitor_set_src_ip(sb_rec, db_rec->src_ip);
+ /* Set svc_monitor_mac from local SBDB. */
+ sbrec_service_monitor_set_src_mac(sb_rec,
+ sync_data.prpg_svc_monitor_mac);
+ sbrec_service_monitor_set_protocol(sb_rec,
+ db_rec->protocol);
+ sbrec_service_monitor_set_logical_port(sb_rec,
+ db_rec->logical_port);
+ sbrec_service_monitor_set_local(sb_rec, true);
+ sbrec_service_monitor_set_ic_learned(sb_rec, true);
+ }
+
+ /* Always update options since they may change via
+ * NB configuration. Also update chassis_name if
+ * the port has been reassigned to a different chassis.
+ */
+ if (svc_mon->chassis_name) {
+ sbrec_service_monitor_set_chassis_name(sb_rec,
+ svc_mon->chassis_name);
+ }
+ sbrec_service_monitor_set_options(sb_rec, &db_rec->options);
+ refresh_sb_record_cache(&sync_data.local_sb_svcs_map, sb_rec);
+ }
+
+ /* Delete locally created records that are no longer used. */
+ remove_unused_ic_records(&sync_data.local_ic_svcs_map);
+ remove_unused_sb_records(&sync_data.local_sb_svcs_map);
+
+ destroy_service_monitor_data(&sync_data);
+}
+
/*
* This function implements a sequence number protocol that can be used by
* the INB end user to verify that ISB is synced with all the changes that
@@ -2274,6 +2705,7 @@ ovn_db_run(struct ic_context *ctx)
ts_run(ctx);
port_binding_run(ctx);
route_run(ctx);
+ sync_service_monitor(ctx);
}
static void
@@ -2570,6 +3002,9 @@ main(int argc, char *argv[])
struct ovsdb_idl_loop ovnsb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER(
ovsdb_idl_create(ovnsb_db, &sbrec_idl_class, false, true));
+ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_sb_global);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_sb_global_col_options);
+
ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_chassis);
ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_encaps);
ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_name);
@@ -2599,6 +3034,35 @@ main(int argc, char *argv[])
&sbrec_port_binding_col_external_ids);
ovsdb_idl_add_column(ovnsb_idl_loop.idl,
&sbrec_port_binding_col_chassis);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_port_binding_col_up);
+
+ ovsdb_idl_add_table(ovnsb_idl_loop.idl,
+ &sbrec_table_service_monitor);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_chassis_name);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_external_ids);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_ip);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_logical_port);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_port);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_protocol);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_src_ip);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_src_mac);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_local);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_ic_learned);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_status);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_options);
/* Create IDL indexes */
struct ovsdb_idl_index *nbrec_ls_by_name
@@ -2617,6 +3081,19 @@ main(int argc, char *argv[])
= ovsdb_idl_index_create1(ovnsb_idl_loop.idl,
&sbrec_chassis_col_name);
+ struct ovsdb_idl_index *sbrec_service_monitor_by_local_type
+ = ovsdb_idl_index_create1(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_local);
+
+ struct ovsdb_idl_index *sbrec_service_monitor_by_ic_learned
+ = ovsdb_idl_index_create1(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_ic_learned);
+
+ struct ovsdb_idl_index *sbrec_service_monitor_by_local_type_logical_port
+ = ovsdb_idl_index_create2(ovnsb_idl_loop.idl,
+ &sbrec_service_monitor_col_local,
+ &sbrec_service_monitor_col_logical_port);
+
struct ovsdb_idl_index *icnbrec_transit_switch_by_name
= ovsdb_idl_index_create1(ovninb_idl_loop.idl,
&icnbrec_transit_switch_col_name);
@@ -2647,6 +3124,19 @@ main(int argc, char *argv[])
&icsbrec_route_col_transit_switch,
&icsbrec_route_col_availability_zone);
+ struct ovsdb_idl_index *icsbrec_service_monitor_by_source_az
+ = ovsdb_idl_index_create1(ovnisb_idl_loop.idl,
+ &icsbrec_service_monitor_col_source_availability_zone);
+
+ struct ovsdb_idl_index *icsbrec_service_monitor_by_target_az
+ = ovsdb_idl_index_create1(ovnisb_idl_loop.idl,
+ &icsbrec_service_monitor_col_target_availability_zone);
+
+ struct ovsdb_idl_index *icsbrec_service_monitor_by_target_az_logical_port
+ = ovsdb_idl_index_create2(ovnisb_idl_loop.idl,
+ &icsbrec_service_monitor_col_target_availability_zone,
+ &icsbrec_service_monitor_col_logical_port);
+
unixctl_command_register("nb-connection-status", "", 0, 0,
ovn_conn_show, ovnnb_idl_loop.idl);
unixctl_command_register("sb-connection-status", "", 0, 0,
@@ -2698,6 +3188,12 @@ main(int argc, char *argv[])
.nbrec_port_by_name = nbrec_port_by_name,
.sbrec_port_binding_by_name = sbrec_port_binding_by_name,
.sbrec_chassis_by_name = sbrec_chassis_by_name,
+ .sbrec_service_monitor_by_local_type =
+ sbrec_service_monitor_by_local_type,
+ .sbrec_service_monitor_by_ic_learned =
+ sbrec_service_monitor_by_ic_learned,
+ .sbrec_service_monitor_by_local_type_logical_port =
+ sbrec_service_monitor_by_local_type_logical_port,
.icnbrec_transit_switch_by_name =
icnbrec_transit_switch_by_name,
.icsbrec_port_binding_by_az = icsbrec_port_binding_by_az,
@@ -2706,6 +3202,12 @@ main(int argc, char *argv[])
.icsbrec_route_by_az = icsbrec_route_by_az,
.icsbrec_route_by_ts = icsbrec_route_by_ts,
.icsbrec_route_by_ts_az = icsbrec_route_by_ts_az,
+ .icsbrec_service_monitor_by_source_az =
+ icsbrec_service_monitor_by_source_az,
+ .icsbrec_service_monitor_by_target_az =
+ icsbrec_service_monitor_by_target_az,
+ .icsbrec_service_monitor_by_target_az_logical_port =
+ icsbrec_service_monitor_by_target_az_logical_port,
};
if (!state.had_lock && ovsdb_idl_has_lock(ovnsb_idl_loop.idl)) {
@@ -2731,6 +3233,7 @@ main(int argc, char *argv[])
ovn_db_run(&ctx);
update_sequence_numbers(&ctx, &ovnisb_idl_loop);
}
+
}
int rc1 = ovsdb_idl_loop_commit_and_wait(&ovnnb_idl_loop);
diff --git a/ovn-ic-sb.ovsschema b/ovn-ic-sb.ovsschema
index 22f75b06c..34b5457bb 100644
--- a/ovn-ic-sb.ovsschema
+++ b/ovn-ic-sb.ovsschema
@@ -1,7 +1,7 @@
{
"name": "OVN_IC_Southbound",
- "version": "2.1.0",
- "cksum": "1466425967 7126",
+ "version": "2.2.0",
+ "cksum": "2294868959 8438",
"tables": {
"IC_SB_Global": {
"columns": {
@@ -145,6 +145,32 @@
"value": "string",
"min": 0,
"max": "unlimited"}}},
- "maxRows": 1}
+ "maxRows": 1},
+ "Service_Monitor": {
+ "columns": {
+ "ip": {"type": "string"},
+ "protocol": {
+ "type": {"key": {"type": "string",
+ "enum": ["set", ["tcp", "udp"]]},
+ "min": 0, "max": 1}},
+ "port": {"type": {"key": {"type": "integer",
+ "minInteger": 0,
+ "maxInteger": 65535}}},
+ "logical_port": {"type": "string"},
+ "src_ip": {"type": "string"},
+ "src_mac": {"type": "string"},
+ "status": {
+ "type": {"key": {"type": "string",
+ "enum": ["set", ["online", "offline", "error"]]},
+ "min": 0, "max": 1}},
+ "target_availability_zone": {"type": "string"},
+ "source_availability_zone": {"type": "string"},
+ "options": {
+ "type": {"key": "string", "value": "string",
+ "min": 0, "max": "unlimited"}},
+ "external_ids": {
+ "type": {"key": "string", "value": "string",
+ "min": 0, "max": "unlimited"}}},
+ "isRoot": true}
}
}
diff --git a/ovn-ic-sb.xml b/ovn-ic-sb.xml
index 8f98c7ebb..35dc1f509 100644
--- a/ovn-ic-sb.xml
+++ b/ovn-ic-sb.xml
@@ -691,4 +691,60 @@
</group>
</table>
+ <table name="Service_Monitor">
+ <column name="ip">
+ IP address of the service to be monitored.
+ Copied from the source Southbound Database record.
+ </column>
+
+ <column name="protocol">
+ The protocol of the service.
+ Copied from the source Southbound Database record.
+ </column>
+
+ <column name="port">
+ The TCP or UDP port of the service.
+ Copied from the source Southbound Database record.
+ </column>
+
+ <column name="logical_port">
+ The VIF of the logical port on which the service is running. The
+ <code>ovn-controller</code> that binds this <code>logical_port</code>
+ monitors the service by sending periodic monitor packets.
+ Copied from the source Southbound Database record.
+ </column>
+
+ <column name="src_ip">
+ Source IPv4 or IPv6 address to use in the service monitor packet.
+ Copied from the source Southbound Database record.
+ </column>
+
+ <column name="src_mac">
+ Source Ethernet address to use in the service monitor packet.
+ Taken from the <code>svc_monitor_mac</code> option of the source AZ's
+ Southbound database.
+ </column>
+
+ <column name="status">
+ The health status of the service monitor, synchronized from the target
+ AZ's Southbound database.
+ </column>
+
+ <column name="target_availability_zone">
+ The Availability Zone where the monitored service endpoint is located.
+ </column>
+
+ <column name="source_availability_zone">
+ The Availability Zone that created this entry in the ICSB database and
+ retains ownership of its lifecycle.
+ </column>
+
+ <column name="options">
+ Same as in the Service_Monitor table of the Southbound database.
+ Copied from the source record.
+ </column>
+
+ <column name="external_ids">
+ Same as in the Service_Monitor table of the Southbound database.
+ Copied from the source record.
+ </column>
+ </table>
</database>
diff --git a/tests/ovn-ic.at b/tests/ovn-ic.at
index 49a409015..817efd090 100644
--- a/tests/ovn-ic.at
+++ b/tests/ovn-ic.at
OVS_WAIT_FOR_OUTPUT([ovn_as az1 ovn-nbctl lr-route-list lr11 | grep 2001 |
OVN_CLEANUP_IC([az1], [az2])
AT_CLEANUP
])
+
+AT_BANNER([OVN Interconnection Service Monitor synchronization])
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn-ic -- Service Monitor synchronization])
+
+ovn_init_ic_db
+ovn_start az1
+ovn_start az2
+ovn_start az3
+
+# ┌─────────────────────────────────────────────────────────────────────────┐
+# │ Service Monitor Sync Check (Cross-AZ LB Setup)                          │
+# ├───────────────┬───────────────┬─────────────────────────────────────────┤
+# │ AZ1           │ AZ2           │ AZ3 (Backends only)                     │
+# ├───────────────┼───────────────┼─────────────────────────────────────────┤
+# │ ┌─────────┐   │ ┌─────────┐   │ ┌─────────┐ ┌─────────┐ ┌─────────┐     │
+# │ │ LB1     │   │ │ LB2     │   │ │ lport5  │ │ lport6  │ │ lport7  │     │
+# │ ├─────────┤   │ ├─────────┤   │ │ (AZ3)   │ │ (AZ3)   │ │ (AZ3)   │     │
+# │ │ - lport1├───┼─┤ - lport3│   │ └─────────┘ └─────────┘ └─────────┘     │
+# │ │   (AZ1) │   │ │   (AZ2) │   │                                         │
+# │ │ - lport3│   │ │ - lport1├───┼───────────────────┐                     │
+# │ │   (AZ2) │   │ │   (AZ1) │   │                   │                     │
+# │ │ - lport5│   │ └─────────┘   │                   │                     │
+# │ │   (AZ3) │   │               │                   │                     │
+# │ └─────────┘   │ ┌─────────┐   │                   │                     │
+# │               │ │ LB3     │   │                   │                     │
+# │               │ ├─────────┤   │                   │                     │
+# │               │ │ - lport5├───┼───────────────────┘                     │
+# │               │ │   (AZ3) │   │                                         │
+# │               │ │ - lport6│   │                                         │
+# │               │ │   (AZ3) │   │                                         │
+# │               │ │ - lport2├───┘                                         │
+# │               │ │   (AZ1) │                                             │
+# │               │ └─────────┘                                             │
+# └───────────────┴───────────────┴─────────────────────────────────────────┘
+#
+# Physical Backend Locations:
+# - AZ1: lport1, lport2
+# - AZ2: lport3, lport4
+# - AZ3: lport5, lport6, lport7
+#
+# Load Balancer Configurations:
+# - LB1 (AZ1): lport1(AZ1), lport3(AZ2), lport5(AZ3), lport4(AZ2)
+# - LB2 (AZ2): lport3(AZ2), lport1(AZ1)
+# - LB3 (AZ2): lport5(AZ3), lport6(AZ3), lport2(AZ1)
+
+
+# AZ1 Configuration
+ovn_as az1
+check ovn-nbctl ls-add az1_ls1
+check ovn-nbctl lsp-add az1_ls1 lport1_az1
+check ovn-nbctl lsp-add az1_ls1 lport2_az1
+
+check ovn-nbctl lb-add az1_lb1 10.10.10.1:80 1.1.1.1:10880,5.5.5.1:10880,3.3.3.1:10880,4.4.4.1:10880
+check ovn-nbctl ls-lb-add az1_ls1 az1_lb1
+AT_CHECK([ovn-nbctl --wait=sb \
+ -- --id=@hc create Load_Balancer_Health_Check vip="10.10.10.1\:80" \
+ options:failure_count=100 \
+ -- add Load_Balancer . health_check @hc | uuidfilt], [0], [<0>
+])
+
+check ovn-nbctl set load_balancer az1_lb1 ip_port_mappings:1.1.1.1=lport1_az1:1.1.1.9
+check ovn-nbctl set load_balancer az1_lb1 ip_port_mappings:3.3.3.1=lport3_az2:3.3.3.9:remote:az2
+check ovn-nbctl set load_balancer az1_lb1 ip_port_mappings:5.5.5.1=lport5_az3:5.5.5.9:remote:az3
+check ovn-nbctl set load_balancer az1_lb1 ip_port_mappings:4.4.4.1=lport4_az2:4.4.4.9:remote:az2
+
+check_row_count sb:Service_Monitor 4
+check_row_count ic-sb:Service_Monitor 3
+
+AT_CHECK([ovn-sbctl lflow-list az1_ls1 | grep arp | grep -c "1.1.1.9"], [0],
[dnl
+1
+])
+
+AT_CHECK([ovn-sbctl lflow-list az1_ls1 | grep arp | grep -c "3.3.3.9"], [1],
[dnl
+0
+])
+
+# AZ2 Configuration
+ovn_as az2
+check ovn-nbctl ls-add az2_ls1
+check ovn-nbctl lsp-add az2_ls1 lport3_az2
+check ovn-nbctl lsp-add az2_ls1 lport4_az2
+
+check ovn-nbctl lb-add az2_lb1 20.20.20.1:80 3.3.3.1:10880,1.1.1.1:10880
+check ovn-nbctl ls-lb-add az2_ls1 az2_lb1
+AT_CHECK([ovn-nbctl --wait=sb \
+ -- --id=@hc create Load_Balancer_Health_Check vip="20.20.20.1\:80" \
+ options:failure_count=100 \
+ -- add Load_Balancer . health_check @hc | uuidfilt], [0], [<0>
+])
+
+check ovn-nbctl set load_balancer az2_lb1 ip_port_mappings:3.3.3.1=lport3_az2:3.3.3.9
+check ovn-nbctl set load_balancer az2_lb1 ip_port_mappings:1.1.1.1=lport1_az1:1.1.1.9:remote:az1
+
+check_row_count sb:Service_Monitor 3
+check_row_count ic-sb:Service_Monitor 4
+
+ovn_as az1
+check_row_count sb:Service_Monitor 4
+check_column false sb:Service_Monitor ic_learned logical_port=lport1_az1
+check_column true sb:Service_Monitor local logical_port=lport1_az1
+
+ovn_as az2
+check_column "3.3.3.1" sb:Service_Monitor ip logical_port=lport3_az2
+check_column 10880 sb:Service_Monitor port logical_port=lport3_az2
+check_column tcp sb:Service_Monitor protocol logical_port=lport3_az2
+check_column "3.3.3.9" sb:Service_Monitor src_ip logical_port=lport3_az2
+check_column true sb:Service_Monitor ic_learned logical_port=lport3_az2
+check_column true sb:Service_Monitor local logical_port=lport3_az2
+
+check_column false sb:Service_Monitor ic_learned logical_port=lport1_az1
+check_column false sb:Service_Monitor local logical_port=lport1_az1
+
+check_column true sb:Service_Monitor ic_learned logical_port=lport4_az2
+check_column true sb:Service_Monitor local logical_port=lport4_az2
+
+# Check ICSB records
+check_column "4.4.4.1" ic-sb:Service_Monitor ip logical_port=lport4_az2
+check_column 10880 ic-sb:Service_Monitor port logical_port=lport4_az2
+check_column tcp ic-sb:Service_Monitor protocol logical_port=lport4_az2
+check_column "4.4.4.9" ic-sb:Service_Monitor src_ip logical_port=lport4_az2
+check_column az2 ic-sb:Service_Monitor target_availability_zone logical_port=lport4_az2
+check_column az1 ic-sb:Service_Monitor source_availability_zone logical_port=lport4_az2
+
+check_column az2 ic-sb:Service_Monitor target_availability_zone logical_port=lport3_az2
+check_column az1 ic-sb:Service_Monitor source_availability_zone logical_port=lport3_az2
+
+check_column az1 ic-sb:Service_Monitor target_availability_zone logical_port=lport1_az1
+check_column az2 ic-sb:Service_Monitor source_availability_zone logical_port=lport1_az1
+
+check_column az3 ic-sb:Service_Monitor target_availability_zone logical_port=lport5_az3
+check_column az1 ic-sb:Service_Monitor source_availability_zone logical_port=lport5_az3
+
+# AZ3 Configuration
+ovn_as az3
+check ovn-nbctl ls-add az3_ls1
+
+# Check there are no Service_Monitor records when we have no ports.
+check_row_count sb:Service_Monitor 0
+check_row_count nb:Load_Balancer 0
+
+check ovn-nbctl lsp-add az3_ls1 lport5_az3
+check ovn-nbctl lsp-add az3_ls1 lport6_az3
+check ovn-nbctl lsp-add az3_ls1 lport7_az3
+
+# Check there is one Service Monitor learned from az1.
+check_row_count sb:Service_Monitor 1
+
+check_column "5.5.5.1" sb:Service_Monitor ip logical_port=lport5_az3
+check_column 10880 sb:Service_Monitor port logical_port=lport5_az3
+check_column tcp sb:Service_Monitor protocol logical_port=lport5_az3
+check_column "5.5.5.9" sb:Service_Monitor src_ip logical_port=lport5_az3
+check_column true sb:Service_Monitor ic_learned logical_port=lport5_az3
+check_column true sb:Service_Monitor local logical_port=lport5_az3
+
+AT_CHECK([ovn-sbctl lflow-list az3_ls1 | grep arp | grep -c "5.5.5.9"], [0],
[dnl
+1
+])
+
+# Add one more LB in AZ2 and change the port for one backend; check that one
+# more record appears for this Service_Monitor.
+ovn_as az2
+check ovn-nbctl lb-add az2_lb2 20.20.20.2:80 2.2.2.1:10880,5.5.5.1:10881,6.6.6.1:10880
+check ovn-nbctl ls-lb-add az2_ls1 az2_lb2
+AT_CHECK([ovn-nbctl --wait=sb \
+ -- --id=@hc create Load_Balancer_Health_Check vip="20.20.20.2\:80" \
+ options:failure_count=100 \
+ -- add Load_Balancer az2_lb2 health_check @hc | uuidfilt], [0], [<0>
+])
+
+check ovn-nbctl set load_balancer az2_lb2 ip_port_mappings:2.2.2.1=lport2_az1:2.2.2.9:remote:az1
+check ovn-nbctl set load_balancer az2_lb2 ip_port_mappings:5.5.5.1=lport5_az3:5.5.5.9:remote:az3
+check ovn-nbctl set load_balancer az2_lb2 ip_port_mappings:6.6.6.1=lport6_az3:6.6.6.9:remote:az3
+
+check_row_count sb:Service_Monitor 6
+
+AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep arp | grep -c "6.6.6.9"], [1],
[dnl
+0
+])
+
+AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep arp | grep -c "5.5.5.9"], [1],
[dnl
+0
+])
+
+AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep arp | grep -c "2.2.2.9"], [1],
[dnl
+0
+])
+
+AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep arp | grep -c "3.3.3.9"], [0],
[dnl
+1
+])
+
+ovn_as az1
+
+check_row_count sb:Service_Monitor 5
+
+check_column true sb:Service_Monitor ic_learned logical_port=lport2_az1
+check_column true sb:Service_Monitor local logical_port=lport2_az1
+
+ovn_as az3
+
+check_row_count sb:Service_Monitor 3
+
+AT_CHECK([ovn-sbctl lflow-list az3_ls1 | grep arp | wc -l], [0], [dnl
+3
+])
+
+AT_CHECK([ovn-sbctl lflow-list az3_ls1 | grep arp | grep -c "6.6.6.9"], [0],
[dnl
+1
+])
+
+AT_CHECK([ovn-sbctl lflow-list az3_ls1 | grep arp | grep -c "5.5.5.9"], [0],
[dnl
+1
+])
+
+# Check status propagation.
+
+ovn_as az1
+AT_CHECK([ovn-sbctl lflow-list az1_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.10.10.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 10.10.10.1; reg2[[0..15]] = 80; ct_lb_mark(backends=1.1.1.1:10880,5.5.5.1:10880,3.3.3.1:10880,4.4.4.1:10880);)
+])
+
+ovn_as az2
+AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.1; reg2[[0..15]] = 80; ct_lb_mark(backends=3.3.3.1:10880,1.1.1.1:10880);)
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.2 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.2; reg2[[0..15]] = 80; ct_lb_mark(backends=2.2.2.1:10880,5.5.5.1:10881,6.6.6.1:10880);)
+])
+
+ovn_as az1
+svc_lport1_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport1_az1)
+
+check ovn-sbctl set Service_Monitor $svc_lport1_uuid status="offline"
+
+check_column offline ic-sb:Service_Monitor status logical_port=lport1_az1
+
+ovn_as az1
+AT_CHECK([ovn-sbctl lflow-list az1_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.10.10.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 10.10.10.1; reg2[[0..15]] = 80; ct_lb_mark(backends=5.5.5.1:10880,3.3.3.1:10880,4.4.4.1:10880);)
+])
+
+ovn_as az2
+AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.1; reg2[[0..15]] = 80; ct_lb_mark(backends=3.3.3.1:10880);)
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.2 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.2; reg2[[0..15]] = 80; ct_lb_mark(backends=2.2.2.1:10880,5.5.5.1:10881,6.6.6.1:10880);)
+])
+
+ovn_as az3
+svc_lport6_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport6_az3)
+
+check ovn-sbctl set Service_Monitor $svc_lport6_uuid status="offline"
+
+check_column offline ic-sb:Service_Monitor status logical_port=lport6_az3
+
+ovn_as az2
+check_column offline sb:Service_Monitor status logical_port=lport6_az3
+
+AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.1; reg2[[0..15]] = 80; ct_lb_mark(backends=3.3.3.1:10880);)
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.2 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.2; reg2[[0..15]] = 80; ct_lb_mark(backends=2.2.2.1:10880,5.5.5.1:10881);)
+])
+
+ovn_as az1
+svc_lport2_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport2_az1)
+
+check ovn-sbctl set Service_Monitor $svc_lport2_uuid status="offline"
+
+check_column offline ic-sb:Service_Monitor status logical_port=lport2_az1
+
+ovn_as az2
+AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.1; reg2[[0..15]] = 80; ct_lb_mark(backends=3.3.3.1:10880);)
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.2 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.2; reg2[[0..15]] = 80; ct_lb_mark(backends=5.5.5.1:10881);)
+])
+
+svc_lport4_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport4_az2)
+
+check ovn-sbctl set Service_Monitor $svc_lport4_uuid status="offline"
+
+check_column offline ic-sb:Service_Monitor status logical_port=lport4_az2
+
+ovn_as az1
+AT_CHECK([ovn-sbctl lflow-list az1_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.10.10.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 10.10.10.1; reg2[[0..15]] = 80; ct_lb_mark(backends=5.5.5.1:10880,3.3.3.1:10880);)
+])
+
+ovn_as az2
+check_row_count sb:Service_Monitor 1 logical_port=lport4_az2
+
+ovn_as az1
+az1_lb1_uuid=$(ovn-nbctl --bare --no-headings --columns=_uuid find Load_Balancer name=az1_lb1)
+
+check_row_count sb:Service_Monitor 5
+
+check_row_count sb:Service_Monitor 1 logical_port=lport4_az2
+
+check ovn-nbctl set Load_Balancer $az1_lb1_uuid vip='"10.10.10.1:80"="1.1.1.1:10880,5.5.5.1:10880,3.3.3.1:10880,2.2.2.1:10880"'
+check ovn-nbctl set load_balancer az1_lb1 ip_port_mappings:2.2.2.1=lport2_az1:2.2.2.9
+
+check_row_count sb:Service_Monitor 4
+
+check_row_count sb:Service_Monitor 0 logical_port=lport4_az2
+
+check_row_count ic-sb:Service_Monitor 0 logical_port=lport4_az2
+
+ovn_as az3
+svc_lport6_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport6_az3)
+
+check ovn-sbctl set Service_Monitor $svc_lport6_uuid status="online"
+
+check_column online ic-sb:Service_Monitor status logical_port=lport6_az3
+
+ovn_as az2
+check_column online sb:Service_Monitor status logical_port=lport6_az3
+
+ovn_as az2
+AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.1; reg2[[0..15]] = 80; ct_lb_mark(backends=3.3.3.1:10880);)
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.2 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.2; reg2[[0..15]] = 80; ct_lb_mark(backends=5.5.5.1:10881,6.6.6.1:10880);)
+])
+
+ovn_as az1
+svc_lport1_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport1_az1)
+lport1_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Port_Binding logical_port=lport1_az1)
+
+check ovn-sbctl set Port_Binding $lport1_uuid up=true
+
+check ovn-sbctl set Service_Monitor $svc_lport1_uuid status="online"
+
+check_column online ic-sb:Service_Monitor status logical_port=lport1_az1
+
+ovn_as az2
+AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.1; reg2[[0..15]] = 80; ct_lb_mark(backends=3.3.3.1:10880,1.1.1.1:10880);)
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.2 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.2; reg2[[0..15]] = 80; ct_lb_mark(backends=5.5.5.1:10881,6.6.6.1:10880);)
+])
+
+ovn_as az1
+AT_CHECK([ovn-sbctl lflow-list az1_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.10.10.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 10.10.10.1; reg2[[0..15]] = 80; ct_lb_mark(backends=1.1.1.1:10880,5.5.5.1:10880,3.3.3.1:10880);)
+])
+
+check ovn-nbctl lb-del az1_lb1
+
+# Check that IC-learned records are not deleted.
+
+check_row_count sb:Service_Monitor 2
+check_row_count sb:Service_Monitor 1 logical_port=lport1_az1
+check_row_count sb:Service_Monitor 1 logical_port=lport2_az1
+
+ovn_as az2
+
+check_row_count sb:Service_Monitor 0 logical_port=lport4_az2
+
+check ovn-nbctl lb-del az2_lb2
+check_row_count sb:Service_Monitor 2
+
+ovn_as az1
+check_row_count sb:Service_Monitor 1
+
+ovn_as az3
+check_row_count sb:Service_Monitor 0
+
+ovn_as az1
+check_row_count ic-sb:Service_Monitor 1
+
+ovn_as az2
+check ovn-nbctl lb-del az2_lb1
+check_row_count sb:Service_Monitor 0
+
+ovn_as az1
+check_row_count sb:Service_Monitor 0
+check_row_count ic-sb:Service_Monitor 0
+
+OVN_CLEANUP_IC([az1], [az2], [az3])
+AT_CLEANUP
+])