This commit adds support for synchronizing load balancer health checks across availability zones interconnected via OVN-IC. Key features include:
1. Added new Service_Monitor table to ICSB database - Tracks service monitors across AZs with source/target zone info - Supports status propagation between zones 2. Implemented synchronization logic in ovn-ic: - Pushes local service monitors to remote AZs via ICSB - Pulls remote monitors into local SBDB as IC-learned - Handles status updates in both directions - Maintains consistency during configuration changes 3. Added comprehensive test cases. 4. Extended OVSDB indexes for efficient lookups. Signed-off-by: Alexandra Rukomoinikova <arukomoinikova@k2.cloud> --- v1 --> v2: 1) added test for parsing ip_port_mapping for ipv4 2) corrected comments regarding tests --- ic/ovn-ic.c | 503 ++++++++++++++++++++++++++++++++++++++++++++ ovn-ic-sb.ovsschema | 32 ++- ovn-ic-sb.xml | 56 +++++ tests/ovn-ic.at | 404 +++++++++++++++++++++++++++++++++++ tests/ovn-northd.at | 105 +++++---- 5 files changed, 1058 insertions(+), 42 deletions(-) diff --git a/ic/ovn-ic.c b/ic/ovn-ic.c index ac245a8ed..cfdbd4b40 100644 --- a/ic/ovn-ic.c +++ b/ic/ovn-ic.c @@ -70,6 +70,9 @@ struct ic_context { struct ovsdb_idl_index *nbrec_port_by_name; struct ovsdb_idl_index *sbrec_chassis_by_name; struct ovsdb_idl_index *sbrec_port_binding_by_name; + struct ovsdb_idl_index *sbrec_service_monitor_by_local_type; + struct ovsdb_idl_index *sbrec_service_monitor_by_ic_learned; + struct ovsdb_idl_index *sbrec_service_monitor_by_local_type_logical_port; struct ovsdb_idl_index *icnbrec_transit_switch_by_name; struct ovsdb_idl_index *icsbrec_port_binding_by_az; struct ovsdb_idl_index *icsbrec_port_binding_by_ts; @@ -77,6 +80,9 @@ struct ic_context { struct ovsdb_idl_index *icsbrec_route_by_az; struct ovsdb_idl_index *icsbrec_route_by_ts; struct ovsdb_idl_index *icsbrec_route_by_ts_az; + struct ovsdb_idl_index *icsbrec_service_monitor_by_source_az; + struct ovsdb_idl_index *icsbrec_service_monitor_by_target_az; + struct ovsdb_idl_index *icsbrec_service_monitor_by_target_az_logical_port; }; struct ic_state { @@ -95,6 +101,9 @@ static const char *ssl_private_key_file; static const char *ssl_certificate_file; static const char *ssl_ca_cert_file; +static const struct sbrec_port_binding * find_sb_pb_by_name( + struct ovsdb_idl_index *sbrec_port_binding_by_name, const char *name); + static void usage(void) @@ -2181,6 +2190,428 @@ route_run(struct ic_context *ctx) hmap_destroy(&ic_lrs); } +/* + * Data structures and functions related to + * synchronizing health checks for load balancers + * between availability zones. + */ +struct sync_service_monitor_data { + /* Map of service monitors to be pushed to other AZs. */ + struct hmap pushed_svcs_map; + /* Map of service monitors synced from other AZs to ours. */ + struct hmap synced_svcs_map; + /* Map of local service monitors in the ICSBDB. */ + struct hmap local_ic_svcs_map; + /* Map of local service monitors in SBDB. */ + struct hmap local_sb_svcs_map; + /* MAC address used for service monitor. */ + char *prpg_svc_monitor_mac; +}; + +struct service_monitor_info { + struct hmap_node hmap_node; + union { + const struct sbrec_service_monitor *sb_rec; + const struct icsbrec_service_monitor *ic_rec; + } db_rec; + /* Destination availability zone name. */ + char *dst_az_name; + /* Source availability zone name. */ + char *src_az_name; + /* Chassis name associated with monitor logical port. 
*/ + char *chassis_name; +}; + +static void +create_service_monitor_info(struct hmap *svc_map, + const void *db_rec, + const struct uuid *uuid, + const char *src_az_name, + const char *target_az_name, + const char *chassis_name, + bool ic_rec) +{ + struct service_monitor_info *svc_mon = xzalloc(sizeof(*svc_mon)); + size_t hash = uuid_hash(uuid); + + if (ic_rec) { + svc_mon->db_rec.ic_rec = + (const struct icsbrec_service_monitor *) db_rec; + } else { + svc_mon->db_rec.sb_rec = + (const struct sbrec_service_monitor *) db_rec; + } + + svc_mon->dst_az_name = target_az_name ? xstrdup(target_az_name) : NULL; + svc_mon->chassis_name = chassis_name ? xstrdup(chassis_name) : NULL; + svc_mon->src_az_name = xstrdup(src_az_name); + + hmap_insert(svc_map, &svc_mon->hmap_node, hash); +} + +static void +destroy_service_monitor_info(struct service_monitor_info *svc_mon) +{ + free(svc_mon->src_az_name); + free(svc_mon->dst_az_name); + free(svc_mon->chassis_name); + free(svc_mon); +} + +static void +refresh_sb_record_cache(struct hmap *svc_mon_map, + const struct sbrec_service_monitor *lookup_rec) +{ + size_t hash = uuid_hash(&lookup_rec->header_.uuid); + struct service_monitor_info *svc_mon; + + HMAP_FOR_EACH_WITH_HASH (svc_mon, hmap_node, hash, svc_mon_map) { + ovs_assert(svc_mon->db_rec.sb_rec); + if (svc_mon->db_rec.sb_rec == lookup_rec) { + hmap_remove(svc_mon_map, &svc_mon->hmap_node); + destroy_service_monitor_info(svc_mon); + return; + } + } +} + +static void +refresh_ic_record_cache(struct hmap *svc_mon_map, + const struct icsbrec_service_monitor *lookup_rec) +{ + size_t hash = uuid_hash(&lookup_rec->header_.uuid); + struct service_monitor_info *svc_mon; + + HMAP_FOR_EACH_WITH_HASH (svc_mon, hmap_node, hash, svc_mon_map) { + ovs_assert(svc_mon->db_rec.ic_rec); + if (svc_mon->db_rec.ic_rec == lookup_rec) { + hmap_remove(svc_mon_map, &svc_mon->hmap_node); + destroy_service_monitor_info(svc_mon); + return; + } + } +} + +static void +remove_unused_ic_records(struct hmap *local_ic_svcs_map) +{ + struct service_monitor_info *svc_mon; + HMAP_FOR_EACH_SAFE (svc_mon, hmap_node, local_ic_svcs_map) { + icsbrec_service_monitor_delete(svc_mon->db_rec.ic_rec); + destroy_service_monitor_info(svc_mon); + } + + hmap_destroy(local_ic_svcs_map); +} + +static void +remove_unused_sb_records(struct hmap *local_sb_svcs_map) +{ + struct service_monitor_info *svc_mon; + HMAP_FOR_EACH_SAFE (svc_mon, hmap_node, local_sb_svcs_map) { + sbrec_service_monitor_delete(svc_mon->db_rec.sb_rec); + destroy_service_monitor_info(svc_mon); + } + + hmap_destroy(local_sb_svcs_map); +} + +static void +create_pushed_svcs_mon(struct ic_context *ctx, + struct hmap *pushed_svcs_map) +{ + struct sbrec_service_monitor *key = + sbrec_service_monitor_index_init_row( + ctx->sbrec_service_monitor_by_local_type); + + sbrec_service_monitor_index_set_local(key, false); + + const struct sbrec_service_monitor *sb_rec; + SBREC_SERVICE_MONITOR_FOR_EACH_EQUAL (sb_rec, key, + ctx->sbrec_service_monitor_by_local_type) { + const char *target_az_name = smap_get_def(&sb_rec->options, + "az-name", ""); + if (!target_az_name) { + continue; + } + create_service_monitor_info(pushed_svcs_map, sb_rec, + &sb_rec->header_.uuid, + ctx->runned_az->name, target_az_name, + NULL, false); + } + + sbrec_service_monitor_index_destroy_row(key); +} + +static void +create_synced_svcs_mon(struct ic_context *ctx, + struct hmap *synced_svcs_map) +{ + struct icsbrec_service_monitor *key = + icsbrec_service_monitor_index_init_row( + ctx->icsbrec_service_monitor_by_target_az); + + 
icsbrec_service_monitor_index_set_target_availability_zone( + key, ctx->runned_az->name); + + const struct icsbrec_service_monitor *ic_rec; + ICSBREC_SERVICE_MONITOR_FOR_EACH_EQUAL (ic_rec, key, + ctx->icsbrec_service_monitor_by_target_az) { + + const struct sbrec_port_binding *pb = + find_sb_pb_by_name(ctx->sbrec_port_binding_by_name, + ic_rec->logical_port); + + if (!pb || !pb->up) { + continue; + } + + const char *chassis_name = pb->chassis ? pb->chassis->name : NULL; + create_service_monitor_info(synced_svcs_map, ic_rec, + &ic_rec->header_.uuid, + ctx->runned_az->name, + NULL, chassis_name, true); + } + + icsbrec_service_monitor_index_destroy_row(key); +} + +static void +create_local_ic_svcs_map(struct ic_context *ctx, + struct hmap *owned_svc_map) +{ + struct icsbrec_service_monitor *key = + icsbrec_service_monitor_index_init_row( + ctx->icsbrec_service_monitor_by_source_az); + + icsbrec_service_monitor_index_set_source_availability_zone( + key, ctx->runned_az->name); + + const struct icsbrec_service_monitor *ic_rec; + ICSBREC_SERVICE_MONITOR_FOR_EACH_EQUAL (ic_rec, key, + ctx->icsbrec_service_monitor_by_source_az) { + create_service_monitor_info(owned_svc_map, ic_rec, + &ic_rec->header_.uuid, + ctx->runned_az->name, NULL, + NULL, true); + } + + icsbrec_service_monitor_index_destroy_row(key); +} + +static void +create_local_sb_svcs_map(struct ic_context *ctx, + struct hmap *owned_svc_map) +{ + struct sbrec_service_monitor *key = + sbrec_service_monitor_index_init_row( + ctx->sbrec_service_monitor_by_ic_learned); + + sbrec_service_monitor_index_set_ic_learned( + key, true); + + const struct sbrec_service_monitor *sb_rec; + SBREC_SERVICE_MONITOR_FOR_EACH_EQUAL (sb_rec, key, + ctx->sbrec_service_monitor_by_ic_learned) { + create_service_monitor_info(owned_svc_map, sb_rec, + &sb_rec->header_.uuid, + ctx->runned_az->name, NULL, + NULL, false); + } + + sbrec_service_monitor_index_destroy_row(key); +} + +static const struct sbrec_service_monitor * +lookup_sb_svc_rec(struct ic_context *ctx, + const struct service_monitor_info *svc_mon) +{ + const struct icsbrec_service_monitor *db_rec = + svc_mon->db_rec.ic_rec; + struct sbrec_service_monitor *key = + sbrec_service_monitor_index_init_row( + ctx->sbrec_service_monitor_by_local_type_logical_port); + + sbrec_service_monitor_index_set_local(key, true); + sbrec_service_monitor_index_set_logical_port(key, db_rec->logical_port); + + const struct sbrec_service_monitor *sb_rec; + SBREC_SERVICE_MONITOR_FOR_EACH_EQUAL (sb_rec, key, + ctx->sbrec_service_monitor_by_local_type_logical_port) { + if (db_rec->port == sb_rec->port && + !strcmp(db_rec->ip, sb_rec->ip) && + !strcmp(db_rec->src_ip, sb_rec->src_ip) && + !strcmp(db_rec->protocol, sb_rec->protocol)) { + sbrec_service_monitor_index_destroy_row(key); + return sb_rec; + } + } + + sbrec_service_monitor_index_destroy_row(key); + + return NULL; +} + +static const struct icsbrec_service_monitor * +lookup_icsb_svc_rec(struct ic_context *ctx, + const struct service_monitor_info *svc_mon) +{ + const struct sbrec_service_monitor *db_rec = + svc_mon->db_rec.sb_rec; + struct icsbrec_service_monitor *key = + icsbrec_service_monitor_index_init_row( + ctx->icsbrec_service_monitor_by_target_az_logical_port); + + ovs_assert(svc_mon->dst_az_name); + icsbrec_service_monitor_index_set_target_availability_zone( + key, svc_mon->dst_az_name); + + icsbrec_service_monitor_index_set_logical_port( + key, db_rec->logical_port); + + const struct icsbrec_service_monitor *ic_rec; + ICSBREC_SERVICE_MONITOR_FOR_EACH_EQUAL (ic_rec, 
key, + ctx->icsbrec_service_monitor_by_target_az_logical_port) { + if (db_rec->port == ic_rec->port && + !strcmp(db_rec->ip, ic_rec->ip) && + !strcmp(db_rec->src_ip, ic_rec->src_ip) && + !strcmp(db_rec->protocol, ic_rec->protocol) && + !strcmp(db_rec->logical_port, ic_rec->logical_port)) { + icsbrec_service_monitor_index_destroy_row(key); + return ic_rec; + } + } + + icsbrec_service_monitor_index_destroy_row(key); + + return NULL; +} + +static void +create_service_monitor_data(struct ic_context *ctx, + struct sync_service_monitor_data *sync_data) +{ + const struct sbrec_sb_global *ic_sb = sbrec_sb_global_first( + ctx->ovnsb_idl); + const char *svc_monitor_mac = smap_get_def(&ic_sb->options, + "svc_monitor_mac", ""); + + if (svc_monitor_mac) { + sync_data->prpg_svc_monitor_mac = xstrdup(svc_monitor_mac); + } + + create_pushed_svcs_mon(ctx, &sync_data->pushed_svcs_map); + create_synced_svcs_mon(ctx, &sync_data->synced_svcs_map); + create_local_ic_svcs_map(ctx, &sync_data->local_ic_svcs_map); + create_local_sb_svcs_map(ctx, &sync_data->local_sb_svcs_map); +} + +static void +destroy_service_monitor_data(struct sync_service_monitor_data *sync_data) +{ + struct service_monitor_info *svc_mon; + HMAP_FOR_EACH_SAFE (svc_mon, hmap_node, &sync_data->pushed_svcs_map) { + destroy_service_monitor_info(svc_mon); + } + + HMAP_FOR_EACH_SAFE (svc_mon, hmap_node, &sync_data->synced_svcs_map) { + destroy_service_monitor_info(svc_mon); + } + + hmap_destroy(&sync_data->pushed_svcs_map); + hmap_destroy(&sync_data->synced_svcs_map); + free(sync_data->prpg_svc_monitor_mac); +} + +static void +sync_service_monitor(struct ic_context *ctx) +{ + if (!ctx->ovnisb_txn || !ctx->ovnsb_txn) { + return; + } + + struct sync_service_monitor_data sync_data; + memset(&sync_data, 0, sizeof(sync_data)); + hmap_init(&sync_data.pushed_svcs_map); + hmap_init(&sync_data.synced_svcs_map); + hmap_init(&sync_data.local_ic_svcs_map); + hmap_init(&sync_data.local_sb_svcs_map); + + create_service_monitor_data(ctx, &sync_data); + + struct service_monitor_info *svc_mon; + HMAP_FOR_EACH_SAFE (svc_mon, hmap_node, &sync_data.pushed_svcs_map) { + const struct sbrec_service_monitor *db_rec = svc_mon->db_rec.sb_rec; + const struct icsbrec_service_monitor *ic_rec = + lookup_icsb_svc_rec(ctx, svc_mon); + + if (ic_rec) { + sbrec_service_monitor_set_status(db_rec, ic_rec->status); + } else { + ic_rec = icsbrec_service_monitor_insert(ctx->ovnisb_txn); + icsbrec_service_monitor_set_ip(ic_rec, db_rec->ip); + icsbrec_service_monitor_set_port(ic_rec, db_rec->port); + icsbrec_service_monitor_set_src_ip(ic_rec, db_rec->src_ip); + icsbrec_service_monitor_set_src_mac(ic_rec, + sync_data.prpg_svc_monitor_mac); + icsbrec_service_monitor_set_protocol(ic_rec, db_rec->protocol); + icsbrec_service_monitor_set_logical_port(ic_rec, + db_rec->logical_port); + icsbrec_service_monitor_set_target_availability_zone(ic_rec, + svc_mon->dst_az_name); + icsbrec_service_monitor_set_source_availability_zone(ic_rec, + svc_mon->src_az_name); + } + + /* Always update options because they change from NB. 
*/ + icsbrec_service_monitor_set_options(ic_rec, &db_rec->options); + refresh_ic_record_cache(&sync_data.local_ic_svcs_map, ic_rec); + } + + HMAP_FOR_EACH_SAFE (svc_mon, hmap_node, &sync_data.synced_svcs_map) { + const struct icsbrec_service_monitor *db_rec = + svc_mon->db_rec.ic_rec; + const struct sbrec_service_monitor *sb_rec = + lookup_sb_svc_rec(ctx, svc_mon); + + if (sb_rec) { + icsbrec_service_monitor_set_status(svc_mon->db_rec.ic_rec, + sb_rec->status); + } else { + sb_rec = sbrec_service_monitor_insert(ctx->ovnsb_txn); + sbrec_service_monitor_set_ip(sb_rec, db_rec->ip); + sbrec_service_monitor_set_port(sb_rec, db_rec->port); + sbrec_service_monitor_set_src_ip(sb_rec, db_rec->src_ip); + /* Set svc_monitor_mac from local SBDB. */ + sbrec_service_monitor_set_src_mac(sb_rec, + sync_data.prpg_svc_monitor_mac); + sbrec_service_monitor_set_protocol(sb_rec, + db_rec->protocol); + sbrec_service_monitor_set_logical_port(sb_rec, + db_rec->logical_port); + sbrec_service_monitor_set_local(sb_rec, true); + sbrec_service_monitor_set_ic_learned(sb_rec, true); + } + + /* Always update options since they may change via + * NB configuration. Also update chassis_name if + * the port has been reassigned to a different chassis. + */ + if (svc_mon->chassis_name) { + sbrec_service_monitor_set_chassis_name(sb_rec, + svc_mon->chassis_name); + } + sbrec_service_monitor_set_options(sb_rec, &db_rec->options); + refresh_sb_record_cache(&sync_data.local_sb_svcs_map, sb_rec); + } + + /* Delete local created records that are no longer used. */ + remove_unused_ic_records(&sync_data.local_ic_svcs_map); + remove_unused_sb_records(&sync_data.local_sb_svcs_map); + + destroy_service_monitor_data(&sync_data); +} + /* * This function implements a sequence number protocol that can be used by * the INB end user to verify that ISB is synced with all the changes that @@ -2273,6 +2704,7 @@ ovn_db_run(struct ic_context *ctx) ts_run(ctx); port_binding_run(ctx); route_run(ctx); + sync_service_monitor(ctx); } static void @@ -2569,6 +3001,9 @@ main(int argc, char *argv[]) struct ovsdb_idl_loop ovnsb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER( ovsdb_idl_create(ovnsb_db, &sbrec_idl_class, false, true)); + ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_sb_global); + ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_sb_global_col_options); + ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_chassis); ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_encaps); ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_name); @@ -2599,6 +3034,35 @@ main(int argc, char *argv[]) &sbrec_port_binding_col_external_ids); ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_port_binding_col_chassis); + ovsdb_idl_add_column(ovnsb_idl_loop.idl, + &sbrec_port_binding_col_up); + + ovsdb_idl_add_table(ovnsb_idl_loop.idl, + &sbrec_table_service_monitor); + ovsdb_idl_add_column(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_chassis_name); + ovsdb_idl_add_column(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_external_ids); + ovsdb_idl_add_column(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_ip); + ovsdb_idl_add_column(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_logical_port); + ovsdb_idl_add_column(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_port); + ovsdb_idl_add_column(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_protocol); + ovsdb_idl_add_column(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_src_ip); + ovsdb_idl_add_column(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_src_mac); + 
ovsdb_idl_add_column(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_local); + ovsdb_idl_add_column(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_ic_learned); + ovsdb_idl_add_column(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_status); + ovsdb_idl_add_column(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_options); /* Create IDL indexes */ struct ovsdb_idl_index *nbrec_ls_by_name @@ -2617,6 +3081,19 @@ main(int argc, char *argv[]) = ovsdb_idl_index_create1(ovnsb_idl_loop.idl, &sbrec_chassis_col_name); + struct ovsdb_idl_index *sbrec_service_monitor_by_local_type + = ovsdb_idl_index_create1(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_local); + + struct ovsdb_idl_index *sbrec_service_monitor_by_ic_learned + = ovsdb_idl_index_create1(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_ic_learned); + + struct ovsdb_idl_index *sbrec_service_monitor_by_local_type_logical_port + = ovsdb_idl_index_create2(ovnsb_idl_loop.idl, + &sbrec_service_monitor_col_local, + &sbrec_service_monitor_col_logical_port); + struct ovsdb_idl_index *icnbrec_transit_switch_by_name = ovsdb_idl_index_create1(ovninb_idl_loop.idl, &icnbrec_transit_switch_col_name); @@ -2647,6 +3124,19 @@ main(int argc, char *argv[]) &icsbrec_route_col_transit_switch, &icsbrec_route_col_availability_zone); + struct ovsdb_idl_index *icsbrec_service_monitor_by_source_az + = ovsdb_idl_index_create1(ovnisb_idl_loop.idl, + &icsbrec_service_monitor_col_source_availability_zone); + + struct ovsdb_idl_index *icsbrec_service_monitor_by_target_az + = ovsdb_idl_index_create1(ovnisb_idl_loop.idl, + &icsbrec_service_monitor_col_target_availability_zone); + + struct ovsdb_idl_index *icsbrec_service_monitor_by_target_az_logical_port + = ovsdb_idl_index_create2(ovnisb_idl_loop.idl, + &icsbrec_service_monitor_col_target_availability_zone, + &icsbrec_service_monitor_col_logical_port); + unixctl_command_register("nb-connection-status", "", 0, 0, ovn_conn_show, ovnnb_idl_loop.idl); unixctl_command_register("sb-connection-status", "", 0, 0, @@ -2698,6 +3188,12 @@ main(int argc, char *argv[]) .nbrec_port_by_name = nbrec_port_by_name, .sbrec_port_binding_by_name = sbrec_port_binding_by_name, .sbrec_chassis_by_name = sbrec_chassis_by_name, + .sbrec_service_monitor_by_local_type = + sbrec_service_monitor_by_local_type, + .sbrec_service_monitor_by_ic_learned = + sbrec_service_monitor_by_ic_learned, + .sbrec_service_monitor_by_local_type_logical_port = + sbrec_service_monitor_by_local_type_logical_port, .icnbrec_transit_switch_by_name = icnbrec_transit_switch_by_name, .icsbrec_port_binding_by_az = icsbrec_port_binding_by_az, @@ -2706,6 +3202,12 @@ main(int argc, char *argv[]) .icsbrec_route_by_az = icsbrec_route_by_az, .icsbrec_route_by_ts = icsbrec_route_by_ts, .icsbrec_route_by_ts_az = icsbrec_route_by_ts_az, + .icsbrec_service_monitor_by_source_az = + icsbrec_service_monitor_by_source_az, + .icsbrec_service_monitor_by_target_az = + icsbrec_service_monitor_by_target_az, + .icsbrec_service_monitor_by_target_az_logical_port = + icsbrec_service_monitor_by_target_az_logical_port, }; if (!state.had_lock && ovsdb_idl_has_lock(ovnsb_idl_loop.idl)) { @@ -2731,6 +3233,7 @@ main(int argc, char *argv[]) ovn_db_run(&ctx); update_sequence_numbers(&ctx, &ovnisb_idl_loop); } + } int rc1 = ovsdb_idl_loop_commit_and_wait(&ovnnb_idl_loop); diff --git a/ovn-ic-sb.ovsschema b/ovn-ic-sb.ovsschema index 22f75b06c..34b5457bb 100644 --- a/ovn-ic-sb.ovsschema +++ b/ovn-ic-sb.ovsschema @@ -1,7 +1,7 @@ { "name": "OVN_IC_Southbound", - "version": "2.1.0", - "cksum": "1466425967 
7126", + "version": "2.2.0", + "cksum": "2294868959 8438", "tables": { "IC_SB_Global": { "columns": { @@ -145,6 +145,32 @@ "value": "string", "min": 0, "max": "unlimited"}}}, - "maxRows": 1} + "maxRows": 1}, + "Service_Monitor": { + "columns": { + "ip": {"type": "string"}, + "protocol": { + "type": {"key": {"type": "string", + "enum": ["set", ["tcp", "udp"]]}, + "min": 0, "max": 1}}, + "port": {"type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 65535}}}, + "logical_port": {"type": "string"}, + "src_ip": {"type": "string"}, + "src_mac": {"type": "string"}, + "status": { + "type": {"key": {"type": "string", + "enum": ["set", ["online", "offline", "error"]]}, + "min": 0, "max": 1}}, + "target_availability_zone": {"type": "string"}, + "source_availability_zone": {"type": "string"}, + "options": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "isRoot": true} } } diff --git a/ovn-ic-sb.xml b/ovn-ic-sb.xml index 8f98c7ebb..35dc1f509 100644 --- a/ovn-ic-sb.xml +++ b/ovn-ic-sb.xml @@ -691,4 +691,60 @@ </group> </table> + <table name="Service_Monitor"> + <column name="ip"> + IP of the service to be monitored. Copy from SBDB record. + </column> + + <column name="protocol"> + The protocol of the service. + Copy from source Southbound Database record. + </column> + + <column name="port"> + The TCP or UDP port of the service. + Copy from source Southbound Database record. + </column> + + <column name="logical_port"> + The VIF of the logical port on which the service is running. The + <code>ovn-controller</code> that binds this <code>logical_port</code> + monitors the service by sending periodic monitor packets. + Copy from source Southbound Database record. + </column> + + <column name="src_ip"> + Source IPv4 or IPv6 address to use in the service monitor packet. + Copy from source Southbound Database record. + </column> + + <column name="src_mac"> + Source Ethernet address to use in the service monitor packet. + Copy from Southbound Database record. + </column> + + <column name="status"> + The health status of the service monitor, synchronized from target + Southbound Database. + </column> + + <column name="target_availability_zone"> + The Availability Zone where the monitored service endpoint is located. + </column> + + <column name="source_availability_zone"> + Availability Zone that initiated this monitor entry in ICSB and retains + ownership for lifecycle management. + </column> + + <column name="options"> + Same as in Table Service_Monitor in SBDB. + Copy from source SBDB record. + </column> + + <column name="external_ids"> + Same as in Table Service_Monitor in SBDB. + Copy from source SBDB record. 
+ </column> + </table> </database> diff --git a/tests/ovn-ic.at b/tests/ovn-ic.at index 49a409015..74ea3bad2 100644 --- a/tests/ovn-ic.at +++ b/tests/ovn-ic.at @@ -3643,3 +3643,407 @@ OVS_WAIT_FOR_OUTPUT([ovn_as az1 ovn-nbctl lr-route-list lr11 | grep 2001 | OVN_CLEANUP_IC([az1], [az2]) AT_CLEANUP ]) + +AT_BANNER([OVN Interconnection Service Monitor synchronization]) +OVN_FOR_EACH_NORTHD([ +AT_SETUP([ovn-ic -- Service Monitor synchronization: mock_test]) + +ovn_init_ic_db +ovn_start az1 +ovn_start az2 +ovn_start az3 + +#┌─────────────────────────────────────────────────────────────────────────────┐ +#│ Service Monitor Sync Check (Cross-AZ LB Setup) │ +#├───────────────┬───────────────┬─────────────────────────────────────────────┤ +#│ AZ1 │ AZ2 │ AZ3 (Backends only) │ +#├───────────────┼───────────────┼─────────────────────────────────────────────┤ +#│ ┌─────────┐ │ ┌─────────┐ │ ┌─────────┐ ┌─────────┐ ┌─────────┐ │ +#│ │ LB1 │ │ │ LB2 │ │ │ lport5 │ │ lport6 │ │ lport7 │ │ +#│ ├─────────┤ │ ├─────────┤ │ │ (AZ3) │ │ (AZ3) │ │ (AZ3) │ │ +#│ │ - lport1├──┼──┤ - lport3│ │ └─────────┘ └─────────┘ └─────────┘ │ +#│ │ (AZ1) │ │ │ (AZ2) │ │ │ +#│ │ - lport3├──┼──┤ - lport5│ │ │ +#│ │ (AZ2) │ │ │ (AZ3) │ │ │ +#│ │ - lport5│ │ └─────────┘ │ │ +#│ │ (AZ3) │ │ │ │ +#│ │ - lport4│ │ ┌─────────┐ │ │ +#│ │ (AZ2) │ │ │ LB3 │ │ │ +#│ └─────────┘ │ ├─────────┤ │ │ +#│ │ │ - lport5├──┼───────────────────┐ │ +#│ │ │ (AZ3) │ │ │ │ +#│ │ │ - lport6│ │ │ │ +#│ │ │ (AZ3) │ │ │ │ +#│ │ │ - lport2├──┘ │ │ +#│ │ │ (AZ1) │ │ │ +#│ │ └─────────┘ │ │ +#│ │ │ │ +#│ │ │ │ +#│ └───────────────────────────────────┘ │ +#└───────────────┴───────────────┴─────────────────────────────────────────────┘ +# +# Physical Backend Locations: +# - AZ1: lport1, lport2 +# - AZ2: lport3, lport4 +# - AZ3: lport5, lport6 , lport7 +# +# Load Balancer Configurations: +# - LB1 (AZ1): lport1(AZ1), lport3(AZ2), lport5(AZ3), lport4(az2) +# - LB2 (AZ2): lport3(AZ2), lport1(AZ3) +# - LB3 (AZ2): lport5(AZ3), lport6(AZ3), lport2(AZ1) + + +# AZ1 Configuration +ovn_as az1 +# change the MAC address for more convenient search of logical flows +check ovn-nbctl set NB_Global . options:svc_monitor_mac="11:11:11:11:11:11" + +check ovn-nbctl ls-add az1_ls1 +check ovn-nbctl lsp-add az1_ls1 lport1_az1 +check ovn-nbctl lsp-add az1_ls1 lport2_az1 + +check ovn-nbctl lb-add az1_lb1 10.10.10.1:80 1.1.1.1:10880,5.5.5.1:10880,3.3.3.1:10880,4.4.4.1:10880 +check ovn-nbctl ls-lb-add az1_ls1 az1_lb1 +AT_CHECK([ovn-nbctl --wait=sb \ + -- --id=@hc create Load_Balancer_Health_Check vip="10.10.10.1\:80" \ + options:failure_count=100 \ + -- add Load_Balancer . health_check @hc | uuidfilt], [0], [<0> +]) + +# We will leave one backend without a service monitor since it is not expected to participate in load balancing. +check ovn-nbctl set load_balancer az1_lb1 ip_port_mappings:1.1.1.1=lport1_az1:1.1.1.9 +check ovn-nbctl set load_balancer az1_lb1 ip_port_mappings:3.3.3.1=lport3_az2:3.3.3.9:az2 +check ovn-nbctl set load_balancer az1_lb1 ip_port_mappings:5.5.5.1=lport5_az3:5.5.5.9:az3 + +# Сheck that all remote and local service monitors have been created. 
+check_row_count sb:Service_Monitor 3 + +check_column "3.3.3.1" sb:Service_Monitor ip logical_port=lport3_az2 +check_column 10880 sb:Service_Monitor port logical_port=lport3_az2 +check_column tcp sb:Service_Monitor protocol logical_port=lport3_az2 +check_column "3.3.3.9" sb:Service_Monitor src_ip logical_port=lport3_az2 +check_column false sb:Service_Monitor ic_learned logical_port=lport3_az2 +check_column false sb:Service_Monitor local logical_port=lport3_az2 + +check_column "1.1.1.1" sb:Service_Monitor ip logical_port=lport1_az1 +check_column 10880 sb:Service_Monitor port logical_port=lport1_az1 +check_column tcp sb:Service_Monitor protocol logical_port=lport1_az1 +check_column "1.1.1.9" sb:Service_Monitor src_ip logical_port=lport1_az1 +check_column false sb:Service_Monitor ic_learned logical_port=lport1_az1 +check_column true sb:Service_Monitor local logical_port=lport1_az1 + +check_column "5.5.5.1" sb:Service_Monitor ip logical_port=lport5_az3 +check_column 10880 sb:Service_Monitor port logical_port=lport5_az3 +check_column tcp sb:Service_Monitor protocol logical_port=lport5_az3 +check_column "5.5.5.9" sb:Service_Monitor src_ip logical_port=lport5_az3 +check_column false sb:Service_Monitor ic_learned logical_port=lport5_az3 +check_column false sb:Service_Monitor local logical_port=lport5_az3 + +check_row_count ic-sb:Service_Monitor 2 + +check_column "5.5.5.1" ic-sb:Service_Monitor ip logical_port=lport5_az3 +check_column 10880 ic-sb:Service_Monitor port logical_port=lport5_az3 +check_column tcp ic-sb:Service_Monitor protocol logical_port=lport5_az3 +check_column "5.5.5.9" ic-sb:Service_Monitor src_ip logical_port=lport5_az3 +check_column az3 ic-sb:Service_Monitor target_availability_zone logical_port=lport5_az3 +check_column az1 ic-sb:Service_Monitor source_availability_zone logical_port=lport5_az3 + +check_column az2 ic-sb:Service_Monitor target_availability_zone logical_port=lport3_az2 +check_column az1 ic-sb:Service_Monitor source_availability_zone logical_port=lport3_az2 + +# Check that logical flows are created only for local service monitors. +AT_CHECK([ovn-sbctl lflow-list az1_ls1 | grep 'ls_in_arp_rsp'| ovn_strip_lflows], [0], [dnl + table=??(ls_in_arp_rsp ), priority=0 , match=(1), action=(next;) + table=??(ls_in_arp_rsp ), priority=110 , match=(arp.tpa == 1.1.1.9 && arp.op == 1), action=(eth.dst = eth.src; eth.src = 11:11:11:11:11:11; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = 11:11:11:11:11:11; arp.tpa = arp.spa; arp.spa = 1.1.1.9; outport = inport; flags.loopback = 1; output;) +]) + +# AZ2 Configuration +ovn_as az2 +# change the MAC address for more convenient search of logical flows +check ovn-nbctl set NB_Global . options:svc_monitor_mac="12:12:12:12:12:12" + +check ovn-nbctl ls-add az2_ls1 +check ovn-nbctl lsp-add az2_ls1 lport3_az2 +check ovn-nbctl lsp-add az2_ls1 lport4_az2 + +check ovn-nbctl lb-add az2_lb1 20.20.20.1:80 3.3.3.1:10880,1.1.1.1:10880 +check ovn-nbctl ls-lb-add az2_ls1 az2_lb1 +AT_CHECK([ovn-nbctl --wait=sb \ + -- --id=@hc create Load_Balancer_Health_Check vip="20.20.20.1\:80" \ + options:failure_count=100 \ + -- add Load_Balancer . 
health_check @hc | uuidfilt], [0], [<0> +]) + +# Create a cross-cutting backend with a balancer in the first availability zone to check that everything works correctly +check ovn-nbctl set load_balancer az2_lb1 ip_port_mappings:3.3.3.1=lport3_az2:3.3.3.9 +check ovn-nbctl set load_balancer az2_lb1 ip_port_mappings:1.1.1.1=lport1_az1:1.1.1.9:az1 + +check_row_count sb:Service_Monitor 2 +check_row_count ic-sb:Service_Monitor 3 + +ovn_as az1 +check_row_count sb:Service_Monitor 3 + +# Check that the local backend, which intersects with the one created in another availability zone, remains local +check_column false sb:Service_Monitor ic_learned logical_port=lport1_az1 +check_column true sb:Service_Monitor local logical_port=lport1_az1 + +ovn_as az2 +check_column false sb:Service_Monitor ic_learned logical_port=lport1_az1 +check_column false sb:Service_Monitor local logical_port=lport1_az1 + +# Сheck that all remote and local service monitors have been created. +check_column az2 ic-sb:Service_Monitor target_availability_zone logical_port=lport3_az2 +check_column az1 ic-sb:Service_Monitor source_availability_zone logical_port=lport3_az2 + +check_column az1 ic-sb:Service_Monitor target_availability_zone logical_port=lport1_az1 +check_column az2 ic-sb:Service_Monitor source_availability_zone logical_port=lport1_az1 + +check_column az3 ic-sb:Service_Monitor target_availability_zone logical_port=lport5_az3 +check_column az1 ic-sb:Service_Monitor source_availability_zone logical_port=lport5_az3 + +# AZ3 Configuration +ovn_as az3 +check ovn-nbctl set NB_Global . options:svc_monitor_mac="13:13:13:13:13:13" +check ovn-nbctl ls-add az3_ls1 + +# Check there no Service_Monitor when we have no ports. +check_row_count sb:Service_Monitor 0 +check_row_count nb:Load_Balancer 0 + +check ovn-nbctl lsp-add az3_ls1 lport5_az3 +check ovn-nbctl lsp-add az3_ls1 lport6_az3 +check ovn-nbctl lsp-add az3_ls1 lport7_az3 + +# Check there is one Service Monitor learned from az1. +check_row_count sb:Service_Monitor 1 + +check_column "5.5.5.1" sb:Service_Monitor ip logical_port=lport5_az3 +check_column 10880 sb:Service_Monitor port logical_port=lport5_az3 +check_column tcp sb:Service_Monitor protocol logical_port=lport5_az3 +check_column "5.5.5.9" sb:Service_Monitor src_ip logical_port=lport5_az3 +check_column true sb:Service_Monitor ic_learned logical_port=lport5_az3 +check_column true sb:Service_Monitor local logical_port=lport5_az3 + +# Check that logical flows are created for ic_learned service monitors when we have no load balancers. 
+AT_CHECK([ovn-sbctl lflow-list az3_ls1 | grep 'ls_in_arp_rsp' | ovn_strip_lflows], [0], [dnl + table=??(ls_in_arp_rsp ), priority=0 , match=(1), action=(next;) + table=??(ls_in_arp_rsp ), priority=110 , match=(arp.tpa == 5.5.5.9 && arp.op == 1), action=(eth.dst = eth.src; eth.src = 13:13:13:13:13:13; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = 13:13:13:13:13:13; arp.tpa = arp.spa; arp.spa = 5.5.5.9; outport = inport; flags.loopback = 1; output;) +]) + +# Add more lb in AZ2, change port for one backend, check we will have one more +# records for this Service_Monitor +ovn_as az2 +check ovn-nbctl lb-add az2_lb2 20.20.20.2:80 2.2.2.1:10880,5.5.5.1:10880,6.6.6.1:10880 +check ovn-nbctl ls-lb-add az2_ls1 az2_lb2 +AT_CHECK([ovn-nbctl --wait=sb \ + -- --id=@hc create Load_Balancer_Health_Check vip="20.20.20.2\:80" \ + options:failure_count=100 \ + -- add Load_Balancer az2_lb2 health_check @hc | uuidfilt], [0], [<0> +]) + +# We will leave one backend without a service monitor since it is not expected to participate in load balancing. +check ovn-nbctl set load_balancer az2_lb2 ip_port_mappings:2.2.2.1=lport2_az1:2.2.2.9:az1 +check ovn-nbctl set load_balancer az2_lb2 ip_port_mappings:6.6.6.1=lport6_az3:6.6.6.9:az3 + +ovn_as az1 +check_row_count sb:Service_Monitor 4 +check_column true sb:Service_Monitor ic_learned logical_port=lport2_az1 +check_column true sb:Service_Monitor local logical_port=lport2_az1 + +ovn_as az2 +check_row_count sb:Service_Monitor 4 + +AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep 'ls_in_arp_rsp'| ovn_strip_lflows], [0], [dnl + table=??(ls_in_arp_rsp ), priority=0 , match=(1), action=(next;) + table=??(ls_in_arp_rsp ), priority=110 , match=(arp.tpa == 3.3.3.9 && arp.op == 1), action=(eth.dst = eth.src; eth.src = 12:12:12:12:12:12; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = 12:12:12:12:12:12; arp.tpa = arp.spa; arp.spa = 3.3.3.9; outport = inport; flags.loopback = 1; output;) +]) + +ovn_as az3 +check_row_count sb:Service_Monitor 2 +AT_CHECK([ovn-sbctl lflow-list az3_ls1 | grep 'ls_in_arp_rsp' | ovn_strip_lflows], [0], [dnl + table=??(ls_in_arp_rsp ), priority=0 , match=(1), action=(next;) + table=??(ls_in_arp_rsp ), priority=110 , match=(arp.tpa == 5.5.5.9 && arp.op == 1), action=(eth.dst = eth.src; eth.src = 13:13:13:13:13:13; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = 13:13:13:13:13:13; arp.tpa = arp.spa; arp.spa = 5.5.5.9; outport = inport; flags.loopback = 1; output;) + table=??(ls_in_arp_rsp ), priority=110 , match=(arp.tpa == 6.6.6.9 && arp.op == 1), action=(eth.dst = eth.src; eth.src = 13:13:13:13:13:13; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = 13:13:13:13:13:13; arp.tpa = arp.spa; arp.spa = 6.6.6.9; outport = inport; flags.loopback = 1; output;) +]) + +# Check status propogation: since this is a mock test, the statuses will be changed +# manually, waiting for the desired behavior of the code +# az1: az1_lb1 10.10.10.1:80 1.1.1.1:10880,5.5.5.1:10880,3.3.3.1:10880,4.4.4.1:10880 +# (4.4.4.1 - does not have a service monitor) +# az2: az2_lb1 20.20.20.1:80 3.3.3.1:10880,1.1.1.1:10880 +# az2_lb2 20.20.20.2:80 2.2.2.1:10880,5.5.5.1:10880,6.6.6.1:10880 +# (5.5.5.1 - does not have a service monitor) + +# Check that if the service monitors are not initialized, balancing is performed on all but the 4th +ovn_as az1 +AT_CHECK([ovn-sbctl lflow-list az1_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl + table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.10.10.1 && reg1[[16..23]] == 6 && 
reg1[[0..15]] == 80), action=(reg4 = 10.10.10.1; reg2[[0..15]] = 80; ct_lb_mark(backends=1.1.1.1:10880,5.5.5.1:10880,3.3.3.1:10880);) +]) + +# Check that if the service monitors are not initialized, balancing is performed on all but the 5th +ovn_as az2 +AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl + table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.1; reg2[[0..15]] = 80; ct_lb_mark(backends=3.3.3.1:10880,1.1.1.1:10880);) + table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.2 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.2; reg2[[0..15]] = 80; ct_lb_mark(backends=2.2.2.1:10880,6.6.6.1:10880);) +]) + +# Change statuses +ovn_as az1 +svc_lport1_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport1_az1) +check ovn-sbctl set Service_Monitor $svc_lport1_uuid status="offline" +check_column offline ic-sb:Service_Monitor status logical_port=lport1_az1 + +# Since port1 is a backend in another availability zone, we check that the status has also been updated there +ovn_as az1 +AT_CHECK([ovn-sbctl lflow-list az1_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl + table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.10.10.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 10.10.10.1; reg2[[0..15]] = 80; ct_lb_mark(backends=5.5.5.1:10880,3.3.3.1:10880);) +]) + +# Check the status in the interconet db. +check_column offline ic-sb:Service_Monitor status logical_port=lport1_az1 + +ovn_as az2 +check_column offline sb:Service_Monitor status logical_port=lport1_az1 +AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl + table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.1; reg2[[0..15]] = 80; ct_lb_mark(backends=3.3.3.1:10880);) + table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.2 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.2; reg2[[0..15]] = 80; ct_lb_mark(backends=2.2.2.1:10880,6.6.6.1:10880);) +]) + +# Change all statuses to offline to check online propogation. 
+ovn_as az3 +svc_lport6_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport6_az3) +check ovn-sbctl set Service_Monitor $svc_lport6_uuid status="offline" +check_column offline ic-sb:Service_Monitor status logical_port=lport6_az3 + +ovn_as az2 +check_column offline sb:Service_Monitor status logical_port=lport6_az3 + +ovn_as az2 +ovn-sbctl list service_m + +svc_lport3_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport3_az2) +check ovn-sbctl set Service_Monitor $svc_lport3_uuid status="offline" +check_column offline sb:Service_Monitor status logical_port=lport3_az2 +check_column offline ic-sb:Service_Monitor status logical_port=lport3_az2 + +ovn_as az2 +check_column offline sb:Service_Monitor status logical_port=lport3_az2 + +ovn_as az1 +svc_lport2_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport2_az1) +check ovn-sbctl set Service_Monitor $svc_lport2_uuid status="offline" +check_column offline sb:Service_Monitor status logical_port=lport2_az1 +check_column offline ic-sb:Service_Monitor status logical_port=lport2_az1 + +ovn_as az3 +svc_lport5_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport5_az3) +check ovn-sbctl set Service_Monitor $svc_lport5_uuid status="offline" + +check_column offline sb:Service_Monitor status logical_port=lport5_az3 +check_column offline ic-sb:Service_Monitor status logical_port=lport5_az3 + +ovn_as az1 +check_column offline sb:Service_Monitor status logical_port=lport5_az3 + +ovn_as az1 +AT_CHECK([ovn-sbctl lflow-list az1_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl]) + +ovn_as az2 +AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl]) + +ovn_as az1 +# Change one backend and check that it is correctly removed from the databases +az1_lb1_uuid=$(ovn-nbctl --bare --no-headings --columns=_uuid find Load_Balancer name=az1_lb1) + +check_row_count sb:Service_Monitor 1 logical_port=lport3_az2 +check ovn-nbctl set Load_Balancer $az1_lb1_uuid vip='"10.10.10.1:80"="1.1.1.1:10880,5.5.5.1:10880,2.2.2.1:10880"' +check ovn-nbctl set load_balancer az1_lb1 ip_port_mappings:2.2.2.1=lport2_az1:2.2.2.9 +check_row_count sb:Service_Monitor 0 logical_port=lport3_az2 + +# Check deletion from ICSB. +check_row_count ic-sb:Service_Monitor 0 logical_port=lport3_az2 + +ovn_as az3 +svc_lport6_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport6_az3) +check ovn-sbctl set Service_Monitor $svc_lport6_uuid status="online" +check_column online ic-sb:Service_Monitor status logical_port=lport6_az3 + +ovn_as az2 +check_column online sb:Service_Monitor status logical_port=lport6_az3 +check_row_count sb:Service_Monitor 1 logical_port=lport3_az2 +check_column offline sb:Service_Monitor status logical_port=lport3_az2 + +# Need to check that the 3.3.3.1 backend retained its previous status +# and after deleting the record from the interconnect database - the old +# information remained in sbdb of the target availability zone. +AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl + table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.2 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.2; reg2[[0..15]] = 80; ct_lb_mark(backends=6.6.6.1:10880);) +]) + +ovn_as az1 +# set other statuses to online state. 
+svc_lport1_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport1_az1) +lport1_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Port_Binding logical_port=lport1_az1) +check ovn-sbctl set Port_Binding $lport1_uuid up=true +check ovn-sbctl set Service_Monitor $svc_lport1_uuid status="online" +check_column online ic-sb:Service_Monitor status logical_port=lport1_az1 + +svc_lport2_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport2_az1) +lport2_uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Port_Binding logical_port=lport2_az1) +check ovn-sbctl set Port_Binding $lport2_uuid up=true +check ovn-sbctl set Service_Monitor $svc_lport2_uuid status="online" +check_column online ic-sb:Service_Monitor status logical_port=lport2_az1 + +ovn_as az1 +AT_CHECK([ovn-sbctl lflow-list az1_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl + table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.10.10.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 10.10.10.1; reg2[[0..15]] = 80; ct_lb_mark(backends=1.1.1.1:10880,2.2.2.1:10880);) +]) + +ovn_as az2 +AT_CHECK([ovn-sbctl lflow-list az2_ls1 | grep ls_in_lb | grep backends | ovn_strip_lflows], [0], [dnl + table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.1 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.1; reg2[[0..15]] = 80; ct_lb_mark(backends=1.1.1.1:10880);) + table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 20.20.20.2 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80), action=(reg4 = 20.20.20.2; reg2[[0..15]] = 80; ct_lb_mark(backends=2.2.2.1:10880,6.6.6.1:10880);) +]) + +# Check deletion. +ovn_as az1 +check ovn-nbctl lb-del az1_lb1 +# az1_lb1 had : 1.1.1.1:10880,5.5.5.1:10880,2.2.2.1:10880,4.4.4.1:10880 backends +# (4.4.4.1 - no hc, 5.5.5.1 - no hc) +# az2: az2_lb1 20.20.20.1:80 3.3.3.1:10880,1.1.1.1:10880 +# az2_lb2 20.20.20.2:80 2.2.2.1:10880,5.5.5.1:10880,6.6.6.1:10880 + +# Despite the balancer being removed, records for lport1 and lport2 should no +# be removed as they participate in balancing in other availability zones. +check_row_count sb:Service_Monitor 2 +check_row_count sb:Service_Monitor 1 logical_port=lport1_az1 +check_row_count sb:Service_Monitor 1 logical_port=lport2_az1 +check_row_count ic-sb:Service_Monitor 3 + +ovn_as az2 +# Need to check that there is no service_monitor left in az3, +# in az1 there will be one service monitors for 1.1.1.1 (used by az2_lb1) +# and in az2 there will be two service monitor for 3.3.3.1 and 1.1.1.1 +check ovn-nbctl lb-del az2_lb2 + +ovn_as az1 +# Check that the ic_learned records have been deleted +check_row_count sb:Service_Monitor 1 +check_row_count ic-sb:Service_Monitor 1 + +ovn_as az2 +check_row_count sb:Service_Monitor 2 + +ovn_as az3 +check_row_count sb:Service_Monitor 0 + +ovn_as az2 +check ovn-nbctl lb-del az2_lb1 +check_row_count sb:Service_Monitor 0 +check_row_count ic-sb:Service_Monitor 0 + +OVN_CLEANUP_IC([az1], [az2], [az3]) +AT_CLEANUP +]) + diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at index 39e674dff..9146c3752 100644 --- a/tests/ovn-northd.at +++ b/tests/ovn-northd.at @@ -17752,54 +17752,81 @@ AT_CLEANUP ]) OVN_FOR_EACH_NORTHD_NO_HV([ -AT_SETUP([Synced logical switch and router incremental procesesing]) +AT_SETUP([ip_port_mappings validation]) ovn_start -# datapath_synced_logical_switch and datapath_synced_logical_router -# should only recompute if datapath_sync has to recompute. 
Therefore, -# andy situation where datapath_sync can run incrementally, the -# synced datapath nodes should also run incrementally. +# ip_port_mappings syntax: ip:lport_name:src_ip:<az_name>(for remote lports) -check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats -check ovn-nbctl --wait=sb ls-add sw0 -check_engine_stats datapath_synced_logical_switch norecompute compute -check_engine_stats datapath_synced_logical_router norecompute compute -CHECK_NO_CHANGE_AFTER_RECOMPUTE +check ovn-nbctl ls-add ls1 -check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats -check ovn-nbctl --wait=sb set logical_switch sw0 other_config:fdb_age_threshold=5 -check_engine_stats datapath_synced_logical_switch norecompute compute -check_engine_stats datapath_synced_logical_router norecompute compute -CHECK_NO_CHANGE_AFTER_RECOMPUTE +check ovn-nbctl lb-add lb1_ipv4 1.1.1.1:80 192.168.0.1:10880,192.168.0.2:10880,192.168.0.3:10880 +AT_CHECK([ovn-nbctl --wait=sb \ + -- --id=@hc create Load_Balancer_Health_Check vip="1.1.1.1\:80" \ + options:failure_count=100 \ + -- add Load_Balancer lb1_ipv4 health_check @hc | uuidfilt], [0], [<0> +]) -check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats -check ovn-nbctl --wait=sb set logical_switch sw0 other_config:requested-tnl-key=123 -check_engine_stats datapath_synced_logical_switch norecompute compute -check_engine_stats datapath_synced_logical_router norecompute compute -CHECK_NO_CHANGE_AFTER_RECOMPUTE +check ovn-nbctl ls-lb-add ls1 lb1_ipv4 +check ovn-nbctl lsp-add ls1 lport1 +check ovn-nbctl lsp-add ls1 sw0-p1 -check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats -check ovn-nbctl --wait=sb ls-del sw0 -check_engine_stats datapath_synced_logical_switch norecompute compute -check_engine_stats datapath_synced_logical_router norecompute compute -CHECK_NO_CHANGE_AFTER_RECOMPUTE +check ovn-nbctl set load_balancer lb1_ipv4 ip_port_mappings:192.168.0.1=lport1:192.168.0.99 -check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats -check ovn-nbctl --wait=sb lr-add lr0 -check_engine_stats datapath_synced_logical_switch norecompute compute -check_engine_stats datapath_synced_logical_router norecompute compute -CHECK_NO_CHANGE_AFTER_RECOMPUTE +# Check that the service monitor was created correctly. +check_row_count sb:Service_Monitor 1 +check_column "192.168.0.1" sb:Service_Monitor ip logical_port=lport1 +check_column 10880 sb:Service_Monitor port logical_port=lport1 +check_column tcp sb:Service_Monitor protocol logical_port=lport1 +check_column "192.168.0.99" sb:Service_Monitor src_ip logical_port=lport1 +check_column false sb:Service_Monitor ic_learned logical_port=lport1 +check_column true sb:Service_Monitor local logical_port=lport1 -check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats -check ovn-nbctl --wait=sb set Logical_Router lr0 options:ct-zone-limit=10 -check_engine_stats datapath_synced_logical_switch norecompute compute -check_engine_stats datapath_synced_logical_router norecompute compute +# Empty src_ip. 
+check ovn-nbctl clear load_balancer lb1_ipv4 ip_port_mappings +check ovn-nbctl set load_balancer lb1_ipv4 ip_port_mappings:192.168.0.1=lport1: +OVS_WAIT_UNTIL([grep "Invalid svc mon src IP" northd/ovn-northd.log]) +check_row_count sb:Service_Monitor 0 +echo > northd/ovn-northd.log -check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats -check ovn-nbctl --wait=sb lr-del lr0 -check_engine_stats datapath_synced_logical_switch norecompute compute -check_engine_stats datapath_synced_logical_router norecompute compute -CHECK_NO_CHANGE_AFTER_RECOMPUTE +# Uncorrect ip_address. +check ovn-nbctl set load_balancer lb1_ipv4 ip_port_mappings:invalid=lport2_az1:2.2.2.9 +OVS_WAIT_UNTIL([grep "bad IP address" northd/ovn-northd.log]) +echo > northd/ovn-northd.log + +check ovn-nbctl set load_balancer lb1_ipv4 ip_port_mappings:2.2.2.1=lport2_az1:invalid +OVS_WAIT_UNTIL([grep "bad IP address" northd/ovn-northd.log]) +echo > northd/ovn-northd.log + +check ovn-nbctl set load_balancer lb1_ipv4 ip_port_mappings:2.2.2.1=:2.2.2.9 +OVS_WAIT_UNTIL([grep "bad IP address" northd/ovn-northd.log]) +echo > northd/ovn-northd.log + +check ovn-nbctl set load_balancer lb1_ipv4 ip_port_mappings:192.168.0.1=lport1:192.168.0.99:az_name +check_row_count sb:Service_Monitor 1 +check_column "192.168.0.1" sb:Service_Monitor ip logical_port=lport1 +check_column 10880 sb:Service_Monitor port logical_port=lport1 +check_column tcp sb:Service_Monitor protocol logical_port=lport1 +check_column "192.168.0.99" sb:Service_Monitor src_ip logical_port=lport1 +check_column false sb:Service_Monitor ic_learned logical_port=lport1 +check_column false sb:Service_Monitor local logical_port=lport1 + +uuid=$(ovn-sbctl -d bare --no-headings --columns _uuid find Service_Monitor logical_port=lport1) + +# Check az_name presence in options. +AT_CHECK([ovn-sbctl get Service_Monitor ${uuid} options:az-name], +[0], [az_name +]) + +AT_CHECK([ovn-sbctl get Service_Monitor ${uuid} options:failure_count], +[0], ["100" +]) + +# Empty availability zone name. +check ovn-nbctl set load_balancer lb1_ipv4 ip_port_mappings:192.168.0.1=lport1:192.168.0.99: +check_row_count sb:Service_Monitor 0 + +OVS_WAIT_UNTIL([grep "Empty AZ name specified" northd/ovn-northd.log]) AT_CLEANUP ]) + -- 2.48.1 _______________________________________________ dev mailing list d...@openvswitch.org https://mail.openvswitch.org/mailman/listinfo/ovs-dev