On 12/4/20 5:36 PM, Dumitru Ceara wrote:
> On 12/3/20 3:03 PM, Ilya Maximets wrote:
>> ovn-controller will receive updates from Logical_DP_Group table
>> and process logical flows accordingly. Feature is fully backward
>> compatible since old 'logical_datapath' column kept as is.
>> It will also be used by northd to not create datapath groups if there
>> is only one datapath in it.
>>
>> Unfortunately, almost every part of the ovn-controller depends on
>> the fact that there is a 1:1 relation between logical flows and logical
>> datapaths, starting from the logical flow handling and all the way
>> to deep internals of expression parsing and I-P engine.
>> So, instead of re-writing everything we're taking a "safe" approach
>> and just re-factoring a bit to add new 'datapath' arguments to
>> functions and call them in a loop for all datapaths in a datapath
>> group. This might have some performance impact in case datapath
>> groups are actually used by northd and there are many datapaths that
>> are local to this ovn-controller. However, this impact might be
>> compensated by a lower number of logical flows in general.
>> There should be no performance penalty if datapath groups are not
>> used by northd.
>>
>> Signed-off-by: Ilya Maximets <[email protected]>
>> ---
>> controller/lflow.c | 129 +++++++++++++++++++++++++++---------
>> controller/lflow.h | 2 +
>> controller/ovn-controller.c | 54 ++++++++++++++-
>> 3 files changed, 151 insertions(+), 34 deletions(-)
>>
>> diff --git a/controller/lflow.c b/controller/lflow.c
>> index 7b4679f20..1e9680bb1 100644
>> --- a/controller/lflow.c
>> +++ b/controller/lflow.c
>> @@ -667,6 +667,7 @@ update_conj_id_ofs(uint32_t *conj_id_ofs, uint32_t
>> n_conjs)
>>
>> static void
>> add_matches_to_flow_table(const struct sbrec_logical_flow *lflow,
>> + const struct sbrec_datapath_binding *dp,
>> struct hmap *matches, size_t conj_id_ofs,
>> uint8_t ptable, uint8_t output_ptable,
>> struct ofpbuf *ovnacts,
>> @@ -677,7 +678,7 @@ add_matches_to_flow_table(const struct
>> sbrec_logical_flow *lflow,
>> .sbrec_multicast_group_by_name_datapath
>> = l_ctx_in->sbrec_multicast_group_by_name_datapath,
>> .sbrec_port_binding_by_name = l_ctx_in->sbrec_port_binding_by_name,
>> - .dp = lflow->logical_datapath
>> + .dp = dp,
>> };
>>
>> /* Encode OVN logical actions into OpenFlow. */
>> @@ -687,7 +688,7 @@ add_matches_to_flow_table(const struct
>> sbrec_logical_flow *lflow,
>> .lookup_port = lookup_port_cb,
>> .tunnel_ofport = tunnel_ofport_cb,
>> .aux = &aux,
>> - .is_switch = datapath_is_switch(lflow->logical_datapath),
>> + .is_switch = datapath_is_switch(dp),
>> .group_table = l_ctx_out->group_table,
>> .meter_table = l_ctx_out->meter_table,
>> .lflow_uuid = lflow->header_.uuid,
>> @@ -706,17 +707,16 @@ add_matches_to_flow_table(const struct
>> sbrec_logical_flow *lflow,
>>
>> struct expr_match *m;
>> HMAP_FOR_EACH (m, hmap_node, matches) {
>> - match_set_metadata(&m->match,
>> - htonll(lflow->logical_datapath->tunnel_key));
>> + match_set_metadata(&m->match, htonll(dp->tunnel_key));
>> if (m->match.wc.masks.conj_id) {
>> m->match.flow.conj_id += conj_id_ofs;
>> }
>> - if (datapath_is_switch(lflow->logical_datapath)) {
>> + if (datapath_is_switch(dp)) {
>> unsigned int reg_index
>> = (ingress ? MFF_LOG_INPORT : MFF_LOG_OUTPORT) - MFF_REG0;
>> int64_t port_id = m->match.flow.regs[reg_index];
>> if (port_id) {
>> - int64_t dp_id = lflow->logical_datapath->tunnel_key;
>> + int64_t dp_id = dp->tunnel_key;
>> char buf[16];
>> get_unique_lport_key(dp_id, port_id, buf, sizeof(buf));
>> lflow_resource_add(l_ctx_out->lfrr, REF_TYPE_PORTBINDING,
>> buf,
>> @@ -765,6 +765,7 @@ add_matches_to_flow_table(const struct
>> sbrec_logical_flow *lflow,
>> */
>> static struct expr *
>> convert_match_to_expr(const struct sbrec_logical_flow *lflow,
>> + const struct sbrec_datapath_binding *dp,
>> struct expr *prereqs,
>> const struct shash *addr_sets,
>> const struct shash *port_groups,
>> @@ -777,8 +778,7 @@ convert_match_to_expr(const struct sbrec_logical_flow
>> *lflow,
>>
>> struct expr *e = expr_parse_string(lflow->match, &symtab, addr_sets,
>> port_groups, &addr_sets_ref,
>> - &port_groups_ref,
>> - lflow->logical_datapath->tunnel_key,
>> + &port_groups_ref, dp->tunnel_key,
>> &error);
>> const char *addr_set_name;
>> SSET_FOR_EACH (addr_set_name, &addr_sets_ref) {
>> @@ -816,23 +816,18 @@ convert_match_to_expr(const struct sbrec_logical_flow
>> *lflow,
>> }
>>
>> static bool
>> -consider_logical_flow(const struct sbrec_logical_flow *lflow,
>> - struct hmap *dhcp_opts, struct hmap *dhcpv6_opts,
>> - struct hmap *nd_ra_opts,
>> - struct controller_event_options
>> *controller_event_opts,
>> - struct lflow_ctx_in *l_ctx_in,
>> - struct lflow_ctx_out *l_ctx_out)
>> +consider_logical_flow__(const struct sbrec_logical_flow *lflow,
>> + const struct sbrec_datapath_binding *dp,
>> + struct hmap *dhcp_opts, struct hmap *dhcpv6_opts,
>> + struct hmap *nd_ra_opts,
>> + struct controller_event_options
>> *controller_event_opts,
>> + struct lflow_ctx_in *l_ctx_in,
>> + struct lflow_ctx_out *l_ctx_out)
>> {
>> /* Determine translation of logical table IDs to physical table IDs. */
>> bool ingress = !strcmp(lflow->pipeline, "ingress");
>>
>> - const struct sbrec_datapath_binding *ldp = lflow->logical_datapath;
>> - if (!ldp) {
>> - VLOG_DBG("lflow "UUID_FMT" has no datapath binding, skip",
>> - UUID_ARGS(&lflow->header_.uuid));
>> - return true;
>> - }
>> - if (!get_local_datapath(l_ctx_in->local_datapaths, ldp->tunnel_key)) {
>> + if (!get_local_datapath(l_ctx_in->local_datapaths, dp->tunnel_key)) {
>> VLOG_DBG("lflow "UUID_FMT" is not for local datapath, skip",
>> UUID_ARGS(&lflow->header_.uuid));
>> return true;
>> @@ -881,7 +876,7 @@ consider_logical_flow(const struct sbrec_logical_flow
>> *lflow,
>> .sbrec_multicast_group_by_name_datapath
>> = l_ctx_in->sbrec_multicast_group_by_name_datapath,
>> .sbrec_port_binding_by_name = l_ctx_in->sbrec_port_binding_by_name,
>> - .dp = lflow->logical_datapath
>> + .dp = dp,
>> };
>> struct condition_aux cond_aux = {
>> .sbrec_port_binding_by_name = l_ctx_in->sbrec_port_binding_by_name,
>> @@ -894,7 +889,7 @@ consider_logical_flow(const struct sbrec_logical_flow
>> *lflow,
>> struct expr *expr = NULL;
>> if (!l_ctx_out->lflow_cache_map) {
>> /* Caching is disabled. */
>> - expr = convert_match_to_expr(lflow, prereqs, l_ctx_in->addr_sets,
>> + expr = convert_match_to_expr(lflow, dp, prereqs,
>> l_ctx_in->addr_sets,
>> l_ctx_in->port_groups, l_ctx_out->lfrr,
>> NULL);
>> if (!expr) {
>> @@ -920,7 +915,7 @@ consider_logical_flow(const struct sbrec_logical_flow
>> *lflow,
>> return true;
>> }
>>
>> - add_matches_to_flow_table(lflow, &matches, *l_ctx_out->conj_id_ofs,
>> + add_matches_to_flow_table(lflow, dp, &matches,
>> *l_ctx_out->conj_id_ofs,
>> ptable, output_ptable, &ovnacts, ingress,
>> l_ctx_in, l_ctx_out);
>>
>> @@ -937,7 +932,7 @@ consider_logical_flow(const struct sbrec_logical_flow
>> *lflow,
>> if (lc && lc->type == LCACHE_T_MATCHES) {
>> /* 'matches' is cached. No need to do expr parsing.
>> * Add matches to flow table and return. */
>> - add_matches_to_flow_table(lflow, lc->expr_matches, lc->conj_id_ofs,
>> + add_matches_to_flow_table(lflow, dp, lc->expr_matches,
>> lc->conj_id_ofs,
>> ptable, output_ptable, &ovnacts, ingress,
>> l_ctx_in, l_ctx_out);
>> ovnacts_free(ovnacts.data, ovnacts.size);
>> @@ -957,7 +952,7 @@ consider_logical_flow(const struct sbrec_logical_flow
>> *lflow,
>>
>> bool pg_addr_set_ref = false;
>> if (!expr) {
>> - expr = convert_match_to_expr(lflow, prereqs, l_ctx_in->addr_sets,
>> + expr = convert_match_to_expr(lflow, dp, prereqs,
>> l_ctx_in->addr_sets,
>> l_ctx_in->port_groups, l_ctx_out->lfrr,
>> &pg_addr_set_ref);
>> if (!expr) {
>> @@ -1015,7 +1010,7 @@ consider_logical_flow(const struct sbrec_logical_flow
>> *lflow,
>> }
>>
>> /* Encode OVN logical actions into OpenFlow. */
>> - add_matches_to_flow_table(lflow, matches, lc->conj_id_ofs,
>> + add_matches_to_flow_table(lflow, dp, matches, lc->conj_id_ofs,
>> ptable, output_ptable, &ovnacts, ingress,
>> l_ctx_in, l_ctx_out);
>> ovnacts_free(ovnacts.data, ovnacts.size);
>> @@ -1037,6 +1032,42 @@ consider_logical_flow(const struct sbrec_logical_flow
>> *lflow,
>> return true;
>> }
>>
>> +static bool
>> +consider_logical_flow(const struct sbrec_logical_flow *lflow,
>> + struct hmap *dhcp_opts, struct hmap *dhcpv6_opts,
>> + struct hmap *nd_ra_opts,
>> + struct controller_event_options
>> *controller_event_opts,
>> + struct lflow_ctx_in *l_ctx_in,
>> + struct lflow_ctx_out *l_ctx_out)
>> +{
>> + const struct sbrec_logical_dp_group *dp_group = lflow->logical_dp_group;
>> + const struct sbrec_datapath_binding *dp = lflow->logical_datapath;
>> + bool ret = true;
>> +
>> + if (!dp_group && !dp) {
>> + VLOG_DBG("lflow "UUID_FMT" has no datapath binding, skip",
>> + UUID_ARGS(&lflow->header_.uuid));
>> + return true;
>> + }
>> + ovs_assert(!dp_group || !dp);
>> +
>> + if (dp && !consider_logical_flow__(lflow, dp,
>> + dhcp_opts, dhcpv6_opts, nd_ra_opts,
>> + controller_event_opts,
>> + l_ctx_in, l_ctx_out)) {
>> + ret = false;
>> + }
>> + for (size_t i = 0; dp_group && i < dp_group->n_datapaths; i++) {
>> + if (!consider_logical_flow__(lflow, dp_group->datapaths[i],
>> + dhcp_opts, dhcpv6_opts, nd_ra_opts,
>> + controller_event_opts,
>> + l_ctx_in, l_ctx_out)) {
>> + ret = false;
>> + }
>> + }
>> + return ret;
>> +}
>> +
>> static void
>> put_load(const uint8_t *data, size_t len,
>> enum mf_field_id dst, int ofs, int n_bits,
>> @@ -1432,14 +1463,46 @@ lflow_add_flows_for_datapath(const struct
>> sbrec_datapath_binding *dp,
>> const struct sbrec_logical_flow *lflow;
>> SBREC_LOGICAL_FLOW_FOR_EACH_EQUAL (
>> lflow, lf_row, l_ctx_in->sbrec_logical_flow_by_logical_datapath) {
>> - if (!consider_logical_flow(lflow, &dhcp_opts, &dhcpv6_opts,
>> - &nd_ra_opts, &controller_event_opts,
>> - l_ctx_in, l_ctx_out)) {
>> - handled = false;
>> - l_ctx_out->conj_id_overflow = true;
>> - break;
>> + if (!consider_logical_flow__(lflow, dp, &dhcp_opts, &dhcpv6_opts,
>> + &nd_ra_opts, &controller_event_opts,
>> + l_ctx_in, l_ctx_out)) {
>> + handled = false;
>> + l_ctx_out->conj_id_overflow = true;
>> + goto lflow_processing_end;
>> + }
>
> Nit: indentation of the three lines above (one space too much).
Oops. :)
>
>> + }
>> + sbrec_logical_flow_index_destroy_row(lf_row);
>> +
>> + lf_row = sbrec_logical_flow_index_init_row(
>> + l_ctx_in->sbrec_logical_flow_by_logical_dp_group);
>> + /* There are far fewer datapath groups than logical flows. */
>> + const struct sbrec_logical_dp_group *ldpg;
>> + SBREC_LOGICAL_DP_GROUP_TABLE_FOR_EACH (ldpg,
>> +
>> l_ctx_in->logical_dp_group_table) {
>> + bool found = false;
>> + for (size_t i = 0; i < ldpg->n_datapaths; i++) {
>> + if (ldpg->datapaths[i] == dp) {
>> + found = true;
>> + break;
>> + }
>> + }
>> + if (!found) {
>> + continue;
>> + }
>> +
>> + sbrec_logical_flow_index_set_logical_dp_group(lf_row, ldpg);
>> + SBREC_LOGICAL_FLOW_FOR_EACH_EQUAL (
>> + lflow, lf_row,
>> l_ctx_in->sbrec_logical_flow_by_logical_dp_group) {
>> + if (!consider_logical_flow__(lflow, dp, &dhcp_opts,
>> &dhcpv6_opts,
>> + &nd_ra_opts,
>> &controller_event_opts,
>> + l_ctx_in, l_ctx_out)) {
>> + handled = false;
>> + l_ctx_out->conj_id_overflow = true;
>> + goto lflow_processing_end;
>> + }
>> }
>> }
>> +lflow_processing_end:
>> sbrec_logical_flow_index_destroy_row(lf_row);
>>
>> dhcp_opts_destroy(&dhcp_opts);
>> diff --git a/controller/lflow.h b/controller/lflow.h
>> index 1225131de..ba79cc374 100644
>> --- a/controller/lflow.h
>> +++ b/controller/lflow.h
>> @@ -127,12 +127,14 @@ void lflow_resource_clear(struct lflow_resource_ref *);
>> struct lflow_ctx_in {
>> struct ovsdb_idl_index *sbrec_multicast_group_by_name_datapath;
>> struct ovsdb_idl_index *sbrec_logical_flow_by_logical_datapath;
>> + struct ovsdb_idl_index *sbrec_logical_flow_by_logical_dp_group;
>> struct ovsdb_idl_index *sbrec_port_binding_by_name;
>> const struct sbrec_dhcp_options_table *dhcp_options_table;
>> const struct sbrec_dhcpv6_options_table *dhcpv6_options_table;
>> const struct sbrec_datapath_binding_table *dp_binding_table;
>> const struct sbrec_mac_binding_table *mac_binding_table;
>> const struct sbrec_logical_flow_table *logical_flow_table;
>> + const struct sbrec_logical_dp_group_table *logical_dp_group_table;
>> const struct sbrec_multicast_group_table *mc_group_table;
>> const struct sbrec_chassis *chassis;
>> const struct sbrec_load_balancer_table *lb_table;
>> diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
>> index 46589e421..eb0de32a3 100644
>> --- a/controller/ovn-controller.c
>> +++ b/controller/ovn-controller.c
>> @@ -157,6 +157,7 @@ update_sb_monitors(struct ovsdb_idl *ovnsb_idl,
>> * the connected logical routers and logical switches. */
>> struct ovsdb_idl_condition pb = OVSDB_IDL_CONDITION_INIT(&pb);
>> struct ovsdb_idl_condition lf = OVSDB_IDL_CONDITION_INIT(&lf);
>> + struct ovsdb_idl_condition ldpg = OVSDB_IDL_CONDITION_INIT(&ldpg);
>> struct ovsdb_idl_condition mb = OVSDB_IDL_CONDITION_INIT(&mb);
>> struct ovsdb_idl_condition mg = OVSDB_IDL_CONDITION_INIT(&mg);
>> struct ovsdb_idl_condition dns = OVSDB_IDL_CONDITION_INIT(&dns);
>> @@ -168,6 +169,7 @@ update_sb_monitors(struct ovsdb_idl *ovnsb_idl,
>> if (monitor_all) {
>> ovsdb_idl_condition_add_clause_true(&pb);
>> ovsdb_idl_condition_add_clause_true(&lf);
>> + ovsdb_idl_condition_add_clause_true(&ldpg);
>> ovsdb_idl_condition_add_clause_true(&mb);
>> ovsdb_idl_condition_add_clause_true(&mg);
>> ovsdb_idl_condition_add_clause_true(&dns);
>> @@ -231,18 +233,39 @@ update_sb_monitors(struct ovsdb_idl *ovnsb_idl,
>> sbrec_port_binding_add_clause_datapath(&pb, OVSDB_F_EQ, uuid);
>> sbrec_logical_flow_add_clause_logical_datapath(&lf, OVSDB_F_EQ,
>> uuid);
>> + sbrec_logical_dp_group_add_clause_datapaths(
>> + &ldpg, OVSDB_F_INCLUDES, &uuid, 1);
>> sbrec_mac_binding_add_clause_datapath(&mb, OVSDB_F_EQ, uuid);
>> sbrec_multicast_group_add_clause_datapath(&mg, OVSDB_F_EQ,
>> uuid);
>> sbrec_dns_add_clause_datapaths(&dns, OVSDB_F_INCLUDES, &uuid,
>> 1);
>> sbrec_ip_multicast_add_clause_datapath(&ip_mcast, OVSDB_F_EQ,
>> uuid);
>> }
>> +
>> + /* Updating conditions to receive logical flows that reference
>> + * datapath groups containing local datapaths. */
>> + const struct sbrec_logical_dp_group *group;
>> + SBREC_LOGICAL_DP_GROUP_FOR_EACH (group, ovnsb_idl) {
>> + struct uuid *uuid = CONST_CAST(struct uuid *,
>> + &group->header_.uuid);
>> + size_t i;
>> +
>> + for (i = 0; i < group->n_datapaths; i++) {
>> + if (get_local_datapath(local_datapaths,
>> + group->datapaths[i]->tunnel_key)) {
>> + sbrec_logical_flow_add_clause_logical_dp_group(
>> + &lf, OVSDB_F_EQ, uuid);
>> + break;
>> + }
>> + }
>> + }
>> }
>>
>> out:;
>> unsigned int cond_seqnos[] = {
>> sbrec_port_binding_set_condition(ovnsb_idl, &pb),
>> sbrec_logical_flow_set_condition(ovnsb_idl, &lf),
>> + sbrec_logical_dp_group_set_condition(ovnsb_idl, &ldpg),
>> sbrec_mac_binding_set_condition(ovnsb_idl, &mb),
>> sbrec_multicast_group_set_condition(ovnsb_idl, &mg),
>> sbrec_dns_set_condition(ovnsb_idl, &dns),
>> @@ -259,6 +282,7 @@ out:;
>>
>> ovsdb_idl_condition_destroy(&pb);
>> ovsdb_idl_condition_destroy(&lf);
>> + ovsdb_idl_condition_destroy(&ldpg);
>> ovsdb_idl_condition_destroy(&mb);
>> ovsdb_idl_condition_destroy(&mg);
>> ovsdb_idl_condition_destroy(&dns);
>> @@ -921,6 +945,7 @@ ctrl_register_ovs_idl(struct ovsdb_idl *ovs_idl)
>> SB_NODE(port_group, "port_group") \
>> SB_NODE(multicast_group, "multicast_group") \
>> SB_NODE(datapath_binding, "datapath_binding") \
>> + SB_NODE(logical_dp_group, "logical_dp_group") \
>> SB_NODE(port_binding, "port_binding") \
>> SB_NODE(mac_binding, "mac_binding") \
>> SB_NODE(logical_flow, "logical_flow") \
>> @@ -1804,6 +1829,11 @@ static void init_lflow_ctx(struct engine_node *node,
>> engine_get_input("SB_logical_flow", node),
>> "logical_datapath");
>>
>> + struct ovsdb_idl_index *sbrec_logical_flow_by_dp_group =
>> + engine_ovsdb_node_get_index(
>> + engine_get_input("SB_logical_flow", node),
>> + "logical_dp_group");
>> +
>> struct ovsdb_idl_index *sbrec_mc_group_by_name_dp =
>> engine_ovsdb_node_get_index(
>> engine_get_input("SB_multicast_group", node),
>> @@ -1825,6 +1855,10 @@ static void init_lflow_ctx(struct engine_node *node,
>> (struct sbrec_logical_flow_table *)EN_OVSDB_GET(
>> engine_get_input("SB_logical_flow", node));
>>
>> + struct sbrec_logical_dp_group_table *logical_dp_group_table =
>> + (struct sbrec_logical_dp_group_table *)EN_OVSDB_GET(
>> + engine_get_input("SB_logical_dp_group", node));
>> +
>> struct sbrec_multicast_group_table *multicast_group_table =
>> (struct sbrec_multicast_group_table *)EN_OVSDB_GET(
>> engine_get_input("SB_multicast_group", node));
>> @@ -1857,11 +1891,14 @@ static void init_lflow_ctx(struct engine_node *node,
>> sbrec_mc_group_by_name_dp;
>> l_ctx_in->sbrec_logical_flow_by_logical_datapath =
>> sbrec_logical_flow_by_dp;
>> + l_ctx_in->sbrec_logical_flow_by_logical_dp_group =
>> + sbrec_logical_flow_by_dp_group;
>> l_ctx_in->sbrec_port_binding_by_name = sbrec_port_binding_by_name;
>> l_ctx_in->dhcp_options_table = dhcp_table;
>> l_ctx_in->dhcpv6_options_table = dhcpv6_table;
>> l_ctx_in->mac_binding_table = mac_binding_table;
>> l_ctx_in->logical_flow_table = logical_flow_table;
>> + l_ctx_in->logical_dp_group_table = logical_dp_group_table;
>> l_ctx_in->mc_group_table = multicast_group_table;
>> l_ctx_in->chassis = chassis;
>> l_ctx_in->lb_table = lb_table;
>> @@ -2405,6 +2442,9 @@ main(int argc, char *argv[])
>> struct ovsdb_idl_index *sbrec_logical_flow_by_logical_datapath
>> = ovsdb_idl_index_create1(ovnsb_idl_loop.idl,
>> &sbrec_logical_flow_col_logical_datapath);
>> + struct ovsdb_idl_index *sbrec_logical_flow_by_logical_dp_group
>> + = ovsdb_idl_index_create1(ovnsb_idl_loop.idl,
>> + &sbrec_logical_flow_col_logical_dp_group);
>> struct ovsdb_idl_index *sbrec_port_binding_by_name
>> = ovsdb_idl_index_create1(ovnsb_idl_loop.idl,
>> &sbrec_port_binding_col_logical_port);
>> @@ -2538,6 +2578,12 @@ main(int argc, char *argv[])
>> flow_output_sb_mac_binding_handler);
>> engine_add_input(&en_flow_output, &en_sb_logical_flow,
>> flow_output_sb_logical_flow_handler);
>> + /* Using a noop handler since we don't really need any data from
>> datapath
>> + * groups or a full recompute. Update of a datapath group will put
>> + * logical flow into the tracked list, so the logical flow handler will
>> + * process all changes. */
>> + engine_add_input(&en_flow_output, &en_sb_logical_dp_group,
>> + engine_noop_handler);
>> engine_add_input(&en_flow_output, &en_sb_dhcp_options, NULL);
>> engine_add_input(&en_flow_output, &en_sb_dhcpv6_options, NULL);
>> engine_add_input(&en_flow_output, &en_sb_dns, NULL);
>> @@ -2581,6 +2627,8 @@ main(int argc, char *argv[])
>> sbrec_multicast_group_by_name_datapath);
>> engine_ovsdb_node_add_index(&en_sb_logical_flow, "logical_datapath",
>> sbrec_logical_flow_by_logical_datapath);
>> + engine_ovsdb_node_add_index(&en_sb_logical_flow, "logical_dp_group",
>> + sbrec_logical_flow_by_logical_dp_group);
>> engine_ovsdb_node_add_index(&en_sb_port_binding, "name",
>> sbrec_port_binding_by_name);
>> engine_ovsdb_node_add_index(&en_sb_port_binding, "key",
>> @@ -2820,7 +2868,11 @@ main(int argc, char *argv[])
>> br_int, chassis,
>> &runtime_data->local_datapaths,
>> &runtime_data->active_tunnels);
>> - if (engine_node_changed(&en_runtime_data)) {
>> + /* Updating monitor conditions if runtime data
>> changed
>> + * and, also, on flow output changes since this
>> might
>> mean update of logical datapath groups. */
>> + if (engine_node_changed(&en_runtime_data)
>> + || engine_node_changed(&en_flow_output)) {
>
> I've been thinking some more about this and I think this can be safely
> changed to:
>
> " || engine_node_changed(&en_sb_logical_dp_group)"
>
> That would avoid checking if conditions need to be updated on every
> logical_flow add/update.
>
> I only tested it on my machine though by running the OVN tests and it
> seems to work fine.
Yes, that works. Thanks for the suggestion.
I'll send v3 shortly with this change and typos/style fixed.
Best regards, Ilya Maximets.
_______________________________________________
dev mailing list
[email protected]
https://mail.openvswitch.org/mailman/listinfo/ovs-dev