On 6/2/22 14:39, Dumitru Ceara wrote:
On 6/2/22 19:22, Han Zhou wrote:
On Thu, Jun 2, 2022 at 10:09 AM Mark Michelson <[email protected]> wrote:
On 6/2/22 11:22, Dumitru Ceara wrote:
Change the way ovn-controller decides whether it should match on
ct_mark.natted or ct_label.natted for hairpin load balancer traffic.
Until now this was done solely based on the northd-reported internal
version.
However, to cover the case when OVN central components are not the last
ones to be updated, ovn-northd now explicitly informs ovn-controller
whether it should use ct_mark.natted or ct_label.natted via a new option in the
OVN_Southbound.Load_Balancer record: hairpin_use_ct_mark.
Signed-off-by: Dumitru Ceara <[email protected]>
---
controller/lflow.c | 32 ++++++++++++--------------------
controller/lflow.h | 1 -
controller/ovn-controller.c | 9 ---------
lib/lb.c | 3 +++
lib/lb.h | 4 ++++
northd/northd.c | 8 ++++++--
ovn-sb.xml | 8 ++++++++
tests/ovn-northd.at | 2 +-
8 files changed, 34 insertions(+), 33 deletions(-)
diff --git a/controller/lflow.c b/controller/lflow.c
index 7a3419305..0d9184285 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -1942,7 +1942,6 @@ add_lb_vip_hairpin_flows(struct ovn_controller_lb *lb,
                          struct ovn_lb_vip *lb_vip,
                          struct ovn_lb_backend *lb_backend,
                          uint8_t lb_proto,
-                         bool check_ct_label_for_lb_hairpin,
                          struct ovn_desired_flow_table *flow_table)
 {
     uint64_t stub[1024 / 8];
@@ -2033,18 +2032,19 @@ add_lb_vip_hairpin_flows(struct ovn_controller_lb *lb,
      * - packets must have ip.src == ip.dst at this point.
      * - the destination protocol and port must be of a valid backend that
      *   has the same IP as ip.dst.
+     *
+     * During upgrades logical flows might still use the old way of storing
+     * ct.natted in ct_label.  For backwards compatibility, only use ct_mark
+     * if ovn-northd notified ovn-controller to do that.
      */
-    uint32_t lb_ct_mark = OVN_CT_NATTED;
-    match_set_ct_mark_masked(&hairpin_match, lb_ct_mark, lb_ct_mark);
-
-    ofctrl_add_flow(flow_table, OFTABLE_CHK_LB_HAIRPIN, 100,
-                    lb->slb->header_.uuid.parts[0], &hairpin_match,
-                    &ofpacts, &lb->slb->header_.uuid);
+    if (lb->hairpin_use_ct_mark) {
+        uint32_t lb_ct_mark = OVN_CT_NATTED;
+        match_set_ct_mark_masked(&hairpin_match, lb_ct_mark, lb_ct_mark);
-    /* The below flow is identical to the above except that it checks
-     * ct_label.natted instead of ct_mark.natted, for backward compatibility
-     * during the upgrade from a previous version that uses ct_label. */
-    if (check_ct_label_for_lb_hairpin) {
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_LB_HAIRPIN, 100,
+                        lb->slb->header_.uuid.parts[0], &hairpin_match,
+                        &ofpacts, &lb->slb->header_.uuid);
+    } else {
         match_set_ct_mark_masked(&hairpin_match, 0, 0);
         ovs_u128 lb_ct_label = {
             .u64.lo = OVN_CT_NATTED,
@@ -2328,7 +2328,6 @@ add_lb_ct_snat_hairpin_flows(struct ovn_controller_lb *lb,
 static void
 consider_lb_hairpin_flows(const struct sbrec_load_balancer *sbrec_lb,
                           const struct hmap *local_datapaths,
-                          bool check_ct_label_for_lb_hairpin,
                           struct ovn_desired_flow_table *flow_table,
                           struct simap *ids)
 {
@@ -2368,7 +2367,6 @@ consider_lb_hairpin_flows(const struct sbrec_load_balancer *sbrec_lb,
             struct ovn_lb_backend *lb_backend = &lb_vip->backends[j];
             add_lb_vip_hairpin_flows(lb, lb_vip, lb_backend, lb_proto,
-                                     check_ct_label_for_lb_hairpin,
                                      flow_table);
         }
     }
@@ -2383,7 +2381,6 @@ consider_lb_hairpin_flows(const struct sbrec_load_balancer *sbrec_lb,
 static void
 add_lb_hairpin_flows(const struct sbrec_load_balancer_table *lb_table,
                      const struct hmap *local_datapaths,
-                     bool check_ct_label_for_lb_hairpin,
                      struct ovn_desired_flow_table *flow_table,
                      struct simap *ids,
                      struct id_pool *pool)
@@ -2406,9 +2403,7 @@ add_lb_hairpin_flows(const struct sbrec_load_balancer_table *lb_table,
             ovs_assert(id_pool_alloc_id(pool, &id));
             simap_put(ids, lb->name, id);
         }
-        consider_lb_hairpin_flows(lb, local_datapaths,
-                                  check_ct_label_for_lb_hairpin,
-                                  flow_table, ids);
+        consider_lb_hairpin_flows(lb, local_datapaths, flow_table, ids);
     }
 }
@@ -2545,7 +2540,6 @@ lflow_run(struct lflow_ctx_in *l_ctx_in, struct lflow_ctx_out *l_ctx_out)
                   l_ctx_in->local_datapaths,
                   l_ctx_out->flow_table);
     add_lb_hairpin_flows(l_ctx_in->lb_table,
                          l_ctx_in->local_datapaths,
-                         l_ctx_in->check_ct_label_for_lb_hairpin,
                          l_ctx_out->flow_table,
                          l_ctx_out->hairpin_lb_ids,
                          l_ctx_out->hairpin_id_pool);
@@ -2709,7 +2703,6 @@ lflow_add_flows_for_datapath(const struct sbrec_datapath_binding *dp,
      * associated. */
     for (size_t i = 0; i < n_dp_lbs; i++) {
         consider_lb_hairpin_flows(dp_lbs[i],
                                   l_ctx_in->local_datapaths,
-                                  l_ctx_in->check_ct_label_for_lb_hairpin,
                                   l_ctx_out->flow_table,
                                   l_ctx_out->hairpin_lb_ids);
     }
@@ -2840,7 +2833,6 @@ lflow_handle_changed_lbs(struct lflow_ctx_in *l_ctx_in,
         VLOG_DBG("Add load balancer hairpin flows for "UUID_FMT,
                  UUID_ARGS(&lb->header_.uuid));
         consider_lb_hairpin_flows(lb, l_ctx_in->local_datapaths,
-                                  l_ctx_in->check_ct_label_for_lb_hairpin,
                                   l_ctx_out->flow_table,
                                   l_ctx_out->hairpin_lb_ids);
     }
diff --git a/controller/lflow.h b/controller/lflow.h
index ad9449d3a..2aa896a75 100644
--- a/controller/lflow.h
+++ b/controller/lflow.h
@@ -160,7 +160,6 @@ struct lflow_ctx_in {
const struct sset *related_lport_ids;
const struct shash *binding_lports;
const struct hmap *chassis_tunnels;
- bool check_ct_label_for_lb_hairpin;
};
struct lflow_ctx_out {
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index b597c0e37..b9801c42d 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -486,13 +486,6 @@ get_ovs_chassis_id(const struct ovsrec_open_vswitch_table *ovs_table)
     return chassis_id;
 }
-static bool
-get_check_ct_label_for_lb_hairpin(const char *northd_internal_ver)
-{
-    unsigned int minor =
-        ovn_parse_internal_version_minor(northd_internal_ver);
-    return (minor <= 3);
-}
-
static void
update_ssl_config(const struct ovsrec_ssl_table *ssl_table)
{
@@ -2529,8 +2522,6 @@ init_lflow_ctx(struct engine_node *node,
l_ctx_in->related_lport_ids = &rt_data->related_lports.lport_ids;
l_ctx_in->binding_lports = &rt_data->lbinding_data.lports;
l_ctx_in->chassis_tunnels = &non_vif_data->chassis_tunnels;
- l_ctx_in->check_ct_label_for_lb_hairpin =
- get_check_ct_label_for_lb_hairpin(n_ver->ver);
l_ctx_out->flow_table = &fo->flow_table;
l_ctx_out->group_table = &fo->group_table;
diff --git a/lib/lb.c b/lib/lb.c
index 7b0ed1abe..6a48175a7 100644
--- a/lib/lb.c
+++ b/lib/lb.c
@@ -304,6 +304,9 @@ ovn_controller_lb_create(const struct sbrec_load_balancer *sbrec_lb)
     lb->hairpin_orig_tuple = smap_get_bool(&sbrec_lb->options,
                                            "hairpin_orig_tuple",
                                            false);
+    lb->hairpin_use_ct_mark = smap_get_bool(&sbrec_lb->options,
+                                            "hairpin_use_ct_mark",
+                                            false);
For the backports to 22.03 and 21.12, I understand why this would
default to false. However, for "new" versions of OVN (main and 22.06),
why don't we default to true? I know this would cause trouble for the
case of upgrading the central by a major version before updating the
hosts, but I thought the point of these patches was to make sure that
minor release upgrades would not break.
Thanks Mark, Han, for the review!
I agree with Mark here. Otherwise, we would need to keep the option
explicitly forever, which looks quite noisy.
Makes sense. What if I add a third patch that flips the default to
"true", and we apply that third patch only on the main branch? Does that
sound OK to you?
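Roughly, that third patch would just change the default in
ovn_controller_lb_create() (a sketch of what I mean, not tested):

    /* Assume ct_mark unless ovn-northd explicitly asks for ct_label via
     * the SB option; this only makes sense on branches where we don't
     * support skipping major versions on the central nodes. */
    lb->hairpin_use_ct_mark = smap_get_bool(&sbrec_lb->options,
                                            "hairpin_use_ct_mark",
                                            true);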
Works for me! And since this was my only issue with this particular patch,
Acked-by: Mark Michelson <[email protected]>
I'll wait to merge until Han weighs in on what you said below.
In addition, I wonder if it would be better to just have a global NB
option rather than a per-LB option? A global option looks clearer, more
obvious, and sufficient.
We could try, but then we would need to add more incremental processing
support to ovn-controller and have an explicit "load balancer" node that
has SB_Global as input. Currently LB hairpin flows are added as part of
the "lflow_output" node processing (or its input change handlers).
On the other hand, if we flip the default for hairpin_use_ct_mark to
"true" in 22.06.0 and make ovn-northd clear it in the SB.Load_Balancer
table, then the "noise" would be gone. This has the "advantage" of less
I-P code in ovn-controller.
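On the northd side that would look something like this in sync_lbs()
(again, only a sketch of the idea):

        /* With the ovn-controller default flipped to "true", only emit
         * the option when we actually need to override it. */
        if (features->ct_lb_mark) {
            smap_remove(&options, "hairpin_use_ct_mark");
        } else {
            smap_replace(&options, "hairpin_use_ct_mark", "false");
        }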
Please let me know what you think.
I can try to spin up a v2 soon or early tomorrow to try not to delay the
releases too much.
Thanks,
Dumitru
Thanks,
Han
ovn_lb_get_hairpin_snat_ip(&sbrec_lb->header_.uuid,
&sbrec_lb->options,
&lb->hairpin_snat_ips);
return lb;
diff --git a/lib/lb.h b/lib/lb.h
index 832ed31fb..d703c6bf5 100644
--- a/lib/lb.h
+++ b/lib/lb.h
@@ -101,6 +101,10 @@ struct ovn_controller_lb {
     bool hairpin_orig_tuple; /* True if ovn-northd stores the original
                               * destination tuple in registers.
                               */
+    bool hairpin_use_ct_mark; /* True if ovn-northd uses ct_mark for
+                               * load balancer sessions.  False if it uses
+                               * ct_label.
+                               */
     struct lport_addresses hairpin_snat_ips; /* IP (v4 and/or v6) to be used
                                               * as source for hairpinned
diff --git a/northd/northd.c b/northd/northd.c
index 450e05ad6..6e2f2a880 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -4146,7 +4146,8 @@ build_lb_port_related_data(struct hmap *datapaths, struct hmap *ports,
  */
 static void
 sync_lbs(struct northd_input *input_data, struct ovsdb_idl_txn *ovnsb_txn,
-         struct hmap *datapaths, struct hmap *lbs)
+         struct hmap *datapaths, struct hmap *lbs,
+         const struct chassis_features *features)
 {
     struct ovn_northd_lb *lb;
@@ -4193,6 +4194,8 @@ sync_lbs(struct northd_input *input_data, struct ovsdb_idl_txn *ovnsb_txn,
         struct smap options;
         smap_clone(&options, &lb->nlb->options);
         smap_replace(&options, "hairpin_orig_tuple", "true");
+        smap_replace(&options, "hairpin_use_ct_mark",
+                     features->ct_lb_mark ? "true" : "false");
         struct sbrec_datapath_binding **lb_dps =
             xmalloc(lb->n_nb_ls * sizeof *lb_dps);
@@ -15344,7 +15347,8 @@ ovnnb_db_run(struct northd_input *input_data,
     ovn_update_ipv6_options(&data->ports);
     ovn_update_ipv6_prefix(&data->ports);
-    sync_lbs(input_data, ovnsb_txn, &data->datapaths, &data->lbs);
+    sync_lbs(input_data, ovnsb_txn, &data->datapaths, &data->lbs,
+             &data->features);
     sync_address_sets(input_data, ovnsb_txn, &data->datapaths);
     sync_port_groups(input_data, ovnsb_txn, &data->port_groups);
     sync_meters(input_data, ovnsb_txn, &data->meter_groups);
diff --git a/ovn-sb.xml b/ovn-sb.xml
index 2dc0d5bea..b8b8074ec 100644
--- a/ovn-sb.xml
+++ b/ovn-sb.xml
@@ -4566,6 +4566,14 @@ tcp.flags = RST;
         of the load balanced packets are stored in registers
         <code>reg1, reg2, xxreg1</code>.
       </column>
+      <column name="options" key="hairpin_use_ct_mark"
+              type='{"type": "boolean"}'>
+        This value is automatically set to <code>true</code> by
+        <code>ovn-northd</code> when action <code>ct_lb_mark</code> is used
+        for new load balancer sessions.  <code>ovn-controller</code> then
+        knows that it should check <code>ct_mark.natted</code> to detect
+        load balanced traffic.
+      </column>
</group>
<group title="Common Columns">
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index 7bb6d33ac..4bb815c7b 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -2563,7 +2563,7 @@ check_column "" sb:datapath_binding load_balancers external_ids:name=sw1
 echo
 echo "__file__:__line__: Set hairpin_snat_ip on lb1 and check that SB DB is updated."
 check ovn-nbctl --wait=sb set Load_Balancer lb1 options:hairpin_snat_ip="42.42.42.42 4242::4242"
-check_column "$lb1_uuid" sb:load_balancer _uuid name=lb1 options='{hairpin_orig_tuple="true", hairpin_snat_ip="42.42.42.42 4242::4242"}'
+check_column "$lb1_uuid" sb:load_balancer _uuid name=lb1 options='{hairpin_orig_tuple="true", hairpin_snat_ip="42.42.42.42 4242::4242", hairpin_use_ct_mark="true"}'
 echo
 echo "__file__:__line__: Delete load balancers lb1 and lbg1 and check that datapath sw1's load_balancers is still empty."