This reverts commit ffe267317c252b1aac864b6d18c89beebd4d3846.

That commit removed the priority-120 flows that skipped unSNAT for
traffic destined to VIPs. As a result, it broke hardware offload:
packets entered the SNAT zone but were never committed, so conntrack
lookups always returned them as ct.new.
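
For reference, the restored flows take the following shape (this example
is taken from the updated ovn-northd.at expectations below; the VIP
address, protocol, and port vary per load balancer):

  table=??(lr_in_unsnat       ), priority=120  , match=(ip4 && ip4.dst == 192.168.2.1 && tcp && tcp.dst == 8080), action=(next;)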

The follow-up commit 800fd0681579 ("northd: Add LR option to commit all
traffic.") attempted to address the issue by committing to both the DNAT
and SNAT zones whenever stateful NATs are present. However, this
approach does not fully resolve the case where a packet must be DNATed
and then SNATed: the SNAT zone performs CT lookups both before and
after DNAT, so two entries would have to be committed in that zone for
the connection to appear established in both lookups.
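
For illustration, with a hypothetical client 10.0.0.2 connecting to a
VIP 172.168.0.10 that is DNATed to backend 10.0.0.40, the SNAT zone
performs lookups on two different tuples for the same connection:

  before DNAT: src=10.0.0.2, dst=172.168.0.10
  after DNAT:  src=10.0.0.2, dst=10.0.0.40

A single committed entry can match only one of these lookups.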

Further attempts to fix this became increasingly complex and risked
breaking other scenarios, without a clearly proven solution. (See
detailed discussions in [0].)

Therefore, for the original issue [1] that ffe267317c25 aimed to fix, we
now recommend avoiding port conflicts in ovn-kubernetes via
configuration, and revert that commit to restore hardware offload
support.

A test case is added to help ensure hardware offload is not broken for
this scenario.
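
In short, the test dumps the datapath flows that invoke ct() and, for
each conntrack zone they reference, checks that a matching conntrack
entry in ASSURED state exists; if packets are never committed in a
zone, the check fails. The core of the check, from the test below:

  ovs-appctl dpctl/dump-conntrack -m > conntrack_file
  ovs-appctl dpctl/dump-flows -m | grep 'ct(' | while read -r flow; do
      check_flow_assured "$flow"
  done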

[0] https://mail.openvswitch.org/pipermail/ovs-dev/2025-May/423389.html
[1] https://issues.redhat.com/browse/FDP-291

Discussed-at: https://mail.openvswitch.org/pipermail/ovs-dev/2025-May/423389.html
Signed-off-by: Han Zhou <hz...@ovn.org>
---
v2: Added a test case.
v3: Moved the test case to check-kernel only, because the userspace
conntrack dump does not include the status flags (e.g. ASSURED) that
the test checks for.

 northd/en-lflow.c        |   3 +-
 northd/en-lr-nat.c       |   5 ++
 northd/en-lr-nat.h       |   3 +
 northd/en-lr-stateful.c  |  51 ++++++++++++++++
 northd/en-lr-stateful.h  |   9 ++-
 northd/northd.c          |  33 ++++++++++-
 tests/ovn-northd.at      |  24 +++++---
 tests/system-ovn-kmod.at | 124 +++++++++++++++++++++++++++++++++++++++
 tests/system-ovn.at      |  80 -------------------------
 9 files changed, 241 insertions(+), 91 deletions(-)

diff --git a/northd/en-lflow.c b/northd/en-lflow.c
index 898869287aa3..63565ef80cb5 100644
--- a/northd/en-lflow.c
+++ b/northd/en-lflow.c
@@ -179,7 +179,8 @@ lflow_lr_stateful_handler(struct engine_node *node, void *data)
     struct ed_type_lr_stateful *lr_sful_data =
         engine_get_input_data("lr_stateful", node);
 
-    if (!lr_stateful_has_tracked_data(&lr_sful_data->trk_data)) {
+    if (!lr_stateful_has_tracked_data(&lr_sful_data->trk_data)
+        || lr_sful_data->trk_data.vip_nats_changed) {
         return EN_UNHANDLED;
     }
 
diff --git a/northd/en-lr-nat.c b/northd/en-lr-nat.c
index 18a5a6255923..5a7b857f4960 100644
--- a/northd/en-lr-nat.c
+++ b/northd/en-lr-nat.c
@@ -362,6 +362,10 @@ lr_nat_record_init(struct lr_nat_record *lrnat_rec,
     lrnat_rec->nbr_uuid = od->nbr->header_.uuid;
 
     shash_init(&lrnat_rec->snat_ips);
+    sset_init(&lrnat_rec->external_ips);
+    for (size_t i = 0; i < od->nbr->n_nat; i++) {
+        sset_add(&lrnat_rec->external_ips, od->nbr->nat[i]->external_ip);
+    }
 
     sset_init(&lrnat_rec->external_macs);
     lrnat_rec->has_distributed_nat = false;
@@ -531,6 +535,7 @@ lr_nat_record_clear(struct lr_nat_record *lrnat_rec)
     }
 
     free(lrnat_rec->nat_entries);
+    sset_destroy(&lrnat_rec->external_ips);
     sset_destroy(&lrnat_rec->external_macs);
 }
 
diff --git a/northd/en-lr-nat.h b/northd/en-lr-nat.h
index df4eb0786af6..495e24042eb9 100644
--- a/northd/en-lr-nat.h
+++ b/northd/en-lr-nat.h
@@ -80,6 +80,9 @@ struct lr_nat_record {
 
     bool has_distributed_nat;
 
+    /* Set of nat external ips on the router. */
+    struct sset external_ips;
+
     /* Set of nat external macs on the router. */
     struct sset external_macs;
 
diff --git a/northd/en-lr-stateful.c b/northd/en-lr-stateful.c
index 889f80b28d3f..56e93f3c4574 100644
--- a/northd/en-lr-stateful.c
+++ b/northd/en-lr-stateful.c
@@ -82,6 +82,7 @@ static void remove_lrouter_lb_reachable_ips(struct lr_stateful_record *,
                                             enum lb_neighbor_responder_mode,
                                             const struct sset *lb_ips_v4,
                                             const struct sset *lb_ips_v6);
+static bool lr_stateful_rebuild_vip_nats(struct lr_stateful_record *);
 
 /* 'lr_stateful' engine node manages the NB logical router LB data.
  */
@@ -109,6 +110,7 @@ en_lr_stateful_clear_tracked_data(void *data_)
     struct ed_type_lr_stateful *data = data_;
 
     hmapx_clear(&data->trk_data.crupdated);
+    data->trk_data.vip_nats_changed = false;
 }
 
 enum engine_node_state
@@ -196,6 +198,10 @@ lr_stateful_lb_data_handler(struct engine_node *node, void *data_)
 
             /* Add the lr_stateful_rec rec to the tracking data. */
             hmapx_add(&data->trk_data.crupdated, lr_stateful_rec);
+
+            if (!sset_is_empty(&lr_stateful_rec->vip_nats)) {
+                data->trk_data.vip_nats_changed = true;
+            }
             continue;
         }
 
@@ -313,6 +319,9 @@ lr_stateful_lb_data_handler(struct engine_node *node, void *data_)
 
         HMAPX_FOR_EACH (hmapx_node, &data->trk_data.crupdated) {
             struct lr_stateful_record *lr_stateful_rec = hmapx_node->data;
+            if (lr_stateful_rebuild_vip_nats(lr_stateful_rec)) {
+                data->trk_data.vip_nats_changed = true;
+            }
             const struct ovn_datapath *od =
                 ovn_datapaths_find_by_index(input_data.lr_datapaths,
                                             lr_stateful_rec->lr_index);
@@ -351,6 +360,13 @@ lr_stateful_lr_nat_handler(struct engine_node *node, void *data_)
                 lr_stateful_record_create(&data->table, lrnat_rec, od,
                                           input_data.lb_datapaths_map,
                                           input_data.lbgrp_datapaths_map);
+            if (!sset_is_empty(&lr_stateful_rec->vip_nats)) {
+                data->trk_data.vip_nats_changed = true;
+            }
+        } else {
+            if (lr_stateful_rebuild_vip_nats(lr_stateful_rec)) {
+                data->trk_data.vip_nats_changed = true;
+            }
         }
 
         /* Add the lr_stateful_rec rec to the tracking data. */
@@ -509,6 +525,11 @@ lr_stateful_record_create(struct lr_stateful_table *table,
         build_lrouter_lb_reachable_ips(lr_stateful_rec, od, lb_dps->lb);
     }
 
+    sset_init(&lr_stateful_rec->vip_nats);
+
+    if (nbr->n_nat) {
+        lr_stateful_rebuild_vip_nats(lr_stateful_rec);
+    }
     lr_stateful_rec->has_lb_vip = od_has_lb_vip(od);
 
     hmap_insert(&table->entries, &lr_stateful_rec->key_node,
@@ -523,6 +544,7 @@ static void
 lr_stateful_record_destroy(struct lr_stateful_record *lr_stateful_rec)
 {
     ovn_lb_ip_set_destroy(lr_stateful_rec->lb_ips);
+    sset_destroy(&lr_stateful_rec->vip_nats);
     lflow_ref_destroy(lr_stateful_rec->lflow_ref);
     free(lr_stateful_rec);
 }
@@ -637,3 +659,32 @@ remove_lrouter_lb_reachable_ips(struct lr_stateful_record *lr_stateful_rec,
                              ip_address);
     }
 }
+
+static bool
+lr_stateful_rebuild_vip_nats(struct lr_stateful_record *lr_stateful_rec)
+{
+    struct sset old_vip_nats = SSET_INITIALIZER(&old_vip_nats);
+    sset_swap(&lr_stateful_rec->vip_nats, &old_vip_nats);
+
+    const char *external_ip;
+    SSET_FOR_EACH (external_ip, &lr_stateful_rec->lrnat_rec->external_ips) {
+        bool is_vip_nat = false;
+        if (addr_is_ipv6(external_ip)) {
+            is_vip_nat = sset_contains(&lr_stateful_rec->lb_ips->ips_v6,
+                                       external_ip);
+        } else {
+            is_vip_nat = sset_contains(&lr_stateful_rec->lb_ips->ips_v4,
+                                       external_ip);
+        }
+
+        if (is_vip_nat) {
+            sset_add(&lr_stateful_rec->vip_nats, external_ip);
+        }
+    }
+
+    bool vip_nats_changed = !sset_equals(&lr_stateful_rec->vip_nats,
+                                         &old_vip_nats);
+    sset_destroy(&old_vip_nats);
+
+    return vip_nats_changed;
+}
diff --git a/northd/en-lr-stateful.h b/northd/en-lr-stateful.h
index 2864ebc73fd7..75975c93503a 100644
--- a/northd/en-lr-stateful.h
+++ b/northd/en-lr-stateful.h
@@ -62,6 +62,9 @@ struct lr_stateful_record {
     /* Load Balancer vIPs relevant for this datapath. */
     struct ovn_lb_ip_set *lb_ips;
 
+    /* sset of vips which are also part of lr nats. */
+    struct sset vip_nats;
+
     /* 'lflow_ref' is used to reference logical flows generated for
      * this lr_stateful_record.
      *
@@ -104,6 +107,10 @@ struct lr_stateful_table {
 struct lr_stateful_tracked_data {
     /* Created or updated logical router with LB and/or NAT data. */
     struct hmapx crupdated; /* Stores 'struct lr_stateful_record'. */
+
+    /* Indicates if any router's NATs changed which were also LB vips
+     * or vice versa. */
+    bool vip_nats_changed;
 };
 
 struct ed_type_lr_stateful {
@@ -138,7 +145,7 @@ const struct lr_stateful_record *lr_stateful_table_find_by_index(
 static inline bool
 lr_stateful_has_tracked_data(struct lr_stateful_tracked_data *trk_data)
 {
-    return !hmapx_is_empty(&trk_data->crupdated);
+    return !hmapx_is_empty(&trk_data->crupdated) || trk_data->vip_nats_changed;
 }
 
 static inline bool
diff --git a/northd/northd.c b/northd/northd.c
index 2da94e7e19e4..dab9032f84d9 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -12489,6 +12489,7 @@ build_lrouter_nat_flows_for_lb(
     struct ds skip_snat_act = DS_EMPTY_INITIALIZER;
     struct ds force_snat_act = DS_EMPTY_INITIALIZER;
     struct ds undnat_match = DS_EMPTY_INITIALIZER;
+    struct ds unsnat_match = DS_EMPTY_INITIALIZER;
     struct ds gw_redir_action = DS_EMPTY_INITIALIZER;
 
     ds_clear(match);
@@ -12534,6 +12535,13 @@ build_lrouter_nat_flows_for_lb(
     /* Remove the trailing " || ". */
     ds_truncate(&undnat_match, undnat_match.length - 4);
 
+    ds_put_format(&unsnat_match, "%s && %s.dst == %s && %s",
+                  ip_match, ip_match, lb_vip->vip_str, lb->proto);
+    if (lb_vip->port_str) {
+        ds_put_format(&unsnat_match, " && %s.dst == %s", lb->proto,
+                      lb_vip->port_str);
+    }
+
     struct lrouter_nat_lb_flows_ctx ctx = {
         .lb_vip = lb_vip,
         .lb = lb,
@@ -12602,6 +12610,23 @@ build_lrouter_nat_flows_for_lb(
         if (lb->affinity_timeout) {
             bitmap_set1(aff_dp_bitmap[type], index);
         }
+
+        if (sset_contains(&lrnat_rec->external_ips, lb_vip->vip_str)) {
+            /* The load balancer vip is also present in the NAT entries.
+             * So add a high priority lflow to advance the the packet
+             * destined to the vip (and the vip port if defined)
+             * in the S_ROUTER_IN_UNSNAT stage.
+             * There seems to be an issue with ovs-vswitchd. When the new
+             * connection packet destined for the lb vip is received,
+             * it is dnat'ed in the S_ROUTER_IN_DNAT stage in the dnat
+             * conntrack zone. For the next packet, if it goes through
+             * unsnat stage, the conntrack flags are not set properly, and
+             * it doesn't hit the established state flows in
+             * S_ROUTER_IN_DNAT stage. */
+            ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_UNSNAT, 120,
+                                    ds_cstr(&unsnat_match), "next;",
+                                    &lb->nlb->header_, lb_dps->lflow_ref);
+        }
     }
 
     for (size_t type = 0; type < LROUTER_NAT_LB_FLOW_MAX; type++) {
@@ -12613,6 +12638,7 @@ build_lrouter_nat_flows_for_lb(
                                    lr_datapaths, lb_dps->lflow_ref);
     }
 
+    ds_destroy(&unsnat_match);
     ds_destroy(&undnat_match);
     ds_destroy(&skip_snat_act);
     ds_destroy(&force_snat_act);
@@ -17253,7 +17279,12 @@ build_lrouter_nat_defrag_and_lb(
                                    stateless, lflow_ref);
 
         /* ARP resolve for NAT IPs. */
-        if (!od->is_gw_router) {
+        if (od->is_gw_router) {
+            /* Add the NAT external_ip to the nat_entries for
+             * gateway routers. This is required for adding load balancer
+             * flows.*/
+            sset_add(&nat_entries, nat->external_ip);
+        } else {
             if (!sset_contains(&nat_entries, nat->external_ip)) {
                 /* Drop packets coming in from external that still has
                  * destination IP equals to the NAT external IP, to avoid loop.
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index 97eaa6a40357..cd28a0da4d74 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -1723,6 +1723,9 @@ AT_CAPTURE_FILE([sbflows])
 # dnat_and_snat or snat entry.
 AT_CHECK([grep "lr_in_unsnat" sbflows | ovn_strip_lflows], [0], [dnl
   table=??(lr_in_unsnat       ), priority=0    , match=(1), action=(next;)
+  table=??(lr_in_unsnat       ), priority=120  , match=(ip4 && ip4.dst == 192.168.2.1 && tcp && tcp.dst == 8080), action=(next;)
+  table=??(lr_in_unsnat       ), priority=120  , match=(ip4 && ip4.dst == 192.168.2.4 && udp && udp.dst == 8080), action=(next;)
+  table=??(lr_in_unsnat       ), priority=120  , match=(ip4 && ip4.dst == 192.168.2.5 && tcp && tcp.dst == 8080), action=(next;)
  table=??(lr_in_unsnat       ), priority=90   , match=(ip && ip4.dst == 192.168.2.1), action=(ct_snat;)
  table=??(lr_in_unsnat       ), priority=90   , match=(ip && ip4.dst == 192.168.2.4), action=(ct_snat;)
 ])
@@ -1753,6 +1756,9 @@ AT_CAPTURE_FILE([sbflows])
 # dnat_and_snat or snat entry.
 AT_CHECK([grep "lr_in_unsnat" sbflows | ovn_strip_lflows], [0], [dnl
   table=??(lr_in_unsnat       ), priority=0    , match=(1), action=(next;)
+  table=??(lr_in_unsnat       ), priority=120  , match=(ip4 && ip4.dst == 192.168.2.1 && tcp && tcp.dst == 8080), action=(next;)
+  table=??(lr_in_unsnat       ), priority=120  , match=(ip4 && ip4.dst == 192.168.2.4 && udp && udp.dst == 8080), action=(next;)
+  table=??(lr_in_unsnat       ), priority=120  , match=(ip4 && ip4.dst == 192.168.2.5 && tcp && tcp.dst == 8080), action=(next;)
  table=??(lr_in_unsnat       ), priority=90   , match=(ip && ip4.dst == 192.168.2.1), action=(ct_snat;)
  table=??(lr_in_unsnat       ), priority=90   , match=(ip && ip4.dst == 192.168.2.4), action=(ct_snat;)
 ])
@@ -6296,6 +6302,7 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | ovn_strip_lflows], [0], [dnl
   table=??(lr_in_unsnat       ), priority=0    , match=(1), action=(next;)
  table=??(lr_in_unsnat       ), priority=110  , match=(inport == "lr0-public" && ip4.dst == 172.168.0.10), action=(ct_snat;)
  table=??(lr_in_unsnat       ), priority=110  , match=(inport == "lr0-sw0" && ip4.dst == 10.0.0.1), action=(ct_snat;)
+  table=??(lr_in_unsnat       ), priority=120  , match=(ip4 && ip4.dst == 172.168.0.10 && tcp && tcp.dst == 9082), action=(next;)
  table=??(lr_in_unsnat       ), priority=90   , match=(ip && ip4.dst == 172.168.0.10), action=(ct_snat;)
  table=??(lr_in_unsnat       ), priority=90   , match=(ip && ip4.dst == 172.168.0.20), action=(ct_snat;)
  table=??(lr_in_unsnat       ), priority=90   , match=(ip && ip4.dst == 172.168.0.30), action=(ct_snat;)
@@ -6374,6 +6381,7 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | ovn_strip_lflows], [0], [dnl
  table=??(lr_in_unsnat       ), priority=110  , match=(inport == "lr0-public" && ip6.dst == def0::10), action=(ct_snat;)
  table=??(lr_in_unsnat       ), priority=110  , match=(inport == "lr0-sw0" && ip4.dst == 10.0.0.1), action=(ct_snat;)
  table=??(lr_in_unsnat       ), priority=110  , match=(inport == "lr0-sw0" && ip6.dst == aef0::1), action=(ct_snat;)
+  table=??(lr_in_unsnat       ), priority=120  , match=(ip4 && ip4.dst == 172.168.0.10 && tcp && tcp.dst == 9082), action=(next;)
  table=??(lr_in_unsnat       ), priority=90   , match=(ip && ip4.dst == 172.168.0.10), action=(ct_snat;)
  table=??(lr_in_unsnat       ), priority=90   , match=(ip && ip4.dst == 172.168.0.20), action=(ct_snat;)
  table=??(lr_in_unsnat       ), priority=90   , match=(ip && ip4.dst == 172.168.0.30), action=(ct_snat;)
@@ -12733,7 +12741,7 @@ check ovn-nbctl lb-add lb2 172.168.0.150:80 10.0.0.40:8080
 check ovn-nbctl lr-lb-add lr0 lb1
 check ovn-nbctl lr-lb-add lr0 lb2
 
-# lflow engine should not recompute even if the nat ip 172.168.0.140
+# lflow engine should recompute since the nat ip 172.168.0.140
 # is a lb vip.
 check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
 check ovn-nbctl --wait=sb lr-nat-add lr0 dnat_and_snat 172.168.0.140 10.0.0.20
@@ -12742,10 +12750,10 @@ check_engine_stats lr_nat norecompute compute
 check_engine_stats lr_stateful norecompute compute
 check_engine_stats sync_to_sb_pb recompute nocompute
 check_engine_stats sync_to_sb_lb norecompute compute
-check_engine_stats lflow norecompute compute
+check_engine_stats lflow recompute nocompute
 CHECK_NO_CHANGE_AFTER_RECOMPUTE
 
-# lflow engine should not recompute even if the nat ip 172.168.0.150
+# lflow engine should recompute since the nat ip 172.168.0.150
 # is a lb vip.
 check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
 check ovn-nbctl --wait=sb lr-nat-add lr0 dnat_and_snat 172.168.0.150 10.0.0.41
@@ -12754,10 +12762,10 @@ check_engine_stats lr_nat norecompute compute
 check_engine_stats lr_stateful norecompute compute
 check_engine_stats sync_to_sb_pb recompute nocompute
 check_engine_stats sync_to_sb_lb norecompute compute
-check_engine_stats lflow norecompute compute
+check_engine_stats lflow recompute nocompute
 CHECK_NO_CHANGE_AFTER_RECOMPUTE
 
-# lflow engine should not recompute even if the deleted nat ip 172.168.0.150
+# lflow engine should recompute since the deleted nat ip 172.168.0.150
 # is a lb vip.
 check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
 check ovn-nbctl --wait=sb lr-nat-del lr0 dnat_and_snat 172.168.0.150
@@ -12766,10 +12774,10 @@ check_engine_stats lr_nat norecompute compute
 check_engine_stats lr_stateful norecompute compute
 check_engine_stats sync_to_sb_pb recompute nocompute
 check_engine_stats sync_to_sb_lb norecompute compute
-check_engine_stats lflow norecompute compute
+check_engine_stats lflow recompute nocompute
 CHECK_NO_CHANGE_AFTER_RECOMPUTE
 
-# lflow engine should not recompute even if the deleted nat ip 172.168.0.140
+# lflow engine should recompute since the deleted nat ip 172.168.0.140
 # is a lb vip.
 check as northd ovn-appctl -t ovn-northd inc-engine/clear-stats
 check ovn-nbctl --wait=sb lr-nat-del lr0 dnat_and_snat 172.168.0.140
@@ -12778,7 +12786,7 @@ check_engine_stats lr_nat norecompute compute
 check_engine_stats lr_stateful norecompute compute
 check_engine_stats sync_to_sb_pb recompute nocompute
 check_engine_stats sync_to_sb_lb norecompute compute
-check_engine_stats lflow norecompute compute
+check_engine_stats lflow recompute nocompute
 CHECK_NO_CHANGE_AFTER_RECOMPUTE
 
 # Delete the NAT
diff --git a/tests/system-ovn-kmod.at b/tests/system-ovn-kmod.at
index 438908335997..07d5b9149295 100644
--- a/tests/system-ovn-kmod.at
+++ b/tests/system-ovn-kmod.at
@@ -1630,3 +1630,127 @@ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
 /connection dropped.*/d"])
 AT_CLEANUP
 ])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([load balancing in gateway router - ensure hw-offload with LB and SNAT])
+AT_SKIP_IF([test $HAVE_NC = no])
+AT_KEYWORDS([lb])
+
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+
+check ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+start_daemon ovn-controller
+
+check ovn-nbctl lr-add lr \
+    -- set logical_router lr options:chassis=hv1
+check ovn-nbctl lrp-add lr lr-ls1 00:00:00:00:01:00 41.41.41.2/24
+check ovn-nbctl lrp-add lr lr-ls2 00:00:00:00:02:00 42.42.42.2/24
+check ovn-nbctl ls-add ls1
+check ovn-nbctl ls-add ls2
+
+check ovn-nbctl lsp-add ls1 ls1-lr
+check ovn-nbctl lsp-set-addresses ls1-lr 00:00:00:00:01:00
+check ovn-nbctl lsp-set-type ls1-lr router
+check ovn-nbctl lsp-set-options ls1-lr router-port=lr-ls1
+check ovn-nbctl lsp-add ls1 vm1
+check ovn-nbctl lsp-set-addresses vm1 00:00:00:00:00:01
+
+check ovn-nbctl lsp-add ls2 ls2-lr
+check ovn-nbctl lsp-set-addresses ls2-lr 00:00:00:00:02:00
+check ovn-nbctl lsp-set-type ls2-lr router
+check ovn-nbctl lsp-set-options ls2-lr router-port=lr-ls2
+check ovn-nbctl lsp-add ls2 vm2
+check ovn-nbctl lsp-set-addresses vm2 00:00:00:00:00:02
+
+dnl LB using the router IP connected to vm2 as VIP.
+check ovn-nbctl lb-add lb-test 42.42.42.2:8080 41.41.41.1:8080 tcp \
+    -- lr-lb-add lr lb-test
+
+dnl SNAT everything coming from vm1 to the router IP (towards vm2).
+check ovn-nbctl lr-nat-add lr snat 42.42.42.2 41.41.41.1
+
+ADD_NAMESPACES(vm1)
+ADD_VETH(vm1, vm1, br-int, "41.41.41.1/24", "00:00:00:00:00:01", "41.41.41.2")
+
+ADD_NAMESPACES(vm2)
+ADD_VETH(vm2, vm2, br-int, "42.42.42.1/24", "00:00:00:00:00:02", "42.42.42.2")
+
+dnl Start a server on vm1.
+NETNS_DAEMONIZE([vm1], [nc -l -k 41.41.41.1 8080], [vm1.pid])
+
+dnl Wait for ovn-controller to catch up.
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
+
+dnl Test the connection to VIP in a loop so that we can capture all DP flows.
+for i in $(seq 10); do
+    NS_CHECK_EXEC([vm2], [nc -z 42.42.42.2 8080], 0, [ignore], [ignore])
+done
+
+dnl For each DP flow with a ct action, make sure a CT entry exists as
+dnl established for the zone in which the ct action is taken. Otherwise,
+dnl it means HW offload is broken.
+check_flow_assured() {
+    local flow_str="$1"
+    local src_ip dst_ip
+
+    # extract only unmasked IPv4 src/dst
+    ipv4_src_dst=$(awk -F 'ipv4\\(src=' '{ print $2 }' <<< "$flow_str" | awk -F ',proto=' '{print $1}')
+    src_ip=$(awk -F ',dst=' '{ print $1 }' <<< "$ipv4_src_dst")
+    dst_ip=$(awk -F ',dst=' '{ print $2 }' <<< "$ipv4_src_dst")
+
+    if test "${src_ip#*/}" != "$src_ip"; then
+        src_ip=""
+    fi
+    if test "${dst_ip#*/}" != "$dst_ip"; then
+        dst_ip=""
+    fi
+
+    if test -z "$src_ip" && test -z "$dst_ip"; then
+        return
+    fi
+
+    zone=$(awk -F 'zone=' '{ print $2 }' <<< "$flow_str" | awk -F ',' '{print $1}')
+    echo Check if a conntrack entry exists for src=$src_ip, dst=$dst_ip, zone=$zone
+
+    if test -n "$src_ip" && test -n "$dst_ip"; then
+        AT_CHECK([grep -F "$src_ip" conntrack_file | grep -F "$dst_ip" | grep "zone=$zone" | grep -q "ASSURED"])
+    elif test -n "$src_ip"; then
+        AT_CHECK([grep -F "$src_ip" conntrack_file | grep "zone=$zone" | grep -q "ASSURED"])
+    else
+        AT_CHECK([grep -F "$dst_ip" conntrack_file | grep "zone=$zone" | grep -q "ASSURED"])
+    fi
+}
+
+ovs-appctl dpctl/dump-conntrack -m > conntrack_file
+
+ovs-appctl dpctl/dump-flows -m \
+    | grep 'ct(' \
+    | while read -r flow; do
+        check_flow_assured "$flow"
+done
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([ovn-northd])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+AT_CLEANUP
+])
diff --git a/tests/system-ovn.at b/tests/system-ovn.at
index d6d5cada16f4..c2b70d3a1a3e 100644
--- a/tests/system-ovn.at
+++ b/tests/system-ovn.at
@@ -12570,86 +12570,6 @@ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
 /connection dropped.*/d"])
 AT_CLEANUP
 
-OVN_FOR_EACH_NORTHD([
-AT_SETUP([load balancing in gateway router - client behind LB with SNAT])
-AT_SKIP_IF([test $HAVE_NC = no])
-AT_KEYWORDS([lb])
-
-ovn_start
-OVS_TRAFFIC_VSWITCHD_START()
-ADD_BR([br-int])
-
-check ovs-vsctl \
-        -- set Open_vSwitch . external-ids:system-id=hv1 \
-        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
-        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
-        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
-        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
-
-start_daemon ovn-controller
-
-check ovn-nbctl lr-add lr \
-    -- set logical_router lr options:chassis=hv1
-check ovn-nbctl lrp-add lr lr-ls1 00:00:00:00:01:00 41.41.41.2/24
-check ovn-nbctl lrp-add lr lr-ls2 00:00:00:00:02:00 42.42.42.2/24
-check ovn-nbctl ls-add ls1
-check ovn-nbctl ls-add ls2
-
-check ovn-nbctl lsp-add ls1 ls1-lr
-check ovn-nbctl lsp-set-addresses ls1-lr 00:00:00:00:01:00
-check ovn-nbctl lsp-set-type ls1-lr router
-check ovn-nbctl lsp-set-options ls1-lr router-port=lr-ls1
-check ovn-nbctl lsp-add ls1 vm1
-check ovn-nbctl lsp-set-addresses vm1 00:00:00:00:00:01
-
-check ovn-nbctl lsp-add ls2 ls2-lr
-check ovn-nbctl lsp-set-addresses ls2-lr 00:00:00:00:02:00
-check ovn-nbctl lsp-set-type ls2-lr router
-check ovn-nbctl lsp-set-options ls2-lr router-port=lr-ls2
-check ovn-nbctl lsp-add ls2 vm2
-check ovn-nbctl lsp-set-addresses vm2 00:00:00:00:00:02
-
-dnl LB using the router IP connected to vm2 as VIP.
-check ovn-nbctl lb-add lb-test 42.42.42.2:8080 41.41.41.1:8080 tcp \
-    -- lr-lb-add lr lb-test
-
-dnl SNAT everything coming from vm1 to the router IP (towards vm2).
-check ovn-nbctl lr-nat-add lr snat 42.42.42.2 41.41.41.1
-
-ADD_NAMESPACES(vm1)
-ADD_VETH(vm1, vm1, br-int, "41.41.41.1/24", "00:00:00:00:00:01", "41.41.41.2")
-
-ADD_NAMESPACES(vm2)
-ADD_VETH(vm2, vm2, br-int, "42.42.42.1/24", "00:00:00:00:00:02", "42.42.42.2")
-
-dnl Start a server on vm2.
-NETNS_DAEMONIZE([vm2], [nc -l -k 42.42.42.1 80], [vm2.pid])
-
-dnl Wait for ovn-controller to catch up.
-wait_for_ports_up
-check ovn-nbctl --wait=hv sync
-
-dnl Test the connection originating something that uses the same source port
-dnl as the LB VIP.
-NS_CHECK_EXEC([vm1], [nc -z -p 8080 42.42.42.1 80], 0, [ignore], [ignore])
-
-OVN_CLEANUP_CONTROLLER([hv1])
-
-as ovn-sb
-OVS_APP_EXIT_AND_WAIT([ovsdb-server])
-
-as ovn-nb
-OVS_APP_EXIT_AND_WAIT([ovsdb-server])
-
-as northd
-OVS_APP_EXIT_AND_WAIT([ovn-northd])
-
-as
-OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
-/connection dropped.*/d"])
-AT_CLEANUP
-])
-
 OVN_FOR_EACH_NORTHD([
 AT_SETUP([load balancing affinity sessions - auto clear learnt flows])
 AT_SKIP_IF([test $HAVE_NC = no])
-- 
2.38.1
