Rather than dropping all pending Tx offloads on recirculation, preserve the
inner offloads (and mark the packet with the outer Tx offloads) when parsing
the packet again.
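
A simplified sketch (not a literal excerpt of the diff below) of how the
outer IPv4 header is now flagged when the packet is parsed again after a
tunnel push; all helpers shown are the ones touched or used by this patch:

    if (dp_packet_hwol_is_tunnel(packet)) {
        /* Re-parse after a tunnel push: request outer offloads, the
         * inner requests set before the push stay pending. */
        dp_packet_hwol_set_tx_outer_ipv4(packet);
        if (dp_packet_ip_checksum_good(packet)) {
            dp_packet_hwol_set_tx_outer_ipv4_csum(packet);
        }
    } else {
        dp_packet_hwol_set_tx_ipv4(packet);
        if (dp_packet_ip_checksum_good(packet)) {
            dp_packet_hwol_set_tx_ip_csum(packet);
        }
    }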

Fixes: c6538b443984 ("dpif-netdev: Fix crash due to tunnel offloading on recirculation.")
Fixes: 084c8087292c ("userspace: Support VXLAN and GENEVE TSO.")
Signed-off-by: David Marchand <[email protected]>
---
Changes since v1:
- rebased,
- dropped API change on miniflow_extract(), rely on tunnel offloading
  flag presence instead (see the sketch after this list),
- introduced dp_packet_reset_outer_offsets,
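
A minimal sketch of that flag-presence check and of the new helper, using
only functions from this patch (simplified, not a literal excerpt of the
flow.c hunk below):

    /* True when the packet still carries a pending tunnel Tx offload,
     * i.e. it is being re-parsed after a tunnel push. */
    bool tunneling = dp_packet_hwol_is_tunnel(packet);
    if (tunneling) {
        /* Keep the inner offsets and marks; only reset the outer ones. */
        dp_packet_reset_outer_offsets(packet);
    } else {
        dp_packet_reset_offsets(packet);
    }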

---
 lib/dp-packet.h   | 23 +++++++++++------------
 lib/dpif-netdev.c | 27 ---------------------------
 lib/flow.c        | 34 ++++++++++++++++++++++++++++------
 3 files changed, 39 insertions(+), 45 deletions(-)

diff --git a/lib/dp-packet.h b/lib/dp-packet.h
index f94a82b07c..0f487a4283 100644
--- a/lib/dp-packet.h
+++ b/lib/dp-packet.h
@@ -210,6 +210,7 @@ static inline void dp_packet_set_tso_segsz(struct dp_packet *, uint16_t);
 void *dp_packet_resize_l2(struct dp_packet *, int increment);
 void *dp_packet_resize_l2_5(struct dp_packet *, int increment);
 static inline void *dp_packet_eth(const struct dp_packet *);
+static inline void dp_packet_reset_outer_offsets(struct dp_packet *);
 static inline void dp_packet_reset_offsets(struct dp_packet *);
 static inline void dp_packet_reset_offload(struct dp_packet *);
 static inline uint16_t dp_packet_l2_pad_size(const struct dp_packet *);
@@ -433,15 +434,22 @@ dp_packet_eth(const struct dp_packet *b)
             ? dp_packet_data(b) : NULL;
 }
 
-/* Resets all layer offsets.  'l3' offset must be set before 'l2' can be
- * retrieved. */
+/* Resets all outer layer offsets. */
 static inline void
-dp_packet_reset_offsets(struct dp_packet *b)
+dp_packet_reset_outer_offsets(struct dp_packet *b)
 {
     b->l2_pad_size = 0;
     b->l2_5_ofs = UINT16_MAX;
     b->l3_ofs = UINT16_MAX;
     b->l4_ofs = UINT16_MAX;
+}
+
+/* Resets all layer offsets.  'l3' offset must be set before 'l2' can be
+ * retrieved. */
+static inline void
+dp_packet_reset_offsets(struct dp_packet *b)
+{
+    dp_packet_reset_outer_offsets(b);
     b->inner_l3_ofs = UINT16_MAX;
     b->inner_l4_ofs = UINT16_MAX;
 }
@@ -1316,15 +1324,6 @@ dp_packet_hwol_set_tunnel_gre(struct dp_packet *b)
     *dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_TUNNEL_GRE;
 }
 
-/* Clears tunnel offloading marks. */
-static inline void
-dp_packet_hwol_reset_tunnel(struct dp_packet *b)
-{
-    *dp_packet_ol_flags_ptr(b) &= ~(DP_PACKET_OL_TX_TUNNEL_VXLAN |
-                                    DP_PACKET_OL_TX_TUNNEL_GRE |
-                                    DP_PACKET_OL_TX_TUNNEL_GENEVE);
-}
-
 /* Mark packet 'b' as a tunnel packet with outer IPv4 header. */
 static inline void
 dp_packet_hwol_set_tx_outer_ipv4(struct dp_packet *b)
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index b572fab23d..11f82210a8 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -115,7 +115,6 @@ COVERAGE_DEFINE(datapath_drop_lock_error);
 COVERAGE_DEFINE(datapath_drop_userspace_action_error);
 COVERAGE_DEFINE(datapath_drop_tunnel_push_error);
 COVERAGE_DEFINE(datapath_drop_tunnel_pop_error);
-COVERAGE_DEFINE(datapath_drop_tunnel_tso_recirc);
 COVERAGE_DEFINE(datapath_drop_recirc_error);
 COVERAGE_DEFINE(datapath_drop_invalid_port);
 COVERAGE_DEFINE(datapath_drop_invalid_bond);
@@ -8923,32 +8922,6 @@ static void
 dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
                       struct dp_packet_batch *packets)
 {
-    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
-    size_t i, size = dp_packet_batch_size(packets);
-    struct dp_packet *packet;
-
-    DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, packets) {
-        if (dp_packet_hwol_is_tunnel(packet)) {
-            if (dp_packet_hwol_is_tso(packet)) {
-                /* Can't perform GSO in the middle of a pipeline. */
-                COVERAGE_INC(datapath_drop_tunnel_tso_recirc);
-                dp_packet_delete(packet);
-                VLOG_WARN_RL(&rl, "Recirculating tunnel packets with "
-                                  "TSO is not supported");
-                continue;
-            }
-            /* Have to fix all the checksums before re-parsing, because the
-             * packet will be treated as having a single set of headers. */
-            dp_packet_ol_send_prepare(packet, 0);
-            /* This packet must not be marked with anything tunnel-related. */
-            dp_packet_hwol_reset_tunnel(packet);
-            /* Clear inner offsets.  Other ones are collateral, but they will
-             * be re-initialized on re-parsing. */
-            dp_packet_reset_offsets(packet);
-        }
-        dp_packet_batch_refill(packets, packet, i);
-    }
-
     dp_netdev_input__(pmd, packets, true, 0);
 }
 
diff --git a/lib/flow.c b/lib/flow.c
index 0eb34892f2..93ee50f8df 100644
--- a/lib/flow.c
+++ b/lib/flow.c
@@ -804,6 +804,7 @@ miniflow_extract(struct dp_packet *packet, struct miniflow *dst)
     uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;
     uint8_t *ct_nw_proto_p = NULL;
     ovs_be16 ct_tp_src = 0, ct_tp_dst = 0;
+    bool tunneling;
 
     /* Metadata. */
     if (flow_tnl_dst_is_set(&md->tunnel)) {
@@ -857,7 +858,13 @@ miniflow_extract(struct dp_packet *packet, struct miniflow *dst)
 
     /* Initialize packet's layer pointer and offsets. */
     frame = data;
-    dp_packet_reset_offsets(packet);
+    tunneling = dp_packet_hwol_is_tunnel(packet);
+    if (tunneling) {
+        /* Preserve inner offsets from the previous parsing pass. */
+        dp_packet_reset_outer_offsets(packet);
+    } else {
+        dp_packet_reset_offsets(packet);
+    }
 
     if (packet_type == htonl(PT_ETH)) {
         /* Must have full Ethernet header to proceed. */
@@ -936,9 +943,16 @@ miniflow_extract(struct dp_packet *packet, struct miniflow *dst)
         nw_proto = nh->ip_proto;
         nw_frag = ipv4_get_nw_frag(nh);
         data_pull(&data, &size, ip_len);
-        dp_packet_hwol_set_tx_ipv4(packet);
-        if (dp_packet_ip_checksum_good(packet)) {
-            dp_packet_hwol_set_tx_ip_csum(packet);
+        if (tunneling) {
+            dp_packet_hwol_set_tx_outer_ipv4(packet);
+            if (dp_packet_ip_checksum_good(packet)) {
+                dp_packet_hwol_set_tx_outer_ipv4_csum(packet);
+            }
+        } else {
+            dp_packet_hwol_set_tx_ipv4(packet);
+            if (dp_packet_ip_checksum_good(packet)) {
+                dp_packet_hwol_set_tx_ip_csum(packet);
+            }
         }
     } else if (dl_type == htons(ETH_TYPE_IPV6)) {
         const struct ovs_16aligned_ip6_hdr *nh = data;
@@ -953,7 +967,11 @@ miniflow_extract(struct dp_packet *packet, struct miniflow *dst)
         }
         data_pull(&data, &size, sizeof *nh);
 
-        dp_packet_hwol_set_tx_ipv6(packet);
+        if (tunneling) {
+            dp_packet_hwol_set_tx_outer_ipv6(packet);
+        } else {
+            dp_packet_hwol_set_tx_ipv6(packet);
+        }
         plen = ntohs(nh->ip6_plen);
         dp_packet_set_l2_pad_size(packet, size - plen);
         size = plen;   /* Never pull padding. */
@@ -1078,7 +1096,11 @@ miniflow_extract(struct dp_packet *packet, struct miniflow *dst)
                 dp_packet_ol_l4_csum_check_partial(packet);
                 if (dp_packet_l4_checksum_good(packet)
                     || dp_packet_ol_l4_csum_partial(packet)) {
-                    dp_packet_hwol_set_csum_udp(packet);
+                    if (tunneling) {
+                        dp_packet_hwol_set_outer_udp_csum(packet);
+                    } else {
+                        dp_packet_hwol_set_csum_udp(packet);
+                    }
                 }
             }
         } else if (OVS_LIKELY(nw_proto == IPPROTO_SCTP)) {
-- 
2.47.1
