Re: [ovs-dev] [PATCH 1/4] Userspace: Software fallback for UDP encapsulated TCP segmentation.

2024-02-12 Thread Mike Pattrick
On Mon, Feb 12, 2024 at 10:04 AM Ilya Maximets  wrote:
>
> On 2/12/24 15:19, Mike Pattrick wrote:
> > On Mon, Feb 12, 2024 at 7:52 AM Ilya Maximets  wrote:
> >>
> >> On 2/12/24 09:13, Mike Pattrick wrote:
> >>> When sending packets that are flagged as requiring segmentation to an
> >>> interface that doesn't support this feature, send the packet to the TSO
> >>> software fallback instead of dropping it.
> >>>
> >>> Signed-off-by: Mike Pattrick 
> >>>
> >>> ---
> >>> Note: Previously this patch failed gitlab ci, however, I was not able to
> >>> reproduce that failure in my VM. I'm resubmitting to see if the failure
> >>> was a fluke.
> >>
> >> Doesn't look like a fluke.  Tests seem to fail consistently...
> >
> > Yes. I'll continue to look into this. I've tried different
> > kernel/compiler versions but it consistently passes for me.
>
> There is always an option to add some debug stuff into the test and
> run GHA in your fork.  But that's the most annoying way of debugging,
> I agree.

That was a good idea, I never would have guessed the issue on my own.

-M

>
> Best regards, Ilya Maximets.
>

___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev


Re: [ovs-dev] [PATCH 1/4] Userspace: Software fallback for UDP encapsulated TCP segmentation.

2024-02-12 Thread Ilya Maximets
On 2/12/24 15:19, Mike Pattrick wrote:
> On Mon, Feb 12, 2024 at 7:52 AM Ilya Maximets  wrote:
>>
>> On 2/12/24 09:13, Mike Pattrick wrote:
>>> When sending packets that are flagged as requiring segmentation to an
> >> interface that doesn't support this feature, send the packet to the TSO
>>> software fallback instead of dropping it.
>>>
>>> Signed-off-by: Mike Pattrick 
>>>
>>> ---
>>> Note: Previously this patch failed gitlab ci, however, I was not able to
>>> reproduce that failure in my VM. I'm resubmitting to see if the failure
>>> was a fluke.
>>
>> Doesn't look like a fluke.  Tests seem to fail consistently...
> 
> Yes. I'll continue to look into this. I've tried different
> kernel/compiler versions but it consistently passes for me.

There is always an option to add some debug stuff into the test and
run GHA in your fork.  But that's the most annoying way of debugging,
I agree.

Best regards, Ilya Maximets.
___
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev


Re: [ovs-dev] [PATCH 1/4] Userspace: Software fallback for UDP encapsulated TCP segmentation.

2024-02-12 Thread Mike Pattrick
On Mon, Feb 12, 2024 at 7:52 AM Ilya Maximets  wrote:
>
> On 2/12/24 09:13, Mike Pattrick wrote:
> > When sending packets that are flagged as requiring segmentation to an
> > interface that doesn't support this feature, send the packet to the TSO
> > software fallback instead of dropping it.
> >
> > Signed-off-by: Mike Pattrick 
> >
> > ---
> > Note: Previously this patch failed gitlab ci, however, I was not able to
> > reproduce that failure in my VM. I'm resubmitting to see if the failure
> > was a fluke.
>
> Doesn't look like a fluke.  Tests seem to fail consistently...

Yes. I'll continue to look into this. I've tried different
kernel/compiler versions but it consistently passes for me.

-M

>
> >
> > ---
> >  lib/dp-packet-gso.c | 73 +
> >  lib/dp-packet.h | 26 +++
> >  lib/netdev-native-tnl.c |  8 +
> >  lib/netdev.c| 19 ---
> >  tests/system-traffic.at | 48 +++
> >  5 files changed, 142 insertions(+), 32 deletions(-)
> >
> > diff --git a/lib/dp-packet-gso.c b/lib/dp-packet-gso.c
> > index 847685ad9..f25abf436 100644
> > --- a/lib/dp-packet-gso.c
> > +++ b/lib/dp-packet-gso.c
> > @@ -47,6 +47,8 @@ dp_packet_gso_seg_new(const struct dp_packet *p, size_t 
> > hdr_len,
> >  seg->l2_5_ofs = p->l2_5_ofs;
> >  seg->l3_ofs = p->l3_ofs;
> >  seg->l4_ofs = p->l4_ofs;
> > +seg->inner_l3_ofs = p->inner_l3_ofs;
> > +seg->inner_l4_ofs = p->inner_l4_ofs;
> >
> >  /* The protocol headers remain the same, so preserve hash and mark. */
> >  *dp_packet_rss_ptr(seg) = *dp_packet_rss_ptr(p);
> > @@ -71,7 +73,12 @@ dp_packet_gso_nr_segs(struct dp_packet *p)
> >  const char *data_tail;
> >  const char *data_pos;
> >
> > -data_pos = dp_packet_get_tcp_payload(p);
> > +if (dp_packet_hwol_is_tunnel_vxlan(p) ||
> > +dp_packet_hwol_is_tunnel_geneve(p)) {
> > +data_pos = dp_packet_get_inner_tcp_payload(p);
> > +} else {
> > +data_pos = dp_packet_get_tcp_payload(p);
> > +}
> >  data_tail = (char *) dp_packet_tail(p) - dp_packet_l2_pad_size(p);
> >
> >  return DIV_ROUND_UP(data_tail - data_pos, segsz);
> > @@ -91,12 +98,15 @@ dp_packet_gso(struct dp_packet *p, struct 
> > dp_packet_batch **batches)
> >  struct tcp_header *tcp_hdr;
> >  struct ip_header *ip_hdr;
> >  struct dp_packet *seg;
> > +const char *data_pos;
> >  uint16_t tcp_offset;
> >  uint16_t tso_segsz;
> > +uint16_t ip_id = 0;
> >  uint32_t tcp_seq;
> > -uint16_t ip_id;
> > +bool outer_ipv4;
> >  int hdr_len;
> >  int seg_len;
> > +bool tnl;
> >
> >  tso_segsz = dp_packet_get_tso_segsz(p);
> >  if (!tso_segsz) {
> > @@ -105,20 +115,35 @@ dp_packet_gso(struct dp_packet *p, struct 
> > dp_packet_batch **batches)
> >  return false;
> >  }
> >
> > -tcp_hdr = dp_packet_l4(p);
> > -tcp_offset = TCP_OFFSET(tcp_hdr->tcp_ctl);
> >>> -tcp_seq = ntohl(get_16aligned_be32(&tcp_hdr->tcp_seq));
> > -hdr_len = ((char *) dp_packet_l4(p) - (char *) dp_packet_eth(p))
> > -  + tcp_offset * 4;
> > -ip_id = 0;
> > -if (dp_packet_hwol_is_ipv4(p)) {
> > +if (dp_packet_hwol_is_tunnel_vxlan(p) ||
> > +dp_packet_hwol_is_tunnel_geneve(p)) {
> > +data_pos =  dp_packet_get_inner_tcp_payload(p);
> > +outer_ipv4 = dp_packet_hwol_is_outer_ipv4(p);
> > +tcp_hdr = dp_packet_inner_l4(p);
> > +ip_hdr = dp_packet_inner_l3(p);
> > +tnl = true;
> > +if (outer_ipv4) {
> > +ip_id = ntohs(((struct ip_header *) dp_packet_l3(p))->ip_id);
> > +} else if (dp_packet_hwol_is_ipv4(p)) {
> > +ip_id = ntohs(ip_hdr->ip_id);
> > +}
> > +} else {
> > +data_pos = dp_packet_get_tcp_payload(p);
> > +outer_ipv4 = dp_packet_hwol_is_ipv4(p);
> > +tcp_hdr = dp_packet_l4(p);
> >  ip_hdr = dp_packet_l3(p);
> > -ip_id = ntohs(ip_hdr->ip_id);
> > +tnl = false;
> > +if (outer_ipv4) {
> > +ip_id = ntohs(ip_hdr->ip_id);
> > +}
> >  }
> >
> > +tcp_offset = TCP_OFFSET(tcp_hdr->tcp_ctl);
> >>> +tcp_seq = ntohl(get_16aligned_be32(&tcp_hdr->tcp_seq));
> > +hdr_len = ((char *) tcp_hdr - (char *) dp_packet_eth(p))
> > +  + tcp_offset * 4;
> >  const char *data_tail = (char *) dp_packet_tail(p)
> >  - dp_packet_l2_pad_size(p);
> > -const char *data_pos = dp_packet_get_tcp_payload(p);
> >  int n_segs = dp_packet_gso_nr_segs(p);
> >
> >  for (int i = 0; i < n_segs; i++) {
> > @@ -130,8 +155,26 @@ dp_packet_gso(struct dp_packet *p, struct 
> > dp_packet_batch **batches)
> >  seg = dp_packet_gso_seg_new(p, hdr_len, data_pos, seg_len);
> >  data_pos += seg_len;
> >
> > +if (tnl) {
> > +/* Update tunnel L3 header. */
> > +if (dp_packet_hwol_is_ipv4(seg)) {
> > +   

Re: [ovs-dev] [PATCH 1/4] Userspace: Software fallback for UDP encapsulated TCP segmentation.

2024-02-12 Thread Ilya Maximets
On 2/12/24 09:13, Mike Pattrick wrote:
> When sending packets that are flagged as requiring segmentation to an
> interface that doesn't support this feature, send the packet to the TSO
> software fallback instead of dropping it.
> 
> Signed-off-by: Mike Pattrick 
> 
> ---
> Note: Previously this patch failed gitlab ci, however, I was not able to
> reproduce that failure in my VM. I'm resubmitting to see if the failure
> was a fluke.

Doesn't look like a fluke.  Tests seem to fail consistently...

> 
> ---
>  lib/dp-packet-gso.c | 73 +
>  lib/dp-packet.h | 26 +++
>  lib/netdev-native-tnl.c |  8 +
>  lib/netdev.c| 19 ---
>  tests/system-traffic.at | 48 +++
>  5 files changed, 142 insertions(+), 32 deletions(-)
> 
> diff --git a/lib/dp-packet-gso.c b/lib/dp-packet-gso.c
> index 847685ad9..f25abf436 100644
> --- a/lib/dp-packet-gso.c
> +++ b/lib/dp-packet-gso.c
> @@ -47,6 +47,8 @@ dp_packet_gso_seg_new(const struct dp_packet *p, size_t 
> hdr_len,
>  seg->l2_5_ofs = p->l2_5_ofs;
>  seg->l3_ofs = p->l3_ofs;
>  seg->l4_ofs = p->l4_ofs;
> +seg->inner_l3_ofs = p->inner_l3_ofs;
> +seg->inner_l4_ofs = p->inner_l4_ofs;
>  
>  /* The protocol headers remain the same, so preserve hash and mark. */
>  *dp_packet_rss_ptr(seg) = *dp_packet_rss_ptr(p);
> @@ -71,7 +73,12 @@ dp_packet_gso_nr_segs(struct dp_packet *p)
>  const char *data_tail;
>  const char *data_pos;
>  
> -data_pos = dp_packet_get_tcp_payload(p);
> +if (dp_packet_hwol_is_tunnel_vxlan(p) ||
> +dp_packet_hwol_is_tunnel_geneve(p)) {
> +data_pos = dp_packet_get_inner_tcp_payload(p);
> +} else {
> +data_pos = dp_packet_get_tcp_payload(p);
> +}
>  data_tail = (char *) dp_packet_tail(p) - dp_packet_l2_pad_size(p);
>  
>  return DIV_ROUND_UP(data_tail - data_pos, segsz);
> @@ -91,12 +98,15 @@ dp_packet_gso(struct dp_packet *p, struct dp_packet_batch 
> **batches)
>  struct tcp_header *tcp_hdr;
>  struct ip_header *ip_hdr;
>  struct dp_packet *seg;
> +const char *data_pos;
>  uint16_t tcp_offset;
>  uint16_t tso_segsz;
> +uint16_t ip_id = 0;
>  uint32_t tcp_seq;
> -uint16_t ip_id;
> +bool outer_ipv4;
>  int hdr_len;
>  int seg_len;
> +bool tnl;
>  
>  tso_segsz = dp_packet_get_tso_segsz(p);
>  if (!tso_segsz) {
> @@ -105,20 +115,35 @@ dp_packet_gso(struct dp_packet *p, struct 
> dp_packet_batch **batches)
>  return false;
>  }
>  
> -tcp_hdr = dp_packet_l4(p);
> -tcp_offset = TCP_OFFSET(tcp_hdr->tcp_ctl);
> > -tcp_seq = ntohl(get_16aligned_be32(&tcp_hdr->tcp_seq));
> -hdr_len = ((char *) dp_packet_l4(p) - (char *) dp_packet_eth(p))
> -  + tcp_offset * 4;
> -ip_id = 0;
> -if (dp_packet_hwol_is_ipv4(p)) {
> +if (dp_packet_hwol_is_tunnel_vxlan(p) ||
> +dp_packet_hwol_is_tunnel_geneve(p)) {
> +data_pos =  dp_packet_get_inner_tcp_payload(p);
> +outer_ipv4 = dp_packet_hwol_is_outer_ipv4(p);
> +tcp_hdr = dp_packet_inner_l4(p);
> +ip_hdr = dp_packet_inner_l3(p);
> +tnl = true;
> +if (outer_ipv4) {
> +ip_id = ntohs(((struct ip_header *) dp_packet_l3(p))->ip_id);
> +} else if (dp_packet_hwol_is_ipv4(p)) {
> +ip_id = ntohs(ip_hdr->ip_id);
> +}
> +} else {
> +data_pos = dp_packet_get_tcp_payload(p);
> +outer_ipv4 = dp_packet_hwol_is_ipv4(p);
> +tcp_hdr = dp_packet_l4(p);
>  ip_hdr = dp_packet_l3(p);
> -ip_id = ntohs(ip_hdr->ip_id);
> +tnl = false;
> +if (outer_ipv4) {
> +ip_id = ntohs(ip_hdr->ip_id);
> +}
>  }
>  
> +tcp_offset = TCP_OFFSET(tcp_hdr->tcp_ctl);
> > +tcp_seq = ntohl(get_16aligned_be32(&tcp_hdr->tcp_seq));
> +hdr_len = ((char *) tcp_hdr - (char *) dp_packet_eth(p))
> +  + tcp_offset * 4;
>  const char *data_tail = (char *) dp_packet_tail(p)
>  - dp_packet_l2_pad_size(p);
> -const char *data_pos = dp_packet_get_tcp_payload(p);
>  int n_segs = dp_packet_gso_nr_segs(p);
>  
>  for (int i = 0; i < n_segs; i++) {
> @@ -130,8 +155,26 @@ dp_packet_gso(struct dp_packet *p, struct 
> dp_packet_batch **batches)
>  seg = dp_packet_gso_seg_new(p, hdr_len, data_pos, seg_len);
>  data_pos += seg_len;
>  
> +if (tnl) {
> +/* Update tunnel L3 header. */
> +if (dp_packet_hwol_is_ipv4(seg)) {
> +ip_hdr = dp_packet_inner_l3(seg);
> +ip_hdr->ip_tot_len = htons(sizeof *ip_hdr +
> +   dp_packet_inner_l4_size(seg));
> +ip_hdr->ip_id = htons(ip_id);
> +ip_hdr->ip_csum = 0;
> +ip_id++;
> +} else {
> +struct ovs_16aligned_ip6_hdr *ip6_hdr;
> +
> +  

[ovs-dev] [PATCH 1/4] Userspace: Software fallback for UDP encapsulated TCP segmentation.

2024-02-12 Thread Mike Pattrick
When sending packets that are flagged as requiring segmentation to an
interface that doesn't support this feature, send the packet to the TSO
software fallback instead of dropping it.

Signed-off-by: Mike Pattrick 

---
Note: Previously this patch failed gitlab ci, however, I was not able to
reproduce that failure in my VM. I'm resubmitting to see if the failure
was a fluke.

---
 lib/dp-packet-gso.c | 73 +
 lib/dp-packet.h | 26 +++
 lib/netdev-native-tnl.c |  8 +
 lib/netdev.c| 19 ---
 tests/system-traffic.at | 48 +++
 5 files changed, 142 insertions(+), 32 deletions(-)

diff --git a/lib/dp-packet-gso.c b/lib/dp-packet-gso.c
index 847685ad9..f25abf436 100644
--- a/lib/dp-packet-gso.c
+++ b/lib/dp-packet-gso.c
@@ -47,6 +47,8 @@ dp_packet_gso_seg_new(const struct dp_packet *p, size_t 
hdr_len,
 seg->l2_5_ofs = p->l2_5_ofs;
 seg->l3_ofs = p->l3_ofs;
 seg->l4_ofs = p->l4_ofs;
+seg->inner_l3_ofs = p->inner_l3_ofs;
+seg->inner_l4_ofs = p->inner_l4_ofs;
 
 /* The protocol headers remain the same, so preserve hash and mark. */
 *dp_packet_rss_ptr(seg) = *dp_packet_rss_ptr(p);
@@ -71,7 +73,12 @@ dp_packet_gso_nr_segs(struct dp_packet *p)
 const char *data_tail;
 const char *data_pos;
 
-data_pos = dp_packet_get_tcp_payload(p);
+if (dp_packet_hwol_is_tunnel_vxlan(p) ||
+dp_packet_hwol_is_tunnel_geneve(p)) {
+data_pos = dp_packet_get_inner_tcp_payload(p);
+} else {
+data_pos = dp_packet_get_tcp_payload(p);
+}
 data_tail = (char *) dp_packet_tail(p) - dp_packet_l2_pad_size(p);
 
 return DIV_ROUND_UP(data_tail - data_pos, segsz);
@@ -91,12 +98,15 @@ dp_packet_gso(struct dp_packet *p, struct dp_packet_batch 
**batches)
 struct tcp_header *tcp_hdr;
 struct ip_header *ip_hdr;
 struct dp_packet *seg;
+const char *data_pos;
 uint16_t tcp_offset;
 uint16_t tso_segsz;
+uint16_t ip_id = 0;
 uint32_t tcp_seq;
-uint16_t ip_id;
+bool outer_ipv4;
 int hdr_len;
 int seg_len;
+bool tnl;
 
 tso_segsz = dp_packet_get_tso_segsz(p);
 if (!tso_segsz) {
@@ -105,20 +115,35 @@ dp_packet_gso(struct dp_packet *p, struct dp_packet_batch 
**batches)
 return false;
 }
 
-tcp_hdr = dp_packet_l4(p);
-tcp_offset = TCP_OFFSET(tcp_hdr->tcp_ctl);
> -tcp_seq = ntohl(get_16aligned_be32(&tcp_hdr->tcp_seq));
-hdr_len = ((char *) dp_packet_l4(p) - (char *) dp_packet_eth(p))
-  + tcp_offset * 4;
-ip_id = 0;
-if (dp_packet_hwol_is_ipv4(p)) {
+if (dp_packet_hwol_is_tunnel_vxlan(p) ||
+dp_packet_hwol_is_tunnel_geneve(p)) {
+data_pos =  dp_packet_get_inner_tcp_payload(p);
+outer_ipv4 = dp_packet_hwol_is_outer_ipv4(p);
+tcp_hdr = dp_packet_inner_l4(p);
+ip_hdr = dp_packet_inner_l3(p);
+tnl = true;
+if (outer_ipv4) {
+ip_id = ntohs(((struct ip_header *) dp_packet_l3(p))->ip_id);
+} else if (dp_packet_hwol_is_ipv4(p)) {
+ip_id = ntohs(ip_hdr->ip_id);
+}
+} else {
+data_pos = dp_packet_get_tcp_payload(p);
+outer_ipv4 = dp_packet_hwol_is_ipv4(p);
+tcp_hdr = dp_packet_l4(p);
 ip_hdr = dp_packet_l3(p);
-ip_id = ntohs(ip_hdr->ip_id);
+tnl = false;
+if (outer_ipv4) {
+ip_id = ntohs(ip_hdr->ip_id);
+}
 }
 
+tcp_offset = TCP_OFFSET(tcp_hdr->tcp_ctl);
> +tcp_seq = ntohl(get_16aligned_be32(&tcp_hdr->tcp_seq));
+hdr_len = ((char *) tcp_hdr - (char *) dp_packet_eth(p))
+  + tcp_offset * 4;
 const char *data_tail = (char *) dp_packet_tail(p)
 - dp_packet_l2_pad_size(p);
-const char *data_pos = dp_packet_get_tcp_payload(p);
 int n_segs = dp_packet_gso_nr_segs(p);
 
 for (int i = 0; i < n_segs; i++) {
@@ -130,8 +155,26 @@ dp_packet_gso(struct dp_packet *p, struct dp_packet_batch 
**batches)
 seg = dp_packet_gso_seg_new(p, hdr_len, data_pos, seg_len);
 data_pos += seg_len;
 
+if (tnl) {
+/* Update tunnel L3 header. */
+if (dp_packet_hwol_is_ipv4(seg)) {
+ip_hdr = dp_packet_inner_l3(seg);
+ip_hdr->ip_tot_len = htons(sizeof *ip_hdr +
+   dp_packet_inner_l4_size(seg));
+ip_hdr->ip_id = htons(ip_id);
+ip_hdr->ip_csum = 0;
+ip_id++;
+} else {
+struct ovs_16aligned_ip6_hdr *ip6_hdr;
+
+ip6_hdr = dp_packet_inner_l3(seg);
+ip6_hdr->ip6_ctlun.ip6_un1.ip6_un1_plen
+= htons(dp_packet_inner_l3_size(seg) - sizeof *ip6_hdr);
+}
+}
+
 /* Update L3 header. */
-if (dp_packet_hwol_is_ipv4(seg)) {
+if (outer_ipv4) {
 ip_hdr = dp_packet_l3(seg);