TX checksum offload, implemented via the MAPv4 checksum trailer, applies to
TCP / UDP packets which are not fragmented. The following needs to be
done to have the checksum computed in hardware -

1. Set the checksum start offset and insert offset.
2. Set the csum_enabled bit
3. Compute and set 1's complement of partial checksum field in
   transport header.

Signed-off-by: Subash Abhinov Kasiviswanathan <subas...@codeaurora.org>
---
 .../net/ethernet/qualcomm/rmnet/rmnet_handlers.c   |   8 ++
 drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h    |   2 +
 .../net/ethernet/qualcomm/rmnet/rmnet_map_data.c   | 120 +++++++++++++++++++++
 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c    |   1 +
 4 files changed, 131 insertions(+)

diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 
b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 3409458..601edec 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -141,11 +141,19 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
        additional_header_len = 0;
        required_headroom = sizeof(struct rmnet_map_header);
 
+       if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) {
+               additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
+               required_headroom += additional_header_len;
+       }
+
        if (skb_headroom(skb) < required_headroom) {
                if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
                        goto fail;
        }
 
+       if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
+               rmnet_map_checksum_uplink_packet(skb, orig_dev);
+
        map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
        if (!map_header)
                goto fail;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h 
b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index ca9f473..6ce31e2 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -89,5 +89,7 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct 
sk_buff *skb,
                                                  int hdrlen, int pad);
 void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
 int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+                                     struct net_device *orig_dev);
 
 #endif /* _RMNET_MAP_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 
b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 881c1dc..c74a6c5 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -171,6 +171,86 @@ static __sum16 *rmnet_map_get_csum_field(unsigned char 
protocol,
 }
 #endif
 
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+{
+       struct iphdr *ip4h = (struct iphdr *)iphdr;
+       void *txphdr;
+       u16 *csum;
+
+       txphdr = iphdr + ip4h->ihl * 4;
+
+       if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
+               csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
+               *csum = ~(*csum);
+       }
+}
+
+static void
+rmnet_map_ipv4_ul_csum_header(void *iphdr,
+                             struct rmnet_map_ul_csum_header *ul_header,
+                             struct sk_buff *skb)
+{
+       struct iphdr *ip4h = (struct iphdr *)iphdr;
+       __be16 *hdr = (__be16 *)ul_header, offset;
+
+       offset = htons((__force u16)(skb_transport_header(skb) -
+                                    (unsigned char *)iphdr));
+       ul_header->csum_start_offset = offset;
+       ul_header->csum_insert_offset = skb->csum_offset;
+       ul_header->csum_enabled = 1;
+       if (ip4h->protocol == IPPROTO_UDP)
+               ul_header->udp_ip4_ind = 1;
+       else
+               ul_header->udp_ip4_ind = 0;
+
+       /* Changing remaining fields to network order */
+       hdr++;
+       *hdr = htons((__force u16)*hdr);
+
+       skb->ip_summed = CHECKSUM_NONE;
+
+       rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+{
+       struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+       void *txphdr;
+       u16 *csum;
+
+       txphdr = ip6hdr + sizeof(struct ipv6hdr);
+
+       if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
+               csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
+               *csum = ~(*csum);
+       }
+}
+
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+                             struct rmnet_map_ul_csum_header *ul_header,
+                             struct sk_buff *skb)
+{
+       __be16 *hdr = (__be16 *)ul_header, offset;
+
+       offset = htons((__force u16)(skb_transport_header(skb) -
+                                    (unsigned char *)ip6hdr));
+       ul_header->csum_start_offset = offset;
+       ul_header->csum_insert_offset = skb->csum_offset;
+       ul_header->csum_enabled = 1;
+       ul_header->udp_ip4_ind = 0;
+
+       /* Changing remaining fields to network order */
+       hdr++;
+       *hdr = htons((__force u16)*hdr);
+
+       skb->ip_summed = CHECKSUM_NONE;
+
+       rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+}
+#endif
+
 /* Adds MAP header to front of skb->data
  * Padding is calculated and set appropriately in MAP header. Mux ID is
  * initialized to 0.
@@ -281,3 +361,43 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff 
*skb, u16 len)
 
        return 0;
 }
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+                                     struct net_device *orig_dev)
+{
+       struct rmnet_map_ul_csum_header *ul_header;
+       void *iphdr;
+
+       ul_header = (struct rmnet_map_ul_csum_header *)
+                   skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
+
+       if (unlikely(!(orig_dev->features &
+                    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
+               goto sw_csum;
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               iphdr = (char *)ul_header +
+                       sizeof(struct rmnet_map_ul_csum_header);
+
+               if (skb->protocol == htons(ETH_P_IP)) {
+                       rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+                       return;
+               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_IPV6)
+                       rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+                       return;
+#else
+                       goto sw_csum;
+#endif
+               }
+       }
+
+sw_csum:
+       ul_header->csum_start_offset = 0;
+       ul_header->csum_insert_offset = 0;
+       ul_header->csum_enabled = 0;
+       ul_header->udp_ip4_ind = 0;
+}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c 
b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 879a2e0..f7f57ce 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -189,6 +189,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
                return -EBUSY;
 
        rmnet_dev->hw_features = NETIF_F_RXCSUM;
+       rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 
        rc = register_netdevice(rmnet_dev);
        if (!rc) {
-- 
1.9.1

Reply via email to