Because of hardware constraints, the hns3 network engine doesn't support
sending packets with more than eight fragments, and the hns3 PMD driver
tries to reassemble such packets to meet the hardware requirements.
Currently, there are two problems:
1) When the input buffer_len * 8 < pkt_len, the packets cannot be
   reassembled into 8 buffer descriptors. In this case, the packets will
   be passed to the hardware, which eventually causes a hardware reset.
2) The metadata in the original packets that is required to fill the
   descriptor hasn't been copied into the reassembled packets.

This patch adds a check for 1) to ensure that such packets will be dropped
by the driver, and copies the useful metadata from the original packets to
the reassembled packets.

Fixes: bba636698316 ("net/hns3: support Rx/Tx and related operations")
Cc: sta...@dpdk.org

Signed-off-by: Chengchang Tang <tangchengch...@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.hu...@huawei.com>
Signed-off-by: Chengwen Feng <fengcheng...@huawei.com>
---
 drivers/net/hns3/hns3_rxtx.c | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)

diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 931d89a..8892fc1 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -2095,6 +2095,20 @@ hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct 
rte_mempool *mb_pool,
        return 0;
 }
 
+static inline void
+hns3_pktmbuf_copy_hdr(struct rte_mbuf *new_pkt, struct rte_mbuf *old_pkt)
+{
+       new_pkt->ol_flags = old_pkt->ol_flags;
+       new_pkt->pkt_len = rte_pktmbuf_pkt_len(old_pkt);
+       new_pkt->outer_l2_len = old_pkt->outer_l2_len;
+       new_pkt->outer_l3_len = old_pkt->outer_l3_len;
+       new_pkt->l2_len = old_pkt->l2_len;
+       new_pkt->l3_len = old_pkt->l3_len;
+       new_pkt->l4_len = old_pkt->l4_len;
+       new_pkt->vlan_tci_outer = old_pkt->vlan_tci_outer;
+       new_pkt->vlan_tci = old_pkt->vlan_tci;
+}
+
 static int
 hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt,
                        struct rte_mbuf **new_pkt)
@@ -2118,9 +2132,11 @@ hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf 
*tx_pkt,
 
        mb_pool = tx_pkt->pool;
        buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM;
-       nb_new_buf = (tx_pkt->pkt_len - 1) / buf_size + 1;
+       nb_new_buf = (rte_pktmbuf_pkt_len(tx_pkt) - 1) / buf_size + 1;
+       if (nb_new_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)
+               return -EINVAL;
 
-       last_buf_len = tx_pkt->pkt_len % buf_size;
+       last_buf_len = rte_pktmbuf_pkt_len(tx_pkt) % buf_size;
        if (last_buf_len == 0)
                last_buf_len = buf_size;
 
@@ -2132,7 +2148,7 @@ hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf 
*tx_pkt,
        /* Copy the original packet content to the new mbufs */
        temp = tx_pkt;
        s = rte_pktmbuf_mtod(temp, char *);
-       len_s = temp->data_len;
+       len_s = rte_pktmbuf_data_len(temp);
        temp_new = new_mbuf;
        for (i = 0; i < nb_new_buf; i++) {
                d = rte_pktmbuf_mtod(temp_new, char *);
@@ -2155,13 +2171,14 @@ hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf 
*tx_pkt,
                                if (temp == NULL)
                                        break;
                                s = rte_pktmbuf_mtod(temp, char *);
-                               len_s = temp->data_len;
+                               len_s = rte_pktmbuf_data_len(temp);
                        }
                }
 
                temp_new->data_len = buf_len;
                temp_new = temp_new->next;
        }
+       hns3_pktmbuf_copy_hdr(new_mbuf, tx_pkt);
 
        /* free original mbufs */
        rte_pktmbuf_free(tx_pkt);
-- 
2.7.4

Reply via email to