Some NIC drivers support the DEV_TX_OFFLOAD_MBUF_FAST_FREE offload
(the device supports an optimization for fast release of mbufs; when
set, the application must guarantee that, per queue, all mbufs come
from the same mempool and have refcnt = 1). In order to adapt to this
offload feature, we need to modify the existing fragmentation logic
(which attaches mbufs and is therefore fast — call it fast fragment
mode) and add fragmentation logic for the non-attach mbuf mode (slow
fragment mode). Add some test data for this modification.

Signed-off-by: Huichao Cai <chcch...@163.com>
---
 app/test/test_ipfrag.c               | 14 +++++++--
 lib/ip_frag/rte_ipv4_fragmentation.c | 56 +++++++++++++++++++++++++-----------
 2 files changed, 51 insertions(+), 19 deletions(-)

diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
index 610a86b..f5fe4b8 100644
--- a/app/test/test_ipfrag.c
+++ b/app/test/test_ipfrag.c
@@ -407,12 +407,20 @@ static void ut_teardown(void)
                                              pktid);
                }
 
-               if (tests[i].ipv == 4)
-                       len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
+               if (tests[i].ipv == 4) {
+                       if (i % 2)
+                               len = rte_ipv4_fragment_packet(b,
+                                                      pkts_out, BURST,
                                                       tests[i].mtu_size,
                                                       direct_pool,
                                                       indirect_pool);
-               else if (tests[i].ipv == 6)
+                       else
+                               len = rte_ipv4_fragment_packet(b,
+                                                      pkts_out, BURST,
+                                                          tests[i].mtu_size,
+                                                          direct_pool,
+                                                          direct_pool);
+               } else if (tests[i].ipv == 6)
                        len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
                                                       tests[i].mtu_size,
                                                       direct_pool,
diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c b/lib/ip_frag/rte_ipv4_fragmentation.c
index a562424..65bfad7 100644
--- a/lib/ip_frag/rte_ipv4_fragmentation.c
+++ b/lib/ip_frag/rte_ipv4_fragmentation.c
@@ -102,6 +102,11 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
  *   MBUF pool used for allocating direct buffers for the output fragments.
  * @param pool_indirect
  *   MBUF pool used for allocating indirect buffers for the output fragments.
+ *   If pool_indirect == pool_direct, this means that the fragment will
+ *   adapt to the DEV_TX_OFFLOAD_MBUF_FAST_FREE offload.
+ *   DEV_TX_OFFLOAD_MBUF_FAST_FREE: Device supports optimization
+ *   for fast release of mbufs. When set application must guarantee that
+ *   per-queue all mbufs comes from the same mempool and has refcnt = 1.
  * @return
  *   Upon successful completion - number of output fragments placed
  *   in the pkts_out array.
@@ -123,6 +128,7 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
        uint16_t frag_bytes_remaining;
        uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
        uint16_t ipopt_len;
+       bool is_fast_frag_mode = true;
 
        /*
         * Formal parameter checking.
@@ -133,6 +139,9 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
            unlikely(mtu_size < RTE_ETHER_MIN_MTU))
                return -EINVAL;
 
+       if (pool_indirect == pool_direct)
+               is_fast_frag_mode = false;
+
        in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
        header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
            RTE_IPV4_IHL_MULTIPLIER;
@@ -190,30 +199,45 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
                out_seg_prev = out_pkt;
                more_out_segs = 1;
                while (likely(more_out_segs && more_in_segs)) {
-                       struct rte_mbuf *out_seg = NULL;
                        uint32_t len;
 
-                       /* Allocate indirect buffer */
-                       out_seg = rte_pktmbuf_alloc(pool_indirect);
-                       if (unlikely(out_seg == NULL)) {
-                               rte_pktmbuf_free(out_pkt);
-                               __free_fragments(pkts_out, out_pkt_pos);
-                               return -ENOMEM;
-                       }
-                       out_seg_prev->next = out_seg;
-                       out_seg_prev = out_seg;
-
-                       /* Prepare indirect buffer */
-                       rte_pktmbuf_attach(out_seg, in_seg);
                        len = frag_bytes_remaining;
                        if (len > (in_seg->data_len - in_seg_data_pos)) {
                                len = in_seg->data_len - in_seg_data_pos;
                        }
-                       out_seg->data_off = in_seg->data_off + in_seg_data_pos;
-                       out_seg->data_len = (uint16_t)len;
+
+                       if (is_fast_frag_mode) {
+                               struct rte_mbuf *out_seg = NULL;
+                               /* Allocate indirect buffer */
+                               out_seg = rte_pktmbuf_alloc(pool_indirect);
+                               if (unlikely(out_seg == NULL)) {
+                                       rte_pktmbuf_free(out_pkt);
+                                       __free_fragments(pkts_out, out_pkt_pos);
+                                       return -ENOMEM;
+                               }
+                               out_seg_prev->next = out_seg;
+                               out_seg_prev = out_seg;
+
+                               /* Prepare indirect buffer */
+                               rte_pktmbuf_attach(out_seg, in_seg);
+
+                               out_seg->data_off = in_seg->data_off +
+                                       in_seg_data_pos;
+                               out_seg->data_len = (uint16_t)len;
+                               out_pkt->nb_segs += 1;
+                       } else {
+                               rte_memcpy(
+                                   rte_pktmbuf_mtod_offset(out_pkt, char *,
+                                           out_pkt->pkt_len),
+                                   rte_pktmbuf_mtod_offset(in_seg, char *,
+                                           in_seg_data_pos),
+                                   len);
+                               out_pkt->data_len = (uint16_t)(len +
+                                   out_pkt->data_len);
+                       }
+
                        out_pkt->pkt_len = (uint16_t)(len +
                            out_pkt->pkt_len);
-                       out_pkt->nb_segs += 1;
                        in_seg_data_pos += len;
                        frag_bytes_remaining -= len;
 
-- 
1.8.3.1

Reply via email to