net/mlx5: add non-temporal load and temporal store for MPRQ memcpy.
Setting CONFIG_RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY=y in the
DPDK configuration enables this optimization.

Signed-off-by: Aman Kumar <aman.ku...@vvdntech.in>
---
 config/common_base                      |  1 +
 drivers/net/mlx5/mlx5.c                 | 12 ++++
 drivers/net/mlx5/mlx5.h                 |  3 +
 drivers/net/mlx5/mlx5_rxq.c             |  3 +
 drivers/net/mlx5/mlx5_rxtx.c            | 17 ++++-
 drivers/net/mlx5/mlx5_rxtx.h            |  3 +
 lib/librte_eal/x86/include/rte_memcpy.h | 92 +++++++++++++++++++++++++
 7 files changed, 129 insertions(+), 2 deletions(-)

diff --git a/config/common_base b/config/common_base
index fbf0ee70c..1476cf334 100644
--- a/config/common_base
+++ b/config/common_base
@@ -371,6 +371,7 @@ CONFIG_RTE_LIBRTE_MLX4_DEBUG=n
 # ConnectX-6 & BlueField (MLX5) PMD
 #
 CONFIG_RTE_LIBRTE_MLX5_PMD=n
+CONFIG_RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY=n
 CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
 
 #
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 1e4c695f8..6eb85dfac 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -163,6 +163,11 @@
 /* Configure timeout of LRO session (in microseconds). */
 #define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"
 
+#ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
+/* mprq_tstore_memcpy */
+#define MLX5_MPRQ_TSTORE_MEMCPY "mprq_tstore_memcpy"
+#endif
+
 /*
  * Device parameter to configure the total data buffer size for a single
  * hairpin queue (logarithm value).
@@ -1621,6 +1626,10 @@ mlx5_args_check(const char *key, const char *val, void 
*opaque)
                config->sys_mem_en = !!tmp;
        } else if (strcmp(MLX5_DECAP_EN, key) == 0) {
                config->decap_en = !!tmp;
+#ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
+       } else if (strcmp(MLX5_MPRQ_TSTORE_MEMCPY, key) == 0) {
+               config->mprq_tstore_memcpy = tmp;
+#endif
        } else {
                DRV_LOG(WARNING, "%s: unknown parameter", key);
                rte_errno = EINVAL;
@@ -1681,6 +1690,9 @@ mlx5_args(struct mlx5_dev_config *config, struct 
rte_devargs *devargs)
                MLX5_RECLAIM_MEM,
                MLX5_SYS_MEM_EN,
                MLX5_DECAP_EN,
+#ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
+               MLX5_MPRQ_TSTORE_MEMCPY,
+#endif
                NULL,
        };
        struct rte_kvargs *kvlist;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 78d6eb728..09dc90953 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -236,6 +236,9 @@ struct mlx5_dev_config {
        int tx_skew; /* Tx scheduling skew between WQE and data on wire. */
        struct mlx5_hca_attr hca_attr; /* HCA attributes. */
        struct mlx5_lro_config lro; /* LRO configuration. */
+#ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
+       unsigned int mprq_tstore_memcpy:1;
+#endif
 };
 
 
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 79eb8f8d7..bee5c03bc 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2302,6 +2302,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, 
uint16_t desc,
        tmpl->socket = socket;
        if (dev->data->dev_conf.intr_conf.rxq)
                tmpl->irq = 1;
+#ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
+       tmpl->rxq.mprq_tstore_memcpy = config->mprq_tstore_memcpy;
+#endif
        mprq_stride_nums = config->mprq.stride_num_n ?
                config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
        mprq_stride_size = non_scatter_min_mbuf_size <=
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 1b71e9422..62ade3775 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1774,8 +1774,21 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf 
**pkts, uint16_t pkts_n)
                    rxq->mprq_repl == NULL ||
                    (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
                        if (likely(rte_pktmbuf_tailroom(pkt) >= len)) {
-                               rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
-                                          addr, len);
+#ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
+                               if ((rxq->mprq_tstore_memcpy) &&
+                                   (!(((uintptr_t)(rte_pktmbuf_mtod(pkt,
+                                                                    void *)) |
+                                       (uintptr_t)addr) & ALIGNMENT_MASK))) {
+                                       memcpy_aligned_rx_tstore_16B(
+                                               rte_pktmbuf_mtod(pkt, void *),
+                                               addr, len);
+                               } else {
+#endif
+                                       rte_memcpy(rte_pktmbuf_mtod(pkt, void 
*),
+                                                       addr, len);
+#ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
+                               }
+#endif
                                DATA_LEN(pkt) = len;
                        } else if (rxq->strd_scatter_en) {
                                struct rte_mbuf *prev = pkt;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index c02a007c8..72763962f 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -153,6 +153,9 @@ struct mlx5_rxq_data {
        uint32_t tunnel; /* Tunnel information. */
        uint64_t flow_meta_mask;
        int32_t flow_meta_offset;
+#ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
+       unsigned int mprq_tstore_memcpy:1;
+#endif
 } __rte_cache_aligned;
 
 enum mlx5_rxq_obj_type {
diff --git a/lib/librte_eal/x86/include/rte_memcpy.h 
b/lib/librte_eal/x86/include/rte_memcpy.h
index 9c67232df..6345572a7 100644
--- a/lib/librte_eal/x86/include/rte_memcpy.h
+++ b/lib/librte_eal/x86/include/rte_memcpy.h
@@ -874,6 +874,98 @@ rte_memcpy(void *dst, const void *src, size_t n)
                return rte_memcpy_generic(dst, src, n);
 }
 
+#ifdef RTE_LIBRTE_MLX5_NTLOAD_TSTORE_ALIGN_COPY
+/*
+ * Copy exactly 16 bytes using a non-temporal (streaming) load and a
+ * regular unaligned store.
+ * NOTE(review): _mm_stream_load_si128 (MOVNTDQA) faults unless src is
+ * 16-byte aligned; callers must guarantee that (the mlx5 call site
+ * masks dst|src against ALIGNMENT_MASK before taking this path).
+ */
+static __rte_always_inline
+void copy16B_ts(void *dst, void *src)
+{
+	__m128i var128;
+
+	var128 = _mm_stream_load_si128((__m128i *)src);
+	_mm_storeu_si128((__m128i *)dst, var128);
+}
+
+/*
+ * Copy exactly 32 bytes using a non-temporal (streaming) load and a
+ * regular unaligned store.
+ * NOTE(review): _mm256_stream_load_si256 (VMOVNTDQA) faults unless src
+ * is 32-byte aligned -- the ALIGNMENT_MASK gate at the call site only
+ * guarantees this on AVX2/AVX512 builds; confirm it cannot be reached
+ * with a merely 16-byte-aligned src on SSE-only builds.
+ */
+static __rte_always_inline
+void copy32B_ts(void *dst, void *src)
+{
+	__m256i ymm0;
+
+	ymm0 = _mm256_stream_load_si256((const __m256i *)src);
+	_mm256_storeu_si256((__m256i *)dst, ymm0);
+}
+
+/*
+ * Copy exactly 64 bytes as two 32-byte non-temporal loads followed by
+ * two regular unaligned stores.
+ * NOTE(review): both VMOVNTDQA loads require their source address to be
+ * 32-byte aligned (so src itself must be 32-byte aligned); see the
+ * alignment caveat on copy32B_ts.
+ */
+static __rte_always_inline
+void copy64B_ts(void *dst, void *src)
+{
+	__m256i ymm0, ymm1;
+
+	ymm0 = _mm256_stream_load_si256((const __m256i *)src);
+	ymm1 = _mm256_stream_load_si256((const __m256i *)((uint8_t *)src + 32));
+	_mm256_storeu_si256((__m256i *)dst, ymm0);
+	_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 32), ymm1);
+}
+
+/*
+ * Copy exactly 128 bytes as four 32-byte non-temporal loads followed by
+ * four regular unaligned stores.  Loads are issued back-to-back before
+ * the stores to give the streaming loads time to complete.
+ * NOTE(review): every VMOVNTDQA load requires a 32-byte-aligned source
+ * address (so src itself must be 32-byte aligned); see the alignment
+ * caveat on copy32B_ts.
+ */
+static __rte_always_inline
+void copy128B_ts(void *dst, void *src)
+{
+	__m256i ymm0, ymm1, ymm2, ymm3;
+
+	ymm0 = _mm256_stream_load_si256((const __m256i *)src);
+	ymm1 = _mm256_stream_load_si256((const __m256i *)((uint8_t *)src + 32));
+	ymm2 = _mm256_stream_load_si256((const __m256i *)((uint8_t *)src + 64));
+	ymm3 = _mm256_stream_load_si256((const __m256i *)((uint8_t *)src + 96));
+	_mm256_storeu_si256((__m256i *)dst, ymm0);
+	_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 32), ymm1);
+	_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 64), ymm2);
+	_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 96), ymm3);
+}
+
+/*
+ * Copy len bytes from src to dst, using non-temporal (streaming) loads
+ * for all chunks of 16 bytes and larger so the source data bypasses the
+ * cache, and plain scalar loads/stores for the sub-16-byte tail.
+ *
+ * NOTE(review): the 32/64/128-byte helpers use VMOVNTDQA, which faults
+ * unless the source address is 32-byte aligned; confirm the call-site
+ * ALIGNMENT_MASK is >= 0x1F on every build that can reach this path.
+ */
+static __rte_always_inline
+void memcpy_aligned_rx_tstore_16B(void *dst, void *src, int len)
+{
+	while (len >= 128) {
+		copy128B_ts(dst, src);
+		dst = (uint8_t *)dst + 128;
+		src = (uint8_t *)src + 128;
+		len -= 128;
+	}
+	while (len >= 64) {
+		copy64B_ts(dst, src);
+		dst = (uint8_t *)dst + 64;
+		src = (uint8_t *)src + 64;
+		len -= 64;
+	}
+	while (len >= 32) {
+		copy32B_ts(dst, src);
+		dst = (uint8_t *)dst + 32;
+		src = (uint8_t *)src + 32;
+		len -= 32;
+	}
+	if (len >= 16) {
+		copy16B_ts(dst, src);
+		dst = (uint8_t *)dst + 16;
+		src = (uint8_t *)src + 16;
+		len -= 16;
+	}
+	if (len >= 8) {
+		*(uint64_t *)dst = *(const uint64_t *)src;
+		dst = (uint8_t *)dst + 8;
+		src = (uint8_t *)src + 8;
+		len -= 8;
+	}
+	if (len >= 4) {
+		*(uint32_t *)dst = *(const uint32_t *)src;
+		dst = (uint8_t *)dst + 4;
+		src = (uint8_t *)src + 4;
+		len -= 4;
+	}
+	/*
+	 * 1-3 trailing bytes: copy byte-by-byte.  The previous revision
+	 * rewound dst/src by (4 - len) and issued a 4-byte store, which
+	 * reads and writes *before* both buffers whenever the function
+	 * is entered with len < 4 (out-of-bounds underrun).
+	 */
+	while (len-- > 0) {
+		*(uint8_t *)dst = *(const uint8_t *)src;
+		dst = (uint8_t *)dst + 1;
+		src = (uint8_t *)src + 1;
+	}
+}
+#endif
+
 #if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 100000)
 #pragma GCC diagnostic pop
 #endif
-- 
2.25.1


-- 



_Disclaimer: _(c) 2020 VVDN Technologies Pvt. Ltd. This e-mail contains 
PRIVILEGED AND CONFIDENTIAL INFORMATION intended solely for the use of the 
addressee(s). If you are not the intended recipient, please notify the 
sender by e-mail and delete the original message. Further, you are not to 
copy, disclose, or distribute this e-mail or its contents to any other 
person and any such actions are unlawful._
_
_
_
__

Reply via email to