From: Jun Yang <jun.y...@nxp.com>

Use the DPAA2_VAMODE_VADDR_TO_IOVA and DPAA2_PAMODE_VADDR_TO_IOVA
helpers to convert VA to IOVA, selecting the conversion at run time
with rte_eal_iova_mode() so a single build covers both VA and PA
modes. Also factor the BMAN release retry loop into
dpaa2_bman_multi_release() and propagate release/acquire errors to
the callers.

Signed-off-by: Jun Yang <jun.y...@nxp.com>
---
 drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 222 +++++++++++++++--------
 1 file changed, 142 insertions(+), 80 deletions(-)
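
Note for reviewers: the VA/PA conversion is now selected at run time on
rte_eal_iova_mode() instead of at build time via
RTE_LIBRTE_DPAA2_USE_PHYS_IOVA. As a minimal sketch of the dispatch
pattern (obj_to_iova() is a hypothetical helper for illustration only,
not part of this patch; it assumes DPAA2_VAMODE_VADDR_TO_IOVA is an
identity conversion and DPAA2_PAMODE_VADDR_TO_IOVA resolves like
rte_mempool_virt2iova(), which is what the replaced code used):

	#include <stdint.h>

	#include <rte_branch_prediction.h>
	#include <rte_eal.h>
	#include <rte_mempool.h>

	/* Convert a mempool object VA to the IOVA handed to BMAN. */
	static inline uint64_t
	obj_to_iova(const void *obj, uint32_t meta_data_size)
	{
		if (likely(rte_eal_iova_mode() == RTE_IOVA_VA))
			/* VA mode: IOVA equals VA, a plain cast suffices. */
			return (uint64_t)(uintptr_t)obj + meta_data_size;
		/* PA mode: translate through the mempool's IOVA info. */
		return (uint64_t)rte_mempool_virt2iova(obj) + meta_data_size;
	}

The hot paths below instead hoist the mode check out of the per-buffer
loops (see the iova_pa_release and iova_pa_acquire labels), trading some
code duplication for one branch per batch rather than one per object.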

diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index eb22a14fb5..3c5155afde 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -224,26 +224,47 @@ rte_hw_mbuf_free_pool(struct rte_mempool *mp)
        dpaa2_free_dpbp_dev(dpbp_node);
 }
 
-static void
-rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
-                       void * const *obj_table,
-                       uint32_t bpid,
-                       uint32_t meta_data_size,
-                       int count)
+static inline int
+dpaa2_bman_multi_release(uint64_t *bufs, int num,
+       struct qbman_swp *swp,
+       const struct qbman_release_desc *releasedesc)
+{
+       int retry_count = 0, ret;
+
+release_again:
+       ret = qbman_swp_release(swp, releasedesc, bufs, num);
+       if (unlikely(ret == -EBUSY)) {
+               retry_count++;
+               if (retry_count <= DPAA2_MAX_TX_RETRY_COUNT)
+                       goto release_again;
+
+               DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
+               return ret;
+       }
+       if (unlikely(ret)) {
+               DPAA2_MEMPOOL_ERR("bman release failed (err=%d)", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int
+dpaa2_mbuf_release(void * const *obj_table, uint32_t bpid,
+       uint32_t meta_data_size, int count)
 {
        struct qbman_release_desc releasedesc;
        struct qbman_swp *swp;
        int ret;
-       int i, n, retry_count;
+       int i, n;
        uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
 
        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
-               if (ret != 0) {
-                       DPAA2_MEMPOOL_ERR(
-                               "Failed to allocate IO portal, tid: %d",
-                               rte_gettid());
-                       return;
+               if (ret) {
+                       DPAA2_MEMPOOL_ERR("Failed to affine portal, err: %d, tid: %d",
+                               ret, rte_gettid());
+                       return -EIO;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;
@@ -259,51 +280,62 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
                goto aligned;
 
        /* convert mbuf to buffers for the remainder */
-       for (i = 0; i < n ; i++) {
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-               bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
-                               + meta_data_size;
-#else
-               bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
-#endif
+       if (likely(rte_eal_iova_mode() == RTE_IOVA_VA)) {
+               for (i = 0; i < n ; i++) {
+                       bufs[i] = DPAA2_VAMODE_VADDR_TO_IOVA(obj_table[i]) +
+                               meta_data_size;
+               }
+       } else {
+               for (i = 0; i < n ; i++) {
+                       bufs[i] = DPAA2_PAMODE_VADDR_TO_IOVA(obj_table[i]) +
+                               meta_data_size;
+               }
        }
 
        /* feed them to bman */
-       retry_count = 0;
-       while ((ret = qbman_swp_release(swp, &releasedesc, bufs, n)) ==
-                       -EBUSY) {
-               retry_count++;
-               if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
-                       DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
-                       return;
-               }
-       }
+       ret = dpaa2_bman_multi_release(bufs, n, swp, &releasedesc);
+       if (unlikely(ret))
+               return 0;
 
 aligned:
        /* if there are more buffers to free */
+       if (unlikely(rte_eal_iova_mode() != RTE_IOVA_VA))
+               goto iova_pa_release;
+
        while (n < count) {
                /* convert mbuf to buffers */
                for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-                       bufs[i] = (uint64_t)
-                                 rte_mempool_virt2iova(obj_table[n + i])
-                                 + meta_data_size;
-#else
-                       bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
-#endif
+                       bufs[i] = DPAA2_VAMODE_VADDR_TO_IOVA(obj_table[n + i]) +
+                               meta_data_size;
                }
 
-               retry_count = 0;
-               while ((ret = qbman_swp_release(swp, &releasedesc, bufs,
-                                       DPAA2_MBUF_MAX_ACQ_REL)) == -EBUSY) {
-                       retry_count++;
-                       if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
-                               DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
-                               return;
-                       }
+               ret = dpaa2_bman_multi_release(bufs,
+                               DPAA2_MBUF_MAX_ACQ_REL, swp, &releasedesc);
+               if (unlikely(ret))
+                       return n;
+
+               n += DPAA2_MBUF_MAX_ACQ_REL;
+       }
+
+       return count;
+
+iova_pa_release:
+       while (n < count) {
+               /* convert mbuf to buffers */
+               for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
+                       bufs[i] = DPAA2_PAMODE_VADDR_TO_IOVA(obj_table[n + i]) +
+                               meta_data_size;
                }
+
+               ret = dpaa2_bman_multi_release(bufs,
+                               DPAA2_MBUF_MAX_ACQ_REL, swp, &releasedesc);
+               if (unlikely(ret))
+                       return n;
+
                n += DPAA2_MBUF_MAX_ACQ_REL;
        }
+
+       return count;
 }
 
 RTE_EXPORT_INTERNAL_SYMBOL(rte_dpaa2_bpid_info_init)
@@ -371,7 +403,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
 #endif
        struct qbman_swp *swp;
        uint16_t bpid;
-       size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+       uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
        int i, ret;
        unsigned int n = 0;
        struct dpaa2_bp_info *bp_info;
@@ -387,56 +419,79 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
 
        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
-               if (ret != 0) {
-                       DPAA2_MEMPOOL_ERR(
-                               "Failed to allocate IO portal, tid: %d",
-                               rte_gettid());
+               if (ret) {
+                       DPAA2_MEMPOOL_ERR("Failed to affine portal, err: %d, tid: %d",
+                               ret, rte_gettid());
                        return ret;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;
 
+       if (unlikely(rte_eal_iova_mode() != RTE_IOVA_VA))
+               goto iova_pa_acquire;
+
        while (n < count) {
                /* Acquire is all-or-nothing, so we drain in 7s,
                 * then the remainder.
                 */
-               if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
-                       ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
-                                               DPAA2_MBUF_MAX_ACQ_REL);
-               } else {
-                       ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
-                                               count - n);
+               ret = qbman_swp_acquire(swp, bpid, bufs,
+                       (count - n) > DPAA2_MBUF_MAX_ACQ_REL ?
+                       DPAA2_MBUF_MAX_ACQ_REL : (count - n));
+               if (unlikely(ret <= 0))
+                       goto acquire_failed;
+
+               /* assigning mbuf from the acquired objects */
+               for (i = 0; i < ret; i++) {
+                       DPAA2_VAMODE_MODIFY_IOVA_TO_VADDR(bufs[i],
+                               size_t);
+                       obj_table[n] = (void *)(uintptr_t)(bufs[i] -
+                               bp_info->meta_data_size);
+                       n++;
                }
-               /* In case of less than requested number of buffers available
-                * in pool, qbman_swp_acquire returns 0
+       }
+       goto acquire_success;
+
+iova_pa_acquire:
+
+       while (n < count) {
+               /* Acquire is all-or-nothing, so we drain in 7s,
+                * then the remainder.
                 */
-               if (ret <= 0) {
-                       DPAA2_MEMPOOL_DP_DEBUG(
-                               "Buffer acquire failed with err code: %d", ret);
-                       /* The API expect the exact number of requested bufs */
-                       /* Releasing all buffers allocated */
-                       rte_dpaa2_mbuf_release(pool, obj_table, bpid,
-                                          bp_info->meta_data_size, n);
-                       return -ENOBUFS;
-               }
+               ret = qbman_swp_acquire(swp, bpid, bufs,
+                       (count - n) > DPAA2_MBUF_MAX_ACQ_REL ?
+                       DPAA2_MBUF_MAX_ACQ_REL : (count - n));
+               if (unlikely(ret <= 0))
+                       goto acquire_failed;
+
                /* assigning mbuf from the acquired objects */
-               for (i = 0; (i < ret) && bufs[i]; i++) {
-                       DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
-                       obj_table[n] = (struct rte_mbuf *)
-                                      (bufs[i] - bp_info->meta_data_size);
-                       DPAA2_MEMPOOL_DP_DEBUG(
-                                  "Acquired %p address %p from BMAN\n",
-                                  (void *)bufs[i], (void *)obj_table[n]);
+               for (i = 0; i < ret; i++) {
+                       DPAA2_PAMODE_MODIFY_IOVA_TO_VADDR(bufs[i],
+                               size_t);
+                       obj_table[n] = (void *)(uintptr_t)(bufs[i] -
+                               bp_info->meta_data_size);
                        n++;
                }
        }
 
+acquire_success:
 #ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
        alloc += n;
-       DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
-                              alloc, count, n);
+       DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d",
+               alloc, count, n);
 #endif
        return 0;
+
+acquire_failed:
+       DPAA2_MEMPOOL_DP_DEBUG("Buffer acquire err: %d", ret);
+       /* The API expects the exact number of requested bufs. */
+       /* Release all buffers allocated so far. */
+       ret = dpaa2_mbuf_release(obj_table, bpid,
+                       bp_info->meta_data_size, n);
+       if (ret != (int)n) {
+               DPAA2_MEMPOOL_ERR("%s: expected to free %d buffers, freed %d",
+                       __func__, n, ret);
+       }
+       return -ENOBUFS;
 }
 
 static int
@@ -444,14 +499,21 @@ rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
                  void * const *obj_table, unsigned int n)
 {
        struct dpaa2_bp_info *bp_info;
+       int ret;
 
        bp_info = mempool_to_bpinfo(pool);
        if (!(bp_info->bp_list)) {
                DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
                return -ENOENT;
        }
-       rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
-                          bp_info->meta_data_size, n);
+       ret = dpaa2_mbuf_release(obj_table, bp_info->bpid,
+                       bp_info->meta_data_size, n);
+       if (unlikely(ret != (int)n)) {
+               DPAA2_MEMPOOL_ERR("%s: expected to free %d buffers, freed %d",
+                       __func__, n, ret);
+
+               return -EIO;
+       }
 
        return 0;
 }
@@ -497,6 +559,7 @@ dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
              rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
 {
        struct rte_memseg_list *msl;
+       int ret;
        /* The memsegment list exists incase the memory is not external.
         * So, DMA-Map is required only when memory is provided by user,
         * i.e. External.
@@ -505,11 +568,10 @@ dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
 
        if (!msl) {
                DPAA2_MEMPOOL_DEBUG("Memsegment is External.");
-               rte_fslmc_vfio_mem_dmamap((size_t)vaddr,
-                               (size_t)paddr, (size_t)len);
+               ret = rte_fslmc_vfio_mem_dmamap((size_t)vaddr, paddr, len);
+               if (ret)
+                       return ret;
        }
-       /* Insert entry into the PA->VA Table */
-       dpaax_iova_table_update(paddr, vaddr, len);
 
        return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
                                               len, obj_cb, obj_cb_arg);
-- 
2.25.1
