Enable callbacks on first device attach, disable callbacks
on last device detach.

PPC64 IOMMU does a memseg walk, which will deadlock if attempted
from within a memory event callback (the memory hotplug lock is
already held at that point), so provide a local, thread-unsafe copy
of the memseg walk.
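
The reason a local copy is needed: rte_memseg_walk() takes the memory
hotplug lock itself, while mem event callbacks are invoked with that
lock already held, so calling it from the callback path is what would
deadlock. A rough sketch of the locking involved (illustrative only,
not the actual EAL implementation; the wrapper name is made up):

    /* sketch: a locked walk vs. the thread-unsafe copy added below */
    static int
    memseg_walk_locked_sketch(rte_memseg_walk_t func, void *arg)
    {
            struct rte_mem_config *mcfg =
                            rte_eal_get_configuration()->mem_config;
            int ret;

            /* the public walk API takes the hotplug lock... */
            rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
            ret = memseg_walk_thread_unsafe(func, arg);
            rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

            /* ...but a mem event callback already runs with this lock
             * held, so taking it again here would hang.
             */
            return ret;
    }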

Signed-off-by: Anatoly Burakov <anatoly.bura...@intel.com>
---

Notes:
    v4:
    - Fix PPC64 memseg walk in callback
    - Check if registering callbacks succeeded
    
    v3:
    - Moved callbacks to attach/detach as opposed to init
    
 lib/librte_eal/linuxapp/eal/eal_vfio.c | 133 +++++++++++++++++++++++++++++++--
 1 file changed, 125 insertions(+), 8 deletions(-)

diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c
index 5084a6b..ae47a5f 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c
@@ -7,6 +7,7 @@
 #include <unistd.h>
 #include <sys/ioctl.h>
 
+#include <rte_errno.h>
 #include <rte_log.h>
 #include <rte_memory.h>
 #include <rte_eal_memconfig.h>
@@ -18,6 +19,8 @@
 
 #ifdef VFIO_PRESENT
 
+#define VFIO_MEM_EVENT_CLB_NAME "vfio_mem_event_clb"
+
 /* per-process VFIO config */
 static struct vfio_config vfio_cfg;
 
@@ -53,6 +56,42 @@ static const struct vfio_iommu_type iommu_types[] = {
        },
 };
 
+/* for sPAPR IOMMU, we will need to walk the memseg list, but we cannot use
+ * rte_memseg_walk() because by the time we enter the callback we will be
+ * holding a write lock, so regular rte_memseg_walk() will deadlock. copying
+ * the same iteration code everywhere is not ideal either, so use a lockless
+ * copy of memseg walk here.
+ */
+static int
+memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       int i, ms_idx, ret = 0;
+
+       for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
+               struct rte_memseg_list *msl = &mcfg->memsegs[i];
+               const struct rte_memseg *ms;
+               struct rte_fbarray *arr;
+
+               if (msl->memseg_arr.count == 0)
+                       continue;
+
+               arr = &msl->memseg_arr;
+
+               ms_idx = rte_fbarray_find_next_used(arr, 0);
+               while (ms_idx >= 0) {
+                       ms = rte_fbarray_get(arr, ms_idx);
+                       ret = func(msl, ms, arg);
+                       if (ret < 0)
+                               return -1;
+                       if (ret > 0)
+                               return 1;
+                       ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);
+               }
+       }
+       return 0;
+}
+
 int
 vfio_get_group_fd(int iommu_group_no)
 {
@@ -214,6 +253,38 @@ vfio_group_device_count(int vfio_group_fd)
        return vfio_cfg.vfio_groups[i].devices;
 }
 
+static void
+vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len)
+{
+       struct rte_memseg_list *msl;
+       struct rte_memseg *ms;
+       size_t cur_len = 0;
+
+       msl = rte_mem_virt2memseg_list(addr);
+
+       /* for IOVA as VA mode, no need to care for IOVA addresses */
+       if (rte_eal_iova_mode() == RTE_IOVA_VA) {
+               uint64_t vfio_va = (uint64_t)(uintptr_t)addr;
+               if (type == RTE_MEM_EVENT_ALLOC)
+                       rte_vfio_dma_map(vfio_va, vfio_va, len);
+               else
+                       rte_vfio_dma_unmap(vfio_va, vfio_va, len);
+               return;
+       }
+
+       /* memsegs are contiguous in memory */
+       ms = rte_mem_virt2memseg(addr, msl);
+       while (cur_len < len) {
+               if (type == RTE_MEM_EVENT_ALLOC)
+                       rte_vfio_dma_map(ms->addr_64, ms->iova, ms->len);
+               else
+                       rte_vfio_dma_unmap(ms->addr_64, ms->iova, ms->len);
+
+               cur_len += ms->len;
+               ++ms;
+       }
+}
+
 int
 rte_vfio_clear_group(int vfio_group_fd)
 {
@@ -276,6 +347,8 @@ int
 rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
                int *vfio_dev_fd, struct vfio_device_info *device_info)
 {
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
        struct vfio_group_status group_status = {
                        .argsz = sizeof(group_status)
        };
@@ -363,6 +436,10 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
                                rte_vfio_clear_group(vfio_group_fd);
                                return -1;
                        }
+                       /* lock memory hotplug before mapping and release it
+                        * after registering callback, to prevent races
+                        */
+                       rte_rwlock_read_lock(mem_lock);
                        ret = t->dma_map_func(vfio_cfg.vfio_container_fd);
                        if (ret) {
                                RTE_LOG(ERR, EAL,
@@ -370,10 +447,26 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
                                        dev_addr, errno, strerror(errno));
                                close(vfio_group_fd);
                                rte_vfio_clear_group(vfio_group_fd);
+                               rte_rwlock_read_unlock(mem_lock);
                                return -1;
                        }
 
                        vfio_cfg.vfio_iommu_type = t;
+
+                       /* register callback for mem events */
+                       ret = rte_mem_event_callback_register(
+                                       VFIO_MEM_EVENT_CLB_NAME,
+                                       vfio_mem_event_callback);
+                       /* unlock memory hotplug */
+                       rte_rwlock_read_unlock(mem_lock);
+                       if (ret && rte_errno == ENOTSUP) {
+                               RTE_LOG(DEBUG, EAL, "Memory event callbacks not supported\n");
+                       } else if (ret) {
+                               RTE_LOG(ERR, EAL, "Could not install memory event callback for VFIO\n");
+                               return -1;
+                       } else {
+                               RTE_LOG(DEBUG, EAL, "Installed memory event callback for VFIO\n");
+                       }
                }
        }
 
@@ -411,6 +504,8 @@ int
 rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
                    int vfio_dev_fd)
 {
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
        struct vfio_group_status group_status = {
                        .argsz = sizeof(group_status)
        };
@@ -418,13 +513,20 @@ rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
        int iommu_group_no;
        int ret;
 
+       /* we don't want any DMA mapping messages to come while we're detaching
+        * VFIO device, because this might be the last device and we might need
+        * to unregister the callback.
+        */
+       rte_rwlock_read_lock(mem_lock);
+
        /* get group number */
        ret = vfio_get_group_no(sysfs_base, dev_addr, &iommu_group_no);
        if (ret <= 0) {
                RTE_LOG(WARNING, EAL, "  %s not managed by VFIO driver\n",
                        dev_addr);
                /* This is an error at this point. */
-               return -1;
+               ret = -1;
+               goto out;
        }
 
        /* get the actual group fd */
@@ -432,7 +534,8 @@ rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
        if (vfio_group_fd <= 0) {
                RTE_LOG(INFO, EAL, "vfio_get_group_fd failed for %s\n",
                                   dev_addr);
-               return -1;
+               ret = -1;
+               goto out;
        }
 
        /* At this point we got an active group. Closing it will make the
@@ -444,7 +547,8 @@ rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
        if (close(vfio_dev_fd) < 0) {
                RTE_LOG(INFO, EAL, "Error when closing vfio_dev_fd for %s\n",
                                   dev_addr);
-               return -1;
+               ret = -1;
+               goto out;
        }
 
        /* An VFIO group can have several devices attached. Just when there is
@@ -456,17 +560,30 @@ rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
                if (close(vfio_group_fd) < 0) {
                        RTE_LOG(INFO, EAL, "Error when closing vfio_group_fd for %s\n",
                                dev_addr);
-                       return -1;
+                       ret = -1;
+                       goto out;
                }
 
                if (rte_vfio_clear_group(vfio_group_fd) < 0) {
                        RTE_LOG(INFO, EAL, "Error when clearing group for %s\n",
                                           dev_addr);
-                       return -1;
+                       ret = -1;
+                       goto out;
                }
        }
 
-       return 0;
+       /* if there are no active device groups, unregister the callback to
+        * avoid spurious attempts to map/unmap memory from VFIO.
+        */
+       if (vfio_cfg.vfio_active_groups == 0)
+               rte_mem_event_callback_unregister(VFIO_MEM_EVENT_CLB_NAME);
+
+       /* success */
+       ret = 0;
+
+out:
+       rte_rwlock_read_unlock(mem_lock);
+       return ret;
 }
 
 int
@@ -884,7 +1001,7 @@ vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
        /* check if window size needs to be adjusted */
        memset(&param, 0, sizeof(param));
 
-       if (rte_memseg_walk(vfio_spapr_window_size_walk, &param) < 0) {
+       if (memseg_walk_thread_unsafe(vfio_spapr_window_size_walk, &param) < 0) {
                RTE_LOG(ERR, EAL, "Could not get window size\n");
                return -1;
        }
@@ -903,7 +1020,7 @@ vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
                                RTE_LOG(ERR, EAL, "Could not create new DMA window\n");
                                return -1;
                        }
-                       if (rte_memseg_walk(vfio_spapr_map_walk,
+                       if (memseg_walk_thread_unsafe(vfio_spapr_map_walk,
                                        &vfio_container_fd) < 0) {
                                RTE_LOG(ERR, EAL, "Could not recreate DMA maps\n");
                                return -1;
-- 
2.7.4
