Module: Mesa
Branch: main
Commit: 006342be8b53a1bd1ba959ba32406a0ff845c35f
URL: http://cgit.freedesktop.org/mesa/mesa/commit/?id=006342be8b53a1bd1ba959ba32406a0ff845c35f

Author: Rob Clark <[email protected]>
Date:   Wed Jun 28 15:58:21 2023 -0700

tu/drm: Split out helper for iova alloc

Signed-off-by: Rob Clark <[email protected]>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23533>

---

 src/freedreno/vulkan/tu_knl_drm.cc     | 35 ++++++++++++++++++++
 src/freedreno/vulkan/tu_knl_drm.h      |  5 +++
 src/freedreno/vulkan/tu_knl_drm_msm.cc | 60 ++++++++++++----------------------
 3 files changed, 61 insertions(+), 39 deletions(-)

diff --git a/src/freedreno/vulkan/tu_knl_drm.cc b/src/freedreno/vulkan/tu_knl_drm.cc
index 1450feadf7b..9f8a5ba6eb3 100644
--- a/src/freedreno/vulkan/tu_knl_drm.cc
+++ b/src/freedreno/vulkan/tu_knl_drm.cc
@@ -107,6 +107,41 @@ tu_InvalidateMappedMemoryRanges(VkDevice _device,
                      pMemoryRanges);
 }
 
+VkResult
+tu_allocate_userspace_iova(struct tu_device *dev,
+                           uint64_t size,
+                           uint64_t client_iova,
+                           enum tu_bo_alloc_flags flags,
+                           uint64_t *iova)
+{
+   *iova = 0;
+
+   if (flags & TU_BO_ALLOC_REPLAYABLE) {
+      if (client_iova) {
+         if (util_vma_heap_alloc_addr(&dev->vma, client_iova, size)) {
+            *iova = client_iova;
+         } else {
+            return VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS;
+         }
+      } else {
+         /* We have to separate replayable IOVAs from ordinary ones in order
+          * for them not to clash. The easiest way to do this is to allocate
+          * them from the other end of the address space.
+          */
+         dev->vma.alloc_high = true;
+         *iova = util_vma_heap_alloc(&dev->vma, size, 0x1000);
+      }
+   } else {
+      dev->vma.alloc_high = false;
+      *iova = util_vma_heap_alloc(&dev->vma, size, 0x1000);
+   }
+
+   if (!*iova)
+      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+   return VK_SUCCESS;
+}
+
 int
 tu_drm_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
 {
diff --git a/src/freedreno/vulkan/tu_knl_drm.h b/src/freedreno/vulkan/tu_knl_drm.h
index dffd6a1c4a5..52fb4ef0e4b 100644
--- a/src/freedreno/vulkan/tu_knl_drm.h
+++ b/src/freedreno/vulkan/tu_knl_drm.h
@@ -26,6 +26,11 @@ tu_sync_cache_bo(struct tu_device *dev,
                  VkDeviceSize size,
                  enum tu_mem_sync_op op);
 
+VkResult tu_allocate_userspace_iova(struct tu_device *dev,
+                                    uint64_t size,
+                                    uint64_t client_iova,
+                                    enum tu_bo_alloc_flags flags,
+                                    uint64_t *iova);
 int tu_drm_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);
 void tu_drm_bo_finish(struct tu_device *dev, struct tu_bo *bo);
 
diff --git a/src/freedreno/vulkan/tu_knl_drm_msm.cc b/src/freedreno/vulkan/tu_knl_drm_msm.cc
index ff6a2ede495..02542cd2b77 100644
--- a/src/freedreno/vulkan/tu_knl_drm_msm.cc
+++ b/src/freedreno/vulkan/tu_knl_drm_msm.cc
@@ -345,54 +345,36 @@ tu_free_zombie_vma_locked(struct tu_device *dev, bool wait)
 }
 
 static VkResult
-tu_allocate_userspace_iova(struct tu_device *dev,
-                           uint32_t gem_handle,
-                           uint64_t size,
-                           uint64_t client_iova,
-                           enum tu_bo_alloc_flags flags,
-                           uint64_t *iova)
+msm_allocate_userspace_iova(struct tu_device *dev,
+                            uint32_t gem_handle,
+                            uint64_t size,
+                            uint64_t client_iova,
+                            enum tu_bo_alloc_flags flags,
+                            uint64_t *iova)
 {
+   VkResult result;
+
    mtx_lock(&dev->vma_mutex);
 
    *iova = 0;
 
    tu_free_zombie_vma_locked(dev, false);
 
-   if (flags & TU_BO_ALLOC_REPLAYABLE) {
-      if (client_iova) {
-         if (util_vma_heap_alloc_addr(&dev->vma, client_iova, size)) {
-            *iova = client_iova;
-         } else {
-            /* Address may be already freed by us, but not considered as
-             * freed by the kernel. We have to wait until all work that
-             * may hold the address is done. Since addresses are meant to
-             * be replayed only by debug tooling, it should be ok to wait.
-             */
-            if (tu_free_zombie_vma_locked(dev, true) == VK_SUCCESS &&
-                util_vma_heap_alloc_addr(&dev->vma, client_iova, size)) {
-               *iova = client_iova;
-            } else {
-               mtx_unlock(&dev->vma_mutex);
-               return VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS;
-            }
-         }
-      } else {
-         /* We have to separate replayable IOVAs from ordinary one in order to
-          * for them not to clash. The easiest way to do this is to allocate
-          * them from the other end of the address space.
-          */
-         dev->vma.alloc_high = true;
-         *iova = util_vma_heap_alloc(&dev->vma, size, 0x1000);
-      }
-   } else {
-      dev->vma.alloc_high = false;
-      *iova = util_vma_heap_alloc(&dev->vma, size, 0x1000);
+   result = tu_allocate_userspace_iova(dev, size, client_iova, flags, iova);
+   if (result == VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS) {
+      /* Address may be already freed by us, but not considered as
+       * freed by the kernel. We have to wait until all work that
+       * may hold the address is done. Since addresses are meant to
+       * be replayed only by debug tooling, it should be ok to wait.
+       */
+      tu_free_zombie_vma_locked(dev, true);
+      result = tu_allocate_userspace_iova(dev, size, client_iova, flags, iova);
    }
 
    mtx_unlock(&dev->vma_mutex);
 
-   if (!*iova)
-      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+   if (result != VK_SUCCESS)
+      return result;
 
    struct drm_msm_gem_info req = {
       .handle = gem_handle,
@@ -437,8 +419,8 @@ tu_bo_init(struct tu_device *dev,
    assert(!client_iova || dev->physical_device->has_set_iova);
 
    if (dev->physical_device->has_set_iova) {
-      result = tu_allocate_userspace_iova(dev, gem_handle, size, client_iova,
-                                          flags, &iova);
+      result = msm_allocate_userspace_iova(dev, gem_handle, size, client_iova,
+                                           flags, &iova);
    } else {
       result = tu_allocate_kernel_iova(dev, gem_handle, &iova);
    }

Reply via email to