Some illegal combinations of input flags were not checked, and we need to
take the PDEs into account when returning the fence as well.

Signed-off-by: Christian König <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 76 +++++++++++--------------
 1 file changed, 34 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 88a21400ae09..98276b55ad3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -30,6 +30,7 @@
 #include <linux/pagemap.h>
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-fence-unwrap.h>
 
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_drv.h>
@@ -741,11 +742,10 @@ amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
        struct dma_fence *fence;
        int r = 0;
 
-       /* Always start from the VM's existing last update fence. */
-       fence = dma_fence_get(vm->last_update);
-
+       /* If the VM is not ready return only a stub. */
        if (!amdgpu_vm_ready(vm))
-               return fence;
+               return dma_fence_get_stub();
+
 
        /*
         * First clean up any freed mappings in the VM.
@@ -754,7 +754,7 @@ amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
         * schedules GPU work. If nothing needs clearing, @fence can remain as
         * the original vm->last_update.
         */
-       r = amdgpu_vm_clear_freed(adev, vm, &fence);
+       r = amdgpu_vm_clear_freed(adev, vm, &vm->last_update);
        if (r)
                goto error;
 
@@ -771,47 +771,34 @@ amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
        if (r)
                goto error;
 
-       /*
-        * Decide which fence best represents the last update:
-        *
-        * MAP/REPLACE:
-        *   - For always-valid mappings, use vm->last_update.
-        *   - Otherwise, export bo_va->last_pt_update.
-        *
-        * UNMAP/CLEAR:
-        *   Keep the fence returned by amdgpu_vm_clear_freed(). If no work was
-        *   needed, it can remain as vm->last_pt_update.
-        *
-        * The VM and BO update fences are always initialized to a valid value.
-        * vm->last_update and bo_va->last_pt_update always start as valid fences.
-        * and are never expected to be NULL.
-        */
-       switch (operation) {
-       case AMDGPU_VA_OP_MAP:
-       case AMDGPU_VA_OP_REPLACE:
+       if ((operation == AMDGPU_VA_OP_MAP ||
+            operation == AMDGPU_VA_OP_REPLACE) &&
+           !amdgpu_vm_is_bo_always_valid(vm, bo_va->base.bo)) {
+
                /*
-                * For MAP/REPLACE, return the page table update fence for the
-                * mapping we just modified. bo_va is expected to be valid here.
+                * For MAP/REPLACE of non per-VM BOs we need to sync to both the
+                * bo_va->last_pt_update and vm->last_update or otherwise we
+                * potentially miss the PDE updates.
                 */
-               dma_fence_put(fence);
-
-               if (amdgpu_vm_is_bo_always_valid(vm, bo_va->base.bo))
-                       fence = dma_fence_get(vm->last_update);
-               else
-                       fence = dma_fence_get(bo_va->last_pt_update);
-               break;
-       case AMDGPU_VA_OP_UNMAP:
-       case AMDGPU_VA_OP_CLEAR:
-       default:
-               /* keep @fence as returned by amdgpu_vm_clear_freed() */
-               break;
+               fence = dma_fence_unwrap_merge(vm->last_update,
+                                              bo_va->last_pt_update);
+               if (!fence) {
+                       /* As fallback in OOM situations */
+                       dma_fence_wait(vm->last_update, false);
+                       dma_fence_wait(bo_va->last_pt_update, false);
+                       fence = dma_fence_get_stub();
+               }
+       } else {
+               fence = dma_fence_get(vm->last_update);
        }
 
+       return fence;
+
 error:
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 
-       return fence;
+       return dma_fence_get(vm->last_update);
 }
 
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
@@ -832,7 +819,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        struct amdgpu_bo_va *bo_va;
        struct drm_syncobj *timeline_syncobj = NULL;
        struct dma_fence_chain *timeline_chain = NULL;
-       struct dma_fence *fence;
        struct drm_exec exec;
        uint64_t vm_size;
        int r = 0;
@@ -884,6 +870,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
+       if (args->flags & AMDGPU_VM_DELAY_UPDATE &&
+           args->vm_timeline_syncobj_out)
+               return -EINVAL;
+
        if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
            !(args->flags & AMDGPU_VM_PAGE_PRT)) {
                gobj = drm_gem_object_lookup(filp, args->handle);
@@ -973,11 +963,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
         * that represents the last relevant update for this mapping. This
         * fence can then be exported to the user-visible VM timeline.
         */
-       if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
+       if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
+           (!adev->debug_vm || timeline_syncobj)) {
+               struct dma_fence *fence;
+
                fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
                                                args->operation);
-
-               if (timeline_syncobj && fence) {
+               if (timeline_syncobj) {
                        if (!args->vm_timeline_point) {
                               /* Replace the existing fence when no point is given. */
                                drm_syncobj_replace_fence(timeline_syncobj,
-- 
2.43.0

Reply via email to