[PATCH] amdgpu: fix incorrect use of the remap_mutex

This patch fixes a multi-thread race by expanding the protection range of
the remap_mutex to cover the whole list walk. With this fix the CTS test
"dEQP-VK.api.object_management.multithreaded_per_thread_device.device_memory_smal"
now always passes; previously it failed about 1 time in 15 on a high-performance
i7 CPU in a virtualization environment.
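
The following is a minimal, self-contained sketch (not part of the patch)
of the locking pattern applied here: the mutex is held across the whole
list walk rather than only around the node removal, and every exit path
drops it. Names and types are simplified stand-ins for dev->remap_list
and struct amdgpu_va_remap.

    #include <pthread.h>
    #include <stdlib.h>

    struct node {
            struct node *next;
            unsigned long address, size;
    };

    struct dev {
            pthread_mutex_t remap_mutex;
            struct node *remap_list;
    };

    /* Remove and free every node overlapping [address, address + size). */
    static void remove_overlaps(struct dev *dev, unsigned long address,
                                unsigned long size)
    {
            struct node **link, *n;

            pthread_mutex_lock(&dev->remap_mutex);    /* cover the whole walk */
            for (link = &dev->remap_list; (n = *link) != NULL; ) {
                    if (n->address < address + size &&
                        address < n->address + n->size) {
                            *link = n->next;          /* unlink under the lock */
                            free(n);
                    } else {
                            link = &n->next;
                    }
            }
            pthread_mutex_unlock(&dev->remap_mutex);  /* single unlock on exit */
    }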

Change-Id: If88b9d35e79fe1cb6eb0e32c680bc4996c7915bc
Signed-off-by: Monk Liu <[email protected]>
---
 amdgpu/amdgpu_bo.c    | 29 ++++++++++++++++++-----------
 amdgpu/amdgpu_vamgr.c |  5 +++--
 2 files changed, 21 insertions(+), 13 deletions(-)

diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index 33cb705..8960728 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -989,6 +989,8 @@ int amdgpu_bo_va_op_refcounted(amdgpu_device_handle dev,
                        return -EINVAL;
        }
 
+       pthread_mutex_lock(&dev->remap_mutex);
+
        /* find the previous mapped va object and its bo and unmap it*/
        if (ops == AMDGPU_VA_OP_MAP) {
                LIST_FOR_EACH_ENTRY(vahandle, &dev->remap_list, list) {
@@ -998,15 +1000,15 @@ int amdgpu_bo_va_op_refcounted(amdgpu_device_handle dev,
                                /* the overlap va mapping which need to be unmapped first */
                                vao = vahandle;
                                r = amdgpu_bo_va_op(vao->bo, vao->offset, vao->size, vao->address, flags, AMDGPU_VA_OP_UNMAP);
-                               if (r)
+                               if (r) {
+                                       pthread_mutex_unlock(&dev->remap_mutex);
                                        return -EINVAL;
+                               }
 
                                /* Just drop the reference. */
                                amdgpu_bo_reference(&vao->bo, NULL);
                                /* remove the remap from list */
-                               pthread_mutex_lock(&dev->remap_mutex);
                                list_del(&vao->list);
-                               pthread_mutex_unlock(&dev->remap_mutex);
                                free(vao);
                        }
                }
@@ -1021,12 +1023,18 @@ int amdgpu_bo_va_op_refcounted(amdgpu_device_handle dev,
                        }
                }
                if (vao) {
-                       if (bo && (bo != vao->bo))
+                       if (bo && (bo != vao->bo)) {
+                               pthread_mutex_unlock(&dev->remap_mutex);
                                return -EINVAL;
-               } else
+                       }
+               } else {
+                       pthread_mutex_unlock(&dev->remap_mutex);
                        return -EINVAL;
-       } else
+               }
+       } else {
+               pthread_mutex_unlock(&dev->remap_mutex);
                return -EINVAL;
+       }
 
        /* we only allow null bo for unmap operation */
        if (!bo)
@@ -1039,26 +1047,25 @@ int amdgpu_bo_va_op_refcounted(amdgpu_device_handle dev,
                        /* Just drop the reference. */
                        amdgpu_bo_reference(&bo, NULL);
                        /* remove the remap from list */
-                       pthread_mutex_lock(&dev->remap_mutex);
                        list_del(&vao->list);
-                       pthread_mutex_unlock(&dev->remap_mutex);
                        free(vao);
                } else if (ops == AMDGPU_VA_OP_MAP) {
                        /* bump the refcount of bo! */
                        atomic_inc(&bo->refcount);
                        /* add the remap to list and vao should be NULL for map */
                        vao = (struct amdgpu_va_remap*)calloc(1, sizeof(struct amdgpu_va_remap));
-                       if (!vao)
+                       if (!vao) {
+                               pthread_mutex_unlock(&dev->remap_mutex);
                                return -ENOMEM;
+                       }
                        vao->address = addr;
                        vao->size = size;
                        vao->offset = offset;
                        vao->bo = bo;
-                       pthread_mutex_lock(&dev->remap_mutex);
                        list_add(&vao->list, &dev->remap_list);
-                       pthread_mutex_unlock(&dev->remap_mutex);
                }
        }
+       pthread_mutex_unlock(&dev->remap_mutex);
        return r;
 }
 
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index b3f5397..c5a7f41 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -32,6 +32,7 @@
 #include "amdgpu_drm.h"
 #include "amdgpu_internal.h"
 #include "util_math.h"
+#include "stdio.h"
 
 /* Devices share SVM range. So a global SVM VAM manager is needed. */
 static struct amdgpu_bo_va_mgr vamgr_svm;
@@ -488,6 +489,7 @@ int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
        address = va_range_handle->address;
        size    = va_range_handle->size;
 
+       pthread_mutex_lock(&dev->remap_mutex);
        /* clean up previous mapping if it is used for virtual allocation */
        LIST_FOR_EACH_ENTRY(vahandle, &dev->remap_list, list) {
                /* check whether the remap list alraedy have va that overlap with current request */
@@ -500,12 +502,11 @@ int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
                        /* Just drop the reference. */
                        amdgpu_bo_reference(&vahandle->bo, NULL);
                        /* remove the remap from list */
-                       pthread_mutex_lock(&dev->remap_mutex);
                        list_del(&vahandle->list);
-                       pthread_mutex_unlock(&dev->remap_mutex);
                        free(vahandle);
                }
        }
+       pthread_mutex_unlock(&dev->remap_mutex);
 
        amdgpu_vamgr_free_va(va_range_handle->vamgr,
                        va_range_handle->address,
--
2.7.4
