RE: [PATCH 3/4] drm/ttm: make unlocking in ttm_bo_cleanup_refs optional
Please see comments inline -Original Message- From: Christian König [mailto:ckoenig.leichtzumer...@gmail.com] Sent: Wednesday, November 08, 2017 11:00 PM To: He, Roger <hongbo...@amd.com>; amd-gfx@lists.freedesktop.org; dri-de...@lists.freedesktop.org Subject: [PATCH 3/4] drm/ttm: make unlocking in ttm_bo_cleanup_refs optional Needed for the next patch. Signed-off-by: Christian König <christian.koe...@amd.com> --- drivers/gpu/drm/ttm/ttm_bo.c | 52 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 6f55310a9d09..d23592cfe42e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -486,20 +486,21 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) } /** - * function ttm_bo_cleanup_refs_and_unlock + * function ttm_bo_cleanup_refs * If bo idle, remove from delayed- and lru lists, and unref. * If not idle, do nothing. * * Must be called with lru_lock and reservation held, this function - * will drop both before returning. + * will drop the lru lock and optionally the reservation lock before returning. * * @interruptible Any sleeps should occur interruptibly. * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. + * @unlock_resv Unlock the reservation lock as well. 
*/ -static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, - bool interruptible, - bool no_wait_gpu) +static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, + bool interruptible, bool no_wait_gpu, + bool unlock_resv) { struct ttm_bo_global *glob = bo->glob; struct reservation_object *resv; @@ -518,7 +519,8 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, if (ret && !no_wait_gpu) { long lret; - reservation_object_unlock(bo->resv); + if (unlock_resv) + reservation_object_unlock(bo->resv); spin_unlock(&glob->lru_lock); lret = reservation_object_wait_timeout_rcu(resv, true, @@ -531,19 +533,22 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, return -EBUSY; spin_lock(&glob->lru_lock); - ret = __ttm_bo_reserve(bo, false, true, NULL); - - /* -* We raced, and lost, someone else holds the reservation now, -* and is probably busy in ttm_bo_cleanup_memtype_use. -* -* Even if it's not the case, because we finished waiting any -* delayed destruction would succeed, so just return success -* here. -*/ - if (ret) { - spin_unlock(&glob->lru_lock); - return 0; + if (unlock_resv) { + ret = __ttm_bo_reserve(bo, false, true, NULL); + /* +* We raced, and lost, someone else holds the reservation now, +* and is probably busy in ttm_bo_cleanup_memtype_use. +* +* Even if it's not the case, because we finished waiting any +* delayed destruction would succeed, so just return success +* here. +*/ + if (ret) { + spin_unlock(&glob->lru_lock); + return 0; + } + } else { + ret = 0; } } [Roger]: //Looks like we also need the condition check here. Otherwise, it will unlock the vm root bo reservation, which is not what we want here, I think. 
if (unlock_resv) //need this condition here reservation_object_unlock(bo->resv); return 0; } @@ -600,8 +605,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) } if (!ret) - ret = ttm_bo_cleanup_refs_and_unlock(entry, false, -!remove_all); + ret = ttm_bo_cleanup_refs(entry, false, !remove_all, + true); else spin_unlock(&glob->lru_lock); @@ -770,8 +775,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, kref_get(&bo->list_kref); if (!list_empty(&bo->ddestroy)) { - ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible, -no_wait_gpu); +
Recall: [PATCH 3/4] drm/ttm: make unlocking in ttm_bo_cleanup_refs optional
He, Roger would like to recall the message, "[PATCH 3/4] drm/ttm: make unlocking in ttm_bo_cleanup_refs optional". ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
Recall: [PATCH 3/4] drm/ttm: make unlocking in ttm_bo_cleanup_refs optional
He, Roger would like to recall the message, "[PATCH 3/4] drm/ttm: make unlocking in ttm_bo_cleanup_refs optional". ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
Recall: [PATCH 3/4] drm/ttm: make unlocking in ttm_bo_cleanup_refs optional
He, Roger would like to recall the message, "[PATCH 3/4] drm/ttm: make unlocking in ttm_bo_cleanup_refs optional". ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
[PATCH 3/4] drm/ttm: make unlocking in ttm_bo_cleanup_refs optional
Needed for the next patch. Signed-off-by: Christian König <christian.koe...@amd.com> --- drivers/gpu/drm/ttm/ttm_bo.c | 52 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 6f55310a9d09..d23592cfe42e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -486,20 +486,21 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) } /** - * function ttm_bo_cleanup_refs_and_unlock + * function ttm_bo_cleanup_refs * If bo idle, remove from delayed- and lru lists, and unref. * If not idle, do nothing. * * Must be called with lru_lock and reservation held, this function - * will drop both before returning. + * will drop the lru lock and optionally the reservation lock before returning. * * @interruptible Any sleeps should occur interruptibly. * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. + * @unlock_resv Unlock the reservation lock as well. */ -static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, - bool interruptible, - bool no_wait_gpu) +static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, + bool interruptible, bool no_wait_gpu, + bool unlock_resv) { struct ttm_bo_global *glob = bo->glob; struct reservation_object *resv; @@ -518,7 +519,8 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, if (ret && !no_wait_gpu) { long lret; - reservation_object_unlock(bo->resv); + if (unlock_resv) + reservation_object_unlock(bo->resv); spin_unlock(&glob->lru_lock); lret = reservation_object_wait_timeout_rcu(resv, true, @@ -531,19 +533,22 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, return -EBUSY; spin_lock(&glob->lru_lock); - ret = __ttm_bo_reserve(bo, false, true, NULL); - - /* -* We raced, and lost, someone else holds the reservation now, -* and is probably busy in ttm_bo_cleanup_memtype_use. 
-* -* Even if it's not the case, because we finished waiting any -* delayed destruction would succeed, so just return success -* here. -*/ - if (ret) { - spin_unlock(&glob->lru_lock); - return 0; + if (unlock_resv) { + ret = __ttm_bo_reserve(bo, false, true, NULL); + /* +* We raced, and lost, someone else holds the reservation now, +* and is probably busy in ttm_bo_cleanup_memtype_use. +* +* Even if it's not the case, because we finished waiting any +* delayed destruction would succeed, so just return success +* here. +*/ + if (ret) { + spin_unlock(&glob->lru_lock); + return 0; + } + } else { + ret = 0; } } @@ -600,8 +605,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) } if (!ret) - ret = ttm_bo_cleanup_refs_and_unlock(entry, false, -!remove_all); + ret = ttm_bo_cleanup_refs(entry, false, !remove_all, + true); else spin_unlock(&glob->lru_lock); @@ -770,8 +775,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, kref_get(&bo->list_kref); if (!list_empty(&bo->ddestroy)) { - ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible, -no_wait_gpu); + ret = ttm_bo_cleanup_refs(bo, interruptible, no_wait_gpu, true); kref_put(&bo->list_kref, ttm_bo_release_list); return ret; } @@ -1735,7 +1739,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) kref_get(&bo->list_kref); if (!list_empty(&bo->ddestroy)) { - ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false); + ret = ttm_bo_cleanup_refs(bo, false, false, true); kref_put(&bo->list_kref, ttm_bo_release_list); return ret; } -- 2.11.0 ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org