On Mon, Jul 22, 2019 at 11:44:24AM +0200, Christoph Hellwig wrote:
> Currently nouveau_svm_fault expects nouveau_range_fault to never unlock
> mmap_sem, but the latter unlocks it for a random selection of error
> codes. Fix this up by always unlocking mmap_sem for non-zero return
> values in nouveau_range_fault, and only unlocking it in the caller
> for successful returns.
> 
> Signed-off-by: Christoph Hellwig <h...@lst.de>
> ---
>  drivers/gpu/drm/nouveau/nouveau_svm.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
> index 5dd83a46578f..5de2d54b9782 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_svm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
> @@ -494,8 +494,10 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
>       ret = hmm_range_register(range, mirror,
>                                range->start, range->end,
>                                PAGE_SHIFT);
> -     if (ret)
> +     if (ret) {
> +             up_read(&range->vma->vm_mm->mmap_sem);
>               return (int)ret;
> +     }
>  
>       if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
>               up_read(&range->vma->vm_mm->mmap_sem);
> @@ -504,11 +506,9 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
>  
>       ret = hmm_range_fault(range, true);
>       if (ret <= 0) {
> -             if (ret == -EBUSY || !ret) {
> -                     up_read(&range->vma->vm_mm->mmap_sem);
> -                     ret = -EBUSY;
> -             } else if (ret == -EAGAIN)
> +             if (ret == 0)
>                       ret = -EBUSY;
> +             up_read(&range->vma->vm_mm->mmap_sem);
>               hmm_range_unregister(range);
>               return ret;

Hum..

The caller does this:

again:
                ret = nouveau_range_fault(&svmm->mirror, &range);
                if (ret == 0) {
                        mutex_lock(&svmm->mutex);
                        if (!nouveau_range_done(&range)) {
                                mutex_unlock(&svmm->mutex);
                                goto again;

And we can't call nouveau_range_fault() -> hmm_range_fault() without
holding the mmap_sem, so we can't allow nouveau_range_fault() to unlock
it: the lock has to stay held across the whole retry loop and only be
dropped by the caller once it is done with the range.
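
To spell the contract out, here is the quoted loop again with the
locking annotated (just a sketch; the down_read() itself happens
earlier in nouveau_svm_fault(), before the vma lookup):

	/* mmap_sem already read-held when we get here */
again:
		ret = nouveau_range_fault(&svmm->mirror, &range);
		if (ret == 0) {
			mutex_lock(&svmm->mutex);
			if (!nouveau_range_done(&range)) {
				mutex_unlock(&svmm->mutex);
				/*
				 * mmap_sem must still be held here: we are
				 * about to call nouveau_range_fault() ->
				 * hmm_range_fault() again.
				 */
				goto again;
			}
			...
		}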

Maybe this instead? 

diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index a9c5c58d425b3d..92cf760a9bcc5d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -494,21 +494,16 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
        ret = hmm_range_register(range, mirror,
                                 range->start, range->end,
                                 PAGE_SHIFT);
-       if (ret) {
-               up_read(&range->vma->vm_mm->mmap_sem);
+       if (ret)
                return (int)ret;
-       }
 
-       if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
-               up_read(&range->vma->vm_mm->mmap_sem);
+       if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT))
                return -EBUSY;
-       }
 
        ret = hmm_range_fault(range, true);
        if (ret <= 0) {
                if (ret == 0)
                        ret = -EBUSY;
-               up_read(&range->vma->vm_mm->mmap_sem);
                hmm_range_unregister(range);
                return ret;
        }
@@ -706,8 +701,8 @@ nouveau_svm_fault(struct nvif_notify *notify)
                                                NULL);
                        svmm->vmm->vmm.object.client->super = false;
                        mutex_unlock(&svmm->mutex);
-                       up_read(&svmm->mm->mmap_sem);
                }
+               up_read(&svmm->mm->mmap_sem);
 
                /* Cancel any faults in the window whose pages didn't manage
                 * to keep their valid bit, or stay writeable when required.
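
If both hunks go in, the lifetime of mmap_sem is owned entirely by
nouveau_svm_fault(): taken once before the vma lookup and the retry
loop, and dropped exactly once after the mutex section, whether or not
the fault succeeded, while nouveau_range_fault() never touches it.
Roughly this shape (a sketch reconstructed from the hunks above, not a
literal extract):

		down_read(&svmm->mm->mmap_sem);
		/* ... vma lookup, range setup, the again: loop calling
		 * nouveau_range_fault(), which no longer drops the lock ... */
		if (ret == 0) {
			mutex_lock(&svmm->mutex);
			/* ... map the faulted pages into the GPU ... */
			mutex_unlock(&svmm->mutex);
		}
		up_read(&svmm->mm->mmap_sem);	/* one unlock on every path */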