This code was using get_user_pages*() in a "Case 2" scenario
(DMA/RDMA), using the categorization from [1]. That means it's time
to convert the get_user_pages*() + put_page() calls to
pin_user_pages*() + unpin_user_pages() calls.
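
For reference, the conversion follows this shape (a minimal
stand-alone sketch of the pattern, not code from this driver; the
helper names and the uaddr/npages parameters are made up for
illustration):

    /* Both the old and new APIs are declared in <linux/mm.h>. */

    /* Before: acquire with get_user_pages_fast() ... */
    static int user_buf_get_old(unsigned long uaddr, int npages,
                                struct page **pages)
    {
            return get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
    }

    /* ... and release each page with put_page(). */
    static void user_buf_put_old(struct page **pages, int npages)
    {
            int i;

            for (i = 0; i < npages; i++)
                    put_page(pages[i]);
    }

    /* After: the pin/unpin pair keeps FOLL_PIN accounting balanced. */
    static int user_buf_get_new(unsigned long uaddr, int npages,
                                struct page **pages)
    {
            return pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
    }

    static void user_buf_put_new(struct page **pages, int npages)
    {
            unpin_user_pages(pages, npages);
    }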

There is some helpful background in [2]: this is a small part of
fixing a long-standing disconnect between pinning pages and file
systems' use of those pages.

[1] Documentation/core-api/pin_user_pages.rst

[2] "Explicit pinning of user-space pages":
    https://lwn.net/Articles/807108/
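
The one non-mechanical change is in i915_gem_userptr_get_pages():
__get_user_pages_fast() took a write/read-only boolean, whereas the
pin_user_pages*() calls take gup_flags, so the read-only check is now
translated into FOLL_WRITE up front. A minimal sketch of just that
flag translation (only the function names are from the diff below;
uaddr, num_pages, readonly and pvec are illustrative locals):

    unsigned int gup_flags = 0;
    int pinned;

    /* Old style: writability passed as a boolean argument. */
    pinned = __get_user_pages_fast(uaddr, num_pages,
                                   !readonly /* write */, pvec);

    /* New style: writability expressed via gup_flags. */
    if (!readonly)
            gup_flags |= FOLL_WRITE;
    pinned = pin_user_pages_fast_only(uaddr, num_pages, gup_flags, pvec);

Note that pin_user_pages_fast_only() also fails rather than sleeping,
matching the old non-blocking __get_user_pages_fast() behavior.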

Signed-off-by: John Hubbard <jhubb...@nvidia.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 7ffd7afeb7a5..b55ac7563189 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -471,7 +471,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
                                        down_read(&mm->mmap_sem);
                                        locked = 1;
                                }
-                               ret = get_user_pages_remote
+                               ret = pin_user_pages_remote
                                        (work->task, mm,
                                         obj->userptr.ptr + pinned * PAGE_SIZE,
                                         npages - pinned,
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        }
        mutex_unlock(&obj->mm.lock);
 
-       release_pages(pvec, pinned);
+       unpin_user_pages(pvec, pinned);
        kvfree(pvec);
 
        i915_gem_object_put(obj);
@@ -564,6 +564,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
        struct sg_table *pages;
        bool active;
        int pinned;
+       unsigned int gup_flags = 0;
 
        /* If userspace should engineer that these pages are replaced in
         * the vma between us binding this page into the GTT and completion
@@ -598,11 +599,14 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
                                      GFP_KERNEL |
                                      __GFP_NORETRY |
                                      __GFP_NOWARN);
-               if (pvec) /* defer to worker if malloc fails */
-                       pinned = __get_user_pages_fast(obj->userptr.ptr,
-                                                      num_pages,
-                                                      !i915_gem_object_is_readonly(obj),
-                                                      pvec);
+               /* defer to worker if malloc fails */
+               if (pvec) {
+                       if (!i915_gem_object_is_readonly(obj))
+                               gup_flags |= FOLL_WRITE;
+                       pinned = pin_user_pages_fast_only(obj->userptr.ptr,
+                                                         num_pages, gup_flags,
+                                                         pvec);
+               }
        }
 
        active = false;
@@ -620,7 +624,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
                __i915_gem_userptr_set_active(obj, true);
 
        if (IS_ERR(pages))
-               release_pages(pvec, pinned);
+               unpin_user_pages(pvec, pinned);
        kvfree(pvec);
 
        return PTR_ERR_OR_ZERO(pages);
@@ -675,7 +679,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
                }
 
                mark_page_accessed(page);
-               put_page(page);
+               unpin_user_page(page);
        }
        obj->mm.dirty = false;
 
-- 
2.26.2
