Convert ipath_get_user_pages() to use get_user_pages_unlocked(). This
shortens the section where we hold mmap_sem for writing and also removes
the knowledge about get_user_pages() locking from the ipath driver. While
changing the code, we also fix a bug in the check of the number of pinned
pages: the old code compared only num_pages against the RLIMIT_MEMLOCK
limit, without taking the pages already accounted in pinned_vm into
account.
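 
For reference, a minimal sketch of the calling-convention difference this
conversion relies on (illustration only, not part of the patch; start,
nr_pages and pages are placeholder variables, and the
get_user_pages_unlocked() signature is the one used in the hunks below):

	/* old pattern: caller holds mmap_sem across the call (for writing
	 * here, because pinned_vm is updated under the same lock) */
	down_write(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, start, nr_pages,
			     1, 1, pages, NULL);
	current->mm->pinned_vm += nr_pages;
	up_write(&current->mm->mmap_sem);

	/* new pattern: get_user_pages_unlocked() takes and drops mmap_sem
	 * internally, so the driver holds it only for the pinned_vm update */
	down_write(&current->mm->mmap_sem);
	current->mm->pinned_vm += nr_pages;
	up_write(&current->mm->mmap_sem);
	ret = get_user_pages_unlocked(current, current->mm, start, nr_pages,
				      1, 1, pages);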

CC: Mike Marciniszyn <[email protected]>
CC: Roland Dreier <[email protected]>
CC: [email protected]
Signed-off-by: Jan Kara <[email protected]>
---
 drivers/infiniband/hw/ipath/ipath_user_pages.c | 62 +++++++++++---------------
 1 file changed, 27 insertions(+), 35 deletions(-)

diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index dc66c4506916..a89af9654112 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -52,40 +52,58 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
        }
 }
 
-/* call with current->mm->mmap_sem held */
-static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-                                 struct page **p, struct vm_area_struct **vma)
+/**
+ * ipath_get_user_pages - lock user pages into memory
+ * @start_page: the start page
+ * @num_pages: the number of pages
+ * @p: the output page structures
+ *
+ * This function takes a given start page (page aligned user virtual
+ * address) and pins it and the following specified number of pages.  For
+ * now, num_pages is always 1, but that will probably change at some point
+ * (because caller is doing expected sends on a single virtually contiguous
+ * buffer, so we can do all pages at once).
+ */
+int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
+                        struct page **p)
 {
        unsigned long lock_limit;
        size_t got;
        int ret;
 
+       down_write(&current->mm->mmap_sem);
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
-       if (num_pages > lock_limit) {
+       if (current->mm->pinned_vm + num_pages > lock_limit && 
+           !capable(CAP_IPC_LOCK)) {
+               up_write(&current->mm->mmap_sem);
                ret = -ENOMEM;
                goto bail;
        }
+       current->mm->pinned_vm += num_pages;
+       up_write(&current->mm->mmap_sem);
 
        ipath_cdbg(VERBOSE, "pin %lx pages from vaddr %lx\n",
                   (unsigned long) num_pages, start_page);
 
        for (got = 0; got < num_pages; got += ret) {
-               ret = get_user_pages(current, current->mm,
-                                    start_page + got * PAGE_SIZE,
-                                    num_pages - got, 1, 1,
-                                    p + got, vma);
+               ret = get_user_pages_unlocked(current, current->mm,
+                                             start_page + got * PAGE_SIZE,
+                                             num_pages - got, 1, 1,
+                                             p + got);
                if (ret < 0)
                        goto bail_release;
        }
 
-       current->mm->pinned_vm += num_pages;
 
        ret = 0;
        goto bail;
 
 bail_release:
        __ipath_release_user_pages(p, got, 0);
+       down_write(&current->mm->mmap_sem);
+       current->mm->pinned_vm -= num_pages;
+       up_write(&current->mm->mmap_sem);
 bail:
        return ret;
 }
@@ -146,32 +164,6 @@ dma_addr_t ipath_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
        return phys;
 }
 
-/**
- * ipath_get_user_pages - lock user pages into memory
- * @start_page: the start page
- * @num_pages: the number of pages
- * @p: the output page structures
- *
- * This function takes a given start page (page aligned user virtual
- * address) and pins it and the following specified number of pages.  For
- * now, num_pages is always 1, but that will probably change at some point
- * (because caller is doing expected sends on a single virtually contiguous
- * buffer, so we can do all pages at once).
- */
-int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-                        struct page **p)
-{
-       int ret;
-
-       down_write(&current->mm->mmap_sem);
-
-       ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
-
-       up_write(&current->mm->mmap_sem);
-
-       return ret;
-}
-
 void ipath_release_user_pages(struct page **p, size_t num_pages)
 {
        down_write(&current->mm->mmap_sem);
-- 
1.8.1.4
