[PATCH 07/64] mm/hugetlb: teach hugetlb_fault() about range locking

2018-02-04 Thread Davidlohr Bueso
From: Davidlohr Bueso 

Such that we can pass the mmrange along to vm_fault for pages in a
userfaultfd range (handle_userfault()), which gets funky with
mmap_sem - just look at the locking rules.
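
For reference, a caller updated for the new signature would look roughly
like the sketch below. This is illustrative only and not part of the
patch; find_vma() and friends are the usual fault-path helpers, and
DEFINE_RANGE_LOCK_FULL() stands in for the full-range initializer assumed
to come from the earlier range locking patches in this series:

	/*
	 * Sketch: the same struct range_lock used when taking mmap_sem is
	 * threaded down to hugetlb_fault(), so handle_userfault() can drop
	 * and retake the right range instead of the whole mmap_sem.
	 */
	DEFINE_RANGE_LOCK_FULL(mmrange);	/* assumed helper, whole address space */
	struct vm_area_struct *vma;
	int ret = VM_FAULT_SIGBUS;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (vma && is_vm_hugetlb_page(vma))
		ret = hugetlb_fault(mm, vma, address, flags, &mmrange);
	up_read(&mm->mmap_sem);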

Signed-off-by: Davidlohr Bueso 
---
 include/linux/hugetlb.h |  9 +++++----
 mm/gup.c                |  3 ++-
 mm/hugetlb.c            | 16 +++++++++++-----
 mm/memory.c             |  2 +-
 4 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 36fa6a2a82e3..df0a89a95bdc 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -91,7 +91,7 @@ int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_ar
 long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 struct page **, struct vm_area_struct **,
 unsigned long *, unsigned long *, long, unsigned int,
-int *);
+int *, struct range_lock *);
 void unmap_hugepage_range(struct vm_area_struct *,
  unsigned long, unsigned long, struct page *);
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
@@ -106,7 +106,8 @@ int hugetlb_report_node_meminfo(int, char *);
 void hugetlb_show_meminfo(void);
 unsigned long hugetlb_total_pages(void);
 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-   unsigned long address, unsigned int flags);
+ unsigned long address, unsigned int flags,
+ struct range_lock *mmrange);
 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
@@ -170,7 +171,7 @@ static inline unsigned long hugetlb_total_pages(void)
return 0;
 }
 
-#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)({ BUG(); 0; })
+#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n,r) ({ BUG(); 0; })
 #define follow_huge_addr(mm, addr, write)  ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
 static inline void hugetlb_report_meminfo(struct seq_file *m)
@@ -189,7 +190,7 @@ static inline void hugetlb_show_meminfo(void)
 #define pud_huge(x)0
 #define is_hugepage_only_range(mm, addr, len)  0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
-#define hugetlb_fault(mm, vma, addr, flags)({ BUG(); 0; })
+#define hugetlb_fault(mm, vma, addr, flags,mmrange)({ BUG(); 0; })
 #define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
src_addr, pagep)({ BUG(); 0; })
 #define huge_pte_offset(mm, address, sz)   0
diff --git a/mm/gup.c b/mm/gup.c
index 01983a7b3750..3d1b6dd11616 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -684,7 +684,8 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
-   gup_flags, nonblocking);
+   gup_flags, nonblocking,
+   mmrange);
continue;
}
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c204e3d132b..fd22459e89ef 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3675,7 +3675,8 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
   struct address_space *mapping, pgoff_t idx,
-  unsigned long address, pte_t *ptep, unsigned int flags)
+  unsigned long address, pte_t *ptep, unsigned int flags,
+  struct range_lock *mmrange)
 {
struct hstate *h = hstate_vma(vma);
int ret = VM_FAULT_SIGBUS;
@@ -3716,6 +3717,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
.vma = vma,
.address = address,
.flags = flags,
+   .lockrange = mmrange,
/*
 * Hard to debug if it ends up being
 * used by a callee that assumes
@@ -3869,7 +3871,8 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
 #endif
 
 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-   unsigned long address, unsigned int flags)
+ unsigned long address, unsigned int flags,
+ struct range_lock *mmrange)
 {
pte_t *ptep, entry;
spinlock_t *ptl;
@@ -3912,7 +3915,8 @@ int