[PATCH 33/64] arch/powerpc: use mm locking wrappers

Date: 2018-02-04
From: Davidlohr Bueso 

This becomes quite straightforward with the mmrange in place.
For those mmap_sem callers that don't already have an mmrange
in scope, we define one within the same function context.

Signed-off-by: Davidlohr Bueso 
---
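
Not part of the patch, for review context only: a minimal sketch of the
wrapper interface the conversions below assume, reconstructed from the
call sites. The real definitions come earlier in this series; until
mmap_sem actually becomes a range lock, the range argument is carried
through unused and the wrappers reduce to the plain rwsem operations.
The exact signatures here are assumptions based on how they are called:

	/* Declares a range lock spanning the entire address space. */
	DEFINE_RANGE_LOCK_FULL(mmrange);

	static inline void mm_read_lock(struct mm_struct *mm,
					struct range_lock *range)
	{
		down_read(&mm->mmap_sem);
	}

	static inline void mm_read_unlock(struct mm_struct *mm,
					  struct range_lock *range)
	{
		up_read(&mm->mmap_sem);
	}

	static inline int mm_write_lock_killable(struct mm_struct *mm,
						 struct range_lock *range)
	{
		return down_write_killable(&mm->mmap_sem);
	}

	static inline void mm_write_unlock(struct mm_struct *mm,
					   struct range_lock *range)
	{
		up_write(&mm->mmap_sem);
	}

With these in place each conversion below is mechanical: declare a
full-range mmrange where none is in scope, then swap the rwsem calls.
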
 arch/powerpc/kernel/vdso.c                 |  7 ++++---
 arch/powerpc/kvm/book3s_64_mmu_hv.c        |  6 ++++--
 arch/powerpc/kvm/book3s_64_mmu_radix.c     |  6 ++++--
 arch/powerpc/kvm/book3s_64_vio.c           |  5 +++--
 arch/powerpc/kvm/book3s_hv.c               |  7 ++++---
 arch/powerpc/kvm/e500_mmu_host.c           |  5 +++--
 arch/powerpc/mm/copro_fault.c              |  4 ++--
 arch/powerpc/mm/mmu_context_iommu.c        |  5 +++--
 arch/powerpc/mm/subpage-prot.c             | 13 +++++++------
 arch/powerpc/oprofile/cell/spu_task_sync.c |  7 ++++---
 arch/powerpc/platforms/cell/spufs/file.c   |  6 ++++--
 arch/powerpc/platforms/powernv/npu-dma.c   |  2 +-
 12 files changed, 43 insertions(+), 30 deletions(-)

diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 22b01a3962f0..869632b601b8 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -155,6 +155,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	unsigned long vdso_pages;
 	unsigned long vdso_base;
 	int rc;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
 	if (!vdso_ready)
 		return 0;
@@ -196,7 +197,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * and end up putting it elsewhere.
 	 * Add enough to the size so that the result can be aligned.
 	 */
-	if (down_write_killable(&mm->mmap_sem))
+	if (mm_write_lock_killable(mm, &mmrange))
 		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, vdso_base,
 				      (vdso_pages << PAGE_SHIFT) +
@@ -236,11 +237,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		goto fail_mmapsem;
 	}
 
-	up_write(&mm->mmap_sem);
+	mm_write_unlock(mm, &mmrange);
 	return 0;
 
  fail_mmapsem:
-	up_write(&mm->mmap_sem);
+	mm_write_unlock(mm, &mmrange);
 	return rc;
 }
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index b73dbc9e797d..c05a99209fc1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -583,8 +583,10 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	hva = gfn_to_hva_memslot(memslot, gfn);
 	npages = get_user_pages_fast(hva, 1, writing, pages);
 	if (npages < 1) {
+		DEFINE_RANGE_LOCK_FULL(mmrange);
+
 		/* Check if it's an I/O mapping */
-		down_read(&current->mm->mmap_sem);
+		mm_read_lock(current->mm, &mmrange);
 		vma = find_vma(current->mm, hva);
 		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
 		    (vma->vm_flags & VM_PFNMAP)) {
@@ -594,7 +596,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
 			write_ok = vma->vm_flags & VM_WRITE;
 		}
-		up_read(&current->mm->mmap_sem);
+		mm_read_unlock(current->mm, &mmrange);
 		if (!pfn)
 			goto out_put;
 	} else {
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 0c854816e653..9a4d1758b0db 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -397,8 +397,10 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	level = 0;
 	npages = get_user_pages_fast(hva, 1, writing, pages);
 	if (npages < 1) {
+		DEFINE_RANGE_LOCK_FULL(mmrange);
+
 		/* Check if it's an I/O mapping */
-		down_read(&current->mm->mmap_sem);
+		mm_read_lock(current->mm, &mmrange);
 		vma = find_vma(current->mm, hva);
 		if (vma && vma->vm_start <= hva && hva < vma->vm_end &&
 		    (vma->vm_flags & VM_PFNMAP)) {
@@ -406,7 +408,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				((hva - vma->vm_start) >> PAGE_SHIFT);
 			pgflags = pgprot_val(vma->vm_page_prot);
 		}
-		up_read(&current->mm->mmap_sem);
+		mm_read_unlock(current->mm, &mmrange);
 		if (!pfn)
 			return -EFAULT;
 	} else {
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 4dffa611376d..5e6fe2820009 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -60,11 +60,12 @@ static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
 static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
 {
 	long