[PATCH 32/64] arch/s390: use mm locking wrappers
From: Davidlohr Bueso

This becomes quite straightforward with the mmrange in place. Signed-off-by: Davidlohr Bueso --- arch/s390/kernel/vdso.c | 5 +++-- arch/s390/kvm/gaccess.c | 4 ++-- arch/s390/kvm/kvm-s390.c | 24 ++-- arch/s390/kvm/priv.c | 29 + arch/s390/mm/fault.c | 6 +++--- arch/s390/mm/gmap.c | 45 - arch/s390/pci/pci_mmio.c | 5 +++-- 7 files changed, 66 insertions(+), 52 deletions(-) diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index f3a1c7c6824e..0395c6b906fd 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -213,6 +213,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) unsigned long vdso_pages; unsigned long vdso_base; int rc; + DEFINE_RANGE_LOCK_FULL(mmrange); if (!vdso_enabled) return 0; @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) * it at vdso_base which is the "natural" base for it, but we might * fail and end up putting it elsewhere. */ - if (down_write_killable(&mm->mmap_sem)) + if (mm_write_lock_killable(mm, &mmrange)) return -EINTR; vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0); if (IS_ERR_VALUE(vdso_base)) { @@ -270,7 +271,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) rc = 0; out_up: - up_write(&mm->mmap_sem); + mm_write_unlock(mm, &mmrange); return rc; } diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index ff739b86df36..28c2c14319c8 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -1179,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg, int rc; DEFINE_RANGE_LOCK_FULL(mmrange); - down_read(&sg->mm->mmap_sem); + mm_read_lock(sg->mm, &mmrange); /* * We don't want any guest-2 tables to change - so the parent * tables/pointers we read stay valid - unshadowing is however @@ -1209,6 +1209,6 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg, if (!rc) rc = gmap_shadow_page(sg, saddr, __pte(pte.val), &mmrange); ipte_unlock(vcpu); - 
up_read(&sg->mm->mmap_sem); + mm_read_unlock(sg->mm, &mmrange); return rc; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ba4c7092335a..942aeb6cbf1c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1420,6 +1420,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) { uint8_t *keys; uint64_t hva; + DEFINE_RANGE_LOCK_FULL(mmrange); int srcu_idx, i, r = 0; if (args->flags != 0) @@ -1437,7 +1438,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) if (!keys) return -ENOMEM; - down_read(&current->mm->mmap_sem); + mm_read_lock(current->mm, &mmrange); srcu_idx = srcu_read_lock(&kvm->srcu); for (i = 0; i < args->count; i++) { hva = gfn_to_hva(kvm, args->start_gfn + i); @@ -1451,7 +1452,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) break; } srcu_read_unlock(&kvm->srcu, srcu_idx); - up_read(&current->mm->mmap_sem); + mm_read_unlock(current->mm, &mmrange); if (!r) { r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, @@ -1468,6 +1469,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) { uint8_t *keys; uint64_t hva; + DEFINE_RANGE_LOCK_FULL(mmrange); int srcu_idx, i, r = 0; if (args->flags != 0) @@ -1493,7 +1495,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) if (r) goto out; - down_read(&current->mm->mmap_sem); + mm_read_lock(current->mm, &mmrange); srcu_idx = srcu_read_lock(&kvm->srcu); for (i = 0; i < args->count; i++) { hva = gfn_to_hva(kvm, args->start_gfn + i); @@ -1513,7 +1515,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) break; } srcu_read_unlock(&kvm->srcu, srcu_idx); - up_read(&current->mm->mmap_sem); + mm_read_unlock(current->mm, &mmrange); out: kvfree(keys); return r; @@ -1543,6 +1545,7 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm, unsigned long bufsize, hva, pgstev, i, next, cur; int srcu_idx, peek, r = 0, rr; u8 *res; + DEFINE_RANGE_LOCK_FULL(mmrange); cur = args->start_gfn; i = next = pgstev = 0; @@ 
-1586,7 +1589,7 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm, args->start_gfn = cur; - down_read(&kvm->mm->mmap_sem); + mm_read_lock(kvm->mm, &mmrange);
[PATCH 32/64] arch/s390: use mm locking wrappers
From: Davidlohr Bueso

This becomes quite straightforward with the mmrange in place. Signed-off-by: Davidlohr Bueso --- arch/s390/kernel/vdso.c | 5 +++-- arch/s390/kvm/gaccess.c | 4 ++-- arch/s390/kvm/kvm-s390.c | 24 ++-- arch/s390/kvm/priv.c | 29 + arch/s390/mm/fault.c | 6 +++--- arch/s390/mm/gmap.c | 45 - arch/s390/pci/pci_mmio.c | 5 +++-- 7 files changed, 66 insertions(+), 52 deletions(-) diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index f3a1c7c6824e..0395c6b906fd 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -213,6 +213,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) unsigned long vdso_pages; unsigned long vdso_base; int rc; + DEFINE_RANGE_LOCK_FULL(mmrange); if (!vdso_enabled) return 0; @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) * it at vdso_base which is the "natural" base for it, but we might * fail and end up putting it elsewhere. */ - if (down_write_killable(&mm->mmap_sem)) + if (mm_write_lock_killable(mm, &mmrange)) return -EINTR; vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0); if (IS_ERR_VALUE(vdso_base)) { @@ -270,7 +271,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) rc = 0; out_up: - up_write(&mm->mmap_sem); + mm_write_unlock(mm, &mmrange); return rc; } diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index ff739b86df36..28c2c14319c8 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -1179,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg, int rc; DEFINE_RANGE_LOCK_FULL(mmrange); - down_read(&sg->mm->mmap_sem); + mm_read_lock(sg->mm, &mmrange); /* * We don't want any guest-2 tables to change - so the parent * tables/pointers we read stay valid - unshadowing is however @@ -1209,6 +1209,6 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg, if (!rc) rc = gmap_shadow_page(sg, saddr, __pte(pte.val), &mmrange); ipte_unlock(vcpu); - 
up_read(&sg->mm->mmap_sem); + mm_read_unlock(sg->mm, &mmrange); return rc; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ba4c7092335a..942aeb6cbf1c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1420,6 +1420,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) { uint8_t *keys; uint64_t hva; + DEFINE_RANGE_LOCK_FULL(mmrange); int srcu_idx, i, r = 0; if (args->flags != 0) @@ -1437,7 +1438,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) if (!keys) return -ENOMEM; - down_read(&current->mm->mmap_sem); + mm_read_lock(current->mm, &mmrange); srcu_idx = srcu_read_lock(&kvm->srcu); for (i = 0; i < args->count; i++) { hva = gfn_to_hva(kvm, args->start_gfn + i); @@ -1451,7 +1452,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) break; } srcu_read_unlock(&kvm->srcu, srcu_idx); - up_read(&current->mm->mmap_sem); + mm_read_unlock(current->mm, &mmrange); if (!r) { r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, @@ -1468,6 +1469,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) { uint8_t *keys; uint64_t hva; + DEFINE_RANGE_LOCK_FULL(mmrange); int srcu_idx, i, r = 0; if (args->flags != 0) @@ -1493,7 +1495,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) if (r) goto out; - down_read(&current->mm->mmap_sem); + mm_read_lock(current->mm, &mmrange); srcu_idx = srcu_read_lock(&kvm->srcu); for (i = 0; i < args->count; i++) { hva = gfn_to_hva(kvm, args->start_gfn + i); @@ -1513,7 +1515,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) break; } srcu_read_unlock(&kvm->srcu, srcu_idx); - up_read(&current->mm->mmap_sem); + mm_read_unlock(current->mm, &mmrange); out: kvfree(keys); return r; @@ -1543,6 +1545,7 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm, unsigned long bufsize, hva, pgstev, i, next, cur; int srcu_idx, peek, r = 0, rr; u8 *res; + DEFINE_RANGE_LOCK_FULL(mmrange); cur = args->start_gfn; i = next = pgstev = 0; @@ 
-1586,7 +1589,7 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm, args->start_gfn = cur; - down_read(&kvm->mm->mmap_sem); + mm_read_lock(kvm->mm, &mmrange); srcu_idx =