commit b5331379bc62611d1026173a09c73573384201d9 upstream.

When an MMU notifier call results in unmapping a range that spans multiple
PGDs, we end up calling into cond_resched_lock() when crossing a PGD boundary,
since this avoids running into RCU stalls during VM teardown. Unfortunately,
if the VM is destroyed as a result of OOM, then blocking is not permitted
and the call to the scheduler triggers the following BUG():

 | BUG: sleeping function called from invalid context at arch/arm64/kvm/mmu.c:394
 | in_atomic(): 1, irqs_disabled(): 0, non_block: 1, pid: 36, name: oom_reaper
 | INFO: lockdep is turned off.
 | CPU: 3 PID: 36 Comm: oom_reaper Not tainted 5.8.0 #1
 | Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015
 | Call trace:
 |  dump_backtrace+0x0/0x284
 |  show_stack+0x1c/0x28
 |  dump_stack+0xf0/0x1a4
 |  ___might_sleep+0x2bc/0x2cc
 |  unmap_stage2_range+0x160/0x1ac
 |  kvm_unmap_hva_range+0x1a0/0x1c8
 |  kvm_mmu_notifier_invalidate_range_start+0x8c/0xf8
 |  __mmu_notifier_invalidate_range_start+0x218/0x31c
 |  mmu_notifier_invalidate_range_start_nonblock+0x78/0xb0
 |  __oom_reap_task_mm+0x128/0x268
 |  oom_reap_task+0xac/0x298
 |  oom_reaper+0x178/0x17c
 |  kthread+0x1e4/0x1fc
 |  ret_from_fork+0x10/0x30

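The reschedule comes from the PGD-boundary check in the stage-2 unmap walk: the
walker releases kvm->mmu_lock at each boundary so other VCPUs and the lockup
detector are not starved. A rough userspace analogue of that walk, using
pthreads purely to illustrate the pattern (all names hypothetical):

        #include <pthread.h>
        #include <sched.h>

        static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

        /* Walk [start, end) under a lock, dropping it at each coarse
         * boundary -- but only when the caller permits blocking. */
        static void walk_range(unsigned long start, unsigned long end,
                               unsigned long stride, int may_block)
        {
                unsigned long addr = start, next;

                pthread_mutex_lock(&table_lock);
                do {
                        next = (addr + stride > end) ? end : addr + stride;
                        /* ... tear down mappings in [addr, next) ... */
                        if (may_block && next != end) {
                                pthread_mutex_unlock(&table_lock);
                                sched_yield(); /* stand-in for cond_resched() */
                                pthread_mutex_lock(&table_lock);
                        }
                } while (addr = next, addr != end);
                pthread_mutex_unlock(&table_lock);
        }

Without the may_block check, the unlock/yield/relock sequence runs even when
the caller is in a non-blocking context, which is exactly what trips the
BUG() above.
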
Use the new 'blockable' argument to kvm_unmap_hva_range() to ensure that
we only reschedule if blocking is permitted by the MMU notifier call.
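In the patch below, the flag is threaded through handle_hva_to_gpa()'s opaque
data pointer and recovered in the handler. A minimal standalone C sketch of
that void-pointer plumbing (all names hypothetical):

        #include <stdbool.h>
        #include <stdio.h>

        typedef int (*range_handler_t)(unsigned long gpa, unsigned long size,
                                       void *data);

        /* Stand-in for handle_hva_to_gpa(): forwards an opaque pointer. */
        static int for_each_range(unsigned long start, unsigned long end,
                                  range_handler_t handler, void *data)
        {
                return handler(start, end - start, data);
        }

        /* The handler recovers the flag, as kvm_unmap_hva_handler() now
         * does with 'blockable'. */
        static int unmap_handler(unsigned long gpa, unsigned long size,
                                 void *data)
        {
                bool may_block = *(bool *)data;

                printf("unmap [%#lx, %#lx) may_block=%d\n",
                       gpa, gpa + size, may_block);
                return 0;
        }

        int main(void)
        {
                bool blockable = false; /* e.g. the oom_reaper path */

                return for_each_range(0x1000, 0x2000, unmap_handler,
                                      &blockable);
        }
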

Cc: <sta...@vger.kernel.org> # v4.19 only
Fixes: 8b3405e345b5 ("kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd")
Cc: Marc Zyngier <m...@kernel.org>
Cc: Suzuki K Poulose <suzuki.poul...@arm.com>
Cc: James Morse <james.mo...@arm.com>
Signed-off-by: Will Deacon <w...@kernel.org>
Message-Id: <20200811102725.7121-3-w...@kernel.org>
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
[will: Backport to 4.19; use 'blockable' instead of non-existent MMU_NOTIFIER_RANGE_BLOCKABLE flag]
Signed-off-by: Will Deacon <w...@kernel.org>
---
 virt/kvm/arm/mmu.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 3957ff0ecda5..41d6285c3da9 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -323,7 +323,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
  * destroying the VM), otherwise another faulting VCPU may come in and mess
  * with things behind our backs.
  */
-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size,
+                                bool may_block)
 {
        pgd_t *pgd;
        phys_addr_t addr = start, end = start + size;
@@ -348,11 +349,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
                 * If the range is too large, release the kvm->mmu_lock
                 * to prevent starvation and lockup detector warnings.
                 */
-               if (next != end)
+               if (may_block && next != end)
                        cond_resched_lock(&kvm->mmu_lock);
        } while (pgd++, addr = next, addr != end);
 }
 
+static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+{
+       __unmap_stage2_range(kvm, start, size, true);
+}
+
 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
                              phys_addr_t addr, phys_addr_t end)
 {
@@ -1820,7 +1826,9 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 
 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
-       unmap_stage2_range(kvm, gpa, size);
+       bool may_block = *(bool *)data;
+
+       __unmap_stage2_range(kvm, gpa, size, may_block);
        return 0;
 }
 
@@ -1831,7 +1839,7 @@ int kvm_unmap_hva_range(struct kvm *kvm,
                return 0;
 
        trace_kvm_unmap_hva_range(start, end);
-       handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+       handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &blockable);
        return 0;
 }
 
-- 
2.28.0.297.g1956fa8f8d-goog
