As with kvm_riscv_gstage_wp_range(), possibly valid pages must not be skipped when !found_leaf: advance addr to the next page_size boundary instead of blindly adding page_size, which can overshoot the boundary and leave mappings untouched. Unlike the write-protect case, which may safely write-protect more than was asked for, unmap must not clear beyond the requested range. Since PTE splitting is not implemented yet, a partially covered huge page is left mapped and a warning is logged instead.
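
For illustration only, a minimal userspace sketch (made-up addresses; ALIGN here mirrors the kernel macro, and this is not the kernel code itself) of why stepping to the next boundary matters, and of the case the new warning covers:

  #include <stdio.h>

  #define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

  int main(void)
  {
  	unsigned long long addr = 0x201000ULL;		/* mid-way into a 2M block */
  	unsigned long long end = 0x600000ULL;
  	unsigned long long page_size = 0x200000ULL;	/* this level maps 2 MiB */

  	/* Old stepping: overshoots the 0x400000 boundary, so a leaf
  	 * there would later be visited unaligned and silently skipped. */
  	printf("old next addr: %#llx\n", addr + page_size);	/* 0x401000 */

  	/* New stepping: resume the walk at the next boundary. */
  	printf("new next addr: %#llx\n", ALIGN(addr + 1, page_size));	/* 0x400000 */

  	/* A leaf that is unaligned or only partially inside [addr, end)
  	 * cannot be cleared without over-unmapping; with no PTE
  	 * splitting, the patch logs a warning and leaves it mapped. */
  	if ((addr & (page_size - 1)) || (end - addr) < page_size)
  		printf("would WARN_ONCE and skip this leaf\n");
  	return 0;
  }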
Signed-off-by: Wu Fei <[email protected]>
---
 arch/riscv/kvm/gstage.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/arch/riscv/kvm/gstage.c b/arch/riscv/kvm/gstage.c
index 2b141a78ecac..d78f5aeb8b40 100644
--- a/arch/riscv/kvm/gstage.c
+++ b/arch/riscv/kvm/gstage.c
@@ -395,15 +395,19 @@ void kvm_riscv_gstage_unmap_range(struct kvm_gstage *gstage,
 		if (ret)
 			break;
 
-		if (!found_leaf)
-			goto next;
-
-		if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
-			kvm_riscv_gstage_op_pte(gstage, addr, ptep,
-						ptep_level, GSTAGE_OP_CLEAR);
+		if (!found_leaf) {
+			addr = ALIGN(addr + 1, page_size);
+		} else {
+			if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
+				kvm_riscv_gstage_op_pte(gstage, addr, ptep,
+							ptep_level, GSTAGE_OP_CLEAR);
+			else {
+				WARN_ONCE(1, "Skip unmap range addr: %#llx, end: %#llx, page_size: %#lx\n",
+					  addr, end, page_size);
+			}
 
-next:
-		addr += page_size;
+			addr += page_size;
+		}
 
 		/*
 		 * If the range is too large, release the kvm->mmu_lock
-- 
2.43.0

