On 05/12/2017 07:21, Wanpeng Li wrote:
> From: Wanpeng Li <[email protected]>
> 
> The test case below can cause an infinite loop in kvm when ept=0.
> 
>     #include <unistd.h>
>     #include <sys/syscall.h>
>     #include <string.h>
>     #include <stdint.h>
>     #include <linux/kvm.h>
>     #include <fcntl.h>
>     #include <sys/ioctl.h>
>     
>     long r[5];
>     int main()
>     {
>       r[2] = open("/dev/kvm", O_RDONLY);
>       r[3] = ioctl(r[2], KVM_CREATE_VM, 0);
>       r[4] = ioctl(r[3], KVM_CREATE_VCPU, 7);
>       ioctl(r[4], KVM_RUN, 0);
>     }
> 
> The test doesn't set up any memory regions, so mmu_alloc_shadow/direct_roots() 
> in kvm return 1 when kvm fails to allocate a root page table, which results 
> in the infinite loop below:
> 
>     vcpu_run() {
>       for (;;) {
>               r = vcpu_enter_guest()::kvm_mmu_reload() returns 1 
>               if (r <= 0)
>                       break;
>               if (need_resched())
>                       cond_resched();
>       }
>     }
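> 
> A simplified sketch of the assumed return-value flow (hypothetical, not 
> verbatim kernel code) shows why the value 1 can never escape the loop: 
> kvm_mmu_reload() forwards whatever the root allocation path returns, and 
> vcpu_run() only stops when vcpu_enter_guest() returns a value <= 0, so a 
> return of 1 is indistinguishable from "keep running the guest":
> 
>     /* hypothetical, simplified versions of the functions named above */
>     static int kvm_mmu_reload(struct kvm_vcpu *vcpu)
>     {
>             /* before this patch: 1 when make_mmu_pages_available() fails */
>             return mmu_alloc_roots(vcpu);
>     }
> 
>     static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>     {
>             int r = kvm_mmu_reload(vcpu);
> 
>             if (r)
>                     return r;       /* the error value 1 is passed straight up */
>             /* ... enter the guest ... */
>             return 1;               /* but 1 also means "continue the loop" */
>     }
> 
>     static int vcpu_run(struct kvm_vcpu *vcpu)
>     {
>             int r;
> 
>             for (;;) {
>                     r = vcpu_enter_guest(vcpu);
>                     if (r <= 0)     /* never true for the old return of 1 */
>                             break;
>                     if (need_resched())
>                             cond_resched();
>             }
>             return r;
>     }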
> 
> This patch fixes it by returning -ENOSPC when there is no kvm mmu page 
> available for the root page table.
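> 
> With a negative value, the `if (r <= 0)` check in vcpu_run() terminates the 
> loop and the error propagates out to userspace, so the KVM_RUN ioctl in the 
> reproducer is expected to fail instead of hanging. A minimal (hypothetical) 
> check on the caller side:
> 
>     if (ioctl(r[4], KVM_RUN, 0) < 0)
>             perror("KVM_RUN");      /* should report ENOSPC with this fix */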
> 
> Cc: Paolo Bonzini <[email protected]>
> Cc: Radim Krčmář <[email protected]>
> Fixes: 26eeb53cf0f ("KVM: MMU: Bail out immediately if there is no available mmu page")
> Signed-off-by: Wanpeng Li <[email protected]>
> ---
>  arch/x86/kvm/mmu.c | 8 ++++----
>  1 file changed, 4 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index c9aaa18..89da688 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3395,7 +3395,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
>               spin_lock(&vcpu->kvm->mmu_lock);
>               if(make_mmu_pages_available(vcpu) < 0) {
>                       spin_unlock(&vcpu->kvm->mmu_lock);
> -                     return 1;
> +                     return -ENOSPC;
>               }
>               sp = kvm_mmu_get_page(vcpu, 0, 0,
>                               vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
> @@ -3410,7 +3410,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
>                       spin_lock(&vcpu->kvm->mmu_lock);
>                       if (make_mmu_pages_available(vcpu) < 0) {
>                               spin_unlock(&vcpu->kvm->mmu_lock);
> -                             return 1;
> +                             return -ENOSPC;
>                       }
>                       sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
>                                       i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
> @@ -3450,7 +3450,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
>               spin_lock(&vcpu->kvm->mmu_lock);
>               if (make_mmu_pages_available(vcpu) < 0) {
>                       spin_unlock(&vcpu->kvm->mmu_lock);
> -                     return 1;
> +                     return -ENOSPC;
>               }
>               sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
>                               vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
> @@ -3487,7 +3487,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
>               spin_lock(&vcpu->kvm->mmu_lock);
>               if (make_mmu_pages_available(vcpu) < 0) {
>                       spin_unlock(&vcpu->kvm->mmu_lock);
> -                     return 1;
> +                     return -ENOSPC;
>               }
>               sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
>                                     0, ACC_ALL);
> 

Queued, thanks.
