On Mon, Mar 04, 2013 at 08:40:29PM +0100, Jan Kiszka wrote:
> The logic for calculating the value with which we call kvm_set_cr0/4 was
> broken (will definitely be visible with nested unrestricted guest mode
> support). Also, we performed the check regarding CR0_ALWAYSON too early
> when in guest mode.
> 
> What really needs to be done on both CR0 and CR4 is to mask out L1-owned
> bits and merge them in from GUEST_CR0/4. In contrast, arch.cr0/4 and
> arch.cr0/4_guest_owned_bits contain the mangled L0+L1 state and, thus,
> are not suited as input.
> 
> For both CRs, we can then apply the check against VMXON_CRx_ALWAYSON and
> refuse the update if it fails. To be fully consistent, we implement this
> check now also for CR4.
> 
> Finally, we have to set the shadow to the value L2 wanted to write
> originally.
> 
> Signed-off-by: Jan Kiszka <[email protected]>
> ---
> 
> Changes in v2:
>  - keep the non-misleading part of the comment in handle_set_cr0
> 
>  arch/x86/kvm/vmx.c |   46 +++++++++++++++++++++++++++++++---------------
>  1 files changed, 31 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 7cc566b..832b7b4 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -4605,37 +4605,53 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned 
> char *hypercall)
>  /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
>  static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
>  {
> -     if (to_vmx(vcpu)->nested.vmxon &&
> -         ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
> -             return 1;
> -
>       if (is_guest_mode(vcpu)) {
> +             struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
> +             unsigned long orig_val = val;
> +
>               /*
>                * We get here when L2 changed cr0 in a way that did not change
>                * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
> -              * but did change L0 shadowed bits. This can currently happen
> -              * with the TS bit: L0 may want to leave TS on (for lazy fpu
> -              * loading) while pretending to allow the guest to change it.
> +              * but did change L0 shadowed bits.
>                */
> -             if (kvm_set_cr0(vcpu, (val & vcpu->arch.cr0_guest_owned_bits) |
> -                      (vcpu->arch.cr0 & ~vcpu->arch.cr0_guest_owned_bits)))
> +             val = (val & ~vmcs12->cr0_guest_host_mask) |
> +                     (vmcs_read64(GUEST_CR0) & vmcs12->cr0_guest_host_mask);
> +             if ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON)
>                       return 1;
The more I look at it, the more correct it looks to me. I will continue
looking, but I think we can move the VMXON_CR0_ALWAYSON check into
vmx_set_cr0(). The same applies to the cr4 case.

> -             vmcs_writel(CR0_READ_SHADOW, val);
> +
> +             if (kvm_set_cr0(vcpu, val))
> +                     return 1;
> +             vmcs_writel(CR0_READ_SHADOW, orig_val);
>               return 0;
> -     } else
> +     } else {
> +             if (to_vmx(vcpu)->nested.vmxon &&
> +                 ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
> +                     return 1;
>               return kvm_set_cr0(vcpu, val);
> +     }
>  }
>  
>  static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
>  {
>       if (is_guest_mode(vcpu)) {
> -             if (kvm_set_cr4(vcpu, (val & vcpu->arch.cr4_guest_owned_bits) |
> -                      (vcpu->arch.cr4 & ~vcpu->arch.cr4_guest_owned_bits)))
> +             struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
> +             unsigned long orig_val = val;
> +
> +             val = (val & ~vmcs12->cr4_guest_host_mask) |
> +                     (vmcs_readl(GUEST_CR4) & vmcs12->cr4_guest_host_mask);
> +             if ((val & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)
> +                     return 1;
> +
> +             if (kvm_set_cr4(vcpu, val))
>                       return 1;
> -             vmcs_writel(CR4_READ_SHADOW, val);
> +             vmcs_writel(CR4_READ_SHADOW, orig_val);
>               return 0;
> -     } else
> +     } else {
> +             if (to_vmx(vcpu)->nested.vmxon &&
> +                 ((val & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
> +                     return 1;
>               return kvm_set_cr4(vcpu, val);
> +     }
>  }
>  
>  /* called to set cr0 as approriate for clts instruction exit. */
> -- 
> 1.7.3.4
> 



--
                        Gleb.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to