On Fri, Sep 05, 2008 at 09:51:18AM +0200, Alexander Graf wrote:
> This patch implements the GIF flag and the clgi and stgi instructions that
> set this flag. Only if the flag is set (the default) can interrupts be
> received by the CPU.
> 
> To keep track of this state, this patch adds a new hidden
> flags vector that is used to store information that does not go into the
> vmcb, but is SVM specific.
> 
> v2 moves the hflags to x86 generic code
> 
> Signed-off-by: Alexander Graf <[EMAIL PROTECTED]>
> ---
>  arch/x86/kvm/svm.c         |   50 +++++++++++++++++++++++++++++++++++++++++--
>  include/asm-x86/kvm_host.h |    3 ++
>  2 files changed, 50 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index d9b18cc..a6d9a74 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -602,6 +602,8 @@ static void init_vmcb(struct vcpu_svm *svm)
>               save->cr4 = 0;
>       }
>       force_new_asid(&svm->vcpu);
> +
> +     svm->vcpu.arch.hflags = HF_GIF_MASK;
>  }
>  
>  static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
> @@ -1139,6 +1141,44 @@ static int vmmcall_interception(struct vcpu_svm *svm, 
> struct kvm_run *kvm_run)
>       return 1;
>  }
>  
> +static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
> +{
> +     if (svm->vmcb->save.cpl) {
> +             printk(KERN_ERR "%s: invalid cpl 0x%x at ip 0x%lx\n",
> +                    __func__, svm->vmcb->save.cpl, kvm_rip_read(&svm->vcpu));
> +             kvm_queue_exception(&svm->vcpu, GP_VECTOR);
> +             return 1;
> +     }

You need to check EFER.SVME here.

> +
> +     svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
> +     skip_emulated_instruction(&svm->vcpu);
> +
> +     svm->vcpu.arch.hflags |= HF_GIF_MASK;
> +
> +     return 1;
> +}
> +
> +static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
> +{
> +     if (svm->vmcb->save.cpl) {
> +             printk(KERN_ERR "%s: invalid cpl 0x%x at ip 0x%lx\n",
> +                    __func__, svm->vmcb->save.cpl, kvm_rip_read(&svm->vcpu));
> +             kvm_queue_exception(&svm->vcpu, GP_VECTOR);
> +             return 1;
> +     }

And here too. I suggest implementing a function which checks EFER.SVME
and CPL and call it in every emulated SVM instruction.

> +
> +     svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
> +     skip_emulated_instruction(&svm->vcpu);
> +
> +     svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
> +
> +     /* After a CLGI no interrupts should come */
> +     svm_clear_vintr(svm);
> +     svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
> +
> +     return 1;
> +}
> +
>  static int invalid_op_interception(struct vcpu_svm *svm,
>                                  struct kvm_run *kvm_run)
>  {
> @@ -1433,8 +1473,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
>       [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
>       [SVM_EXIT_VMLOAD]                       = invalid_op_interception,
>       [SVM_EXIT_VMSAVE]                       = invalid_op_interception,
> -     [SVM_EXIT_STGI]                         = invalid_op_interception,
> -     [SVM_EXIT_CLGI]                         = invalid_op_interception,
> +     [SVM_EXIT_STGI]                         = stgi_interception,
> +     [SVM_EXIT_CLGI]                         = clgi_interception,
>       [SVM_EXIT_SKINIT]                       = invalid_op_interception,
>       [SVM_EXIT_WBINVD]                       = emulate_on_interception,
>       [SVM_EXIT_MONITOR]                      = invalid_op_interception,
> @@ -1581,6 +1621,9 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
>       if (!kvm_cpu_has_interrupt(vcpu))
>               goto out;
>  
> +     if (!(svm->vcpu.arch.hflags & HF_GIF_MASK))
> +             goto out;
> +
>       if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
>           (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
>           (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
> @@ -1632,7 +1675,8 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
>  
>       svm->vcpu.arch.interrupt_window_open =
>               (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
> -              (svm->vmcb->save.rflags & X86_EFLAGS_IF));
> +              (svm->vmcb->save.rflags & X86_EFLAGS_IF) &&
> +              (svm->vcpu.arch.hflags & HF_GIF_MASK));
>  
>       if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
>               /*
> diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
> index 982b6b2..3e25004 100644
> --- a/include/asm-x86/kvm_host.h
> +++ b/include/asm-x86/kvm_host.h
> @@ -245,6 +245,7 @@ struct kvm_vcpu_arch {
>       unsigned long cr3;
>       unsigned long cr4;
>       unsigned long cr8;
> +     u32 hflags;
>       u64 pdptrs[4]; /* pae */
>       u64 shadow_efer;
>       u64 apic_base;
> @@ -734,6 +735,8 @@ enum {
>       TASK_SWITCH_GATE = 3,
>  };
>  
> +#define HF_GIF_MASK          (1 << 0)
> +
>  /*
>   * Hardware virtualization extension instructions may fault if a
>   * reboot turns off virtualization while processes are running.
> -- 
> 1.5.6
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to