Apart from the SMBASE MSR, the smi field of struct kvm_vcpu_events has to be translated into the corresponding CPUX86State fields. Also, memory transaction attributes depend on SMM state, so read it out of struct kvm_run's flags field on every exit from KVM to userspace.
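
For reference, the SMM bit read back in kvm_arch_post_run() ends up steering memory transaction attributes. A minimal sketch of the mechanism (the helper mirrors the shape of QEMU's cpu_get_mem_attrs(); its exact definition is an assumption of this illustration, not part of this patch):

/* Sketch: derive per-access attributes from the SMM hflag.  SMRAM can
 * then be modelled as a region that only responds to transactions with
 * .secure set, i.e. accesses made while the vCPU is in SMM.
 */
static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
{
    return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
}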
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 linux-headers/asm-x86/kvm.h | 11 ++++++-
 linux-headers/linux/kvm.h   |  5 +++-
 target-i386/cpu.h           |  1 +
 target-i386/kvm.c           | 73 ++++++++++++++++++++++++++++++++++++++++-----
 4 files changed, 81 insertions(+), 9 deletions(-)

diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h
index d7dcef5..cf84866 100644
--- a/linux-headers/asm-x86/kvm.h
+++ b/linux-headers/asm-x86/kvm.h
@@ -48,6 +48,8 @@
 /* Architectural interrupt line count. */
 #define KVM_NR_INTERRUPTS 256
 
+#define KVM_RUN_X86_SMM (1 << 0)
+
 struct kvm_memory_alias {
 	__u32 slot;  /* this has a different namespace than memory slots */
 	__u32 flags;
@@ -281,6 +283,7 @@ struct kvm_reinject_control {
 #define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
 #define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
 #define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
+#define KVM_VCPUEVENT_VALID_SMM 0x00000008
 
 /* Interrupt shadow states */
 #define KVM_X86_SHADOW_INT_MOV_SS 0x01
@@ -309,7 +312,13 @@ struct kvm_vcpu_events {
 	} nmi;
 	__u32 sipi_vector;
 	__u32 flags;
-	__u32 reserved[10];
+	struct {
+		__u8 smm;
+		__u8 pending;
+		__u8 smm_inside_nmi;
+		__u8 pad;
+	} smi;
+	__u32 reserved[9];
 };
 
 /* for KVM_GET/SET_DEBUGREGS */
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index b96d978..15beebb 100644
--- a/linux-headers/linux/kvm.h
+++ b/linux-headers/linux/kvm.h
@@ -202,7 +202,7 @@ struct kvm_run {
 	__u32 exit_reason;
 	__u8 ready_for_interrupt_injection;
 	__u8 if_flag;
-	__u8 padding2[2];
+	__u16 flags;
 
 	/* in (pre_kvm_run), out (post_kvm_run) */
 	__u64 cr8;
@@ -814,6 +814,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_S390_INJECT_IRQ 113
 #define KVM_CAP_S390_IRQ_STATE 114
 #define KVM_CAP_PPC_HWRNG 115
+#define KVM_CAP_X86_SMM 116
 
 #ifdef KVM_CAP_IRQ_ROUTING
@@ -1199,6 +1200,8 @@ struct kvm_s390_ucas_mapping {
 /* Available with KVM_CAP_S390_IRQ_STATE */
 #define KVM_S390_SET_IRQ_STATE _IOW(KVMIO, 0xb5, struct kvm_s390_irq_state)
 #define KVM_S390_GET_IRQ_STATE _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state)
+/* Available with KVM_CAP_X86_SMM */
+#define KVM_SMI _IO(KVMIO, 0xb7)
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
 #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index df6e885..684e3a0 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -314,6 +314,7 @@
 
 #define MSR_P6_PERFCTR0 0xc1
 
+#define MSR_IA32_SMBASE 0x9e
 #define MSR_MTRRcap 0xfe
 #define MSR_MTRRcap_VCNT 8
 #define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 009bf74..09b4fc7 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -74,6 +74,7 @@ static bool has_msr_feature_control;
 static bool has_msr_async_pf_en;
 static bool has_msr_pv_eoi_en;
 static bool has_msr_misc_enable;
+static bool has_msr_smbase;
 static bool has_msr_bndcfgs;
 static bool has_msr_kvm_steal_time;
 static int lm_capable_kernel;
@@ -820,6 +821,10 @@ static int kvm_get_supported_msrs(KVMState *s)
                 has_msr_tsc_deadline = true;
                 continue;
             }
+            if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
+                has_msr_smbase = true;
+                continue;
+            }
             if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
                 has_msr_misc_enable = true;
                 continue;
@@ -1246,6 +1251,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
         kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
                           env->msr_ia32_misc_enable);
     }
+    if (has_msr_smbase) {
+        kvm_msr_entry_set(&msrs[n++], MSR_IA32_SMBASE, env->smbase);
+    }
     if (has_msr_bndcfgs) {
         kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
     }
@@ -1607,6 +1615,9 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_misc_enable) {
         msrs[n++].index = MSR_IA32_MISC_ENABLE;
     }
+    if (has_msr_smbase) {
+        msrs[n++].index = MSR_IA32_SMBASE;
+    }
     if (has_msr_feature_control) {
         msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
     }
@@ -1761,6 +1772,9 @@ static int kvm_get_msrs(X86CPU *cpu)
         case MSR_IA32_MISC_ENABLE:
             env->msr_ia32_misc_enable = msrs[i].data;
             break;
+        case MSR_IA32_SMBASE:
+            env->smbase = msrs[i].data;
+            break;
         case MSR_IA32_FEATURE_CONTROL:
             env->msr_ia32_feature_control = msrs[i].data;
             break;
@@ -1948,6 +1962,16 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
 
     events.sipi_vector = env->sipi_vector;
 
     events.flags = 0;
+    if (has_msr_smbase) {
+        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
+        /* smi.pending is stored as CPU_INTERRUPT_SMI in cpu->interrupt_request.
+         * It is transferred to the kernel before the next vmentry.
+         */
+        events.smi.pending = 0;
+        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
+        events.flags |= KVM_VCPUEVENT_VALID_SMM;
+    }
+
     if (level >= KVM_PUT_RESET_STATE) {
         events.flags |=
@@ -1967,6 +1991,7 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
         return 0;
     }
 
+    memset(&events, 0, sizeof(events));
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
     if (ret < 0) {
         return ret;
@@ -1988,6 +2013,24 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
         env->hflags2 &= ~HF2_NMI_MASK;
     }
 
+    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
+        if (events.smi.smm) {
+            env->hflags |= HF_SMM_MASK;
+        } else {
+            env->hflags &= ~HF_SMM_MASK;
+        }
+        if (events.smi.pending) {
+            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
+        } else {
+            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
+        }
+        if (events.smi.smm_inside_nmi) {
+            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
+        } else {
+            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
+        }
+    }
+
     env->sipi_vector = events.sipi_vector;
 
     return 0;
@@ -2191,13 +2234,24 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
     int ret;
 
     /* Inject NMI */
-    if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
-        cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
-        DPRINTF("injected NMI\n");
-        ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
-        if (ret < 0) {
-            fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
-                    strerror(-ret));
+    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+            DPRINTF("injected NMI\n");
+            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
+            if (ret < 0) {
+                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
+                        strerror(-ret));
+            }
+        }
+        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+            DPRINTF("injected SMI\n");
+            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
+            if (ret < 0) {
+                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
+                        strerror(-ret));
+            }
         }
     }
 
@@ -2252,6 +2306,11 @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
     X86CPU *x86_cpu = X86_CPU(cpu);
     CPUX86State *env = &x86_cpu->env;
 
+    if (run->flags & KVM_RUN_X86_SMM) {
+        env->hflags |= HF_SMM_MASK;
+    } else {
+        env->hflags &= ~HF_SMM_MASK;
+    }
     if (run->if_flag) {
         env->eflags |= IF_MASK;
     } else {
-- 
1.8.3.1
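
For completeness, here is roughly how the new uAPI would be exercised by a userspace VMM; a hedged sketch, not part of the patch. kvm_fd and vcpu_fd are assumed to come from the usual open("/dev/kvm") / KVM_CREATE_VM / KVM_CREATE_VCPU sequence, and maybe_inject_smi() is a hypothetical helper name.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Probe for SMM support, then latch an SMI on a vCPU; KVM delivers it
 * on the next vmentry.  QEMU does the equivalent from kvm_arch_pre_run()
 * when CPU_INTERRUPT_SMI is pending.
 */
static int maybe_inject_smi(int kvm_fd, int vcpu_fd)
{
    if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_SMM) <= 0) {
        return -1;               /* kernel lacks KVM_CAP_X86_SMM */
    }
    return ioctl(vcpu_fd, KVM_SMI);   /* KVM_SMI takes no argument */
}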