Current AMD CPUs support masking of CPUID bits. Using this functionality,
a VMM can limit what features are exposed to the guest, even if it's not
using SVM/VMX.

While I'm not aware of any open source hypervisor that uses these MSRs
at the moment, VMware ESX does, and patches exist for Xen, where trapping
CPUID is non-trivial.

This patch implements emulation for this masking, which is straightforward
because we're intercepting CPUID anyway.

Because it's so simple and can be pretty effective, I put it into the
generic code paths, so VMX benefits from it as well.

Signed-off-by: Alexander Graf <[email protected]>

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 863ea73..e2f0dde 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -370,6 +370,9 @@ struct kvm_vcpu_arch {
        unsigned long dr6;
        unsigned long dr7;
        unsigned long eff_db[KVM_NR_DB_REGS];
+
+       u64 cpuid_mask;
+       u64 cpuid_mask_ext;
 };
 
 struct kvm_mem_alias {
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1890032..03b53ba 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -337,5 +337,7 @@
 
 #define MSR_VM_CR                       0xc0010114
 #define MSR_VM_HSAVE_PA                 0xc0010117
+#define MSR_VM_MASK_CPUID               0xc0011004
+#define MSR_VM_MASK_CPUID_EXT           0xc0011005
 
 #endif /* _ASM_X86_MSR_INDEX_H */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 18bba94..83b4877 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -782,6 +784,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                kvm_write_guest_time(vcpu);
                break;
        }
+       case MSR_VM_MASK_CPUID:
+               vcpu->arch.cpuid_mask = data;
+               break;
+       case MSR_VM_MASK_CPUID_EXT:
+               vcpu->arch.cpuid_mask_ext = data;
+               break;
        default:
                pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
                return 1;
@@ -896,6 +904,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_KVM_SYSTEM_TIME:
                data = vcpu->arch.time;
                break;
+       case MSR_VM_MASK_CPUID:
+               data = vcpu->arch.cpuid_mask;
+               break;
+       case MSR_VM_MASK_CPUID_EXT:
+               data = vcpu->arch.cpuid_mask_ext;
+               break;
        default:
                pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                return 1;
@@ -2901,10 +2915,19 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
        kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
        best = kvm_find_cpuid_entry(vcpu, function, index);
        if (best) {
+               u32 ecx = best->ecx;
+               u32 edx = best->edx;
                kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
                kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
-               kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
-               kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
+               if ( function == 1 ) {
+                       ecx &= (u32)vcpu->arch.cpuid_mask;
+                       edx &= (u32)(vcpu->arch.cpuid_mask >> 32);
+               } else if ( function == 0x80000001 ) {
+                       ecx &= (u32)vcpu->arch.cpuid_mask_ext;
+                       edx &= (u32)(vcpu->arch.cpuid_mask_ext >> 32);
+               }
+               kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
+               kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
        }
        kvm_x86_ops->skip_emulated_instruction(vcpu);
        KVMTRACE_5D(CPUID, vcpu, function,
@@ -4089,6 +4112,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
        memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
        vcpu->arch.dr6 = DR6_FIXED_1;
        vcpu->arch.dr7 = DR7_FIXED_1;
+       vcpu->arch.cpuid_mask = 0xffffffffffffffff;
+       vcpu->arch.cpuid_mask_ext = 0xffffffffffffffff;
 
        return kvm_x86_ops->vcpu_reset(vcpu);
 }
-- 
1.5.6

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to