From: Prasad Joshi <[email protected]>

The write-only SVM_KEY MSR can be used as a password-protected mechanism
to clear VM_CR.LOCK.

This key can only be set while VM_CR.LOCK is clear. Once the key is set,
a write to this MSR compares the written value with the already-set key;
VM_CR.LOCK is cleared only if the two match. All reads from this MSR
must return 0.

Signed-off-by: Prasad Joshi <[email protected]>
---
 arch/x86/include/uapi/asm/msr-index.h |    1 +
 arch/x86/kvm/svm.c                    |   31 +++++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+)

diff --git a/arch/x86/include/uapi/asm/msr-index.h 
b/arch/x86/include/uapi/asm/msr-index.h
index 892ce40..eda8346 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -170,6 +170,7 @@
 
 #define MSR_AMD64_PATCH_LEVEL          0x0000008b
 #define MSR_AMD64_TSC_RATIO            0xc0000104
+#define MSR_AMD64_SVM_KEY              0xc0010118
 #define MSR_AMD64_NB_CFG               0xc001001f
 #define MSR_AMD64_PATCH_LOADER         0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH       0xc0010140
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index fcdfdea..73b2dc2 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -150,6 +150,8 @@ struct vcpu_svm {
 
        struct nested_state nested;
 
+       u64 svm_key; /* Write only SVM Key */
+
        bool nmi_singlestep;
 
        unsigned int3_injected;
@@ -3079,6 +3081,13 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned 
ecx, u64 *data)
        case MSR_VM_CR:
                *data = svm->nested.vm_cr_msr;
                break;
+       case MSR_AMD64_SVM_KEY:
+               /*
+                * SVM Key is password protected mechanism to manage
+                * VM_CR.LOCK and therefore reading it must always return 0.
+                */
+               *data = 0;
+               break;
        case MSR_IA32_UCODE_REV:
                *data = 0x01000065;
                break;
@@ -3132,6 +3141,26 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
        return 0;
 }
 
+static int svm_set_svm_key(struct kvm_vcpu *vcpu, u64 data)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+       bool locked;
+
+       /*
+        * NOTE(review): this consults the *host* CPU for SVML; presumably
+        * the guest's CPUID should be checked instead -- confirm.
+        */
+       if (!boot_cpu_has(X86_FEATURE_SVML))
+               return 1;
+
+       locked = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_LOCK_MASK;
+       if (!locked && data) {
+               /* Lock not engaged: record the password key. */
+               svm->svm_key = data;
+       } else if (data && data == svm->svm_key) {
+               /* Non-zero write matching the stored key clears the lock. */
+               svm->nested.vm_cr_msr &= ~SVM_VM_CR_SVM_LOCK_MASK;
+       }
+       return 0;
+}
+
 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -3191,6 +3220,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct 
msr_data *msr)
                break;
        case MSR_VM_CR:
                return svm_set_vm_cr(vcpu, data);
+        case MSR_AMD64_SVM_KEY:
+               return svm_set_svm_key(vcpu, data);
        case MSR_VM_IGNNE:
                vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", 
ecx, data);
                break;
-- 
1.7.10.4

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to