From: Tom Lendacky <[email protected]>

An SEV-ES vCPU has additional VMCB vCPU load/put requirements. SEV-ES
hardware will restore certain registers on VMEXIT, but will not save them on
VMRUN (see Table B-3 and Table B-4 of the AMD64 APM Volume 2), so make the
following changes:

General vCPU load changes:
  - During vCPU loading, perform a VMSAVE to the per-CPU SVM save area and
    also save the current value of XCR0 there, as these registers will be
    restored by hardware on VMEXIT (see the sketch below).
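
  For illustration, the load path condenses to the following (taken from
  sev_es_vcpu_load() in the diff below, with the MSR save loop elided; the
  0x400 offset skips the VMCB control area so that hostsa points at the
  state save area within the per-CPU page):

    void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
    {
            struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
            struct vmcb_save_area *hostsa;

            /*
             * Hardware VMLOADs this area on VMEXIT but never VMSAVEs it,
             * so refresh it on every vCPU load.
             */
            asm volatile(__ex("vmsave")
                         : : "a" (__sme_page_pa(sd->save_area)) : "memory");

            /* XCR0 is also restored on VMEXIT; stash the host value. */
            hostsa = (struct vmcb_save_area *)
                     (page_address(sd->save_area) + 0x400);
            hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
    }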

General vCPU put changes:
  - Do not attempt to restore registers that SEV-ES hardware has already
    restored on VMEXIT (see the sketch below).
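
  The put path then reduces to skipping every MSR that hardware already
  restored, driven by the new sev_es_restored flag in host_save_user_msrs[]
  (condensed from sev_es_vcpu_put() in the diff below; with this table,
  MSR_TSC_AUX is the only entry still restored by hand):

    for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
            if (host_save_user_msrs[i].sev_es_restored)
                    continue;       /* hardware restored it on VMEXIT */

            wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
    }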

Signed-off-by: Tom Lendacky <[email protected]>
---
 arch/x86/kvm/svm/sev.c | 48 ++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c | 36 +++++++++++++++++++------------
 arch/x86/kvm/svm/svm.h | 22 +++++++++++++------
 3 files changed, 87 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 7ed88f2e8d93..50018436863b 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -17,11 +17,14 @@
 #include <linux/trace_events.h>
 
 #include <asm/trapnr.h>
+#include <asm/fpu/internal.h>
 
 #include "x86.h"
 #include "svm.h"
 #include "trace.h"
 
+#define __ex(x) __kvm_handle_fault_on_reboot(x)
+
 static u8 sev_enc_bit;
 static int sev_flush_asids(void);
 static DECLARE_RWSEM(sev_deactivate_lock);
@@ -1599,3 +1602,48 @@ void sev_es_create_vcpu(struct vcpu_svm *svm)
                                            GHCB_VERSION_MIN,
                                            sev_enc_bit));
 }
+
+void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
+{
+       struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+       struct vmcb_save_area *hostsa;
+       unsigned int i;
+
+       /*
+        * As an SEV-ES guest, hardware will restore the host state on VMEXIT,
+        * of which one step is to perform a VMLOAD. Since hardware does not
+        * perform a VMSAVE on VMRUN, the host save area must be updated.
+        */
+       asm volatile(__ex("vmsave") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
+
+       /*
+        * Certain MSRs are restored on VMEXIT; only save the ones that
+        * aren't restored.
+        */
+       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
+               if (host_save_user_msrs[i].sev_es_restored)
+                       continue;
+
+               rdmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
+       }
+
+       /* XCR0 is restored on VMEXIT, save the current host value */
+       hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
+       hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+}
+
+void sev_es_vcpu_put(struct vcpu_svm *svm)
+{
+       unsigned int i;
+
+       /*
+        * Certain MSRs are restored on VMEXIT and were saved with vmsave in
+        * sev_es_vcpu_load() above. Only restore ones that weren't.
+        */
+       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
+               if (host_save_user_msrs[i].sev_es_restored)
+                       continue;
+
+               wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
+       }
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index cb9b1d281adb..efefe8ba9759 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1340,15 +1340,20 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                vmcb_mark_all_dirty(svm->vmcb);
        }
 
+       if (sev_es_guest(svm->vcpu.kvm)) {
+               sev_es_vcpu_load(svm, cpu);
+       } else {
 #ifdef CONFIG_X86_64
-       rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
+               rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
 #endif
-       savesegment(fs, svm->host.fs);
-       savesegment(gs, svm->host.gs);
-       svm->host.ldt = kvm_read_ldt();
+               savesegment(fs, svm->host.fs);
+               savesegment(gs, svm->host.gs);
+               svm->host.ldt = kvm_read_ldt();
 
-       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-               rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+               for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+                       rdmsrl(host_save_user_msrs[i].index,
+                              svm->host_user_msrs[i]);
+       }
 
        if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
@@ -1376,19 +1381,24 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
        avic_vcpu_put(vcpu);
 
        ++vcpu->stat.host_state_reload;
-       kvm_load_ldt(svm->host.ldt);
+       if (sev_es_guest(svm->vcpu.kvm)) {
+               sev_es_vcpu_put(svm);
+       } else {
+               kvm_load_ldt(svm->host.ldt);
 #ifdef CONFIG_X86_64
-       loadsegment(fs, svm->host.fs);
-       wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
-       load_gs_index(svm->host.gs);
+               loadsegment(fs, svm->host.fs);
+               wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
+               load_gs_index(svm->host.gs);
 #else
 #ifdef CONFIG_X86_32_LAZY_GS
-       loadsegment(gs, svm->host.gs);
+               loadsegment(gs, svm->host.gs);
 #endif
 #endif
 
-       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-               wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+               for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+                       wrmsrl(host_save_user_msrs[i].index,
+                              svm->host_user_msrs[i]);
+       }
 }
 
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 465e14a7146f..0812d70085d7 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -23,15 +23,23 @@
 
 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
 
-static const u32 host_save_user_msrs[] = {
+static const struct svm_host_save_msrs {
+       u32 index;              /* Index of the MSR */
+       bool sev_es_restored;   /* True if MSR is restored on SEV-ES VMEXIT */
+} host_save_user_msrs[] = {
 #ifdef CONFIG_X86_64
-       MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
-       MSR_FS_BASE,
+       { .index = MSR_STAR,                    .sev_es_restored = true },
+       { .index = MSR_LSTAR,                   .sev_es_restored = true },
+       { .index = MSR_CSTAR,                   .sev_es_restored = true },
+       { .index = MSR_SYSCALL_MASK,            .sev_es_restored = true },
+       { .index = MSR_KERNEL_GS_BASE,          .sev_es_restored = true },
+       { .index = MSR_FS_BASE,                 .sev_es_restored = true },
 #endif
-       MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
-       MSR_TSC_AUX,
+       { .index = MSR_IA32_SYSENTER_CS,        .sev_es_restored = true },
+       { .index = MSR_IA32_SYSENTER_ESP,       .sev_es_restored = true },
+       { .index = MSR_IA32_SYSENTER_EIP,       .sev_es_restored = true },
+       { .index = MSR_TSC_AUX,                 .sev_es_restored = false },
 };
-
 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
 
 #define MSRPM_OFFSETS  16
@@ -573,6 +581,8 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 void sev_es_init_vmcb(struct vcpu_svm *svm);
 void sev_es_create_vcpu(struct vcpu_svm *svm);
+void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
+void sev_es_vcpu_put(struct vcpu_svm *svm);
 
 /* VMSA Accessor functions */
 
-- 
2.28.0
