The branch stable/13 has been updated by jhb:

URL: 
https://cgit.FreeBSD.org/src/commit/?id=5cf42db0f6b4d2fb4c581934b9eee643b6a5b698

commit 5cf42db0f6b4d2fb4c581934b9eee643b6a5b698
Author:     John Baldwin <[email protected]>
AuthorDate: 2022-11-18 17:57:48 +0000
Commit:     John Baldwin <[email protected]>
CommitDate: 2023-01-26 21:43:44 +0000

    vmm svm: Refactor per-vCPU data.
    
    - Allocate VMCBs separately to avoid excessive padding in struct
      svm_vcpu.
    
    - Allocate APIC pages dynamically directly in struct vlapic.
    
    - Move vm_mtrr into struct svm_vcpu rather than using a separate
      parallel array.
    
    Reviewed by:    corvink, markj
    Differential Revision:  https://reviews.freebsd.org/D37148
    
    (cherry picked from commit 215d2fd53f6c254cb900e1775abae86d3fdada65)
---
 sys/amd64/vmm/amd/svm.c       | 17 +++++++++++++----
 sys/amd64/vmm/amd/svm_msr.c   |  4 ++--
 sys/amd64/vmm/amd/svm_softc.h | 19 ++++++-------------
 3 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index 1a7990383b09..0db1e45594fa 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -565,8 +565,6 @@ svm_init(struct vm *vm, pmap_t pmap)
        uint16_t maxcpus;
 
        svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
-       if (((uintptr_t)svm_sc & PAGE_MASK) != 0)
-               panic("malloc of svm_softc not aligned on page boundary");
 
        svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
            M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
@@ -619,9 +617,11 @@ svm_init(struct vm *vm, pmap_t pmap)
        maxcpus = vm_get_maxcpus(svm_sc->vm);
        for (i = 0; i < maxcpus; i++) {
                vcpu = svm_get_vcpu(svm_sc, i);
+               vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE,
+                   M_SVM, M_WAITOK | M_ZERO);
                vcpu->nextrip = ~0;
                vcpu->lastcpu = NOCPU;
-               vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
+               vcpu->vmcb_pa = vtophys(vcpu->vmcb);
                vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
                svm_msr_guest_init(svm_sc, i);
        }
@@ -2149,7 +2149,14 @@ static void
 svm_cleanup(void *arg)
 {
        struct svm_softc *sc = arg;
+       struct svm_vcpu *vcpu;
+       uint16_t i, maxcpus;
 
+       maxcpus = vm_get_maxcpus(sc->vm);
+       for (i = 0; i < maxcpus; i++) {
+               vcpu = svm_get_vcpu(sc, i);
+               free(vcpu->vmcb, M_SVM);
+       }
        contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
        contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
        free(sc, M_SVM);
@@ -2400,7 +2407,8 @@ svm_vlapic_init(void *arg, int vcpuid)
        vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
        vlapic->vm = svm_sc->vm;
        vlapic->vcpuid = vcpuid;
-       vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];
+       vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC,
+           M_WAITOK | M_ZERO);
 
        vlapic_init(vlapic);
 
@@ -2412,6 +2420,7 @@ svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
 {
 
         vlapic_cleanup(vlapic);
+       free(vlapic->apic_page, M_SVM_VLAPIC);
         free(vlapic, M_SVM_VLAPIC);
 }
 
diff --git a/sys/amd64/vmm/amd/svm_msr.c b/sys/amd64/vmm/amd/svm_msr.c
index 1a22f16cf48e..f0cea633a0cf 100644
--- a/sys/amd64/vmm/amd/svm_msr.c
+++ b/sys/amd64/vmm/amd/svm_msr.c
@@ -124,7 +124,7 @@ svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
        case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
        case MSR_MTRR64kBase:
        case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
-               if (vm_rdmtrr(&sc->mtrr[vcpu], num, result) != 0) {
+               if (vm_rdmtrr(&sc->vcpu[vcpu].mtrr, num, result) != 0) {
                        vm_inject_gp(sc->vm, vcpu);
                }
                break;
@@ -156,7 +156,7 @@ svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
        case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
        case MSR_MTRR64kBase:
        case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
-               if (vm_wrmtrr(&sc->mtrr[vcpu], num, val) != 0) {
+               if (vm_wrmtrr(&sc->vcpu[vcpu].mtrr, num, val) != 0) {
                        vm_inject_gp(sc->vm, vcpu);
                }
                break;
diff --git a/sys/amd64/vmm/amd/svm_softc.h b/sys/amd64/vmm/amd/svm_softc.h
index 5f6a267617d2..b9e53ac9d4a0 100644
--- a/sys/amd64/vmm/amd/svm_softc.h
+++ b/sys/amd64/vmm/amd/svm_softc.h
@@ -41,12 +41,8 @@ struct asid {
        uint32_t        num;    /* range is [1, nasid - 1] */
 };
 
-/*
- * XXX separate out 'struct vmcb' from 'svm_vcpu' to avoid wasting space
- * due to VMCB alignment requirements.
- */
 struct svm_vcpu {
-       struct vmcb     vmcb;    /* hardware saved vcpu context */
+       struct vmcb     *vmcb;   /* hardware saved vcpu context */
        struct svm_regctx swctx; /* software saved vcpu context */
        uint64_t        vmcb_pa; /* VMCB physical address */
        uint64_t        nextrip; /* next instruction to be executed by guest */
@@ -54,23 +50,20 @@ struct svm_vcpu {
        uint32_t        dirty;   /* state cache bits that must be cleared */
        long            eptgen;  /* pmap->pm_eptgen when the vcpu last ran */
        struct asid     asid;
-} __aligned(PAGE_SIZE);
+       struct vm_mtrr  mtrr;
+};
 
 /*
  * SVM softc, one per virtual machine.
  */
 struct svm_softc {
-       uint8_t apic_page[VM_MAXCPU][PAGE_SIZE];
        struct svm_vcpu vcpu[VM_MAXCPU];
        vm_offset_t     nptp;                       /* nested page table */
        uint8_t         *iopm_bitmap;    /* shared by all vcpus */
        uint8_t         *msr_bitmap;    /* shared by all vcpus */
        struct vm       *vm;
-       struct vm_mtrr  mtrr[VM_MAXCPU];
 };
 
-CTASSERT((offsetof(struct svm_softc, nptp) & PAGE_MASK) == 0);
-
 static __inline struct svm_vcpu *
 svm_get_vcpu(struct svm_softc *sc, int vcpu)
 {
@@ -82,21 +75,21 @@ static __inline struct vmcb *
 svm_get_vmcb(struct svm_softc *sc, int vcpu)
 {
 
-       return (&(sc->vcpu[vcpu].vmcb));
+       return ((sc->vcpu[vcpu].vmcb));
 }
 
 static __inline struct vmcb_state *
 svm_get_vmcb_state(struct svm_softc *sc, int vcpu)
 {
 
-       return (&(sc->vcpu[vcpu].vmcb.state));
+       return (&(sc->vcpu[vcpu].vmcb->state));
 }
 
 static __inline struct vmcb_ctrl *
 svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu)
 {
 
-       return (&(sc->vcpu[vcpu].vmcb.ctrl));
+       return (&(sc->vcpu[vcpu].vmcb->ctrl));
 }
 
 static __inline struct svm_regctx *

Reply via email to