Semantically, SVM and VMX don't differ that much in those regards. Let's use
the same variable for both.

Signed-off-by: Ralf Ramsauer <[email protected]>
---
 hypervisor/arch/x86/include/asm/cell.h |  7 +++----
 hypervisor/arch/x86/svm.c              | 12 ++++++------
 hypervisor/arch/x86/vmx.c              | 12 ++++++------
 3 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/hypervisor/arch/x86/include/asm/cell.h b/hypervisor/arch/x86/include/asm/cell.h
index 2223532a..71bd9976 100644
--- a/hypervisor/arch/x86/include/asm/cell.h
+++ b/hypervisor/arch/x86/include/asm/cell.h
@@ -26,16 +26,15 @@ struct arch_cell {
        /** Buffer for the EPT/NPT root-level page table. */
        u8 __attribute__((aligned(PAGE_SIZE))) root_table_page[PAGE_SIZE];
 
+       /* Intel: PIO access bitmap.
+        * AMD: I/O Permissions Map. */
+       u8 *io_bitmap;
        union {
                struct {
-                       /** PIO access bitmap. */
-                       u8 *io_bitmap;
                        /** Paging structures used for cell CPUs. */
                        struct paging_structures ept_structs;
                } vmx; /**< Intel VMX-specific fields. */
                struct {
-                       /** I/O Permissions Map. */
-                       u8 *iopm;
                        /** Paging structures used for cell CPUs and IOMMU. */
                        struct paging_structures npt_iommu_structs;
                } svm; /**< AMD SVM-specific fields. */
diff --git a/hypervisor/arch/x86/svm.c b/hypervisor/arch/x86/svm.c
index 67169e39..aa0f7820 100644
--- a/hypervisor/arch/x86/svm.c
+++ b/hypervisor/arch/x86/svm.c
@@ -137,7 +137,7 @@ static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
 
 static void svm_set_cell_config(struct cell *cell, struct vmcb *vmcb)
 {
-       vmcb->iopm_base_pa = paging_hvirt2phys(cell->arch.svm.iopm);
+       vmcb->iopm_base_pa = paging_hvirt2phys(cell->arch.io_bitmap);
        vmcb->n_cr3 =
                paging_hvirt2phys(cell->arch.svm.npt_iommu_structs.root_table);
 }
@@ -324,8 +324,8 @@ int vcpu_vendor_cell_init(struct cell *cell)
        u64 flags;
 
        /* allocate iopm  */
-       cell->arch.svm.iopm = page_alloc(&mem_pool, IOPM_PAGES);
-       if (!cell->arch.svm.iopm)
+       cell->arch.io_bitmap = page_alloc(&mem_pool, IOPM_PAGES);
+       if (!cell->arch.io_bitmap)
                return err;
 
        /* build root NPT of cell */
@@ -354,7 +354,7 @@ int vcpu_vendor_cell_init(struct cell *cell)
        return 0;
 
 err_free_iopm:
-       page_free(&mem_pool, cell->arch.svm.iopm, IOPM_PAGES);
+       page_free(&mem_pool, cell->arch.io_bitmap, IOPM_PAGES);
 
        return err;
 }
@@ -396,7 +396,7 @@ void vcpu_vendor_cell_exit(struct cell *cell)
 {
        paging_destroy(&cell->arch.svm.npt_iommu_structs, XAPIC_BASE,
                       PAGE_SIZE, PAGING_NON_COHERENT);
-       page_free(&mem_pool, cell->arch.svm.iopm, IOPM_PAGES);
+       page_free(&mem_pool, cell->arch.io_bitmap, IOPM_PAGES);
 }
 
 int vcpu_init(struct per_cpu *cpu_data)
@@ -1026,7 +1026,7 @@ const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
 void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
                                    struct vcpu_io_bitmap *iobm)
 {
-       iobm->data = cell->arch.svm.iopm;
+       iobm->data = cell->arch.io_bitmap;
        iobm->size = IOPM_PAGES * PAGE_SIZE;
 }
 
diff --git a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c
index 5b3c5b8f..ae696e1d 100644
--- a/hypervisor/arch/x86/vmx.c
+++ b/hypervisor/arch/x86/vmx.c
@@ -337,8 +337,8 @@ int vcpu_vendor_cell_init(struct cell *cell)
        int err;
 
        /* allocate io_bitmap */
-       cell->arch.vmx.io_bitmap = page_alloc(&mem_pool, PIO_BITMAP_PAGES);
-       if (!cell->arch.vmx.io_bitmap)
+       cell->arch.io_bitmap = page_alloc(&mem_pool, PIO_BITMAP_PAGES);
+       if (!cell->arch.io_bitmap)
                return -ENOMEM;
 
        /* build root EPT of cell */
@@ -359,7 +359,7 @@ int vcpu_vendor_cell_init(struct cell *cell)
        return 0;
 
 err_free_io_bitmap:
-       page_free(&mem_pool, cell->arch.vmx.io_bitmap, PIO_BITMAP_PAGES);
+       page_free(&mem_pool, cell->arch.io_bitmap, PIO_BITMAP_PAGES);
 
        return err;
 }
@@ -394,7 +394,7 @@ void vcpu_vendor_cell_exit(struct cell *cell)
 {
        paging_destroy(&cell->arch.vmx.ept_structs, XAPIC_BASE, PAGE_SIZE,
                       PAGING_NON_COHERENT);
-       page_free(&mem_pool, cell->arch.vmx.io_bitmap, PIO_BITMAP_PAGES);
+       page_free(&mem_pool, cell->arch.io_bitmap, PIO_BITMAP_PAGES);
 }
 
 void vcpu_tlb_flush(void)
@@ -459,7 +459,7 @@ static bool vmx_set_cell_config(void)
        u8 *io_bitmap;
        bool ok = true;
 
-       io_bitmap = cell->arch.vmx.io_bitmap;
+       io_bitmap = cell->arch.io_bitmap;
        ok &= vmcs_write64(IO_BITMAP_A, paging_hvirt2phys(io_bitmap));
        ok &= vmcs_write64(IO_BITMAP_B,
                           paging_hvirt2phys(io_bitmap + PAGE_SIZE));
@@ -1239,7 +1239,7 @@ void vmx_entry_failure(void)
 void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
                                    struct vcpu_io_bitmap *iobm)
 {
-       iobm->data = cell->arch.vmx.io_bitmap;
+       iobm->data = cell->arch.io_bitmap;
        iobm->size = PIO_BITMAP_PAGES * PAGE_SIZE;
 }
 
-- 
2.22.0

-- 
You received this message because you are subscribed to the Google Groups 
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
To view this discussion on the web visit 
https://groups.google.com/d/msgid/jailhouse-dev/20190713181037.4358-4-ralf.ramsauer%40oth-regensburg.de.
For more options, visit https://groups.google.com/d/optout.

Reply via email to