The I/O bitmap allocation logic is pretty much the same for SVM and VMX, apart from the number of pages that have to be allocated, so hoist it into the common vcpu code.
Signed-off-by: Ralf Ramsauer <[email protected]> --- hypervisor/arch/x86/include/asm/vcpu.h | 1 + hypervisor/arch/x86/svm.c | 34 +++++++++----------------- hypervisor/arch/x86/vcpu.c | 9 ++++++- hypervisor/arch/x86/vmx.c | 31 ++++++++--------------- 4 files changed, 31 insertions(+), 44 deletions(-) diff --git a/hypervisor/arch/x86/include/asm/vcpu.h b/hypervisor/arch/x86/include/asm/vcpu.h index b18b2b3a..24872f55 100644 --- a/hypervisor/arch/x86/include/asm/vcpu.h +++ b/hypervisor/arch/x86/include/asm/vcpu.h @@ -90,6 +90,7 @@ void vcpu_skip_emulated_instruction(unsigned int inst_len); void vcpu_vendor_get_cell_io_bitmap(struct cell *cell, struct vcpu_io_bitmap *out); +unsigned int vcpu_vendor_get_io_bitmap_pages(void); #define VCPU_CS_DPL_MASK BIT_MASK(6, 5) #define VCPU_CS_L (1 << 13) diff --git a/hypervisor/arch/x86/svm.c b/hypervisor/arch/x86/svm.c index aa0f7820..f2ea313e 100644 --- a/hypervisor/arch/x86/svm.c +++ b/hypervisor/arch/x86/svm.c @@ -320,14 +320,8 @@ int vcpu_vendor_early_init(void) int vcpu_vendor_cell_init(struct cell *cell) { - int err = -ENOMEM; u64 flags; - /* allocate iopm */ - cell->arch.io_bitmap = page_alloc(&mem_pool, IOPM_PAGES); - if (!cell->arch.io_bitmap) - return err; - /* build root NPT of cell */ cell->arch.svm.npt_iommu_structs.root_paging = npt_iommu_paging; cell->arch.svm.npt_iommu_structs.root_table = @@ -338,25 +332,16 @@ int vcpu_vendor_cell_init(struct cell *cell) * Map xAPIC as is; reads are passed, writes are trapped. 
*/ flags = PAGE_READONLY_FLAGS | PAGE_FLAG_US | PAGE_FLAG_DEVICE; - err = paging_create(&cell->arch.svm.npt_iommu_structs, - XAPIC_BASE, PAGE_SIZE, XAPIC_BASE, - flags, PAGING_NON_COHERENT); + return paging_create(&cell->arch.svm.npt_iommu_structs, + XAPIC_BASE, PAGE_SIZE, XAPIC_BASE, + flags, PAGING_NON_COHERENT); } else { flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE; - err = paging_create(&cell->arch.svm.npt_iommu_structs, - paging_hvirt2phys(avic_page), - PAGE_SIZE, XAPIC_BASE, - flags, PAGING_NON_COHERENT); + return paging_create(&cell->arch.svm.npt_iommu_structs, + paging_hvirt2phys(avic_page), + PAGE_SIZE, XAPIC_BASE, + flags, PAGING_NON_COHERENT); } - if (err) - goto err_free_iopm; - - return 0; - -err_free_iopm: - page_free(&mem_pool, cell->arch.io_bitmap, IOPM_PAGES); - - return err; } int vcpu_map_memory_region(struct cell *cell, @@ -1030,6 +1015,11 @@ void vcpu_vendor_get_cell_io_bitmap(struct cell *cell, iobm->size = IOPM_PAGES * PAGE_SIZE; } +unsigned int vcpu_vendor_get_io_bitmap_pages(void) +{ + return IOPM_PAGES; +} + #define VCPU_VENDOR_GET_REGISTER(__reg__) \ u64 vcpu_vendor_get_##__reg__(void) \ { \ diff --git a/hypervisor/arch/x86/vcpu.c b/hypervisor/arch/x86/vcpu.c index a1fb8660..4c074669 100644 --- a/hypervisor/arch/x86/vcpu.c +++ b/hypervisor/arch/x86/vcpu.c @@ -78,6 +78,7 @@ out_err: int vcpu_cell_init(struct cell *cell) { + const unsigned int io_bitmap_pages = vcpu_vendor_get_io_bitmap_pages(); const u8 *pio_bitmap = jailhouse_cell_pio_bitmap(cell->config); u32 pio_bitmap_size = cell->config->pio_bitmap_size; struct vcpu_io_bitmap cell_iobm, root_cell_iobm; @@ -86,9 +87,15 @@ int vcpu_cell_init(struct cell *cell) int err; u8 *b; + cell->arch.io_bitmap = page_alloc(&mem_pool, io_bitmap_pages); + if (!cell->arch.io_bitmap) + return -ENOMEM; + err = vcpu_vendor_cell_init(cell); - if (err) + if (err) { + page_free(&mem_pool, cell->arch.io_bitmap, io_bitmap_pages); return err; + } vcpu_vendor_get_cell_io_bitmap(cell, &cell_iobm); diff --git 
a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c index ae696e1d..8552cabd 100644 --- a/hypervisor/arch/x86/vmx.c +++ b/hypervisor/arch/x86/vmx.c @@ -334,13 +334,6 @@ unsigned long arch_paging_gphys2phys(unsigned long gphys, unsigned long flags) int vcpu_vendor_cell_init(struct cell *cell) { - int err; - - /* allocate io_bitmap */ - cell->arch.io_bitmap = page_alloc(&mem_pool, PIO_BITMAP_PAGES); - if (!cell->arch.io_bitmap) - return -ENOMEM; - /* build root EPT of cell */ cell->arch.vmx.ept_structs.root_paging = ept_paging; cell->arch.vmx.ept_structs.root_table = @@ -348,20 +341,11 @@ int vcpu_vendor_cell_init(struct cell *cell) /* Map the special APIC access page into the guest's physical address * space at the default address (XAPIC_BASE) */ - err = paging_create(&cell->arch.vmx.ept_structs, - paging_hvirt2phys(apic_access_page), - PAGE_SIZE, XAPIC_BASE, - EPT_FLAG_READ | EPT_FLAG_WRITE | EPT_FLAG_WB_TYPE, - PAGING_NON_COHERENT); - if (err) - goto err_free_io_bitmap; - - return 0; - -err_free_io_bitmap: - page_free(&mem_pool, cell->arch.io_bitmap, PIO_BITMAP_PAGES); - - return err; + return paging_create(&cell->arch.vmx.ept_structs, + paging_hvirt2phys(apic_access_page), + PAGE_SIZE, XAPIC_BASE, + EPT_FLAG_READ | EPT_FLAG_WRITE | EPT_FLAG_WB_TYPE, + PAGING_NON_COHERENT); } int vcpu_map_memory_region(struct cell *cell, @@ -1243,6 +1227,11 @@ void vcpu_vendor_get_cell_io_bitmap(struct cell *cell, iobm->size = PIO_BITMAP_PAGES * PAGE_SIZE; } +unsigned int vcpu_vendor_get_io_bitmap_pages(void) +{ + return PIO_BITMAP_PAGES; +} + #define VCPU_VENDOR_GET_REGISTER(__reg__, __field__) \ u64 vcpu_vendor_get_##__reg__(void) \ { \ -- 2.22.0 -- You received this message because you are subscribed to the Google Groups "Jailhouse" group. To unsubscribe from this group and stop receiving emails from it, send an email to [email protected]. 
To view this discussion on the web visit https://groups.google.com/d/msgid/jailhouse-dev/20190713181037.4358-5-ralf.ramsauer%40oth-regensburg.de. For more options, visit https://groups.google.com/d/optout.
