From: Jan Kiszka <[email protected]>

This makes it possible to mitigate CVE-2018-12207: On affected Intel
machines, a guest can trigger an unrecoverable machine check exception
when running a certain code pattern on an executable huge page. Intel's
suggested mitigation involves on-demand break-up of huge pages when the
guest tries to execute on them and also dynamically consolidating them
back into non-executable huge pages. This pattern is not compatible
with the static and deterministic behavior of Jailhouse.

Therefore, this introduces a memory region flag to exclude huge page
mappings for a region. System configurators can use this flag for
executable regions on affected CPUs, while still allowing huge pages for
non-executable regions.

Signed-off-by: Jan Kiszka <[email protected]>
---

Changes in v2:
 - fix untested (well...) hugepage disabling logic
 - tagged some hypervisor mappings with PAGING_ALLOW_HUGE

 hypervisor/arch/arm-common/mmu_cell.c |  5 ++++-
 hypervisor/arch/x86/svm.c             |  5 ++++-
 hypervisor/arch/x86/vmx.c             |  6 ++++--
 hypervisor/arch/x86/vtd.c             |  5 ++++-
 hypervisor/include/jailhouse/paging.h |  5 +++++
 hypervisor/paging.c                   | 11 +++++++----
 hypervisor/setup.c                    |  5 +++--
 include/jailhouse/cell-config.h       |  3 ++-
 8 files changed, 33 insertions(+), 12 deletions(-)

diff --git a/hypervisor/arch/arm-common/mmu_cell.c b/hypervisor/arch/arm-common/mmu_cell.c
index a00997c3..56db2e8c 100644
--- a/hypervisor/arch/arm-common/mmu_cell.c
+++ b/hypervisor/arch/arm-common/mmu_cell.c
@@ -22,6 +22,7 @@ int arch_map_memory_region(struct cell *cell,
 {
        u64 phys_start = mem->phys_start;
        unsigned long access_flags = PTE_FLAG_VALID | PTE_ACCESS_FLAG;
+       unsigned long paging_flags = PAGING_COHERENT | PAGING_ALLOW_HUGE;
        int err = 0;

        if (mem->flags & JAILHOUSE_MEM_READ)
@@ -38,13 +39,15 @@ int arch_map_memory_region(struct cell *cell,
        if (!(mem->flags & JAILHOUSE_MEM_EXECUTE))
                flags |= S2_PAGE_ACCESS_XN;
        */
+       if (mem->flags & JAILHOUSE_MEM_NO_HUGEPAGES)
+               paging_flags &= ~PAGING_ALLOW_HUGE;

        err = iommu_map_memory_region(cell, mem);
        if (err)
                return err;

        err = paging_create(&cell->arch.mm, phys_start, mem->size,
-                           mem->virt_start, access_flags, PAGING_COHERENT);
+                           mem->virt_start, access_flags, paging_flags);
        if (err)
                iommu_unmap_memory_region(cell, mem);

diff --git a/hypervisor/arch/x86/svm.c b/hypervisor/arch/x86/svm.c
index 513c696c..d85d2b50 100644
--- a/hypervisor/arch/x86/svm.c
+++ b/hypervisor/arch/x86/svm.c
@@ -349,6 +349,7 @@ int vcpu_map_memory_region(struct cell *cell,
 {
        u64 phys_start = mem->phys_start;
        u64 access_flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */
+       u64 paging_flags = PAGING_COHERENT | PAGING_ALLOW_HUGE;

        if (mem->flags & JAILHOUSE_MEM_READ)
                access_flags |= PAGE_FLAG_PRESENT;
@@ -358,6 +359,8 @@ int vcpu_map_memory_region(struct cell *cell,
                access_flags |= PAGE_FLAG_NOEXECUTE;
        if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
                phys_start = paging_hvirt2phys(&cell->comm_page);
+       if (mem->flags & JAILHOUSE_MEM_NO_HUGEPAGES)
+               paging_flags &= ~PAGING_ALLOW_HUGE;

        access_flags |= amd_iommu_get_memory_region_flags(mem);

@@ -367,7 +370,7 @@ int vcpu_map_memory_region(struct cell *cell,
         */
        return paging_create(&cell->arch.svm.npt_iommu_structs, phys_start,
                             mem->size, mem->virt_start, access_flags,
-                            PAGING_COHERENT);
+                            paging_flags);
 }

 int vcpu_unmap_memory_region(struct cell *cell,
diff --git a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c
index f0a2534b..1cbc6417 100644
--- a/hypervisor/arch/x86/vmx.c
+++ b/hypervisor/arch/x86/vmx.c
@@ -353,6 +353,7 @@ int vcpu_map_memory_region(struct cell *cell,
 {
        u64 phys_start = mem->phys_start;
        unsigned long access_flags = EPT_FLAG_WB_TYPE;
+       unsigned long paging_flags = PAGING_NON_COHERENT | PAGING_ALLOW_HUGE;

        if (mem->flags & JAILHOUSE_MEM_READ)
                access_flags |= EPT_FLAG_READ;
@@ -362,10 +363,11 @@ int vcpu_map_memory_region(struct cell *cell,
                access_flags |= EPT_FLAG_EXECUTE;
        if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
                phys_start = paging_hvirt2phys(&cell->comm_page);
+       if (mem->flags & JAILHOUSE_MEM_NO_HUGEPAGES)
+               paging_flags &= ~PAGING_ALLOW_HUGE;

        return paging_create(&cell->arch.vmx.ept_structs, phys_start, mem->size,
-                            mem->virt_start, access_flags,
-                            PAGING_NON_COHERENT);
+                            mem->virt_start, access_flags, paging_flags);
 }

 int vcpu_unmap_memory_region(struct cell *cell,
diff --git a/hypervisor/arch/x86/vtd.c b/hypervisor/arch/x86/vtd.c
index e5f9bfb0..51649662 100644
--- a/hypervisor/arch/x86/vtd.c
+++ b/hypervisor/arch/x86/vtd.c
@@ -751,6 +751,7 @@ int iommu_map_memory_region(struct cell *cell,
                            const struct jailhouse_memory *mem)
 {
        unsigned long access_flags = 0;
+       unsigned long paging_flags = PAGING_COHERENT | PAGING_ALLOW_HUGE;

        if (!(mem->flags & JAILHOUSE_MEM_DMA))
                return 0;
@@ -762,10 +763,12 @@ int iommu_map_memory_region(struct cell *cell,
                access_flags |= VTD_PAGE_READ;
        if (mem->flags & JAILHOUSE_MEM_WRITE)
                access_flags |= VTD_PAGE_WRITE;
+       if (mem->flags & JAILHOUSE_MEM_NO_HUGEPAGES)
+               paging_flags &= ~PAGING_ALLOW_HUGE;

        return paging_create(&cell->arch.vtd.pg_structs, mem->phys_start,
                             mem->size, mem->virt_start, access_flags,
-                            PAGING_COHERENT);
+                            paging_flags);
 }

 int iommu_unmap_memory_region(struct cell *cell,
diff --git a/hypervisor/include/jailhouse/paging.h b/hypervisor/include/jailhouse/paging.h
index dcf77829..96e3fdbc 100644
--- a/hypervisor/include/jailhouse/paging.h
+++ b/hypervisor/include/jailhouse/paging.h
@@ -63,6 +63,11 @@ struct page_pool {
 #define PAGING_NON_COHERENT    0
 /** Make changes visible to non-snooping readers, i.e. commit them to RAM. */
 #define PAGING_COHERENT                0x1
+
+/** Do not use huge pages for creating a mapping. */
+#define PAGING_NO_HUGE         0
+/** When possible, use huge pages for creating a mapping. */
+#define PAGING_ALLOW_HUGE      0x2
 /** @} */

 /** Page table reference. */
diff --git a/hypervisor/paging.c b/hypervisor/paging.c
index 94ca1812..99abaee7 100644
--- a/hypervisor/paging.c
+++ b/hypervisor/paging.c
@@ -304,7 +304,9 @@ int paging_create(const struct paging_structures *pg_structs,
                        pte = paging->get_entry(pt, virt);
                        if (paging->page_size > 0 &&
                            paging->page_size <= size &&
-                           ((phys | virt) & (paging->page_size - 1)) == 0) {
+                           ((phys | virt) & (paging->page_size - 1)) == 0 &&
+                           (paging_flags & PAGING_ALLOW_HUGE ||
+                            paging->page_size == PAGE_SIZE)) {
                                /*
                                 * We might be overwriting a more fine-grained
                                 * mapping, so release it first. This cannot
@@ -489,7 +491,7 @@ void *paging_map_device(unsigned long phys, unsigned long size)

        if (paging_create(&hv_paging_structs, phys, size, (unsigned long)virt,
                          PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
-                         PAGING_NON_COHERENT) != 0) {
+                         PAGING_NON_COHERENT | PAGING_ALLOW_HUGE) != 0) {
                page_free(&remap_pool, virt, PAGES(size));
                return NULL;
        }
@@ -611,7 +613,7 @@ int paging_map_all_per_cpu(unsigned int cpu, bool enable)
                        sizeof(struct per_cpu) - sizeof(struct public_per_cpu),
                        (unsigned long)cpu_data,
                        enable ? PAGE_DEFAULT_FLAGS : PAGE_NONPRESENT_FLAGS,
-                       PAGING_NON_COHERENT);
+                       PAGING_NON_COHERENT | PAGING_ALLOW_HUGE);
 }

 /**
@@ -667,7 +669,8 @@ int paging_init(void)
                             paging_hvirt2phys(&hypervisor_header),
                             system_config->hypervisor_memory.size,
                             (unsigned long)&hypervisor_header,
-                            PAGE_DEFAULT_FLAGS, PAGING_NON_COHERENT);
+                            PAGE_DEFAULT_FLAGS,
+                            PAGING_NON_COHERENT | PAGING_ALLOW_HUGE);
        if (err)
                return err;

diff --git a/hypervisor/setup.c b/hypervisor/setup.c
index 99a2b0c3..a49d857e 100644
--- a/hypervisor/setup.c
+++ b/hypervisor/setup.c
@@ -128,7 +128,8 @@ static void cpu_init(struct per_cpu *cpu_data)
        /* set up private mapping of per-CPU data structure */
        err = paging_create(&cpu_data->pg_structs, paging_hvirt2phys(cpu_data),
                            sizeof(*cpu_data), LOCAL_CPU_BASE,
-                           PAGE_DEFAULT_FLAGS, PAGING_NON_COHERENT);
+                           PAGE_DEFAULT_FLAGS,
+                           PAGING_NON_COHERENT | PAGING_ALLOW_HUGE);
        if (err)
                goto failed;

@@ -141,7 +142,7 @@ static void cpu_init(struct per_cpu *cpu_data)
        err = paging_create(&cpu_data->pg_structs, 0,
                            NUM_TEMPORARY_PAGES * PAGE_SIZE,
                            TEMPORARY_MAPPING_BASE, PAGE_NONPRESENT_FLAGS,
-                           PAGING_NON_COHERENT);
+                           PAGING_NON_COHERENT | PAGING_ALLOW_HUGE);
        if (err)
                goto failed;

diff --git a/include/jailhouse/cell-config.h b/include/jailhouse/cell-config.h
index b8e1f038..30ec5d06 100644
--- a/include/jailhouse/cell-config.h
+++ b/include/jailhouse/cell-config.h
@@ -113,7 +113,8 @@ struct jailhouse_cell_desc {
 #define JAILHOUSE_MEM_COMM_REGION      0x0020
 #define JAILHOUSE_MEM_LOADABLE         0x0040
 #define JAILHOUSE_MEM_ROOTSHARED       0x0080
-#define JAILHOUSE_MEM_IO_UNALIGNED     0x0100
+#define JAILHOUSE_MEM_NO_HUGEPAGES     0x0100
+#define JAILHOUSE_MEM_IO_UNALIGNED     0x8000
 #define JAILHOUSE_MEM_IO_WIDTH_SHIFT   16 /* uses bits 16..19 */
 #define JAILHOUSE_MEM_IO_8             (1 << JAILHOUSE_MEM_IO_WIDTH_SHIFT)
 #define JAILHOUSE_MEM_IO_16            (2 << JAILHOUSE_MEM_IO_WIDTH_SHIFT)
--
2.16.4

-- 
You received this message because you are subscribed to the Google Groups 
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
To view this discussion on the web visit 
https://groups.google.com/d/msgid/jailhouse-dev/7eb9b15e-615c-232a-b051-81c7cf9efca0%40web.de.

Reply via email to