This flushes all dcache entries related to a specific cell by mapping
each physical RAM page of the cell into the hypervisor and then performing
the requested flush on the corresponding virtual address. Those flushes
will be broadcast to all CPUs, thus the call only needs to be performed
once on any CPU in the system.

This pattern was bluntly stolen from KVM. It will serve as a building
block to emulate guest-issued set/way cache maintenance operations.

CC: Marc Zyngier <marc.zyng...@arm.com>
CC: Mark Rutland <mark.rutl...@arm.com>
Signed-off-by: Jan Kiszka <jan.kis...@siemens.com>
---
 hypervisor/arch/arm/include/asm/paging.h |  1 +
 hypervisor/arch/arm/mmu_cell.c           | 35 ++++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+)

diff --git a/hypervisor/arch/arm/include/asm/paging.h b/hypervisor/arch/arm/include/asm/paging.h
index 1177023..f2ee398 100644
--- a/hypervisor/arch/arm/include/asm/paging.h
+++ b/hypervisor/arch/arm/include/asm/paging.h
@@ -190,6 +190,7 @@ void arm_paging_cell_destroy(struct cell *cell);
 int arm_paging_vcpu_init(struct per_cpu *cpu_data);
 
 void arm_dcaches_flush(void *addr, long size, enum dcache_flush flush);
+void arm_cell_dcaches_flush(struct cell *cell, enum dcache_flush flush);
 
 /* return the bits supported for the physical address range for this
  * machine; in arch_paging_init this value will be kept in
diff --git a/hypervisor/arch/arm/mmu_cell.c b/hypervisor/arch/arm/mmu_cell.c
index 6bce1ab..baf9ba0 100644
--- a/hypervisor/arch/arm/mmu_cell.c
+++ b/hypervisor/arch/arm/mmu_cell.c
@@ -55,6 +55,41 @@ unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
        return paging_virt2phys(&cpu_data->cell->arch.mm, gphys, flags);
 }
 
+void arm_cell_dcaches_flush(struct cell *cell, enum dcache_flush flush)
+{
+       unsigned long vaddr = TEMPORARY_MAPPING_BASE +
+               this_cpu_id() * PAGE_SIZE * NUM_TEMPORARY_PAGES; /* this CPU's private mapping window */
+       unsigned long region_addr, region_size, size;
+       struct jailhouse_memory const *mem;
+       unsigned int n;
+
+       for_each_mem_region(mem, cell->config, n) {
+               if (mem->flags & (JAILHOUSE_MEM_IO | JAILHOUSE_MEM_COMM_REGION))
+                       continue; /* skip I/O and comm-region entries */
+
+               region_addr = mem->phys_start;
+               region_size = mem->size;
+
+               while (region_size > 0) {
+                       size = MIN(region_size,
+                                  NUM_TEMPORARY_PAGES * PAGE_SIZE); /* cap chunk at window size */
+
+                       /* cannot fail, mapping area is preallocated */
+                       paging_create(&hv_paging_structs, region_addr, size,
+                                     vaddr, PAGE_DEFAULT_FLAGS,
+                                     PAGING_NON_COHERENT);
+
+                       arm_dcaches_flush((void *)vaddr, size, flush);
+
+                       region_addr += size; /* advance through the region chunk by chunk */
+                       region_size -= size;
+               }
+       }
+
+       /* ensure completion of the flush */
+       dmb(ish);
+}
+
 int arm_paging_cell_init(struct cell *cell)
 {
        cell->arch.mm.root_paging = cell_paging;
-- 
2.1.4

-- 
You received this message because you are subscribed to the Google Groups 
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to jailhouse-dev+unsubscr...@googlegroups.com.
For more options, visit https://groups.google.com/d/optout.

Reply via email to