From: Antonios Motakis <[email protected]>

Initialize the hypervisor firmware, so we can finally enter
the arch independent entry() function. This includes setting
up a stack, and saving the state of the processor before
entering the hypervisor.

Signed-off-by: Antonios Motakis <[email protected]>
---
 hypervisor/arch/arm64/entry.S              | 46 ++++++++++++++++++++++++++++++
 hypervisor/arch/arm64/include/asm/percpu.h | 32 ++++++++++++++++++---
 2 files changed, 74 insertions(+), 4 deletions(-)

diff --git a/hypervisor/arch/arm64/entry.S b/hypervisor/arch/arm64/entry.S
index 99bb4b6..aff1647 100644
--- a/hypervisor/arch/arm64/entry.S
+++ b/hypervisor/arch/arm64/entry.S
@@ -23,7 +23,14 @@ arch_entry:
        /*
         * x0: cpuid
         *
+        * We don't have access to our own address space yet, so we will
+        * abuse some caller-saved registers to preserve across calls:
+        * x16: saved hyp vectors
+        * x17: cpuid
+        * x18: caller lr
         */
+       mov     x17, x0
+       mov     x18, x30
 
        /* Note 1: After turning MMU off the CPU can start bypassing caches.
         * But cached before data is kept in caches either until the CPU turns
@@ -38,6 +45,11 @@ arch_entry:
         * Invalidate is safe in guests.
         */
 
+       /* keep the linux stub EL2 vectors for later */
+       mov     x0, xzr
+       hvc     #0
+       mov     x16, x0
+
        /* install bootstrap_vectors */
        ldr     x0, =bootstrap_vectors
        hvc     #0
@@ -58,6 +70,40 @@ el2_entry:
        adr     x0, bootstrap_pt_l0
        bl      enable_mmu_el2
 
+       mov     x0, x17         /* preserved cpuid, will be passed to entry */
+       adrp    x1, __page_pool
+       mov     x2, #(1 << PERCPU_SIZE_SHIFT)
+       /*
+        * percpu data = pool + cpuid * percpu size (1 << PERCPU_SIZE_SHIFT)
+        */
+       madd    x1, x2, x0, x1
+       msr     tpidr_el2, x1
+
+       /* set up the stack and push the root cell's callee saved registers */
+       add     sp, x1, #PERCPU_STACK_END
+       stp     x29, x18, [sp, #-16]!   /* note: our caller lr is in x18 */
+       stp     x27, x28, [sp, #-16]!
+       stp     x25, x26, [sp, #-16]!
+       stp     x23, x24, [sp, #-16]!
+       stp     x21, x22, [sp, #-16]!
+       stp     x19, x20, [sp, #-16]!
+       /*
+        * Pad the stack so we can consistently access the guest
+        * registers from either the initialization or the exception
+        * handling code paths: 19 caller-saved registers plus the
+        * exit_reason, which we don't use on entry.
+        */
+       sub     sp, sp, 20 * 8
+
+       mov     x29, xzr        /* reset fp,lr */
+       mov     x30, xzr
+
+       /* save the Linux stub vectors we kept earlier */
+       add     x2, x1, #PERCPU_LINUX_SAVED_VECTORS
+       str     x16, [x2]
+
+       /* Call entry(cpuid, struct per_cpu*). Should not return. */
+       bl      entry
        b       .
 
        .globl enable_mmu_el2
diff --git a/hypervisor/arch/arm64/include/asm/percpu.h 
b/hypervisor/arch/arm64/include/asm/percpu.h
index a4f3663..9d1b6ec 100644
--- a/hypervisor/arch/arm64/include/asm/percpu.h
+++ b/hypervisor/arch/arm64/include/asm/percpu.h
@@ -16,12 +16,20 @@
 #include <jailhouse/types.h>
 #include <asm/paging.h>
 
+/* Keep in sync with struct per_cpu! */
+#define PERCPU_SIZE_SHIFT              13
+#define PERCPU_STACK_END               PAGE_SIZE
+#define PERCPU_LINUX_SAVED_VECTORS     PERCPU_STACK_END
+
 #ifndef __ASSEMBLY__
 
 #include <asm/cell.h>
 #include <asm/spinlock.h>
 
 struct per_cpu {
+       u8 stack[PAGE_SIZE];
+       unsigned long saved_vectors;
+
        /* common fields */
        unsigned int cpu_id;
        struct cell *cell;
@@ -35,8 +43,10 @@ struct per_cpu {
 
 static inline struct per_cpu *this_cpu_data(void)
 {
-       while (1);
-       return NULL;
+       struct per_cpu *cpu_data;
+
+       arm_read_sysreg(TPIDR_EL2, cpu_data);
+       return cpu_data;
 }
 
 #define DEFINE_PER_CPU_ACCESSOR(field)                                 \
@@ -50,8 +60,16 @@ DEFINE_PER_CPU_ACCESSOR(cell)
 
 static inline struct per_cpu *per_cpu(unsigned int cpu)
 {
-       while (1);
-       return NULL;
+       extern u8 __page_pool[];
+
+       return (struct per_cpu *)(__page_pool + (cpu << PERCPU_SIZE_SHIFT));
+}
+
+static inline struct registers *guest_regs(struct per_cpu *cpu_data)
+{
+       /* assumes that the cell registers are at the beginning of the stack */
+       return (struct registers *)(cpu_data->stack + PERCPU_STACK_END
+                       - sizeof(struct registers));
 }
 
 /* Validate defines */
@@ -59,7 +77,13 @@ static inline struct per_cpu *per_cpu(unsigned int cpu)
 
 static inline void __check_assumptions(void)
 {
+       struct per_cpu cpu_data;
+
        CHECK_ASSUMPTION(sizeof(unsigned long) == (8));
+       CHECK_ASSUMPTION(sizeof(struct per_cpu) == (1 << PERCPU_SIZE_SHIFT));
+       CHECK_ASSUMPTION(sizeof(cpu_data.stack) == PERCPU_STACK_END);
+       CHECK_ASSUMPTION(__builtin_offsetof(struct per_cpu, saved_vectors) ==
+                        PERCPU_LINUX_SAVED_VECTORS);
 }
 #endif /* !__ASSEMBLY__ */
 
-- 
2.8.0.rc3


-- 
You received this message because you are subscribed to the Google Groups 
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to