From: Antonios Motakis <antonios.mota...@huawei.com>

Initialize the hypervisor firmware so that we can finally enter the
arch-independent entry() function. This includes setting up a stack
and saving the processor state before entering the hypervisor.

Signed-off-by: Antonios Motakis <antonios.mota...@huawei.com>
[Jan: use asm-defines]
Signed-off-by: Jan Kiszka <jan.kis...@siemens.com>
---
 hypervisor/arch/arm64/asm-defines.c        |  8 ++++++
 hypervisor/arch/arm64/entry.S              | 44 ++++++++++++++++++++++++++++++
 hypervisor/arch/arm64/include/asm/percpu.h | 25 ++++++++++++++---
 3 files changed, 73 insertions(+), 4 deletions(-)

diff --git a/hypervisor/arch/arm64/asm-defines.c b/hypervisor/arch/arm64/asm-defines.c
index d3f166e..3209918 100644
--- a/hypervisor/arch/arm64/asm-defines.c
+++ b/hypervisor/arch/arm64/asm-defines.c
@@ -14,6 +14,7 @@
 
 #include <jailhouse/paging.h>
 #include <jailhouse/gen-defines.h>
+#include <asm/percpu.h>
 
 void common(void);
 
@@ -22,6 +23,13 @@ void common(void)
        OFFSET(DEBUG_CONSOLE_BASE, jailhouse_header, debug_console_base);
        BLANK();
 
+       DEFINE(PERCPU_STACK_END,
+              __builtin_offsetof(struct per_cpu, stack) + \
+              FIELD_SIZEOF(struct per_cpu, stack));
+       DEFINE(PERCPU_SIZE_SHIFT_ASM, PERCPU_SIZE_SHIFT);
+       OFFSET(PERCPU_SAVED_VECTORS, per_cpu, saved_vectors);
+       BLANK();
+
        DEFINE(DCACHE_CLEAN_ASM, DCACHE_CLEAN);
        DEFINE(DCACHE_INVALIDATE_ASM, DCACHE_INVALIDATE);
        DEFINE(DCACHE_CLEAN_AND_INVALIDATE_ASM, DCACHE_CLEAN_AND_INVALIDATE);
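
For reference, the DEFINE()/OFFSET()/BLANK() macros above come from
jailhouse/gen-defines.h and follow the asm-offsets trick known from
Linux: the compiler evaluates each constant and embeds it as a marker
string in its assembly output, from which the build then extracts a
header of plain numeric #defines for use by entry.S. A minimal sketch
of the technique (the exact marker format is an assumption here, not
necessarily what Jailhouse's build emits):

    #include <stddef.h>

    /* emit a "->SYM value" marker into the compiler's asm output; a
     * build step later greps these markers into an asm-safe header */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))
    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))
    #define BLANK() \
            asm volatile("\n.ascii \"->\"")

    /* size of a struct member, without needing an instance */
    #define FIELD_SIZEOF(t, f)      (sizeof(((t *)0)->f))

This is what allows PERCPU_STACK_END to be used as a plain immediate
in entry.S, without parsing any C headers at assembly time.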
diff --git a/hypervisor/arch/arm64/entry.S b/hypervisor/arch/arm64/entry.S
index ab614ba..e42cab5 100644
--- a/hypervisor/arch/arm64/entry.S
+++ b/hypervisor/arch/arm64/entry.S
@@ -26,7 +26,12 @@ arch_entry:
         * We don't have access to our own address space yet, so we will
         * abuse some caller-saved registers to preserve values across calls:
         * x15: physical UART address
+        * x16: saved hyp vectors
+        * x17: cpuid
+        * x18: caller lr
         */
+       mov     x17, x0
+       mov     x18, x30
 
        /*
         * Access the just updated hypervisor_header prior to turning off the
@@ -48,6 +53,11 @@ arch_entry:
         * Invalidate is safe in guests.
         */
 
+       /* keep the Linux stub EL2 vectors for later */
+       mov     x0, xzr
+       hvc     #0
+       mov     x16, x0
+
        /* install bootstrap_vectors */
        ldr     x0, =bootstrap_vectors
        hvc     #0
@@ -68,6 +78,40 @@ el2_entry:
        adr     x0, bootstrap_pt_l0
        bl      enable_mmu_el2
 
+       mov     x0, x17         /* preserved cpuid, will be passed to entry */
+       adrp    x1, __page_pool
+       mov     x2, #(1 << PERCPU_SIZE_SHIFT_ASM)
+       /*
+        * percpu data = __page_pool + cpuid * (1 << PERCPU_SIZE_SHIFT)
+        */
+       madd    x1, x2, x0, x1
+       msr     tpidr_el2, x1
+
+       /* set up the stack and push the root cell's callee-saved registers */
+       add     sp, x1, #PERCPU_STACK_END
+       stp     x29, x18, [sp, #-16]!   /* note: our caller lr is in x18 */
+       stp     x27, x28, [sp, #-16]!
+       stp     x25, x26, [sp, #-16]!
+       stp     x23, x24, [sp, #-16]!
+       stp     x21, x22, [sp, #-16]!
+       stp     x19, x20, [sp, #-16]!
+       /*
+        * We pad the stack so that the guest registers can be accessed
+        * consistently from both the initialization and the exception
+        * handling code paths: 19 caller-saved registers plus the
+        * exit_reason, which we don't use on entry.
+        */
+       sub     sp, sp, 20 * 8
+
+       mov     x29, xzr        /* reset fp,lr */
+       mov     x30, xzr
+
+       /* save the Linux stub vectors we kept earlier */
+       add     x2, x1, #PERCPU_SAVED_VECTORS
+       str     x16, [x2]
+
+       /* Call entry(cpuid, struct per_cpu*). Should not return. */
+       bl      entry
        b       .
 
        .globl enable_mmu_el2
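
A note on the two additions above. The first hvc #0, issued with x0
cleared, uses the stub hypercall interface to read back the current
EL2 vectors (the Linux hyp stub's), which we stash in x16 until the
per-CPU area is reachable; the second installs bootstrap_vectors the
same way. Later, el2_entry derives the per-CPU base from the CPU id.
A C sketch of the same address and frame arithmetic, for illustration
only (the real code must run in assembly before any C environment
exists, and ENTRY_FRAME_SIZE is a name invented here):

    extern u8 __page_pool[];

    /* mirrors "madd x1, x2, x0, x1" with x2 = 1 << PERCPU_SIZE_SHIFT:
     * per-CPU base = __page_pool + cpuid * per-CPU area size */
    static inline struct per_cpu *percpu_base(unsigned long cpuid)
    {
            return (struct per_cpu *)(__page_pool +
                                      (cpuid << PERCPU_SIZE_SHIFT));
    }

    /*
     * Frame built on the fresh stack: six stp pairs (x19..x28, then
     * x29 paired with the caller's lr preserved in x18) = 96 bytes,
     * followed by 20 * 8 bytes of padding for the exit_reason and the
     * 19 caller-saved registers x0..x18, which only the exception
     * path fills in.
     */
    #define ENTRY_FRAME_SIZE        (6 * 16 + 20 * 8)  /* 256 bytes */

Since the stack is the first field of struct per_cpu, sp starts at
PERCPU_STACK_END and this 256-byte frame sits exactly where the
guest_regs() accessor below expects it.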
diff --git a/hypervisor/arch/arm64/include/asm/percpu.h b/hypervisor/arch/arm64/include/asm/percpu.h
index 42f6f69..a22b486 100644
--- a/hypervisor/arch/arm64/include/asm/percpu.h
+++ b/hypervisor/arch/arm64/include/asm/percpu.h
@@ -19,7 +19,14 @@
 
 #include <jailhouse/cell.h>
 
+/* Shift of sizeof(struct per_cpu), rounded up to the next power of two. */
+#define PERCPU_SIZE_SHIFT \
+        (BITS_PER_LONG - __builtin_clzl(sizeof(struct per_cpu) - 1))
+
 struct per_cpu {
+       u8 stack[PAGE_SIZE];
+       unsigned long saved_vectors;
+
        /* common fields */
        unsigned int cpu_id;
        struct cell *cell;
@@ -33,8 +40,10 @@ struct per_cpu {
 
 static inline struct per_cpu *this_cpu_data(void)
 {
-       while (1);
-       return NULL;
+       struct per_cpu *cpu_data;
+
+       arm_read_sysreg(TPIDR_EL2, cpu_data);
+       return cpu_data;
 }
 
 #define DEFINE_PER_CPU_ACCESSOR(field)                                 \
@@ -48,8 +57,16 @@ DEFINE_PER_CPU_ACCESSOR(cell)
 
 static inline struct per_cpu *per_cpu(unsigned int cpu)
 {
-       while (1);
-       return NULL;
+       extern u8 __page_pool[];
+
+       return (struct per_cpu *)(__page_pool + (cpu << PERCPU_SIZE_SHIFT));
+}
+
+static inline struct registers *guest_regs(struct per_cpu *cpu_data)
+{
+       /* assumes that the cell registers are at the beginning of the
+        * stack, i.e. at its highest addresses, as set up by entry.S */
+       return (struct registers *)(cpu_data->stack + sizeof(cpu_data->stack)
+                       - sizeof(struct registers));
 }
 #endif /* !__ASSEMBLY__ */
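
Two notes on the above. The rounding macro keeps the C and assembly
sides in agreement: with 64-bit longs and, purely as an illustrative
figure, sizeof(struct per_cpu) == 0x1480, __builtin_clzl(0x147f) == 51,
so PERCPU_SIZE_SHIFT == 13 and per_cpu() strides in 8 KiB slots, the
same stride the madd in entry.S uses. And guest_regs() relies on
struct registers matching the 256-byte frame entry.S builds at the
beginning of the stack. Both invariants could be pinned down with
compile-time checks along these lines (a sketch; these asserts are not
part of this patch, and the second assumes struct registers holds
exit_reason plus x0..x30):

    /* each per-CPU slot must hold the whole structure */
    _Static_assert((1UL << PERCPU_SIZE_SHIFT) >= sizeof(struct per_cpu),
                   "PERCPU_SIZE_SHIFT too small for struct per_cpu");

    /* guest_regs() must land exactly on the frame built in entry.S:
     * 12 pushed registers + 20 padded slots = 256 bytes */
    _Static_assert(sizeof(struct registers) == 6 * 16 + 20 * 8,
                   "struct registers does not match the entry.S frame");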
 
-- 
2.1.4
