From: Jan Kiszka <[email protected]>

This inline function is just a local helper of arch/arm/setup.c, so move
it out of the public setup.h header.

Signed-off-by: Jan Kiszka <[email protected]>
---
 hypervisor/arch/arm/include/asm/setup.h | 16 ----------------
 hypervisor/arch/arm/setup.c             | 16 ++++++++++++++++
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/hypervisor/arch/arm/include/asm/setup.h 
b/hypervisor/arch/arm/include/asm/setup.h
index f555e4d3..a4d927c0 100644
--- a/hypervisor/arch/arm/include/asm/setup.h
+++ b/hypervisor/arch/arm/include/asm/setup.h
@@ -17,22 +17,6 @@
 
 #ifndef __ASSEMBLY__
 
-static inline void __attribute__((always_inline))
-cpu_prepare_return_el1(struct per_cpu *cpu_data, int return_code)
-{
-       cpu_data->linux_reg[0] = return_code;
-
-       asm volatile (
-               "msr    sp_svc, %0\n\t"
-               "msr    elr_hyp, %1\n\t"
-               "msr    spsr_hyp, %2\n\t"
-               :
-               : "r" (cpu_data->linux_sp +
-                      (NUM_ENTRY_REGS * sizeof(unsigned long))),
-                 "r" (cpu_data->linux_ret),
-                 "r" (cpu_data->linux_flags));
-}
-
 int switch_exception_level(struct per_cpu *cpu_data);
 
 #endif /* !__ASSEMBLY__ */
diff --git a/hypervisor/arch/arm/setup.c b/hypervisor/arch/arm/setup.c
index ec260a11..997bf438 100644
--- a/hypervisor/arch/arm/setup.c
+++ b/hypervisor/arch/arm/setup.c
@@ -90,6 +90,22 @@ int arch_init_late(void)
        return arm_init_late();
 }
 
+static inline void __attribute__((always_inline))
+cpu_prepare_return_el1(struct per_cpu *cpu_data, int return_code)
+{
+       cpu_data->linux_reg[0] = return_code;
+
+       asm volatile (
+               "msr    sp_svc, %0\n\t"
+               "msr    elr_hyp, %1\n\t"
+               "msr    spsr_hyp, %2\n\t"
+               :
+               : "r" (cpu_data->linux_sp +
+                      (NUM_ENTRY_REGS * sizeof(unsigned long))),
+                 "r" (cpu_data->linux_ret),
+                 "r" (cpu_data->linux_flags));
+}
+
 void __attribute__((noreturn)) arch_cpu_activate_vmm(struct per_cpu *cpu_data)
 {
        /* Return to the kernel */
-- 
2.12.3

-- 
You received this message because you are subscribed to the Google Groups 
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to