This moves the generic ARM control code into arm-common; many of these
functions will be shared with arm64.

This creates one new inter-module interface: arm_cpu_reset. It now takes
the desired PC after reset as a parameter, instead of requiring callers
to overwrite the related register afterwards whenever cpu_on_entry is
not the final value.
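
To illustrate, a condensed before/after of the park path, taken from the
diff below (shown for reference only, not part of the patch):

	/* before: reset to cpu_on_entry, then patch the PC by hand */
	cpu_reset();
	arm_write_banked_reg(ELR_hyp, 0);

	/* after: pass the desired PC directly */
	arm_cpu_reset(0);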

Signed-off-by: Jan Kiszka <jan.kis...@siemens.com>
---
 hypervisor/arch/arm-common/control.c      | 220 ++++++++++++++++++++++++++++++
 hypervisor/arch/arm/Makefile              |   1 +
 hypervisor/arch/arm/control.c             | 207 +---------------------------
 hypervisor/arch/arm/include/asm/control.h |   1 +
 4 files changed, 224 insertions(+), 205 deletions(-)
 create mode 100644 hypervisor/arch/arm-common/control.c

diff --git a/hypervisor/arch/arm-common/control.c b/hypervisor/arch/arm-common/control.c
new file mode 100644
index 0000000..9c40e53
--- /dev/null
+++ b/hypervisor/arch/arm-common/control.c
@@ -0,0 +1,220 @@
+/*
+ * Jailhouse, a Linux-based partitioning hypervisor
+ *
+ * Copyright (c) ARM Limited, 2014
+ * Copyright (c) Siemens AG, 2016
+ *
+ * Authors:
+ *  Jean-Philippe Brucker <jean-philippe.bruc...@arm.com>
+ *  Jan Kiszka <jan.kis...@siemens.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <jailhouse/control.h>
+#include <jailhouse/printk.h>
+#include <asm/control.h>
+#include <asm/psci.h>
+
+static void enter_cpu_off(struct per_cpu *cpu_data)
+{
+       cpu_data->park = false;
+       cpu_data->wait_for_poweron = true;
+}
+
+void arm_cpu_park(void)
+{
+       struct per_cpu *cpu_data = this_cpu_data();
+
+       spin_lock(&cpu_data->control_lock);
+       enter_cpu_off(cpu_data);
+       spin_unlock(&cpu_data->control_lock);
+
+       arm_cpu_reset(0);
+       arm_paging_vcpu_init(&parking_mm);
+}
+
+void arm_cpu_kick(unsigned int cpu_id)
+{
+       struct sgi sgi = {};
+
+       sgi.targets = 1 << cpu_id;
+       sgi.id = SGI_EVENT;
+       irqchip_send_sgi(&sgi);
+}
+
+void arch_suspend_cpu(unsigned int cpu_id)
+{
+       struct per_cpu *target_data = per_cpu(cpu_id);
+       bool target_suspended;
+
+       spin_lock(&target_data->control_lock);
+
+       target_data->suspend_cpu = true;
+       target_suspended = target_data->cpu_suspended;
+
+       spin_unlock(&target_data->control_lock);
+
+       if (!target_suspended) {
+               arm_cpu_kick(cpu_id);
+
+               while (!target_data->cpu_suspended)
+                       cpu_relax();
+       }
+}
+
+void arch_resume_cpu(unsigned int cpu_id)
+{
+       struct per_cpu *target_data = per_cpu(cpu_id);
+
+       /* take lock to avoid theoretical race with a pending suspension */
+       spin_lock(&target_data->control_lock);
+
+       target_data->suspend_cpu = false;
+
+       spin_unlock(&target_data->control_lock);
+}
+
+void arch_reset_cpu(unsigned int cpu_id)
+{
+       per_cpu(cpu_id)->reset = true;
+
+       arch_resume_cpu(cpu_id);
+}
+
+void arch_park_cpu(unsigned int cpu_id)
+{
+       per_cpu(cpu_id)->park = true;
+
+       arch_resume_cpu(cpu_id);
+}
+
+static void check_events(struct per_cpu *cpu_data)
+{
+       bool reset = false;
+
+       spin_lock(&cpu_data->control_lock);
+
+       do {
+               if (cpu_data->suspend_cpu)
+                       cpu_data->cpu_suspended = true;
+
+               spin_unlock(&cpu_data->control_lock);
+
+               while (cpu_data->suspend_cpu)
+                       cpu_relax();
+
+               spin_lock(&cpu_data->control_lock);
+
+               if (!cpu_data->suspend_cpu) {
+                       cpu_data->cpu_suspended = false;
+
+                       if (cpu_data->park) {
+                               enter_cpu_off(cpu_data);
+                               break;
+                       }
+
+                       if (cpu_data->reset) {
+                               cpu_data->reset = false;
+                               if (cpu_data->cpu_on_entry !=
+                                   PSCI_INVALID_ADDRESS) {
+                                       cpu_data->wait_for_poweron = false;
+                                       reset = true;
+                               } else {
+                                       enter_cpu_off(cpu_data);
+                               }
+                               break;
+                       }
+               }
+       } while (cpu_data->suspend_cpu);
+
+       if (cpu_data->flush_vcpu_caches) {
+               cpu_data->flush_vcpu_caches = false;
+               arm_paging_vcpu_flush_tlbs();
+       }
+
+       spin_unlock(&cpu_data->control_lock);
+
+       /*
+        * wait_for_poweron is only modified on this CPU, so checking outside of
+        * control_lock is fine.
+        */
+       if (cpu_data->wait_for_poweron)
+               arm_cpu_park();
+       else if (reset)
+               arm_cpu_reset(cpu_data->cpu_on_entry);
+}
+
+void arch_handle_sgi(struct per_cpu *cpu_data, u32 irqn,
+                    unsigned int count_event)
+{
+       switch (irqn) {
+       case SGI_INJECT:
+               cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_VSGI] += count_event;
+               irqchip_inject_pending(cpu_data);
+               break;
+       case SGI_EVENT:
+               cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT] +=
+                       count_event;
+               check_events(cpu_data);
+               break;
+       default:
+               printk("WARN: unknown SGI received %d\n", irqn);
+       }
+}
+
+/*
+ * Handle the maintenance interrupt, the rest is injected into the cell.
+ * Return true when the IRQ has been handled by the hyp.
+ */
+bool arch_handle_phys_irq(struct per_cpu *cpu_data, u32 irqn,
+                         unsigned int count_event)
+{
+       if (irqn == system_config->platform_info.arm.maintenance_irq) {
+               cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MAINTENANCE] +=
+                       count_event;
+               irqchip_inject_pending(cpu_data);
+
+               return true;
+       }
+
+       cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_VIRQ] += count_event;
+       irqchip_set_pending(cpu_data, irqn);
+
+       return false;
+}
+
+void arch_cell_reset(struct cell *cell)
+{
+       arm_cell_dcaches_flush(cell, DCACHE_INVALIDATE);
+}
+
+/* Note: only supports synchronous flushing as triggered by config_commit! */
+void arch_flush_cell_vcpu_caches(struct cell *cell)
+{
+       unsigned int cpu;
+
+       for_each_cpu(cpu, cell->cpu_set)
+               if (cpu == this_cpu_id())
+                       arm_paging_vcpu_flush_tlbs();
+               else
+                       per_cpu(cpu)->flush_vcpu_caches = true;
+}
+
+void arch_config_commit(struct cell *cell_added_removed)
+{
+       irqchip_config_commit(cell_added_removed);
+}
+
+void __attribute__((noreturn)) arch_panic_stop(void)
+{
+       asm volatile ("1: wfi; b 1b");
+       __builtin_unreachable();
+}
+
+void arch_panic_park(void) __attribute__((alias("arm_cpu_park")));
+
+void arch_shutdown(void)
+{
+}
diff --git a/hypervisor/arch/arm/Makefile b/hypervisor/arch/arm/Makefile
index b844fb2..786f96b 100644
--- a/hypervisor/arch/arm/Makefile
+++ b/hypervisor/arch/arm/Makefile
@@ -23,6 +23,7 @@ obj-y += mmu_hyp.o caches.o smp.o
 obj-y += $(COMMON)/dbg-write.o $(COMMON)/lib.o $(COMMON)/psci.o
 obj-y += $(COMMON)/paging.o $(COMMON)/mmu_cell.o
 obj-y += $(COMMON)/irqchip.o $(COMMON)/gic-common.o
+obj-y += $(COMMON)/control.o
 obj-$(CONFIG_ARM_GIC_V2) += $(COMMON)/gic-v2.o
 obj-$(CONFIG_ARM_GIC_V3) += gic-v3.o
 obj-$(CONFIG_SERIAL_AMBA_PL011) += $(COMMON)/dbg-write-pl011.o
diff --git a/hypervisor/arch/arm/control.c b/hypervisor/arch/arm/control.c
index b2a2f09..c9d0f87 100644
--- a/hypervisor/arch/arm/control.c
+++ b/hypervisor/arch/arm/control.c
@@ -21,7 +21,7 @@
 #include <asm/smp.h>
 #include <asm/sysregs.h>
 
-static void cpu_reset(void)
+void arm_cpu_reset(unsigned long pc)
 {
        struct per_cpu *cpu_data = this_cpu_data();
        struct cell *cell = cpu_data->cell;
@@ -89,7 +89,7 @@ static void cpu_reset(void)
        arm_write_sysreg(TPIDRPRW, 0);
 
        arm_write_banked_reg(SPSR_hyp, RESET_PSR);
-       arm_write_banked_reg(ELR_hyp, cpu_data->cpu_on_entry);
+       arm_write_banked_reg(ELR_hyp, pc);
 
        /* transfer the context that may have been passed to PSCI_CPU_ON */
        regs->usr[1] = cpu_data->cpu_on_context;
@@ -101,25 +101,6 @@ static void cpu_reset(void)
        irqchip_cpu_reset(cpu_data);
 }
 
-static void enter_cpu_off(struct per_cpu *cpu_data)
-{
-       cpu_data->park = false;
-       cpu_data->wait_for_poweron = true;
-}
-
-void arm_cpu_park(void)
-{
-       struct per_cpu *cpu_data = this_cpu_data();
-
-       spin_lock(&cpu_data->control_lock);
-       enter_cpu_off(cpu_data);
-       spin_unlock(&cpu_data->control_lock);
-
-       cpu_reset();
-       arm_write_banked_reg(ELR_hyp, 0);
-       arm_paging_vcpu_init(&parking_mm);
-}
-
 static void arch_dump_exit(struct registers *regs, const char *reason)
 {
        unsigned long pc;
@@ -185,135 +166,6 @@ struct registers* arch_handle_exit(struct per_cpu *cpu_data,
        return regs;
 }
 
-void arm_cpu_kick(unsigned int cpu_id)
-{
-       struct sgi sgi = {};
-
-       sgi.targets = 1 << cpu_id;
-       sgi.id = SGI_EVENT;
-       irqchip_send_sgi(&sgi);
-}
-
-void arch_suspend_cpu(unsigned int cpu_id)
-{
-       struct per_cpu *target_data = per_cpu(cpu_id);
-       bool target_suspended;
-
-       spin_lock(&target_data->control_lock);
-
-       target_data->suspend_cpu = true;
-       target_suspended = target_data->cpu_suspended;
-
-       spin_unlock(&target_data->control_lock);
-
-       if (!target_suspended) {
-               arm_cpu_kick(cpu_id);
-
-               while (!target_data->cpu_suspended)
-                       cpu_relax();
-       }
-}
-
-void arch_resume_cpu(unsigned int cpu_id)
-{
-       struct per_cpu *target_data = per_cpu(cpu_id);
-
-       /* take lock to avoid theoretical race with a pending suspension */
-       spin_lock(&target_data->control_lock);
-
-       target_data->suspend_cpu = false;
-
-       spin_unlock(&target_data->control_lock);
-}
-
-void arch_reset_cpu(unsigned int cpu_id)
-{
-       per_cpu(cpu_id)->reset = true;
-
-       arch_resume_cpu(cpu_id);
-}
-
-void arch_park_cpu(unsigned int cpu_id)
-{
-       per_cpu(cpu_id)->park = true;
-
-       arch_resume_cpu(cpu_id);
-}
-
-static void check_events(struct per_cpu *cpu_data)
-{
-       bool reset = false;
-
-       spin_lock(&cpu_data->control_lock);
-
-       do {
-               if (cpu_data->suspend_cpu)
-                       cpu_data->cpu_suspended = true;
-
-               spin_unlock(&cpu_data->control_lock);
-
-               while (cpu_data->suspend_cpu)
-                       cpu_relax();
-
-               spin_lock(&cpu_data->control_lock);
-
-               if (!cpu_data->suspend_cpu) {
-                       cpu_data->cpu_suspended = false;
-
-                       if (cpu_data->park) {
-                               enter_cpu_off(cpu_data);
-                               break;
-                       }
-
-                       if (cpu_data->reset) {
-                               cpu_data->reset = false;
-                               if (cpu_data->cpu_on_entry !=
-                                   PSCI_INVALID_ADDRESS) {
-                                       cpu_data->wait_for_poweron = false;
-                                       reset = true;
-                               } else {
-                                       enter_cpu_off(cpu_data);
-                               }
-                               break;
-                       }
-               }
-       } while (cpu_data->suspend_cpu);
-
-       if (cpu_data->flush_vcpu_caches) {
-               cpu_data->flush_vcpu_caches = false;
-               arm_paging_vcpu_flush_tlbs();
-       }
-
-       spin_unlock(&cpu_data->control_lock);
-
-       /*
-        * wait_for_poweron is only modified on this CPU, so checking outside of
-        * control_lock is fine.
-        */
-       if (cpu_data->wait_for_poweron)
-               arm_cpu_park();
-       else if (reset)
-               cpu_reset();
-}
-
-void arch_handle_sgi(struct per_cpu *cpu_data, u32 irqn,
-                    unsigned int count_event)
-{
-       switch (irqn) {
-       case SGI_INJECT:
-               cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_VSGI] += count_event;
-               irqchip_inject_pending(cpu_data);
-               break;
-       case SGI_EVENT:
-               cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT] +=
-                       count_event;
-               check_events(cpu_data);
-               break;
-       default:
-               printk("WARN: unknown SGI received %d\n", irqn);
-       }
-}
-
 unsigned int arm_cpu_virt2phys(struct cell *cell, unsigned int virt_id)
 {
        unsigned int cpu;
@@ -326,27 +178,6 @@ unsigned int arm_cpu_virt2phys(struct cell *cell, unsigned int virt_id)
        return -1;
 }
 
-/*
- * Handle the maintenance interrupt, the rest is injected into the cell.
- * Return true when the IRQ has been handled by the hyp.
- */
-bool arch_handle_phys_irq(struct per_cpu *cpu_data, u32 irqn,
-                         unsigned int count_event)
-{
-       if (irqn == system_config->platform_info.arm.maintenance_irq) {
-               cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MAINTENANCE] +=
-                       count_event;
-               irqchip_inject_pending(cpu_data);
-
-               return true;
-       }
-
-       cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_VIRQ] += count_event;
-       irqchip_set_pending(cpu_data, irqn);
-
-       return false;
-}
-
 int arch_cell_create(struct cell *cell)
 {
        int err;
@@ -402,37 +233,3 @@ void arch_cell_destroy(struct cell *cell)
 
        arm_paging_cell_destroy(cell);
 }
-
-void arch_cell_reset(struct cell *cell)
-{
-       arm_cell_dcaches_flush(cell, DCACHE_INVALIDATE);
-}
-
-/* Note: only supports synchronous flushing as triggered by config_commit! */
-void arch_flush_cell_vcpu_caches(struct cell *cell)
-{
-       unsigned int cpu;
-
-       for_each_cpu(cpu, cell->cpu_set)
-               if (cpu == this_cpu_id())
-                       arm_paging_vcpu_flush_tlbs();
-               else
-                       per_cpu(cpu)->flush_vcpu_caches = true;
-}
-
-void arch_config_commit(struct cell *cell_added_removed)
-{
-       irqchip_config_commit(cell_added_removed);
-}
-
-void __attribute__((noreturn)) arch_panic_stop(void)
-{
-       asm volatile ("1: wfi; b 1b");
-       __builtin_unreachable();
-}
-
-void arch_panic_park(void) __attribute__((alias("arm_cpu_park")));
-
-void arch_shutdown(void)
-{
-}
diff --git a/hypervisor/arch/arm/include/asm/control.h b/hypervisor/arch/arm/include/asm/control.h
index 1e405d1..481ca19 100644
--- a/hypervisor/arch/arm/include/asm/control.h
+++ b/hypervisor/arch/arm/include/asm/control.h
@@ -39,6 +39,7 @@ unsigned int arm_cpu_by_mpidr(struct cell *cell, unsigned long mpidr);
 void __attribute__((noreturn)) vmreturn(struct registers *guest_regs);
 void __attribute__((noreturn)) arch_shutdown_mmu(struct per_cpu *cpu_data);
 
+void arm_cpu_reset(unsigned long pc);
 void arm_cpu_park(void);
 void arm_cpu_kick(unsigned int cpu_id);
 
-- 
2.1.4
