With upcoming full hypervisor preemption it is possible that the context
switching code will be called with IRQs already disabled. In this case
we don't want to re-enable them on the way out, so add logic that tracks
whether IRQs were already disabled on entry and only re-enables them if
they were enabled to begin with.

Signed-off-by: Volodymyr Babchuk <volodymyr_babc...@epam.com>
---
 xen/arch/arm/domain.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
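
Not part of the patch, but to illustrate the intended behaviour, below is a
minimal, self-contained model of the IRQ-state tracking pattern. The irq_*
helpers are stand-ins for Xen's local_irq_* primitives, not the real
hypervisor API, and model_context_switch() only sketches the
context_switch()/schedule_tail() flow:

#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;

static bool irq_is_enabled(void) { return irqs_enabled; }
static void irq_disable(void)    { irqs_enabled = false; }
static void irq_enable(void)     { irqs_enabled = true; }

/* Mirrors the patched flow: sample the IRQ state on entry and only
 * re-enable IRQs at the end if they were enabled when we came in. */
static void model_context_switch(void)
{
    bool need_to_disable_irqs = irq_is_enabled();

    if ( need_to_disable_irqs )
        irq_disable();

    /* ... switch register state, run the schedule_tail() work ... */

    if ( need_to_disable_irqs )
        irq_enable();
}

int main(void)
{
    /* Caller with IRQs enabled: they are restored afterwards. */
    model_context_switch();
    printf("after enabled caller:  IRQs %s\n", irqs_enabled ? "on" : "off");

    /* Caller with IRQs already disabled (the new preemption case):
     * they must stay disabled. */
    irq_disable();
    model_context_switch();
    printf("after disabled caller: IRQs %s\n", irqs_enabled ? "on" : "off");

    return 0;
}

Running the model prints "on" after the first call and "off" after the
second, which is the behaviour the patch guarantees for callers that enter
context_switch() with IRQs disabled.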

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 2ccf4449ea..3d4a1df4a4 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -310,7 +310,7 @@ static void update_runstate_area(struct vcpu *v)
     }
 }
 
-static void schedule_tail(struct vcpu *prev)
+static void schedule_tail(struct vcpu *prev, bool enable_irqs)
 {
     ASSERT(prev != current);
 
@@ -318,7 +318,8 @@ static void schedule_tail(struct vcpu *prev)
 
     ctxt_switch_to(current);
 
-    local_irq_enable();
+    if ( enable_irqs )
+        local_irq_enable();
 
     sched_context_switched(prev, current);
 
@@ -333,7 +334,7 @@ static void continue_new_vcpu(struct vcpu *prev)
     current->arch.actlr = READ_SYSREG32(ACTLR_EL1);
     processor_vcpu_initialise(current);
 
-    schedule_tail(prev);
+    schedule_tail(prev, true);
 
     /* This matches preempt_disable() in schedule() */
     preempt_enable_no_sched();
@@ -350,19 +351,21 @@ static void continue_new_vcpu(struct vcpu *prev)
 
 void context_switch(struct vcpu *prev, struct vcpu *next)
 {
-    ASSERT(local_irq_is_enabled());
+    bool need_to_disable_irqs = local_irq_is_enabled();
+
     ASSERT(prev != next);
     ASSERT(!vcpu_cpu_dirty(next));
 
     update_runstate_area(prev);
 
-    local_irq_disable();
+    if ( need_to_disable_irqs )
+        local_irq_disable();
 
     set_current(next);
 
     prev = __context_switch(prev, next);
 
-    schedule_tail(prev);
+    schedule_tail(prev, need_to_disable_irqs);
 }
 
 void continue_running(struct vcpu *same)
-- 
2.29.2
