We add the necessary call to task_isolation_enter() in the
prepare_exit_to_usermode() routine.  We already call into this
routine unconditionally whenever TIF_NOHZ is set, since that is
where we do the user_enter() call.
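
For context, the call lands inside the existing exit-to-usermode work
loop, after the notify-resume handling and before the flags are
re-read with interrupts disabled (see the process.c hunk below).  A
rough sketch of that placement (paraphrasing the loop rather than
quoting it; the work-mask name is purely illustrative):

  while (thread_info_flags & _TIF_WORK_MASK) {
          local_irq_enable();

          /* ... handle signals, notify-resume callbacks, etc. ... */

          /* Quiesce before returning to user space, if enabled. */
          if (task_isolation_enabled())
                  task_isolation_enter();

          /* Re-check for new work with interrupts off; loop if any. */
          local_irq_disable();
          thread_info_flags = READ_ONCE(current_thread_info()->flags);
  }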

In addition, we add an overriding task_isolation_wait() implementation
that runs a nap instruction while waiting for an interrupt, so that
the task_isolation_enter() loop waits in a lower-power state.
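
This relies on the generic task-isolation code providing a weak
task_isolation_wait() that architectures may override.  A minimal
sketch of the assumed generic default (not part of this patch), which
the task_isolation_enter() loop would call on each iteration:

  /* Assumed generic __weak fallback: relax briefly; the caller loops. */
  void __weak task_isolation_wait(void)
  {
          cpu_relax();
  }

The tile override added here instead marks the task TASK_INTERRUPTIBLE,
naps the core via _cpu_idle(), and restores TASK_RUNNING once an
interrupt wakes it, so the wait consumes much less power than a
cpu_relax() spin.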

Signed-off-by: Chris Metcalf <[email protected]>
---
 arch/tile/kernel/process.c | 13 +++++++++++++
 arch/tile/kernel/ptrace.c  |  3 +++
 arch/tile/mm/homecache.c   |  5 ++++-
 3 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index b5f30d376ce1..28aa0f8b45ef 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -29,6 +29,7 @@
 #include <linux/signal.h>
 #include <linux/delay.h>
 #include <linux/context_tracking.h>
+#include <linux/isolation.h>
 #include <asm/stack.h>
 #include <asm/switch_to.h>
 #include <asm/homecache.h>
@@ -70,6 +71,15 @@ void arch_cpu_idle(void)
        _cpu_idle();
 }
 
+#ifdef CONFIG_TASK_ISOLATION
+void task_isolation_wait(void)
+{
+       set_current_state(TASK_INTERRUPTIBLE);
+       _cpu_idle();
+       set_current_state(TASK_RUNNING);
+}
+#endif
+
 /*
  * Release a thread_info structure
  */
@@ -495,6 +505,9 @@ void prepare_exit_to_usermode(struct pt_regs *regs, u32 thread_info_flags)
                        tracehook_notify_resume(regs);
                }
 
+               if (task_isolation_enabled())
+                       task_isolation_enter();
+
                local_irq_disable();
                thread_info_flags = READ_ONCE(current_thread_info()->flags);
 
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index bdc126faf741..04a7a6bf7d0a 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -265,6 +265,9 @@ int do_syscall_trace_enter(struct pt_regs *regs)
        if (secure_computing() == -1)
                return -1;
 
+       if ((work & _TIF_NOHZ) && task_isolation_strict())
+               task_isolation_syscall(regs->regs[TREG_SYSCALL_NR]);
+
        if (work & _TIF_SYSCALL_TRACE) {
                if (tracehook_report_syscall_entry(regs))
                        regs->regs[TREG_SYSCALL_NR] = -1;
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 40ca30a9fee3..a79325113105 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -31,6 +31,7 @@
 #include <linux/smp.h>
 #include <linux/module.h>
 #include <linux/hugetlb.h>
+#include <linux/isolation.h>
 
 #include <asm/page.h>
 #include <asm/sections.h>
@@ -83,8 +84,10 @@ static void hv_flush_update(const struct cpumask *cache_cpumask,
         * Don't bother to update atomically; losing a count
         * here is not that critical.
         */
-       for_each_cpu(cpu, &mask)
+       for_each_cpu(cpu, &mask) {
                ++per_cpu(irq_stat, cpu).irq_hv_flush_count;
+               task_isolation_debug(cpu);
+       }
 }
 
 /*
-- 
2.1.2
