We recently modified how mmu_enabled() works in order to speed up
spinlocks (see c33efcf3 and b141dbac). Unfortunately c33efcf3 was a
bit hasty in removing mmu_set_enabled. I had forgotten that one of
the reasons I introduced it was that secondaries don't go through
mmu_enable(); they go straight to asm_mmu_enable from
secondary_entry. This patch brings it back, renamed to
mmu_mark_enabled to match mmu_mark_disabled.
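
For reference, the two paths look roughly like this (condensed from
the diff below, not exact code):

  /* primary: the C path does its own bookkeeping */
  mmu_enable(pgtable)
      asm_mmu_enable(__pa(pgtable));
      flush_tlb_all();
      mmu_mark_enabled(cpu);     /* clear this cpu's disabled bit */

  /* secondary: asm enables the mmu before any C code runs */
  secondary_entry (asm)
      asm_mmu_enable
      secondary_cinit();         /* so the C init must do the marking */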

The patch also moves mmu_mark_disabled from the psci code to the smp
code, as psci code shouldn't need to care about mmu bookkeeping, and
because we now get a nice balance in the smp code: we mark the mmu
disabled for the secondary before we kick it, and then mark it
enabled when the secondary initializes itself, as sketched below.
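
Condensed from the diff below, the pairing in lib/arm/smp.c is

  void smp_boot_secondary(int cpu, secondary_entry_fn entry)
  {
      ...
      mmu_mark_disabled(cpu);    /* secondary will start with mmu off */
      assert(cpu_psci_cpu_boot(cpu) == 0);
      ...
  }

  secondary_entry_fn secondary_cinit(void)
  {
      ...
      mmu_mark_enabled(ti->cpu); /* asm_mmu_enable has already run */
      ...
  }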

Finally, and somewhat unrelated, I've added a lengthy comment to
__mmu_enabled warning about what can't be called from there. We need
to be cautious there because mmu_enabled() easily ends up called all
over the place, being used in basic functions like spinlocks, and
thus there's a risk of recursion.
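
To illustrate the recursion risk, consider a lock that guards its
exclusive ops with mmu_enabled() (the spin_lock below is only a
paraphrase of lib/arm/spinlock.c, not the exact code):

  void spin_lock(struct spinlock *lock)
  {
      if (!mmu_enabled()) {      /* exclusive ops need the mmu on */
          lock->v = 1;
          return;
      }
      /* ...exclusive-op (ldrex/strex) loop... */
  }

  bool __mmu_enabled(void)
  {
      /*
       * Taking a spinlock or using an atomic bitop here would
       * recurse: spin_lock -> mmu_enabled -> __mmu_enabled ->
       * spin_lock -> ... cpumask_test_cpu is a plain load, so
       * it's safe.
       */
      return !cpumask_test_cpu(cpu, &mmu_disabled_cpumask);
  }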

Signed-off-by: Andrew Jones <[email protected]>
Reported-by: Levente Kurusa <[email protected]>
---

Couldn't test on arm32, as I usually do, since my board is out to
lunch. The issue was found on arm64, on a Mustang, and testing there
shows this patch resolves it. It works on TCG too, but there was no
problem there in the first place...


 lib/arm/asm/mmu-api.h |  3 ++-
 lib/arm/mmu.c         | 26 +++++++++++++++++++-------
 lib/arm/psci.c        |  2 --
 lib/arm/smp.c         |  3 +++
 4 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/lib/arm/asm/mmu-api.h b/lib/arm/asm/mmu-api.h
index f41738be7f9ec..bf0e6a0961416 100644
--- a/lib/arm/asm/mmu-api.h
+++ b/lib/arm/asm/mmu-api.h
@@ -7,8 +7,9 @@ static inline bool mmu_enabled(void)
 {
        return mmu_disabled_cpu_count == 0 || __mmu_enabled();
 }
-extern void mmu_enable(pgd_t *pgtable);
+extern void mmu_mark_enabled(int cpu);
 extern void mmu_mark_disabled(int cpu);
+extern void mmu_enable(pgd_t *pgtable);
 extern void mmu_disable(void);
 extern void mmu_enable_idmap(void);
 extern void mmu_init_io_sect(pgd_t *pgtable, unsigned long virt_offset);
diff --git a/lib/arm/mmu.c b/lib/arm/mmu.c
index a74d70cb56908..1b7e4fb3cd47e 100644
--- a/lib/arm/mmu.c
+++ b/lib/arm/mmu.c
@@ -22,17 +22,18 @@ bool __mmu_enabled(void)
 {
        int cpu = current_thread_info()->cpu;
 
+       /*
+        * mmu_enabled is called from places that are guarding the
+        * use of exclusive ops (which require the mmu to be enabled).
+        * That means we CANNOT call anything from here that may use a
+        * spinlock, atomic bitop, etc., otherwise we'll recurse.
+        * [cpumask_]test_bit is safe though.
+        */
        return !cpumask_test_cpu(cpu, &mmu_disabled_cpumask);
 }
 
-extern void asm_mmu_enable(phys_addr_t pgtable);
-void mmu_enable(pgd_t *pgtable)
+void mmu_mark_enabled(int cpu)
 {
-       int cpu = current_thread_info()->cpu;
-
-       asm_mmu_enable(__pa(pgtable));
-       flush_tlb_all();
-
        if (cpumask_test_and_clear_cpu(cpu, &mmu_disabled_cpumask))
                --mmu_disabled_cpu_count;
 }
@@ -43,6 +44,17 @@ void mmu_mark_disabled(int cpu)
                ++mmu_disabled_cpu_count;
 }
 
+extern void asm_mmu_enable(phys_addr_t pgtable);
+void mmu_enable(pgd_t *pgtable)
+{
+       int cpu = current_thread_info()->cpu;
+
+       asm_mmu_enable(__pa(pgtable));
+       flush_tlb_all();
+
+       mmu_mark_enabled(cpu);
+}
+
 extern void asm_mmu_disable(void);
 void mmu_disable(void)
 {
diff --git a/lib/arm/psci.c b/lib/arm/psci.c
index c4469c9ea1baa..aca88851171f5 100644
--- a/lib/arm/psci.c
+++ b/lib/arm/psci.c
@@ -9,7 +9,6 @@
 #include <asm/psci.h>
 #include <asm/setup.h>
 #include <asm/page.h>
-#include <asm/mmu-api.h>
 
 #define T PSCI_INVOKE_ARG_TYPE
 __attribute__((noinline))
@@ -30,7 +29,6 @@ int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
 extern void secondary_entry(void);
 int cpu_psci_cpu_boot(unsigned int cpu)
 {
-       mmu_mark_disabled(cpu);
        int err = psci_cpu_on(cpus[cpu], __pa(secondary_entry));
        if (err)
                printf("failed to boot CPU%d (%d)\n", cpu, err);
diff --git a/lib/arm/smp.c b/lib/arm/smp.c
index ca435dcd5f4a2..3cfc6d5ddedd0 100644
--- a/lib/arm/smp.c
+++ b/lib/arm/smp.c
@@ -9,6 +9,7 @@
 #include <alloc.h>
 #include <asm/thread_info.h>
 #include <asm/cpumask.h>
+#include <asm/barrier.h>
 #include <asm/mmu.h>
 #include <asm/psci.h>
 #include <asm/smp.h>
@@ -23,6 +24,7 @@ secondary_entry_fn secondary_cinit(void)
        secondary_entry_fn entry;
 
        thread_info_init(ti, 0);
+       mmu_mark_enabled(ti->cpu);
 
        /*
         * Save secondary_data.entry locally to avoid opening a race
@@ -45,6 +47,7 @@ void smp_boot_secondary(int cpu, secondary_entry_fn entry)
 
        secondary_data.stack = stack_base + THREAD_START_SP;
        secondary_data.entry = entry;
+       mmu_mark_disabled(cpu);
        assert(cpu_psci_cpu_boot(cpu) == 0);
 
        while (!cpu_online(cpu))
-- 
1.8.3.1
