The conversion of the hotplug locking to a percpu rwsem unearthed lock
ordering issues all over the place.

The jump_label code has two issues:

 1) Nested get_online_cpus() invocations

 2) Ordering problems vs. the cpus rwsem and the jump_label_mutex

To cure these, the following lock order has been established:

   cpus_rwsem -> jump_label_lock -> text_mutex

Even if not all architectures need protection against CPU hotplug, taking
cpus_rwsem before jump_label_lock is now mandatory in code paths which
actually modify code and therefore need text_mutex protection.
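
Purely as an illustration (not part of this patch), a code path which ends
up patching jump entries is expected to nest the locks like this, with
cpus_read_lock()/cpus_read_unlock() being the read side of the cpus rwsem:

   cpus_read_lock();           /* cpus rwsem, read side */
   jump_label_lock();          /* jump_label_mutex */
   mutex_lock(&text_mutex);    /* serializes the actual code patching */

   /* ... patch the jump entries ... */

   mutex_unlock(&text_mutex);
   jump_label_unlock();
   cpus_read_unlock();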

Move the get_online_cpus() invocations into the core jump label code and
establish the proper lock order where required.

Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: Chris Metcalf <[email protected]>
Cc: Jason Baron <[email protected]>
---
 arch/mips/kernel/jump_label.c  |    2 --
 arch/sparc/kernel/jump_label.c |    2 --
 arch/tile/kernel/jump_label.c  |    2 --
 arch/x86/kernel/jump_label.c   |    2 --
 kernel/jump_label.c            |   20 ++++++++++++++------
 5 files changed, 14 insertions(+), 14 deletions(-)

Index: b/arch/mips/kernel/jump_label.c
===================================================================
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -58,7 +58,6 @@ void arch_jump_label_transform(struct ju
                insn.word = 0; /* nop */
        }
 
-       get_online_cpus();
        mutex_lock(&text_mutex);
        if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
                insn_p->halfword[0] = insn.word >> 16;
@@ -70,7 +69,6 @@ void arch_jump_label_transform(struct ju
                           (unsigned long)insn_p + sizeof(*insn_p));
 
        mutex_unlock(&text_mutex);
-       put_online_cpus();
 }
 
 #endif /* HAVE_JUMP_LABEL */
Index: b/arch/sparc/kernel/jump_label.c
===================================================================
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -41,12 +41,10 @@ void arch_jump_label_transform(struct ju
                val = 0x01000000;
        }
 
-       get_online_cpus();
        mutex_lock(&text_mutex);
        *insn = val;
        flushi(insn);
        mutex_unlock(&text_mutex);
-       put_online_cpus();
 }
 
 #endif
Index: b/arch/tile/kernel/jump_label.c
===================================================================
--- a/arch/tile/kernel/jump_label.c
+++ b/arch/tile/kernel/jump_label.c
@@ -45,14 +45,12 @@ static void __jump_label_transform(struc
 void arch_jump_label_transform(struct jump_entry *e,
                                enum jump_label_type type)
 {
-       get_online_cpus();
        mutex_lock(&text_mutex);
 
        __jump_label_transform(e, type);
        flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
 
        mutex_unlock(&text_mutex);
-       put_online_cpus();
 }
 
 __init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
Index: b/arch/x86/kernel/jump_label.c
===================================================================
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -105,11 +105,9 @@ static void __jump_label_transform(struc
 void arch_jump_label_transform(struct jump_entry *entry,
                               enum jump_label_type type)
 {
-       get_online_cpus();
        mutex_lock(&text_mutex);
        __jump_label_transform(entry, type, NULL, 0);
        mutex_unlock(&text_mutex);
-       put_online_cpus();
 }
 
 static enum {
Index: b/kernel/jump_label.c
===================================================================
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -15,6 +15,7 @@
 #include <linux/static_key.h>
 #include <linux/jump_label_ratelimit.h>
 #include <linux/bug.h>
+#include <linux/cpu.h>
 
 #ifdef HAVE_JUMP_LABEL
 
@@ -124,6 +125,7 @@ void static_key_slow_inc(struct static_k
                        return;
        }
 
+       cpus_read_lock();
        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
@@ -133,12 +135,14 @@ void static_key_slow_inc(struct static_k
                atomic_inc(&key->enabled);
        }
        jump_label_unlock();
+       cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
 static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
 {
+       cpus_read_lock();
        /*
         * The negative count check is valid even when a negative
         * key->enabled is in use by static_key_slow_inc(); a
@@ -149,6 +153,7 @@ static void __static_key_slow_dec(struct
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
+               cpus_read_unlock();
                return;
        }
 
@@ -159,6 +164,7 @@ static void __static_key_slow_dec(struct
                jump_label_update(key);
        }
        jump_label_unlock();
+       cpus_read_unlock();
 }
 
 static void jump_label_update_timeout(struct work_struct *work)
@@ -334,6 +340,7 @@ void __init jump_label_init(void)
        if (static_key_initialized)
                return;
 
+       cpus_read_lock();
        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);
 
@@ -353,6 +360,7 @@ void __init jump_label_init(void)
        }
        static_key_initialized = true;
        jump_label_unlock();
+       cpus_read_unlock();
 }
 
 #ifdef CONFIG_MODULES
@@ -590,28 +598,28 @@ jump_label_module_notify(struct notifier
        struct module *mod = data;
        int ret = 0;
 
+       cpus_read_lock();
+       jump_label_lock();
+
        switch (val) {
        case MODULE_STATE_COMING:
-               jump_label_lock();
                ret = jump_label_add_module(mod);
                if (ret) {
                        WARN(1, "Failed to allocatote memory: jump_label may 
not work properly.\n");
                        jump_label_del_module(mod);
                }
-               jump_label_unlock();
                break;
        case MODULE_STATE_GOING:
-               jump_label_lock();
                jump_label_del_module(mod);
-               jump_label_unlock();
                break;
        case MODULE_STATE_LIVE:
-               jump_label_lock();
                jump_label_invalidate_module_init(mod);
-               jump_label_unlock();
                break;
        }
 
+       jump_label_unlock();
+       cpus_read_unlock();
+
        return notifier_from_errno(ret);
 }
 

