[tip:smp/hotplug] jump_label: Pull get_online_cpus() into generic code

2017-04-20 Thread tip-bot for Peter Zijlstra (Intel)
Commit-ID:  82947f31231157d8ab70fa8961f23fd3887a3327
Gitweb: http://git.kernel.org/tip/82947f31231157d8ab70fa8961f23fd3887a3327
Author: Peter Zijlstra (Intel) 
AuthorDate: Tue, 18 Apr 2017 19:05:03 +0200
Committer:  Thomas Gleixner 
CommitDate: Thu, 20 Apr 2017 13:08:57 +0200

jump_label: Pull get_online_cpus() into generic code

This change does two things:

- it moves the get_online_cpus() call into generic code, with the aim of
  later providing some static_key ops that avoid it.

- as a side effect it inverts the lock order between cpu_hotplug_lock and
  jump_label_mutex.

Signed-off-by: Peter Zijlstra (Intel) 
Signed-off-by: Thomas Gleixner 
Cc: Sebastian Siewior 
Cc: Steven Rostedt 
Cc: jba...@akamai.com
Link: http://lkml.kernel.org/r/20170418103422.590118...@infradead.org

---
 arch/mips/kernel/jump_label.c  |  2 --
 arch/sparc/kernel/jump_label.c |  2 --
 arch/tile/kernel/jump_label.c  |  2 --
 arch/x86/kernel/jump_label.c   |  2 --
 kernel/jump_label.c| 14 ++
 5 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index 3e586da..32e3168 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -58,7 +58,6 @@ void arch_jump_label_transform(struct jump_entry *e,
insn.word = 0; /* nop */
}
 
-   get_online_cpus();
	mutex_lock(&text_mutex);
if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
insn_p->halfword[0] = insn.word >> 16;
@@ -70,7 +69,6 @@ void arch_jump_label_transform(struct jump_entry *e,
   (unsigned long)insn_p + sizeof(*insn_p));
 
	mutex_unlock(&text_mutex);
-   put_online_cpus();
 }
 
 #endif /* HAVE_JUMP_LABEL */
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 07933b9..93adde1 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -41,12 +41,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
val = 0x0100;
}
 
-   get_online_cpus();
	mutex_lock(&text_mutex);
*insn = val;
flushi(insn);
	mutex_unlock(&text_mutex);
-   put_online_cpus();
 }
 
 #endif
diff --git a/arch/tile/kernel/jump_label.c b/arch/tile/kernel/jump_label.c
index 07802d5..93931a4 100644
--- a/arch/tile/kernel/jump_label.c
+++ b/arch/tile/kernel/jump_label.c
@@ -45,14 +45,12 @@ static void __jump_label_transform(struct jump_entry *e,
 void arch_jump_label_transform(struct jump_entry *e,
enum jump_label_type type)
 {
-   get_online_cpus();
	mutex_lock(&text_mutex);
 
__jump_label_transform(e, type);
flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
 
	mutex_unlock(&text_mutex);
-   put_online_cpus();
 }
 
 __init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index c37bd0f..ab4f491 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -105,11 +105,9 @@ static void __jump_label_transform(struct jump_entry *entry,
 void arch_jump_label_transform(struct jump_entry *entry,
   enum jump_label_type type)
 {
-   get_online_cpus();
	mutex_lock(&text_mutex);
__jump_label_transform(entry, type, NULL, 0);
	mutex_unlock(&text_mutex);
-   put_online_cpus();
 }
 
 static enum {
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 6c9cb20..f3afe07 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -15,6 +15,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #ifdef HAVE_JUMP_LABEL
 
@@ -124,6 +125,12 @@ void static_key_slow_inc(struct static_key *key)
return;
}
 
+   /*
+* A number of architectures need to synchronize I$ across
+* the all CPUs, for that to be serialized against CPU hot-plug
+* we need to avoid CPUs coming online.
+*/
+   get_online_cpus();
jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
	atomic_set(&key->enabled, -1);
@@ -133,6 +140,7 @@ void static_key_slow_inc(struct static_key *key)
	atomic_inc(&key->enabled);
}
jump_label_unlock();
+   put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
@@ -146,6 +154,7 @@ static void __static_key_slow_dec(struct static_key *key,
 * returns is unbalanced, because all other static_key_slow_inc()
 * instances block while the update is in progress.
 */
+   get_online_cpus();
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
	WARN(atomic_read(&key->enabled) < 0,
 "jump label: negative count!\n");
@@ -159,6 +168,7 @@ static void __static_key_slow_dec(struct static_key *key,

[tip:smp/hotplug] jump_label: Pull get_online_cpus() into generic code

2017-04-20 Thread tip-bot for Peter Zijlstra (Intel)
Commit-ID:  82947f31231157d8ab70fa8961f23fd3887a3327
Gitweb: http://git.kernel.org/tip/82947f31231157d8ab70fa8961f23fd3887a3327
Author: Peter Zijlstra (Intel) 
AuthorDate: Tue, 18 Apr 2017 19:05:03 +0200
Committer:  Thomas Gleixner 
CommitDate: Thu, 20 Apr 2017 13:08:57 +0200

jump_label: Pull get_online_cpus() into generic code

This change does two things:

- it moves the get_online_cpus() call into generic code, with the aim of
  later providing some static_key ops that avoid it.

- as a side effect it inverts the lock order between cpu_hotplug_lock and
  jump_label_mutex.

Signed-off-by: Peter Zijlstra (Intel) 
Signed-off-by: Thomas Gleixner 
Cc: Sebastian Siewior 
Cc: Steven Rostedt 
Cc: jba...@akamai.com
Link: http://lkml.kernel.org/r/20170418103422.590118...@infradead.org

---
 arch/mips/kernel/jump_label.c  |  2 --
 arch/sparc/kernel/jump_label.c |  2 --
 arch/tile/kernel/jump_label.c  |  2 --
 arch/x86/kernel/jump_label.c   |  2 --
 kernel/jump_label.c| 14 ++
 5 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index 3e586da..32e3168 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -58,7 +58,6 @@ void arch_jump_label_transform(struct jump_entry *e,
insn.word = 0; /* nop */
}
 
-   get_online_cpus();
	mutex_lock(&text_mutex);
if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
insn_p->halfword[0] = insn.word >> 16;
@@ -70,7 +69,6 @@ void arch_jump_label_transform(struct jump_entry *e,
   (unsigned long)insn_p + sizeof(*insn_p));
 
	mutex_unlock(&text_mutex);
-   put_online_cpus();
 }
 
 #endif /* HAVE_JUMP_LABEL */
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 07933b9..93adde1 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -41,12 +41,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
val = 0x0100;
}
 
-   get_online_cpus();
	mutex_lock(&text_mutex);
*insn = val;
flushi(insn);
	mutex_unlock(&text_mutex);
-   put_online_cpus();
 }
 
 #endif
diff --git a/arch/tile/kernel/jump_label.c b/arch/tile/kernel/jump_label.c
index 07802d5..93931a4 100644
--- a/arch/tile/kernel/jump_label.c
+++ b/arch/tile/kernel/jump_label.c
@@ -45,14 +45,12 @@ static void __jump_label_transform(struct jump_entry *e,
 void arch_jump_label_transform(struct jump_entry *e,
enum jump_label_type type)
 {
-   get_online_cpus();
	mutex_lock(&text_mutex);
 
__jump_label_transform(e, type);
flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
 
	mutex_unlock(&text_mutex);
-   put_online_cpus();
 }
 
 __init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index c37bd0f..ab4f491 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -105,11 +105,9 @@ static void __jump_label_transform(struct jump_entry *entry,
 void arch_jump_label_transform(struct jump_entry *entry,
   enum jump_label_type type)
 {
-   get_online_cpus();
	mutex_lock(&text_mutex);
__jump_label_transform(entry, type, NULL, 0);
	mutex_unlock(&text_mutex);
-   put_online_cpus();
 }
 
 static enum {
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 6c9cb20..f3afe07 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -15,6 +15,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #ifdef HAVE_JUMP_LABEL
 
@@ -124,6 +125,12 @@ void static_key_slow_inc(struct static_key *key)
return;
}
 
+   /*
+* A number of architectures need to synchronize I$ across
+* the all CPUs, for that to be serialized against CPU hot-plug
+* we need to avoid CPUs coming online.
+*/
+   get_online_cpus();
jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
	atomic_set(&key->enabled, -1);
@@ -133,6 +140,7 @@ void static_key_slow_inc(struct static_key *key)
	atomic_inc(&key->enabled);
}
jump_label_unlock();
+   put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
@@ -146,6 +154,7 @@ static void __static_key_slow_dec(struct static_key *key,
 * returns is unbalanced, because all other static_key_slow_inc()
 * instances block while the update is in progress.
 */
+   get_online_cpus();
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
	WARN(atomic_read(&key->enabled) < 0,
 "jump label: negative count!\n");
@@ -159,6 +168,7 @@ static void __static_key_slow_dec(struct static_key *key,
jump_label_update(key);
}
jump_label_unlock();
+   put_online_cpus();
 }