[GIT pull] locking fix for 4.7

2016-06-25 Thread Thomas Gleixner
Linus,

please pull the latest locking-urgent-for-linus git tree from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking-urgent-for-linus

A single fix to address a race in the static key logic.

Thanks,

tglx

-->
Paolo Bonzini (1):
  locking/static_key: Fix concurrent static_key_slow_inc()


 include/linux/jump_label.h | 16 +++++++++++++---
 kernel/jump_label.c        | 36 +++++++++++++++++++++++++++++++++---
 2 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 0536524bb9eb..68904469fba1 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -117,13 +117,18 @@ struct module;
 
 #include <linux/atomic.h>
 
+#ifdef HAVE_JUMP_LABEL
+
 static inline int static_key_count(struct static_key *key)
 {
-	return atomic_read(&key->enabled);
+   /*
+* -1 means the first static_key_slow_inc() is in progress.
+*  static_key_enabled() must return true, so return 1 here.
+*/
+	int n = atomic_read(&key->enabled);
+   return n >= 0 ? n : 1;
 }
 
-#ifdef HAVE_JUMP_LABEL
-
 #define JUMP_TYPE_FALSE	0UL
 #define JUMP_TYPE_TRUE	1UL
 #define JUMP_TYPE_MASK	1UL
@@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
 
 #else  /* !HAVE_JUMP_LABEL */
 
+static inline int static_key_count(struct static_key *key)
+{
+	return atomic_read(&key->enabled);
+}
+
 static __always_inline void jump_label_init(void)
 {
static_key_initialized = true;
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 05254eeb4b4e..4b353e0be121 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
 
 void static_key_slow_inc(struct static_key *key)
 {
+   int v, v1;
+
STATIC_KEY_CHECK_USE();
-	if (atomic_inc_not_zero(&key->enabled))
-   return;
+
+   /*
+* Careful if we get concurrent static_key_slow_inc() calls;
+* later calls must wait for the first one to _finish_ the
+* jump_label_update() process.  At the same time, however,
+* the jump_label_update() call below wants to see
+* static_key_enabled() for jumps to be updated properly.
+*
+* So give a special meaning to negative key->enabled: it sends
+* static_key_slow_inc() down the slow path, and it is non-zero
+* so it counts as "enabled" in jump_label_update().  Note that
+* atomic_inc_unless_negative() checks >= 0, so roll our own.
+*/
+	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
+		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
+   if (likely(v1 == v))
+   return;
+   }
 
jump_label_lock();
-	if (atomic_inc_return(&key->enabled) == 1)
+	if (atomic_read(&key->enabled) == 0) {
+		atomic_set(&key->enabled, -1);
 		jump_label_update(key);
+		atomic_set(&key->enabled, 1);
+	} else {
+		atomic_inc(&key->enabled);
+	}
jump_label_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
 static void __static_key_slow_dec(struct static_key *key,
unsigned long rate_limit, struct delayed_work *work)
 {
+   /*
+* The negative count check is valid even when a negative
+* key->enabled is in use by static_key_slow_inc(); a
+* __static_key_slow_dec() before the first static_key_slow_inc()
+* returns is unbalanced, because all other static_key_slow_inc()
+* instances block while the update is in progress.
+*/
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");

