[tip:locking/core] locking/atomic/x86: Use 's64 *' for 'old' argument of atomic64_try_cmpxchg()

2017-07-25 Thread tip-bot for Dmitry Vyukov
Commit-ID:  007d185b44620f6ffa58f52476bed6e6d7d69d3b
Gitweb: http://git.kernel.org/tip/007d185b44620f6ffa58f52476bed6e6d7d69d3b
Author: Dmitry Vyukov 
AuthorDate: Sat, 17 Jun 2017 11:15:28 +0200
Committer:  Ingo Molnar 
CommitDate: Wed, 28 Jun 2017 18:55:55 +0200

locking/atomic/x86: Use 's64 *' for 'old' argument of atomic64_try_cmpxchg()

atomic64_try_cmpxchg() declares old argument as 'long *',
this makes it impossible to use it in portable code.
If caller passes 'long *', it becomes 32-bits on 32-bit arches.
If caller passes 's64 *', it does not compile on x86_64.

Change type of old argument to 's64 *' instead.

Signed-off-by: Dmitry Vyukov 
Cc: Andrew Morton 
Cc: Andrey Ryabinin 
Cc: Linus Torvalds 
Cc: Mark Rutland 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Will Deacon 
Cc: kasan-...@googlegroups.com
Cc: linux...@kvack.org
Link: 
http://lkml.kernel.org/r/fa6f77f2375150d26ea796a77e8b59195fd2ab13.1497690003.git.dvyu...@google.com
Signed-off-by: Ingo Molnar 
---
 arch/x86/include/asm/atomic64_64.h | 12 ++--
 arch/x86/include/asm/cmpxchg.h |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/atomic64_64.h 
b/arch/x86/include/asm/atomic64_64.h
index 8db8879a..5d9de36 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -177,7 +177,7 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long 
old, long new)
 }
 
 #define atomic64_try_cmpxchg atomic64_try_cmpxchg
-static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long *old, 
long new)
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long 
new)
 {
	return try_cmpxchg(&v->counter, old, new);
 }
@@ -198,7 +198,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  */
 static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-   long c = atomic64_read(v);
+   s64 c = atomic64_read(v);
do {
if (unlikely(c == u))
return false;
@@ -217,7 +217,7 @@ static inline bool atomic64_add_unless(atomic64_t *v, long 
a, long u)
  */
 static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-   long dec, c = atomic64_read(v);
+   s64 dec, c = atomic64_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
@@ -236,7 +236,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
 
 static inline long atomic64_fetch_and(long i, atomic64_t *v)
 {
-   long val = atomic64_read(v);
+   s64 val = atomic64_read(v);
 
do {
	} while (!atomic64_try_cmpxchg(v, &val, val & i));
@@ -253,7 +253,7 @@ static inline void atomic64_or(long i, atomic64_t *v)
 
 static inline long atomic64_fetch_or(long i, atomic64_t *v)
 {
-   long val = atomic64_read(v);
+   s64 val = atomic64_read(v);
 
do {
	} while (!atomic64_try_cmpxchg(v, &val, val | i));
@@ -270,7 +270,7 @@ static inline void atomic64_xor(long i, atomic64_t *v)
 
 static inline long atomic64_fetch_xor(long i, atomic64_t *v)
 {
-   long val = atomic64_read(v);
+   s64 val = atomic64_read(v);
 
do {
	} while (!atomic64_try_cmpxchg(v, &val, val ^ i));
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index d90296d..b5069e8 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -157,7 +157,7 @@ extern void __add_wrong_size(void)
 #define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)   \
 ({ \
bool success;   \
-   __typeof__(_ptr) _old = (_pold);\
+   __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);  \
__typeof__(*(_ptr)) __old = *_old;  \
__typeof__(*(_ptr)) __new = (_new); \
switch (size) { \


[tip:locking/core] locking/atomic/x86: Use 's64 *' for 'old' argument of atomic64_try_cmpxchg()

2017-07-25 Thread tip-bot for Dmitry Vyukov
Commit-ID:  007d185b44620f6ffa58f52476bed6e6d7d69d3b
Gitweb: http://git.kernel.org/tip/007d185b44620f6ffa58f52476bed6e6d7d69d3b
Author: Dmitry Vyukov 
AuthorDate: Sat, 17 Jun 2017 11:15:28 +0200
Committer:  Ingo Molnar 
CommitDate: Wed, 28 Jun 2017 18:55:55 +0200

locking/atomic/x86: Use 's64 *' for 'old' argument of atomic64_try_cmpxchg()

atomic64_try_cmpxchg() declares old argument as 'long *',
this makes it impossible to use it in portable code.
If caller passes 'long *', it becomes 32-bits on 32-bit arches.
If caller passes 's64 *', it does not compile on x86_64.

Change type of old argument to 's64 *' instead.

Signed-off-by: Dmitry Vyukov 
Cc: Andrew Morton 
Cc: Andrey Ryabinin 
Cc: Linus Torvalds 
Cc: Mark Rutland 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Will Deacon 
Cc: kasan-...@googlegroups.com
Cc: linux...@kvack.org
Link: 
http://lkml.kernel.org/r/fa6f77f2375150d26ea796a77e8b59195fd2ab13.1497690003.git.dvyu...@google.com
Signed-off-by: Ingo Molnar 
---
 arch/x86/include/asm/atomic64_64.h | 12 ++--
 arch/x86/include/asm/cmpxchg.h |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/atomic64_64.h 
b/arch/x86/include/asm/atomic64_64.h
index 8db8879a..5d9de36 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -177,7 +177,7 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long 
old, long new)
 }
 
 #define atomic64_try_cmpxchg atomic64_try_cmpxchg
-static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long *old, 
long new)
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long 
new)
 {
	return try_cmpxchg(&v->counter, old, new);
 }
@@ -198,7 +198,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  */
 static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-   long c = atomic64_read(v);
+   s64 c = atomic64_read(v);
do {
if (unlikely(c == u))
return false;
@@ -217,7 +217,7 @@ static inline bool atomic64_add_unless(atomic64_t *v, long 
a, long u)
  */
 static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-   long dec, c = atomic64_read(v);
+   s64 dec, c = atomic64_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
@@ -236,7 +236,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
 
 static inline long atomic64_fetch_and(long i, atomic64_t *v)
 {
-   long val = atomic64_read(v);
+   s64 val = atomic64_read(v);
 
do {
	} while (!atomic64_try_cmpxchg(v, &val, val & i));
@@ -253,7 +253,7 @@ static inline void atomic64_or(long i, atomic64_t *v)
 
 static inline long atomic64_fetch_or(long i, atomic64_t *v)
 {
-   long val = atomic64_read(v);
+   s64 val = atomic64_read(v);
 
do {
	} while (!atomic64_try_cmpxchg(v, &val, val | i));
@@ -270,7 +270,7 @@ static inline void atomic64_xor(long i, atomic64_t *v)
 
 static inline long atomic64_fetch_xor(long i, atomic64_t *v)
 {
-   long val = atomic64_read(v);
+   s64 val = atomic64_read(v);
 
do {
	} while (!atomic64_try_cmpxchg(v, &val, val ^ i));
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index d90296d..b5069e8 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -157,7 +157,7 @@ extern void __add_wrong_size(void)
 #define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)   \
 ({ \
bool success;   \
-   __typeof__(_ptr) _old = (_pold);\
+   __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);  \
__typeof__(*(_ptr)) __old = *_old;  \
__typeof__(*(_ptr)) __new = (_new); \
switch (size) { \


[tip:locking/core] locking/atomic/x86: Use 's64 *' for 'old' argument of atomic64_try_cmpxchg()

2017-06-22 Thread tip-bot for Dmitry Vyukov
Commit-ID:  f6dda790094b0d658b59cf108c52805f1f7c11e6
Gitweb: http://git.kernel.org/tip/f6dda790094b0d658b59cf108c52805f1f7c11e6
Author: Dmitry Vyukov 
AuthorDate: Sat, 17 Jun 2017 11:15:28 +0200
Committer:  Ingo Molnar 
CommitDate: Thu, 22 Jun 2017 10:19:56 +0200

locking/atomic/x86: Use 's64 *' for 'old' argument of atomic64_try_cmpxchg()

atomic64_try_cmpxchg() declares old argument as 'long *',
this makes it impossible to use it in portable code.
If caller passes 'long *', it becomes 32-bits on 32-bit arches.
If caller passes 's64 *', it does not compile on x86_64.

Change type of old argument to 's64 *' instead.

Signed-off-by: Dmitry Vyukov 
Cc: Andrew Morton 
Cc: Andrey Ryabinin 
Cc: Linus Torvalds 
Cc: Mark Rutland 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Will Deacon 
Cc: kasan-...@googlegroups.com
Cc: linux...@kvack.org
Link: 
http://lkml.kernel.org/r/fa6f77f2375150d26ea796a77e8b59195fd2ab13.1497690003.git.dvyu...@google.com
Signed-off-by: Ingo Molnar 
---
 arch/x86/include/asm/atomic64_64.h | 12 ++--
 arch/x86/include/asm/cmpxchg.h |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/atomic64_64.h 
b/arch/x86/include/asm/atomic64_64.h
index 8db8879a..5d9de36 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -177,7 +177,7 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long 
old, long new)
 }
 
 #define atomic64_try_cmpxchg atomic64_try_cmpxchg
-static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long *old, 
long new)
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long 
new)
 {
	return try_cmpxchg(&v->counter, old, new);
 }
@@ -198,7 +198,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  */
 static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-   long c = atomic64_read(v);
+   s64 c = atomic64_read(v);
do {
if (unlikely(c == u))
return false;
@@ -217,7 +217,7 @@ static inline bool atomic64_add_unless(atomic64_t *v, long 
a, long u)
  */
 static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-   long dec, c = atomic64_read(v);
+   s64 dec, c = atomic64_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
@@ -236,7 +236,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
 
 static inline long atomic64_fetch_and(long i, atomic64_t *v)
 {
-   long val = atomic64_read(v);
+   s64 val = atomic64_read(v);
 
do {
	} while (!atomic64_try_cmpxchg(v, &val, val & i));
@@ -253,7 +253,7 @@ static inline void atomic64_or(long i, atomic64_t *v)
 
 static inline long atomic64_fetch_or(long i, atomic64_t *v)
 {
-   long val = atomic64_read(v);
+   s64 val = atomic64_read(v);
 
do {
	} while (!atomic64_try_cmpxchg(v, &val, val | i));
@@ -270,7 +270,7 @@ static inline void atomic64_xor(long i, atomic64_t *v)
 
 static inline long atomic64_fetch_xor(long i, atomic64_t *v)
 {
-   long val = atomic64_read(v);
+   s64 val = atomic64_read(v);
 
do {
	} while (!atomic64_try_cmpxchg(v, &val, val ^ i));
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index d90296d..b5069e8 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -157,7 +157,7 @@ extern void __add_wrong_size(void)
 #define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)   \
 ({ \
bool success;   \
-   __typeof__(_ptr) _old = (_pold);\
+   __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);  \
__typeof__(*(_ptr)) __old = *_old;  \
__typeof__(*(_ptr)) __new = (_new); \
switch (size) { \


[tip:locking/core] locking/atomic/x86: Use 's64 *' for 'old' argument of atomic64_try_cmpxchg()

2017-06-22 Thread tip-bot for Dmitry Vyukov
Commit-ID:  f6dda790094b0d658b59cf108c52805f1f7c11e6
Gitweb: http://git.kernel.org/tip/f6dda790094b0d658b59cf108c52805f1f7c11e6
Author: Dmitry Vyukov 
AuthorDate: Sat, 17 Jun 2017 11:15:28 +0200
Committer:  Ingo Molnar 
CommitDate: Thu, 22 Jun 2017 10:19:56 +0200

locking/atomic/x86: Use 's64 *' for 'old' argument of atomic64_try_cmpxchg()

atomic64_try_cmpxchg() declares old argument as 'long *',
this makes it impossible to use it in portable code.
If caller passes 'long *', it becomes 32-bits on 32-bit arches.
If caller passes 's64 *', it does not compile on x86_64.

Change type of old argument to 's64 *' instead.

Signed-off-by: Dmitry Vyukov 
Cc: Andrew Morton 
Cc: Andrey Ryabinin 
Cc: Linus Torvalds 
Cc: Mark Rutland 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Will Deacon 
Cc: kasan-...@googlegroups.com
Cc: linux...@kvack.org
Link: 
http://lkml.kernel.org/r/fa6f77f2375150d26ea796a77e8b59195fd2ab13.1497690003.git.dvyu...@google.com
Signed-off-by: Ingo Molnar 
---
 arch/x86/include/asm/atomic64_64.h | 12 ++--
 arch/x86/include/asm/cmpxchg.h |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/atomic64_64.h 
b/arch/x86/include/asm/atomic64_64.h
index 8db8879a..5d9de36 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -177,7 +177,7 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long 
old, long new)
 }
 
 #define atomic64_try_cmpxchg atomic64_try_cmpxchg
-static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long *old, 
long new)
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long 
new)
 {
	return try_cmpxchg(&v->counter, old, new);
 }
@@ -198,7 +198,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  */
 static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-   long c = atomic64_read(v);
+   s64 c = atomic64_read(v);
do {
if (unlikely(c == u))
return false;
@@ -217,7 +217,7 @@ static inline bool atomic64_add_unless(atomic64_t *v, long 
a, long u)
  */
 static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-   long dec, c = atomic64_read(v);
+   s64 dec, c = atomic64_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
@@ -236,7 +236,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
 
 static inline long atomic64_fetch_and(long i, atomic64_t *v)
 {
-   long val = atomic64_read(v);
+   s64 val = atomic64_read(v);
 
do {
	} while (!atomic64_try_cmpxchg(v, &val, val & i));
@@ -253,7 +253,7 @@ static inline void atomic64_or(long i, atomic64_t *v)
 
 static inline long atomic64_fetch_or(long i, atomic64_t *v)
 {
-   long val = atomic64_read(v);
+   s64 val = atomic64_read(v);
 
do {
	} while (!atomic64_try_cmpxchg(v, &val, val | i));
@@ -270,7 +270,7 @@ static inline void atomic64_xor(long i, atomic64_t *v)
 
 static inline long atomic64_fetch_xor(long i, atomic64_t *v)
 {
-   long val = atomic64_read(v);
+   s64 val = atomic64_read(v);
 
do {
	} while (!atomic64_try_cmpxchg(v, &val, val ^ i));
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index d90296d..b5069e8 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -157,7 +157,7 @@ extern void __add_wrong_size(void)
 #define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)   \
 ({ \
bool success;   \
-   __typeof__(_ptr) _old = (_pold);\
+   __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);  \
__typeof__(*(_ptr)) __old = *_old;  \
__typeof__(*(_ptr)) __new = (_new); \
switch (size) { \