[PATCH v2 2/2] Remove rwsem_atomic_add() and rwsem_atomic_update()

2016-06-03 Thread Jason Low
The rwsem-xadd count has been converted to an atomic variable and the
rwsem code now directly uses atomic_long_add() and
atomic_long_add_return(), so we can remove the arch implementations of
rwsem_atomic_add() and rwsem_atomic_update().

Signed-off-by: Jason Low <jason.low2@hpe.com>
---
 arch/alpha/include/asm/rwsem.h | 42 --
 arch/ia64/include/asm/rwsem.h  |  7 ---
 arch/s390/include/asm/rwsem.h  | 37 -
 arch/x86/include/asm/rwsem.h   | 18 --
 include/asm-generic/rwsem.h| 16 
 5 files changed, 120 deletions(-)

diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index b40021a..77873d0 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -191,47 +191,5 @@ static inline void __downgrade_write(struct rw_semaphore 
*sem)
rwsem_downgrade_wake(sem);
 }
 
-static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
-{
-#ifndef CONFIG_SMP
-   sem->count += val;
-#else
-   long temp;
-   __asm__ __volatile__(
-   "1: ldq_l   %0,%1\n"
-   "   addq%0,%2,%0\n"
-   "   stq_c   %0,%1\n"
-   "   beq %0,2f\n"
-   ".subsection 2\n"
-   "2: br  1b\n"
-   ".previous"
-   :"=" (temp), "=m" (sem->count)
-   :"Ir" (val), "m" (sem->count));
-#endif
-}
-
-static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
-{
-#ifndef CONFIG_SMP
-   sem->count += val;
-   return sem->count;
-#else
-   long ret, temp;
-   __asm__ __volatile__(
-   "1: ldq_l   %0,%1\n"
-   "   addq%0,%3,%2\n"
-   "   addq%0,%3,%0\n"
-   "   stq_c   %2,%1\n"
-   "   beq %2,2f\n"
-   ".subsection 2\n"
-   "2: br  1b\n"
-   ".previous"
-   :"=" (ret), "=m" (sem->count), "=" (temp)
-   :"Ir" (val), "m" (sem->count));
-
-   return ret;
-#endif
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ALPHA_RWSEM_H */
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index c5d544f..8fa98dd 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -151,11 +151,4 @@ __downgrade_write (struct rw_semaphore *sem)
rwsem_downgrade_wake(sem);
 }
 
-/*
- * Implement atomic add functionality.  These used to be "inline" functions, 
but GCC v3.1
- * doesn't quite optimize this stuff right and ends up with bad calls to 
fetchandadd.
- */
-#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
-#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
-
 #endif /* _ASM_IA64_RWSEM_H */
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index c75e447..597e7e9 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -207,41 +207,4 @@ static inline void __downgrade_write(struct rw_semaphore 
*sem)
rwsem_downgrade_wake(sem);
 }
 
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-   signed long old, new;
-
-   asm volatile(
-   "   lg  %0,%2\n"
-   "0: lgr %1,%0\n"
-   "   agr %1,%4\n"
-   "   csg %0,%1,%2\n"
-   "   jl  0b"
-   : "=" (old), "=" (new), "=Q" (sem->count)
-   : "Q" (sem->count), "d" (delta)
-   : "cc", "memory");
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-   signed long old, new;
-
-   asm volatile(
-   "   lg  %0,%2\n"
-   "0: lgr %1,%0\n"
-   "   agr %1,%4\n"
-   "   csg %0,%1,%2\n"
-   "   jl  0b"
-   : "=" (old), "=" (new), "=Q" (sem->count)
-   : "Q" (sem->count), "d" (delta)
-   : "cc", "memory");
-   return new;
-}
-
 #endif /* _S390_RWSEM_H */
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 453744c..089ced4 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -213,23 +213,5 @@ static inline void __downgrade_write(struct rw_semaphore 
*sem)
 : "memory", "cc");
 }
 
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-   asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
-: "+m" (sem->count)
-: "er" (delta));
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-   return delta + xadd(&sem->count, delta);
-}
-
 #endif /* __KERNEL__ */
 

[PATCH v2 2/2] Remove rwsem_atomic_add() and rwsem_atomic_update()

2016-06-03 Thread Jason Low
The rwsem-xadd count has been converted to an atomic variable and the
rwsem code now directly uses atomic_long_add() and
atomic_long_add_return(), so we can remove the arch implementations of
rwsem_atomic_add() and rwsem_atomic_update().

Signed-off-by: Jason Low <jason.low2@hpe.com>
---
 arch/alpha/include/asm/rwsem.h | 42 --
 arch/ia64/include/asm/rwsem.h  |  7 ---
 arch/s390/include/asm/rwsem.h  | 37 -
 arch/x86/include/asm/rwsem.h   | 18 --
 include/asm-generic/rwsem.h| 16 
 5 files changed, 120 deletions(-)

diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index b40021a..77873d0 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -191,47 +191,5 @@ static inline void __downgrade_write(struct rw_semaphore 
*sem)
rwsem_downgrade_wake(sem);
 }
 
-static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
-{
-#ifndef CONFIG_SMP
-   sem->count += val;
-#else
-   long temp;
-   __asm__ __volatile__(
-   "1: ldq_l   %0,%1\n"
-   "   addq%0,%2,%0\n"
-   "   stq_c   %0,%1\n"
-   "   beq %0,2f\n"
-   ".subsection 2\n"
-   "2: br  1b\n"
-   ".previous"
-   :"=" (temp), "=m" (sem->count)
-   :"Ir" (val), "m" (sem->count));
-#endif
-}
-
-static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
-{
-#ifndef CONFIG_SMP
-   sem->count += val;
-   return sem->count;
-#else
-   long ret, temp;
-   __asm__ __volatile__(
-   "1: ldq_l   %0,%1\n"
-   "   addq%0,%3,%2\n"
-   "   addq%0,%3,%0\n"
-   "   stq_c   %2,%1\n"
-   "   beq %2,2f\n"
-   ".subsection 2\n"
-   "2: br  1b\n"
-   ".previous"
-   :"=" (ret), "=m" (sem->count), "=" (temp)
-   :"Ir" (val), "m" (sem->count));
-
-   return ret;
-#endif
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ALPHA_RWSEM_H */
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index c5d544f..8fa98dd 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -151,11 +151,4 @@ __downgrade_write (struct rw_semaphore *sem)
rwsem_downgrade_wake(sem);
 }
 
-/*
- * Implement atomic add functionality.  These used to be "inline" functions, 
but GCC v3.1
- * doesn't quite optimize this stuff right and ends up with bad calls to 
fetchandadd.
- */
-#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
-#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
-
 #endif /* _ASM_IA64_RWSEM_H */
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index c75e447..597e7e9 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -207,41 +207,4 @@ static inline void __downgrade_write(struct rw_semaphore 
*sem)
rwsem_downgrade_wake(sem);
 }
 
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-   signed long old, new;
-
-   asm volatile(
-   "   lg  %0,%2\n"
-   "0: lgr %1,%0\n"
-   "   agr %1,%4\n"
-   "   csg %0,%1,%2\n"
-   "   jl  0b"
-   : "=" (old), "=" (new), "=Q" (sem->count)
-   : "Q" (sem->count), "d" (delta)
-   : "cc", "memory");
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-   signed long old, new;
-
-   asm volatile(
-   "   lg  %0,%2\n"
-   "0: lgr %1,%0\n"
-   "   agr %1,%4\n"
-   "   csg %0,%1,%2\n"
-   "   jl  0b"
-   : "=" (old), "=" (new), "=Q" (sem->count)
-   : "Q" (sem->count), "d" (delta)
-   : "cc", "memory");
-   return new;
-}
-
 #endif /* _S390_RWSEM_H */
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 453744c..089ced4 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -213,23 +213,5 @@ static inline void __downgrade_write(struct rw_semaphore 
*sem)
 : "memory", "cc");
 }
 
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-   asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
-: "+m" (sem->count)
-: "er" (delta));
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-   return delta + xadd(&sem->count, delta);
-}
-
 #endif /* __KERNEL__ */
 #endif /*