[PATCH v2 7/7] arch/sparc: Enable queued spinlock support for SPARC

2017-05-19 Thread Babu Moger
This patch makes the necessary changes in SPARC architecture to enable
queued spinlock support. Here are some of the earlier discussions about
this feature.
https://lwn.net/Articles/561775/
https://lwn.net/Articles/590243/

Cleaned up spinlock_64.h. The definitions of arch_spin_xxx are
replaced by the functions in <asm-generic/qspinlock.h>.
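
For context (not part of this patch): once ARCH_USE_QUEUED_SPINLOCKS is
selected, the arch_spin_* operations come from the generic queued-lock
header. A paraphrased sketch of what <asm-generic/qspinlock.h> provides
in this era, trimmed for brevity (see the real header for the full set
of wrappers):

	static __always_inline void queued_spin_lock(struct qspinlock *lock)
	{
		u32 val;

		/* Fast path: uncontended cmpxchg of 0 -> locked. */
		val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
		if (likely(val == 0))
			return;
		/* Contended: queue up on the MCS-style waitqueue. */
		queued_spin_lock_slowpath(lock, val);
	}

	#define arch_spin_lock(l)	queued_spin_lock(l)
	#define arch_spin_trylock(l)	queued_spin_trylock(l)
	#define arch_spin_unlock(l)	queued_spin_unlock(l)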

Signed-off-by: Babu Moger 
Reviewed-by: Håkon Bugge 
Reviewed-by: Jane Chu 
Reviewed-by: Shannon Nelson 
Reviewed-by: Vijay Kumar 
---
 arch/sparc/Kconfig                      |    1 +
 arch/sparc/include/asm/qspinlock.h      |    7 +++
 arch/sparc/include/asm/spinlock_64.h    |   84 +--
 arch/sparc/include/asm/spinlock_types.h |    5 ++
 4 files changed, 14 insertions(+), 83 deletions(-)
 create mode 100644 arch/sparc/include/asm/qspinlock.h

diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 9ec1d2f..d4a24ea 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -83,6 +83,7 @@ config SPARC64
select ARCH_SUPPORTS_ATOMIC_RMW
select HAVE_NMI
select ARCH_USE_QUEUED_RWLOCKS
+   select ARCH_USE_QUEUED_SPINLOCKS
 
 config ARCH_DEFCONFIG
string
diff --git a/arch/sparc/include/asm/qspinlock.h b/arch/sparc/include/asm/qspinlock.h
new file mode 100644
index 0000000..5ae9a28
--- /dev/null
+++ b/arch/sparc/include/asm/qspinlock.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_SPARC_QSPINLOCK_H
+#define _ASM_SPARC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_SPARC_QSPINLOCK_H */
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 8901c2d..f7028f5 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -11,89 +11,7 @@
 #include <asm/processor.h>
 #include <asm/barrier.h>
 #include <asm/qrwlock.h>
-
-/* To get debugging spinlocks which detect and catch
- * deadlock situations, set CONFIG_DEBUG_SPINLOCK
- * and rebuild your kernel.
- */
-
-/* Because we play games to save cycles in the non-contention case, we
- * need to be extra careful about branch targets into the "spinning"
- * code.  They live in their own section, but the newer V9 branches
- * have a shorter range than the traditional 32-bit sparc branch
- * variants.  The rule is that the branches that go into and out of
- * the spinner sections must be pre-V9 branches.
- */
-
-#define arch_spin_is_locked(lp)	((lp)->lock != 0)
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-   smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-   unsigned long tmp;
-
-   __asm__ __volatile__(
-"1:ldstub  [%1], %0\n"
-"  brnz,pn %0, 2f\n"
-"   nop\n"
-"  .subsection 2\n"
-"2:ldub[%1], %0\n"
-"  brnz,pt %0, 2b\n"
-"   nop\n"
-"  ba,a,pt %%xcc, 1b\n"
-"  .previous"
-   : "=" (tmp)
-   : "r" (lock)
-   : "memory");
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-   unsigned long result;
-
-   __asm__ __volatile__(
-"  ldstub  [%1], %0\n"
-   : "=r" (result)
-   : "r" (lock)
-   : "memory");
-
-   return (result == 0UL);
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-   __asm__ __volatile__(
-"  stb %%g0, [%0]"
-   : /* No outputs */
-   : "r" (lock)
-   : "memory");
-}
-
-static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-   unsigned long tmp1, tmp2;
-
-   __asm__ __volatile__(
-"1:ldstub  [%2], %0\n"
-"  brnz,pn %0, 2f\n"
-"   nop\n"
-"  .subsection 2\n"
-"2:rdpr%%pil, %1\n"
-"  wrpr%3, %%pil\n"
-"3:ldub[%2], %0\n"
-"  brnz,pt %0, 3b\n"
-"   nop\n"
-"  ba,pt   %%xcc, 1b\n"
-"   wrpr   %1, %%pil\n"
-"  .previous"
-   : "=" (tmp1), "=" (tmp2)
-   : "r"(lock), "r"(flags)
-   : "memory");
-}
+#include <asm/qspinlock.h>
 
 #define arch_read_lock_flags(p, f) arch_read_lock(p)
 #define arch_write_lock_flags(p, f) arch_write_lock(p)
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 64fce21..bce8ef4 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -1,11 +1,16 @@
 #ifndef __SPARC_SPINLOCK_TYPES_H
 #define __SPARC_SPINLOCK_TYPES_H
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm-generic/qspinlock_types.h>
+#else
+
 typedef struct {
volatile unsigned char lock;
 } arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED  { 0 }
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 #ifdef CONFIG_QUEUED_RWLOCKS
 #include <asm-generic/qrwlock_types.h>
-- 
1.7.1