[PATCH 10/22] openrisc: add spinlock implementation

2017-01-14 Thread Stafford Horne
From: Stefan Kristiansson 

Heavily based on the ARM implementation, this adds
ticket spinlock support for OpenRISC.

Signed-off-by: Stefan Kristiansson 
[sho...@gmail.com: fix tabs vs space checkpatch warning]
Signed-off-by: Stafford Horne 
---
 arch/openrisc/include/asm/spinlock.h       | 232 +++++++++++++++++++++++++++-
 arch/openrisc/include/asm/spinlock_types.h |  28 ++++
 2 files changed, 259 insertions(+), 1 deletion(-)
 create mode 100644 arch/openrisc/include/asm/spinlock_types.h
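
The new spinlock_types.h is created by this patch but its body is not
shown in this excerpt. Below is a minimal sketch of what it presumably
contains: TICKET_SHIFT, slock and tickets.next/tickets.owner are all
confirmed by the accessors in spinlock.h, but the field order is an
inference from the l.srli-by-16 in arch_spin_trylock() plus OpenRISC
being big-endian, and the initializer macro names are the standard
kernel ones, not taken from the patch itself.

#include <linux/types.h>

#define TICKET_SHIFT	16

typedef struct {
	union {
		u32 slock;
		struct __raw_tickets {
			u16 next;	/* high half: next ticket to hand out */
			u16 owner;	/* low half: ticket being served */
		} tickets;
	};
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }

typedef struct {
	u32 lock;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }

With this layout, adding 1 << TICKET_SHIFT to slock increments
tickets.next in a single l.lwa/l.swa round trip, which is exactly what
the l.add in arch_spin_lock() below relies on.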

diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h
index fd00a3a..adf62a6 100644
--- a/arch/openrisc/include/asm/spinlock.h
+++ b/arch/openrisc/include/asm/spinlock.h
@@ -9,6 +9,9 @@
  * Copyright (C) 2003 Matjaz Breskvar 
  * Copyright (C) 2010-2011 Jonas Bonn 
  * et al.
+ * Copyright (C) 2014 Stefan Kristiansson 
+ *
+ * Ticket spinlocks, based on the ARM implementation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -19,6 +22,233 @@
 #ifndef __ASM_OPENRISC_SPINLOCK_H
 #define __ASM_OPENRISC_SPINLOCK_H
 
-#error "or32 doesn't do SMP yet"
+#include <asm/spinlock_types.h>
+
+#define arch_spin_unlock_wait(lock) \
+   do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+   u32 newval;
+   arch_spinlock_t lockval;
+
+   __asm__ __volatile__(
+   "1: l.lwa   %0, 0(%2)   \n"
+   "   l.add   %1, %0, %3  \n"
+   "   l.swa   0(%2), %1   \n"
+   "   l.bnf   1b  \n"
+   "l.nop  \n"
+   : "=" (lockval), "=" (newval)
+   : "r" (>slock), "r" (1 << TICKET_SHIFT)
+   : "cc", "memory");
+
+   while (lockval.tickets.next != lockval.tickets.owner)
+   lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+
+   smp_mb();
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+   unsigned long contended, tmp;
+   u32 slock;
+
+   /* contended = (lock->tickets.owner != lock->tickets.next) */
+   __asm__ __volatile__(
+   "1: l.lwa   %0, 0(%3)   \n"
+   "   l.srli  %1, %0, 16  \n"
+   "   l.andi  %2, %0, 0x  \n"
+   "   l.sfeq  %1, %2  \n"
+   "   l.bnf   1f  \n"
+   "l.ori  %1, r0, 1   \n"
+   "   l.add   %0, %0, %4  \n"
+   "   l.swa   0(%3), %0   \n"
+   "   l.bnf   1b  \n"
+   "l.ori  %1, r0, 0   \n"
+   "1: \n"
+   : "=" (slock), "=" (contended), "=" (tmp)
+   : "r" (>slock), "r" (1 << TICKET_SHIFT)
+   : "cc", "memory");
+
+   if (!contended) {
+   smp_mb();
+   return 1;
+   } else {
+   return 0;
+   }
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+   smp_mb();
+   lock->tickets.owner++;
+}
+
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+   return lock.tickets.owner == lock.tickets.next;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+   return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+   struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+
+   return (tickets.next - tickets.owner) > 1;
+}
+#define arch_spin_is_contended arch_spin_is_contended
+
+/*
+ * RWLOCKS
+ *
+ *
+ * Write locks are easy - we just set bit 31.  When unlocking, we can
+ * just write zero since the lock is exclusively held.
+ */
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+   unsigned long tmp;
+
+   __asm__ __volatile__(
+   "1: l.lwa   %0, 0(%1)   \n"
+   "   l.sfeqi %0, 0   \n"
+   "   l.bnf   1f  \n"
+   "l.nop  \n"
+   "   l.swa   0(%1), %2   \n"
+   "   l.bnf   1b  \n"
+   "l.nop  \n"
+   "1: \n"
+   : "=" (tmp)
+   : "r" (>lock), "r" (0x8000)
+   : "cc", "memory");
+
+   smp_mb();
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+   unsigned long contended;
+
+   __asm__ __volatile__(
+   "1: l.lwa   %0, 0(%1)   \n"
+   "   l.sfeqi %0, 0   \n"
+   "   l.bnf   1f  \n"
+   "l.nop  \n"
+   "   l.swa0(%1), %2