Linus,

Please pull the latest locking-rwsem-for-linus git tree from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking-rwsem-for-linus

   # HEAD: 4544ba8c6b1743499cabb682897a469911845f15 locking/rwsem: Fix comment on register clobbering

This tree, by Michal Hocko, implements down_write_killable(). The main use case
will be to update mmap_sem usage sites to use this new API, to allow the
mm reaper introduced in:

  aac453635549 mm, oom: introduce oom reaper

to tear down the address spaces of OOM victims asynchronously, with minimal
latencies and without deadlock worries.
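
As an illustration (not part of the pull itself), a converted mmap_sem
write-lock site would look roughly like the sketch below. The helper name and
the work done under the lock are hypothetical placeholders; only the locking
calls reflect the actual API:

	#include <linux/mm_types.h>	/* struct mm_struct */
	#include <linux/rwsem.h>	/* down_write_killable(), up_write() */

	/* Hypothetical example of converting an mmap_sem write-lock site. */
	static int modify_address_space(struct mm_struct *mm)
	{
		/*
		 * Returns 0 with mmap_sem held for write, or -EINTR if a
		 * fatal signal arrived while we were sleeping on the lock.
		 */
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;

		/* ... modify the address space ... */

		up_write(&mm->mmap_sem);
		return 0;
	}

A task that gets SIGKILLed while queued for the lock backs off with -EINTR
instead of sleeping uninterruptibly, so the reaper is never stuck behind it.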

 Thanks,

        Ingo

------------------>
Borislav Petkov (1):
      locking/rwsem: Fix comment on register clobbering

Michal Hocko (12):
      locking/rwsem: Get rid of __down_write_nested()
      locking/rwsem: Drop explicit memory barriers
      locking/rwsem, xtensa: Drop superfluous arch specific implementation
      locking/rwsem, sh: Drop superfluous arch specific implementation
      locking/rwsem, sparc: Drop superfluous arch specific implementation
      locking/rwsem: Introduce basis for down_write_killable()
      locking/rwsem, alpha: Provide __down_write_killable()
      locking/rwsem, ia64: Provide __down_write_killable()
      locking/rwsem, s390: Provide __down_write_killable()
      locking/rwsem, x86: Provide __down_write_killable()
      locking/rwsem: Provide down_write_killable()
      locking/rwsem, x86: Add frame annotation for call_rwsem_down_write_failed_killable()

Peter Zijlstra (1):
      locking/rwsem: Fix down_write_killable()


 arch/alpha/include/asm/rwsem.h  |  18 +++++-
 arch/ia64/include/asm/rwsem.h   |  22 ++++++-
 arch/s390/include/asm/rwsem.h   |  18 ++++--
 arch/sh/include/asm/Kbuild      |   1 +
 arch/sh/include/asm/rwsem.h     | 132 ----------------------------------------
 arch/sparc/include/asm/Kbuild   |   1 +
 arch/sparc/include/asm/rwsem.h  | 124 -------------------------------------
 arch/x86/include/asm/rwsem.h    |  42 ++++++++-----
 arch/x86/lib/rwsem.S            |  16 ++++-
 arch/xtensa/include/asm/Kbuild  |   1 +
 arch/xtensa/include/asm/rwsem.h | 131 ---------------------------------------
 include/asm-generic/rwsem.h     |  13 +++-
 include/linux/lockdep.h         |  15 +++++
 include/linux/rwsem-spinlock.h  |   2 +-
 include/linux/rwsem.h           |   3 +
 kernel/locking/rwsem-spinlock.c |  19 +++++-
 kernel/locking/rwsem-xadd.c     |  38 ++++++++++--
 kernel/locking/rwsem.c          |  19 ++++++
 18 files changed, 189 insertions(+), 426 deletions(-)
 delete mode 100644 arch/sh/include/asm/rwsem.h
 delete mode 100644 arch/sparc/include/asm/rwsem.h
 delete mode 100644 arch/xtensa/include/asm/rwsem.h

diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index a83bbea62c67..0131a7058778 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -63,7 +63,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
        return res >= 0 ? 1 : 0;
 }
 
-static inline void __down_write(struct rw_semaphore *sem)
+static inline long ___down_write(struct rw_semaphore *sem)
 {
        long oldcount;
 #ifndef        CONFIG_SMP
@@ -83,10 +83,24 @@ static inline void __down_write(struct rw_semaphore *sem)
        :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
        :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
 #endif
-       if (unlikely(oldcount))
+       return oldcount;
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+       if (unlikely(___down_write(sem)))
                rwsem_down_write_failed(sem);
 }
 
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+       if (unlikely(___down_write(sem)))
+               if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+                       return -EINTR;
+
+       return 0;
+}
+
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index ce112472bdd6..8b23e070b844 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -49,8 +49,8 @@ __down_read (struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline void
-__down_write (struct rw_semaphore *sem)
+static inline long
+___down_write (struct rw_semaphore *sem)
 {
        long old, new;
 
@@ -59,10 +59,26 @@ __down_write (struct rw_semaphore *sem)
                new = old + RWSEM_ACTIVE_WRITE_BIAS;
        } while (cmpxchg_acq(&sem->count, old, new) != old);
 
-       if (old != 0)
+       return old;
+}
+
+static inline void
+__down_write (struct rw_semaphore *sem)
+{
+       if (___down_write(sem))
                rwsem_down_write_failed(sem);
 }
 
+static inline int
+__down_write_killable (struct rw_semaphore *sem)
+{
+       if (___down_write(sem))
+               if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+                       return -EINTR;
+
+       return 0;
+}
+
 /*
  * unlock after reading
  */
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index fead491dfc28..c75e4471e618 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -90,7 +90,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline long ___down_write(struct rw_semaphore *sem)
 {
        signed long old, new, tmp;
 
@@ -104,13 +104,23 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "m" (tmp)
                : "cc", "memory");
-       if (old != 0)
-               rwsem_down_write_failed(sem);
+
+       return old;
 }
 
 static inline void __down_write(struct rw_semaphore *sem)
 {
-       __down_write_nested(sem, 0);
+       if (___down_write(sem))
+               rwsem_down_write_failed(sem);
+}
+
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+       if (___down_write(sem))
+               if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+                       return -EINTR;
+
+       return 0;
 }
 
 /*
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index a319745a7b63..751c3373a92c 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -26,6 +26,7 @@ generic-y += percpu.h
 generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += rwsem.h
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
diff --git a/arch/sh/include/asm/rwsem.h b/arch/sh/include/asm/rwsem.h
deleted file mode 100644
index edab57265293..000000000000
--- a/arch/sh/include/asm/rwsem.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * include/asm-sh/rwsem.h: R/W semaphores for SH using the stuff
- * in lib/rwsem.c.
- */
-
-#ifndef _ASM_SH_RWSEM_H
-#define _ASM_SH_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-
-#define RWSEM_UNLOCKED_VALUE           0x00000000
-#define RWSEM_ACTIVE_BIAS              0x00000001
-#define RWSEM_ACTIVE_MASK              0x0000ffff
-#define RWSEM_WAITING_BIAS             (-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-       if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
-               smp_wmb();
-       else
-               rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       while ((tmp = sem->count) >= 0) {
-               if (tmp == cmpxchg(&sem->count, tmp,
-                                  tmp + RWSEM_ACTIVE_READ_BIAS)) {
-                       smp_wmb();
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-                               (atomic_t *)(&sem->count));
-       if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
-               smp_wmb();
-       else
-               rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-                     RWSEM_ACTIVE_WRITE_BIAS);
-       smp_wmb();
-       return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       smp_wmb();
-       tmp = atomic_dec_return((atomic_t *)(&sem->count));
-       if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
-               rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-       smp_wmb();
-       if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-                             (atomic_t *)(&sem->count)) < 0)
-               rwsem_wake(sem);
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
-{
-       atomic_add(delta, (atomic_t *)(&sem->count));
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       smp_wmb();
-       tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
-       if (tmp < 0)
-               rwsem_downgrade_wake(sem);
-}
-
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
-{
-       __down_write(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-       smp_mb();
-       return atomic_add_return(delta, (atomic_t *)(&sem->count));
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_SH_RWSEM_H */
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index e928618838bc..6024c26c0585 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += mm-arch-hooks.h
 generic-y += module.h
 generic-y += mutex.h
 generic-y += preempt.h
+generic-y += rwsem.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += types.h
diff --git a/arch/sparc/include/asm/rwsem.h b/arch/sparc/include/asm/rwsem.h
deleted file mode 100644
index 069bf4d663a1..000000000000
--- a/arch/sparc/include/asm/rwsem.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * rwsem.h: R/W semaphores implemented using CAS
- *
- * Written by David S. Miller ([email protected]), 2001.
- * Derived from asm-i386/rwsem.h
- */
-#ifndef _SPARC64_RWSEM_H
-#define _SPARC64_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-
-#define RWSEM_UNLOCKED_VALUE           0x00000000L
-#define RWSEM_ACTIVE_BIAS              0x00000001L
-#define RWSEM_ACTIVE_MASK              0xffffffffL
-#define RWSEM_WAITING_BIAS             (-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-       if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
-               rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       while ((tmp = sem->count) >= 0L) {
-               if (tmp == cmpxchg(&sem->count, tmp,
-                                  tmp + RWSEM_ACTIVE_READ_BIAS)) {
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
-{
-       long tmp;
-
-       tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-                                 (atomic64_t *)(&sem->count));
-       if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-               rwsem_down_write_failed(sem);
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
-       __down_write_nested(sem, 0);
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-                     RWSEM_ACTIVE_WRITE_BIAS);
-       return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
-       if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
-               rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-       if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-                                        (atomic64_t *)(&sem->count)) < 0L))
-               rwsem_wake(sem);
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-       atomic64_add(delta, (atomic64_t *)(&sem->count));
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
-       if (tmp < 0L)
-               rwsem_downgrade_wake(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-       return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _SPARC64_RWSEM_H */
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index ceec86eb68e9..453744c1d347 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -99,26 +99,36 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+#define ____down_write(sem, slow_path)                 \
+({                                                     \
+       long tmp;                                       \
+       struct rw_semaphore* ret;                       \
+       asm volatile("# beginning down_write\n\t"       \
+                    LOCK_PREFIX "  xadd      %1,(%3)\n\t"      \
+                    /* adds 0xffff0001, returns the old value */ \
+                    "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" 
\
+                    /* was the active mask 0 before? */\
+                    "  jz        1f\n"                 \
+                    "  call " slow_path "\n"           \
+                    "1:\n"                             \
+                    "# ending down_write"              \
+                    : "+m" (sem->count), "=d" (tmp), "=a" (ret)        \
+                    : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
+                    : "memory", "cc");                 \
+       ret;                                            \
+})
+
+static inline void __down_write(struct rw_semaphore *sem)
 {
-       long tmp;
-       asm volatile("# beginning down_write\n\t"
-                    LOCK_PREFIX "  xadd      %1,(%2)\n\t"
-                    /* adds 0xffff0001, returns the old value */
-                    "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
-                    /* was the active mask 0 before? */
-                    "  jz        1f\n"
-                    "  call call_rwsem_down_write_failed\n"
-                    "1:\n"
-                    "# ending down_write"
-                    : "+m" (sem->count), "=d" (tmp)
-                    : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS)
-                    : "memory", "cc");
+       ____down_write(sem, "call_rwsem_down_write_failed");
 }
 
-static inline void __down_write(struct rw_semaphore *sem)
+static inline int __down_write_killable(struct rw_semaphore *sem)
 {
-       __down_write_nested(sem, 0);
+       if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable")))
+               return -EINTR;
+
+       return 0;
 }
 
 /*
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
index be110efa0096..bf2c6074efd2 100644
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
@@ -29,8 +29,10 @@
  * there is contention on the semaphore.
  *
  * %eax contains the semaphore pointer on entry. Save the C-clobbered
- * registers (%eax, %edx and %ecx) except %eax whish is either a return
- * value or just clobbered..
+ * registers (%eax, %edx and %ecx) except %eax which is either a return
+ * value or just gets clobbered. Same is true for %edx so make sure GCC
+ * reloads it after the slow path, by making it hold a temporary, for
+ * example see ____down_write().
  */
 
 #define save_common_regs \
@@ -106,6 +108,16 @@ ENTRY(call_rwsem_down_write_failed)
        ret
 ENDPROC(call_rwsem_down_write_failed)
 
+ENTRY(call_rwsem_down_write_failed_killable)
+       FRAME_BEGIN
+       save_common_regs
+       movq %rax,%rdi
+       call rwsem_down_write_failed_killable
+       restore_common_regs
+       FRAME_END
+       ret
+ENDPROC(call_rwsem_down_write_failed_killable)
+
 ENTRY(call_rwsem_wake)
        FRAME_BEGIN
        /* do nothing if still outstanding active readers */
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index b56855a1382a..28cf4c5d65ef 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -22,6 +22,7 @@ generic-y += mm-arch-hooks.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += rwsem.h
 generic-y += sections.h
 generic-y += siginfo.h
 generic-y += statfs.h
diff --git a/arch/xtensa/include/asm/rwsem.h b/arch/xtensa/include/asm/rwsem.h
deleted file mode 100644
index 249619e7e7f2..000000000000
--- a/arch/xtensa/include/asm/rwsem.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * include/asm-xtensa/rwsem.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Largely copied from include/asm-ppc/rwsem.h
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_RWSEM_H
-#define _XTENSA_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> 
instead."
-#endif
-
-#define RWSEM_UNLOCKED_VALUE           0x00000000
-#define RWSEM_ACTIVE_BIAS              0x00000001
-#define RWSEM_ACTIVE_MASK              0x0000ffff
-#define RWSEM_WAITING_BIAS             (-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-       if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0)
-               smp_wmb();
-       else
-               rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       while ((tmp = sem->count) >= 0) {
-               if (tmp == cmpxchg(&sem->count, tmp,
-                                  tmp + RWSEM_ACTIVE_READ_BIAS)) {
-                       smp_wmb();
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-                               (atomic_t *)(&sem->count));
-       if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
-               smp_wmb();
-       else
-               rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-                     RWSEM_ACTIVE_WRITE_BIAS);
-       smp_wmb();
-       return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       smp_wmb();
-       tmp = atomic_sub_return(1,(atomic_t *)(&sem->count));
-       if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
-               rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-       smp_wmb();
-       if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-                             (atomic_t *)(&sem->count)) < 0)
-               rwsem_wake(sem);
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
-{
-       atomic_add(delta, (atomic_t *)(&sem->count));
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-       int tmp;
-
-       smp_wmb();
-       tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
-       if (tmp < 0)
-               rwsem_downgrade_wake(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-       smp_mb();
-       return atomic_add_return(delta, (atomic_t *)(&sem->count));
-}
-
-#endif /* _XTENSA_RWSEM_H */
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index d6d5dc98d7da..3fc94a046bf5 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -53,7 +53,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline void __down_write(struct rw_semaphore *sem)
 {
        long tmp;
 
@@ -63,9 +63,16 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
                rwsem_down_write_failed(sem);
 }
 
-static inline void __down_write(struct rw_semaphore *sem)
+static inline int __down_write_killable(struct rw_semaphore *sem)
 {
-       __down_write_nested(sem, 0);
+       long tmp;
+
+       tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+                                    (atomic_long_t *)&sem->count);
+       if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+               if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+                       return -EINTR;
+       return 0;
 }
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d026b190c530..accfe56d8c51 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -444,6 +444,18 @@ do {                                                            \
        lock_acquired(&(_lock)->dep_map, _RET_IP_);                     \
 } while (0)
 
+#define LOCK_CONTENDED_RETURN(_lock, try, lock)                        \
+({                                                             \
+       int ____err = 0;                                        \
+       if (!try(_lock)) {                                      \
+               lock_contended(&(_lock)->dep_map, _RET_IP_);    \
+               ____err = lock(_lock);                          \
+       }                                                       \
+       if (!____err)                                           \
+               lock_acquired(&(_lock)->dep_map, _RET_IP_);     \
+       ____err;                                                \
+})
+
 #else /* CONFIG_LOCK_STAT */
 
 #define lock_contended(lockdep_map, ip) do {} while (0)
@@ -452,6 +464,9 @@ do {                                                            \
 #define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)
 
+#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
+       lock(_lock)
+
 #endif /* CONFIG_LOCK_STAT */
 
 #ifdef CONFIG_LOCKDEP
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 561e8615528d..ae0528b834cd 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -34,7 +34,7 @@ struct rw_semaphore {
 extern void __down_read(struct rw_semaphore *sem);
 extern int __down_read_trylock(struct rw_semaphore *sem);
 extern void __down_write(struct rw_semaphore *sem);
-extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
+extern int __must_check __down_write_killable(struct rw_semaphore *sem);
 extern int __down_write_trylock(struct rw_semaphore *sem);
 extern void __up_read(struct rw_semaphore *sem);
 extern void __up_write(struct rw_semaphore *sem);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8f498cdde280..d1c12d160ace 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -14,6 +14,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
+#include <linux/err.h>
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #include <linux/osq_lock.h>
 #endif
@@ -43,6 +44,7 @@ struct rw_semaphore {
 
 extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 
@@ -116,6 +118,7 @@ extern int down_read_trylock(struct rw_semaphore *sem);
  * lock for writing
  */
 extern void down_write(struct rw_semaphore *sem);
+extern int __must_check down_write_killable(struct rw_semaphore *sem);
 
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 3a5048572065..1591f6b3539f 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -191,11 +191,12 @@ int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * get a write lock on the semaphore
  */
-void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
+int __sched __down_write_common(struct rw_semaphore *sem, int state)
 {
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;
+       int ret = 0;
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
@@ -215,21 +216,33 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
                 */
                if (sem->count == 0)
                        break;
-               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+               if (signal_pending_state(state, current)) {
+                       ret = -EINTR;
+                       goto out;
+               }
+               set_task_state(tsk, state);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
        /* got the lock */
        sem->count = -1;
+out:
        list_del(&waiter.list);
 
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+       return ret;
 }
 
 void __sched __down_write(struct rw_semaphore *sem)
 {
-       __down_write_nested(sem, 0);
+       __down_write_common(sem, TASK_UNINTERRUPTIBLE);
+}
+
+int __sched __down_write_killable(struct rw_semaphore *sem)
+{
+       return __down_write_common(sem, TASK_KILLABLE);
 }
 
 /*
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index a4d4de05b2d1..09e30c6225e5 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -433,12 +433,13 @@ static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
 /*
  * Wait until we successfully acquire the write lock
  */
-__visible
-struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
+static inline struct rw_semaphore *
+__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 {
        long count;
        bool waiting = true; /* any queued threads before us */
        struct rwsem_waiter waiter;
+       struct rw_semaphore *ret = sem;
 
        /* undo write bias from down_write operation, stop active locking */
        count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);
@@ -478,7 +479,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
                count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
 
        /* wait until we successfully acquire the lock */
-       set_current_state(TASK_UNINTERRUPTIBLE);
+       set_current_state(state);
        while (true) {
                if (rwsem_try_write_lock(count, sem))
                        break;
@@ -486,21 +487,48 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 
                /* Block until there are no active lockers. */
                do {
+                       if (signal_pending_state(state, current))
+                               goto out_nolock;
+
                        schedule();
-                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       set_current_state(state);
                } while ((count = sem->count) & RWSEM_ACTIVE_MASK);
 
                raw_spin_lock_irq(&sem->wait_lock);
        }
        __set_current_state(TASK_RUNNING);
+       list_del(&waiter.list);
+       raw_spin_unlock_irq(&sem->wait_lock);
 
+       return ret;
+
+out_nolock:
+       __set_current_state(TASK_RUNNING);
+       raw_spin_lock_irq(&sem->wait_lock);
        list_del(&waiter.list);
+       if (list_empty(&sem->wait_list))
+               rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem);
+       else
+               __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
        raw_spin_unlock_irq(&sem->wait_lock);
 
-       return sem;
+       return ERR_PTR(-EINTR);
+}
+
+__visible struct rw_semaphore * __sched
+rwsem_down_write_failed(struct rw_semaphore *sem)
+{
+       return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(rwsem_down_write_failed);
 
+__visible struct rw_semaphore * __sched
+rwsem_down_write_failed_killable(struct rw_semaphore *sem)
+{
+       return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(rwsem_down_write_failed_killable);
+
 /*
  * handle waking up a waiter on the semaphore
  * - up_read/up_write has decremented the active part of count if we come here
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 205be0ce34de..c817216c1615 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -55,6 +55,25 @@ void __sched down_write(struct rw_semaphore *sem)
 EXPORT_SYMBOL(down_write);
 
 /*
+ * lock for writing
+ */
+int __sched down_write_killable(struct rw_semaphore *sem)
+{
+       might_sleep();
+       rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
+
+       if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
+               rwsem_release(&sem->dep_map, 1, _RET_IP_);
+               return -EINTR;
+       }
+
+       rwsem_set_owner(sem);
+       return 0;
+}
+
+EXPORT_SYMBOL(down_write_killable);
+
+/*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
 int down_write_trylock(struct rw_semaphore *sem)
