Commit-ID:  0aa1125fa8bc5e5f98317156728fa4d0293561a5
Gitweb:     http://git.kernel.org/tip/0aa1125fa8bc5e5f98317156728fa4d0293561a5
Author:     Kirill Tkhai <ktk...@virtuozzo.com>
AuthorDate: Mon, 19 Jun 2017 21:02:12 +0300
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Thu, 10 Aug 2017 12:28:55 +0200

locking/rwsem-spinlock: Add killable versions of __down_read()

Rename __down_read() to __down_read_common() and teach it
to abort waiting when a signal is pending and a killable
state argument was passed.

Note, that we shouldn't wake anybody up in EINTR path, as:

We check for signal_pending_state() after the (!waiter.task)
test and under the spinlock, so the current task cannot have
been woken up yet. There are two possible cases: a writer
owns the sem, or a writer is the first waiter on the sem.

If a writer owns the sem, no one else may work
with it in parallel. It will wake somebody when it
calls up_write() or downgrade_write().

If a writer is the first waiter, it will be woken up
when the last active reader releases the sem and
sem->count becomes 0.

Also note, that set_current_state() may be moved down
to schedule() (after !waiter.task check), as all
assignments in this type of semaphore (including wake_up),
occur under spinlock, so we can't miss anything.

Signed-off-by: Kirill Tkhai <ktk...@virtuozzo.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: a...@arndb.de
Cc: ava...@virtuozzo.com
Cc: da...@davemloft.net
Cc: fenghua...@intel.com
Cc: gorcu...@virtuozzo.com
Cc: heiko.carst...@de.ibm.com
Cc: h...@zytor.com
Cc: i...@jurassic.park.msu.ru
Cc: matts...@gmail.com
Cc: r...@twiddle.net
Cc: schwidef...@de.ibm.com
Cc: tony.l...@intel.com
Link: http://lkml.kernel.org/r/149789533283.9059.9829416940494747182.stgit@localhost.localdomain
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 include/linux/rwsem-spinlock.h  |  1 +
 kernel/locking/rwsem-spinlock.c | 37 ++++++++++++++++++++++++++++---------
 2 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index ae0528b..e784761 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -32,6 +32,7 @@ struct rw_semaphore {
 #define RWSEM_UNLOCKED_VALUE           0x00000000
 
 extern void __down_read(struct rw_semaphore *sem);
+extern int __must_check __down_read_killable(struct rw_semaphore *sem);
 extern int __down_read_trylock(struct rw_semaphore *sem);
 extern void __down_write(struct rw_semaphore *sem);
 extern int __must_check __down_write_killable(struct rw_semaphore *sem);
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 20819df..0848634 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -126,7 +126,7 @@ __rwsem_wake_one_writer(struct rw_semaphore *sem)
 /*
  * get a read lock on the semaphore
  */
-void __sched __down_read(struct rw_semaphore *sem)
+int __sched __down_read_common(struct rw_semaphore *sem, int state)
 {
        struct rwsem_waiter waiter;
        unsigned long flags;
@@ -140,8 +140,6 @@ void __sched __down_read(struct rw_semaphore *sem)
                goto out;
        }
 
-       set_current_state(TASK_UNINTERRUPTIBLE);
-
        /* set up my own style of waitqueue */
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_READ;
@@ -149,20 +147,41 @@ void __sched __down_read(struct rw_semaphore *sem)
 
        list_add_tail(&waiter.list, &sem->wait_list);
 
-       /* we don't need to touch the semaphore struct anymore */
-       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
        /* wait to be given the lock */
        for (;;) {
                if (!waiter.task)
                        break;
+               if (signal_pending_state(state, current))
+                       goto out_nolock;
+               set_current_state(state);
+               raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
-               set_current_state(TASK_UNINTERRUPTIBLE);
+               raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
 
-       __set_current_state(TASK_RUNNING);
+       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  out:
-       ;
+       return 0;
+
+out_nolock:
+       /*
+        * We didn't take the lock, so that there is a writer, which
+        * is owner or the first waiter of the sem. If it's a waiter,
+        * it will be woken by current owner. Not need to wake anybody.
+        */
+       list_del(&waiter.list);
+       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+       return -EINTR;
+}
+
+void __sched __down_read(struct rw_semaphore *sem)
+{
+       __down_read_common(sem, TASK_UNINTERRUPTIBLE);
+}
+
+int __sched __down_read_killable(struct rw_semaphore *sem)
+{
+       return __down_read_common(sem, TASK_KILLABLE);
 }
 
 /*

Reply via email to