Author: mjg
Date: Sat Nov 25 20:10:33 2017
New Revision: 326195
URL: https://svnweb.freebsd.org/changeset/base/326195

Log:
  locks: retry turnstile/sleepq loops on failed cmpset
  
  In order to go to sleep, threads set waiter flags with cmpset, but that
  can fail transiently, e.g. when a new reader arrives and changes the lock
  word. Instead of unlocking everything and looping back, re-evaluate the
  new state while still holding the lock necessary to go to sleep.
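
  For context, atomic_fcmpset_ptr(9) differs from atomic_cmpset_ptr(9) in
  that on failure it writes the lock value it actually observed back through
  its second argument, so the caller already holds a fresh snapshot and can
  branch straight back to the flag checks instead of dropping the
  turnstile/sleepq lock and re-reading the lock word. A minimal userland
  sketch of the same retry shape using C11 atomics (an analogy to, not the
  kernel primitives themselves; lock_word, LOCK_WAITERS and
  set_waiters_flag() are invented for illustration):

  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>

  #define LOCK_WAITERS	((uintptr_t)1)

  static _Atomic uintptr_t lock_word;

  static void
  set_waiters_flag(void)
  {
  	uintptr_t v;

  	v = atomic_load(&lock_word);
  retry:
  	if (v & LOCK_WAITERS)
  		return;		/* Someone else set the flag already. */
  	/*
  	 * Like fcmpset, compare_exchange_weak refreshes v with the
  	 * observed value on failure (and may fail spuriously on ll/sc
  	 * architectures), so the retry goes straight back to the flag
  	 * check with no separate re-read.
  	 */
  	if (!atomic_compare_exchange_weak(&lock_word, &v, v | LOCK_WAITERS))
  		goto retry;
  }

  int
  main(void)
  {
  	set_waiters_flag();
  	printf("lock word: %#jx\n", (uintmax_t)atomic_load(&lock_word));
  	return (0);
  }

  In the kernel paths below, the same shape is what lets the waiter-flag
  retries keep the turnstile or sleep queue lock held across a failed
  update.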

Modified:
  head/sys/kern/kern_rwlock.c
  head/sys/kern/kern_sx.c

Modified: head/sys/kern/kern_rwlock.c
==============================================================================
--- head/sys/kern/kern_rwlock.c Sat Nov 25 20:08:11 2017        (r326194)
+++ head/sys/kern/kern_rwlock.c Sat Nov 25 20:10:33 2017        (r326195)
@@ -526,6 +526,7 @@ __rw_rlock_hard(struct rwlock *rw, struct thread *td, 
                 * recheck its state and restart the loop if needed.
                 */
                v = RW_READ_VALUE(rw);
+retry_ts:
                if (__rw_can_read(td, v, false)) {
                        turnstile_cancel(ts);
                        continue;
@@ -561,12 +562,9 @@ __rw_rlock_hard(struct rwlock *rw, struct thread *td, 
                 * lock and restart the loop.
                 */
                if (!(v & RW_LOCK_READ_WAITERS)) {
-                       if (!atomic_cmpset_ptr(&rw->rw_lock, v,
-                           v | RW_LOCK_READ_WAITERS)) {
-                               turnstile_cancel(ts);
-                               v = RW_READ_VALUE(rw);
-                               continue;
-                       }
+                       if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
+                           v | RW_LOCK_READ_WAITERS))
+                               goto retry_ts;
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set read waiters flag",
                                    __func__, rw);
@@ -757,7 +755,9 @@ __rw_runlock_hard(struct rwlock *rw, struct thread *td
                 * last reader, so grab the turnstile lock.
                 */
                turnstile_chain_lock(&rw->lock_object);
-               v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
+               v = RW_READ_VALUE(rw);
+retry_ts:
+               v &= (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
                MPASS(v & RW_LOCK_WAITERS);
 
                /*
@@ -782,12 +782,9 @@ __rw_runlock_hard(struct rwlock *rw, struct thread *td
                        x |= (v & RW_LOCK_READ_WAITERS);
                } else
                        queue = TS_SHARED_QUEUE;
-               if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
-                   x)) {
-                       turnstile_chain_unlock(&rw->lock_object);
-                       v = RW_READ_VALUE(rw);
-                       continue;
-               }
+               v |= RW_READERS_LOCK(1);
+               if (!atomic_fcmpset_rel_ptr(&rw->rw_lock, &v, x))
+                       goto retry_ts;
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
                            __func__, rw);
@@ -983,6 +980,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOC
 #endif
                ts = turnstile_trywait(&rw->lock_object);
                v = RW_READ_VALUE(rw);
+retry_ts:
                owner = lv_rw_wowner(v);
 
 #ifdef ADAPTIVE_RWLOCKS
@@ -1010,16 +1008,14 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOC
                x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
                if ((v & ~x) == RW_UNLOCKED) {
                        x &= ~RW_LOCK_WRITE_SPINNER;
-                       if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
+                       if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid | x)) {
                                if (x)
                                        turnstile_claim(ts);
                                else
                                        turnstile_cancel(ts);
                                break;
                        }
-                       turnstile_cancel(ts);
-                       v = RW_READ_VALUE(rw);
-                       continue;
+                       goto retry_ts;
                }
                /*
                 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
@@ -1027,12 +1023,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOC
                 * again.
                 */
                if (!(v & RW_LOCK_WRITE_WAITERS)) {
-                       if (!atomic_cmpset_ptr(&rw->rw_lock, v,
-                           v | RW_LOCK_WRITE_WAITERS)) {
-                               turnstile_cancel(ts);
-                               v = RW_READ_VALUE(rw);
-                               continue;
-                       }
+                       if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
+                           v | RW_LOCK_WRITE_WAITERS))
+                               goto retry_ts;
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set write waiters flag",
                                    __func__, rw);

Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c     Sat Nov 25 20:08:11 2017        (r326194)
+++ head/sys/kern/kern_sx.c     Sat Nov 25 20:10:33 2017        (r326195)
@@ -665,6 +665,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
 
                sleepq_lock(&sx->lock_object);
                x = SX_READ_VALUE(sx);
+retry_sleepq:
 
                /*
                 * If the lock was released while spinning on the
@@ -704,17 +705,13 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
                 * fail, restart the loop.
                 */
                if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
-                       if (atomic_cmpset_acq_ptr(&sx->sx_lock,
-                           SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
-                           tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
-                               sleepq_release(&sx->lock_object);
-                               CTR2(KTR_LOCK, "%s: %p claimed by new writer",
-                                   __func__, sx);
-                               break;
-                       }
+                       if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
+                           tid | SX_LOCK_EXCLUSIVE_WAITERS))
+                               goto retry_sleepq;
                        sleepq_release(&sx->lock_object);
-                       x = SX_READ_VALUE(sx);
-                       continue;
+                       CTR2(KTR_LOCK, "%s: %p claimed by new writer",
+                           __func__, sx);
+                       break;
                }
 
                /*
@@ -722,11 +719,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
                 * than loop back and retry.
                 */
                if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
-                       if (!atomic_cmpset_ptr(&sx->sx_lock, x,
+                       if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
                            x | SX_LOCK_EXCLUSIVE_WAITERS)) {
-                               sleepq_release(&sx->lock_object);
-                               x = SX_READ_VALUE(sx);
-                               continue;
+                               goto retry_sleepq;
                        }
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
@@ -986,7 +981,7 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
                 */
                sleepq_lock(&sx->lock_object);
                x = SX_READ_VALUE(sx);
-
+retry_sleepq:
                /*
                 * The lock could have been released while we spun.
                 * In this case loop back and retry.
@@ -1019,12 +1014,9 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LO
                 * back.
                 */
                if (!(x & SX_LOCK_SHARED_WAITERS)) {
-                       if (!atomic_cmpset_ptr(&sx->sx_lock, x,
-                           x | SX_LOCK_SHARED_WAITERS)) {
-                               sleepq_release(&sx->lock_object);
-                               x = SX_READ_VALUE(sx);
-                               continue;
-                       }
+                       if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
+                           x | SX_LOCK_SHARED_WAITERS))
+                               goto retry_sleepq;
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
                                    __func__, sx);