Author: mjg
Date: Wed Jan 18 17:55:08 2017
New Revision: 312390
URL: https://svnweb.freebsd.org/changeset/base/312390

Log:
  sx: reduce lock accesses similarly to r311172
  
  Cache the observed lock word in a local variable and refresh it with
  SX_READ_VALUE() only after a failed cmpset or a state change, rather
  than re-reading sx->sx_lock on every iteration of the hard-path loops.
  
  Discussed with:       jhb
  Tested by:    pho (previous version)

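For context: the change caches the last observed lock word in a local
variable and refreshes it with SX_READ_VALUE() only after a failed
cmpset or an observed state change, instead of dereferencing
sx->sx_lock on every loop iteration.  A minimal userland sketch of the
acquisition side of that pattern follows; C11 atomics stand in for the
kernel's atomic_cmpset_acq_ptr(), and the names (lock_word,
xlock_sketch, LK_UNLOCKED) are illustrative only, not part of sx(9).

#include <stdatomic.h>
#include <stdint.h>

#define	LK_UNLOCKED	((uintptr_t)0)

static _Atomic uintptr_t lock_word = LK_UNLOCKED;

static void
xlock_sketch(uintptr_t tid)
{
	/* Load the lock word once up front. */
	uintptr_t v = atomic_load_explicit(&lock_word, memory_order_relaxed);

	for (;;) {
		if (v == LK_UNLOCKED) {
			/*
			 * On failure the current value is written back
			 * into 'v', so each iteration reads the lock word
			 * at most once.
			 */
			if (atomic_compare_exchange_weak_explicit(&lock_word,
			    &v, tid, memory_order_acquire,
			    memory_order_relaxed))
				return;
			continue;
		}
		/* Spinning/blocking would go here; re-read before retrying. */
		v = atomic_load_explicit(&lock_word, memory_order_relaxed);
	}
}

The write-back-on-failure behaviour of compare_exchange mirrors the
added "x = SX_READ_VALUE(sx)" lines in the diff: every retry path
restarts from a value that was just read.
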
Modified:
  head/sys/kern/kern_sx.c
  head/sys/sys/sx.h

Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c     Wed Jan 18 17:53:57 2017        (r312389)
+++ head/sys/kern/kern_sx.c     Wed Jan 18 17:55:08 2017        (r312390)
@@ -563,8 +563,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
        lock_delay_arg_init(&lda, NULL);
 #endif
 
+       x = SX_READ_VALUE(sx);
+
        /* If we already hold an exclusive lock, then recurse. */
-       if (sx_xlocked(sx)) {
+       if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
                KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
            ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
                    sx->lock_object.lo_name, file, line));
@@ -581,12 +583,15 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
 
 #ifdef KDTRACE_HOOKS
        all_time -= lockstat_nsecs(&sx->lock_object);
-       state = sx->sx_lock;
+       state = x;
 #endif
        for (;;) {
-               if (sx->sx_lock == SX_LOCK_UNLOCKED &&
-                   atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
-                       break;
+               if (x == SX_LOCK_UNLOCKED) {
+                       if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, tid))
+                               break;
+                       x = SX_READ_VALUE(sx);
+                       continue;
+               }
 #ifdef KDTRACE_HOOKS
                lda.spin_cnt++;
 #endif
@@ -601,11 +606,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
-               x = sx->sx_lock;
                if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        if ((x & SX_LOCK_SHARED) == 0) {
-                               x = SX_OWNER(x);
-                               owner = (struct thread *)x;
+                               owner = lv_sx_owner(x);
                                if (TD_IS_RUNNING(owner)) {
                                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                                CTR3(KTR_LOCK,
@@ -616,9 +619,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                                            "lockname:\"%s\"",
                                            sx->lock_object.lo_name);
                                        GIANT_SAVE();
-                                       while (SX_OWNER(sx->sx_lock) == x &&
-                                           TD_IS_RUNNING(owner))
+                                       do {
                                                lock_delay(&lda);
+                                               x = SX_READ_VALUE(sx);
+                                               owner = lv_sx_owner(x);
+                                       } while (owner != NULL &&
+                                                   TD_IS_RUNNING(owner));
                                        KTR_STATE0(KTR_SCHED, "thread",
                                            sched_tdname(curthread), "running");
                                        continue;
@@ -645,6 +651,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                                }
                                KTR_STATE0(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "running");
+                               x = SX_READ_VALUE(sx);
                                if (i != asx_loops)
                                        continue;
                        }
@@ -652,7 +659,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
 #endif
 
                sleepq_lock(&sx->lock_object);
-               x = sx->sx_lock;
+               x = SX_READ_VALUE(sx);
 
                /*
                 * If the lock was released while spinning on the
@@ -701,6 +708,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                                break;
                        }
                        sleepq_release(&sx->lock_object);
+                       x = SX_READ_VALUE(sx);
                        continue;
                }
 
@@ -712,6 +720,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                        if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                            x | SX_LOCK_EXCLUSIVE_WAITERS)) {
                                sleepq_release(&sx->lock_object);
+                               x = SX_READ_VALUE(sx);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
@@ -753,6 +762,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
+               x = SX_READ_VALUE(sx);
        }
 #ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&sx->lock_object);
@@ -872,20 +882,18 @@ _sx_slock_hard(struct sx *sx, int opts, 
        lock_delay_arg_init(&lda, NULL);
 #endif
 #ifdef KDTRACE_HOOKS
-       state = sx->sx_lock;
        all_time -= lockstat_nsecs(&sx->lock_object);
 #endif
+       x = SX_READ_VALUE(sx);
+#ifdef KDTRACE_HOOKS
+       state = x;
+#endif
 
        /*
         * As with rwlocks, we don't make any attempt to try to block
         * shared locks once there is an exclusive waiter.
         */
        for (;;) {
-#ifdef KDTRACE_HOOKS
-               lda.spin_cnt++;
-#endif
-               x = sx->sx_lock;
-
                /*
                 * If no other thread has an exclusive lock then try to bump up
                 * the count of sharers.  Since we have to preserve the state
@@ -903,8 +911,13 @@ _sx_slock_hard(struct sx *sx, int opts, 
                                            (void *)(x + SX_ONE_SHARER));
                                break;
                        }
+                       x = SX_READ_VALUE(sx);
                        continue;
                }
+#ifdef KDTRACE_HOOKS
+               lda.spin_cnt++;
+#endif
+
 #ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -918,8 +931,7 @@ _sx_slock_hard(struct sx *sx, int opts, 
                 * changes.
                 */
                if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
-                       x = SX_OWNER(x);
-                       owner = (struct thread *)x;
+                       owner = lv_sx_owner(x);
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR3(KTR_LOCK,
@@ -929,9 +941,11 @@ _sx_slock_hard(struct sx *sx, int opts, 
                                    sched_tdname(curthread), "spinning",
                                    "lockname:\"%s\"", sx->lock_object.lo_name);
                                GIANT_SAVE();
-                               while (SX_OWNER(sx->sx_lock) == x &&
-                                   TD_IS_RUNNING(owner))
+                               do {
                                        lock_delay(&lda);
+                                       x = SX_READ_VALUE(sx);
+                                       owner = lv_sx_owner(x);
+                               } while (owner != NULL && TD_IS_RUNNING(owner));
                                KTR_STATE0(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "running");
                                continue;
@@ -944,7 +958,7 @@ _sx_slock_hard(struct sx *sx, int opts, 
                 * start the process of blocking.
                 */
                sleepq_lock(&sx->lock_object);
-               x = sx->sx_lock;
+               x = SX_READ_VALUE(sx);
 
                /*
                 * The lock could have been released while we spun.
@@ -966,6 +980,7 @@ _sx_slock_hard(struct sx *sx, int opts, 
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
+                               x = SX_READ_VALUE(sx);
                                continue;
                        }
                }
@@ -980,6 +995,7 @@ _sx_slock_hard(struct sx *sx, int opts, 
                        if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                            x | SX_LOCK_SHARED_WAITERS)) {
                                sleepq_release(&sx->lock_object);
+                               x = SX_READ_VALUE(sx);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
@@ -1020,6 +1036,7 @@ _sx_slock_hard(struct sx *sx, int opts, 
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
+               x = SX_READ_VALUE(sx);
        }
 #ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&sx->lock_object);
@@ -1054,9 +1071,8 @@ _sx_sunlock_hard(struct sx *sx, const ch
        if (SCHEDULER_STOPPED())
                return;
 
+       x = SX_READ_VALUE(sx);
        for (;;) {
-               x = sx->sx_lock;
-
                /*
                 * We should never have sharers while at least one thread
                 * holds a shared lock.
@@ -1078,6 +1094,8 @@ _sx_sunlock_hard(struct sx *sx, const ch
                                            (void *)(x - SX_ONE_SHARER));
                                break;
                        }
+
+                       x = SX_READ_VALUE(sx);
                        continue;
                }
 
@@ -1094,6 +1112,7 @@ _sx_sunlock_hard(struct sx *sx, const ch
                                            __func__, sx);
                                break;
                        }
+                       x = SX_READ_VALUE(sx);
                        continue;
                }
 
@@ -1115,6 +1134,7 @@ _sx_sunlock_hard(struct sx *sx, const ch
                    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
                    SX_LOCK_UNLOCKED)) {
                        sleepq_release(&sx->lock_object);
+                       x = SX_READ_VALUE(sx);
                        continue;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))

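Both hard paths now spin with the same shape: one load of the lock word
per iteration, with the owner derived from that cached value via
lv_sx_owner() instead of a second read of sx->sx_lock.  A hedged sketch
of that loop, with stub helpers (spin_pause, owner_of, thread_is_running)
standing in for lock_delay(), lv_sx_owner() and TD_IS_RUNNING():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct thread;

/*
 * Stubs for the sketch only; the kernel code uses lock_delay(),
 * lv_sx_owner() and TD_IS_RUNNING() at these points.
 */
static void spin_pause(void) { }
static bool thread_is_running(struct thread *td) { (void)td; return (false); }
static struct thread *
owner_of(uintptr_t v)
{
	/* Low bit as a stand-in "shared" flag; not the real encoding. */
	return ((v & 1) ? NULL : (struct thread *)(v & ~(uintptr_t)0xf));
}

/*
 * Spin while the exclusive owner stays on CPU, reloading the lock word
 * exactly once per iteration.
 */
static uintptr_t
spin_while_owner_runs(_Atomic uintptr_t *lockp)
{
	struct thread *owner;
	uintptr_t v;

	do {
		spin_pause();
		v = atomic_load_explicit(lockp, memory_order_relaxed);
		owner = owner_of(v);
	} while (owner != NULL && thread_is_running(owner));

	return (v);	/* caller retries with the last observed value */
}
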
Modified: head/sys/sys/sx.h
==============================================================================
--- head/sys/sys/sx.h   Wed Jan 18 17:53:57 2017        (r312389)
+++ head/sys/sys/sx.h   Wed Jan 18 17:55:08 2017        (r312390)
@@ -88,6 +88,11 @@
 
 #define        sx_recurse      lock_object.lo_data
 
+#define        SX_READ_VALUE(sx)       ((sx)->sx_lock)
+
+#define        lv_sx_owner(v) \
+       ((v & SX_LOCK_SHARED) ? NULL : (struct thread *)SX_OWNER(v))
+
 /*
 * Function prototypes.  Routines that start with an underscore are not part
 * of the public interface and are wrapped with a macro.
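
The two new sx.h helpers are deliberately thin: SX_READ_VALUE() is a
named load of the lock word, and lv_sx_owner() decodes an owner pointer
from an already-loaded value, yielding NULL when the lock is held
shared.  A rough userland illustration of that decoding, with made-up
flag bits that only mimic the real SX_LOCK_SHARED/SX_OWNER() encoding:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bits; the real definitions live in sys/sx.h. */
#define	LK_SHARED	((uintptr_t)0x01)
#define	LK_FLAGMASK	((uintptr_t)0x0f)

struct thread;

static struct thread *
owner_from_value(uintptr_t v)
{
	/* Shared state encodes a reader count, not an owner pointer. */
	if (v & LK_SHARED)
		return (NULL);
	/* Exclusive state: mask off the flag bits to recover the owner. */
	return ((struct thread *)(v & ~LK_FLAGMASK));
}

int
main(void)
{
	uintptr_t exclusive = (uintptr_t)0xdeadbe00;	/* fake thread pointer */
	uintptr_t shared = LK_SHARED | ((uintptr_t)4 << 4); /* fake reader count */

	printf("exclusive owner: %p\n", (void *)owner_from_value(exclusive));
	printf("shared owner:    %p\n", (void *)owner_from_value(shared));
	return (0);
}

Because lv_sx_owner() takes a value rather than the lock, the hard paths
can decode the word they already hold in 'x', which is what eliminates
the extra sx->sx_lock reads throughout the diff.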