Author: mjg
Date: Tue Nov 24 03:49:37 2020
New Revision: 367978
URL: https://svnweb.freebsd.org/changeset/base/367978

Log:
  locks: push lock_delay_arg_init calls down
  
  Minor cleanup so that the lock_delay_arg_init calls are skipped when
  recursing on locks, and so that they can act on the observed lock value
  if need be.

Modified:
  head/sys/kern/kern_lock.c
  head/sys/kern/kern_mutex.c
  head/sys/kern/kern_rwlock.c
  head/sys/kern/kern_sx.c

Modified: head/sys/kern/kern_lock.c
==============================================================================
--- head/sys/kern/kern_lock.c   Tue Nov 24 03:48:44 2020        (r367977)
+++ head/sys/kern/kern_lock.c   Tue Nov 24 03:49:37 2020        (r367978)
@@ -603,10 +603,10 @@ lockmgr_slock_hard(struct lock *lk, u_int flags, struc
        if (LK_CAN_WITNESS(flags))
                WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
                    file, line, flags & LK_INTERLOCK ? ilk : NULL);
+       x = lockmgr_read_value(lk);
        lock_delay_arg_init(&lda, &lockmgr_delay);
        if (!lk_adaptive)
                flags &= ~LK_ADAPTIVE;
-       x = lockmgr_read_value(lk);
        /*
         * The lock may already be locked exclusive by curthread,
         * avoid deadlock.

Modified: head/sys/kern/kern_mutex.c
==============================================================================
--- head/sys/kern/kern_mutex.c  Tue Nov 24 03:48:44 2020        (r367977)
+++ head/sys/kern/kern_mutex.c  Tue Nov 24 03:49:37 2020        (r367978)
@@ -535,12 +535,6 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
        if (SCHEDULER_STOPPED_TD(td))
                return;
 
-#if defined(ADAPTIVE_MUTEXES)
-       lock_delay_arg_init(&lda, &mtx_delay);
-#elif defined(KDTRACE_HOOKS)
-       lock_delay_arg_init_noadapt(&lda);
-#endif
-
        if (__predict_false(v == MTX_UNOWNED))
                v = MTX_READ_VALUE(m);
 
@@ -562,6 +556,12 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
        opts &= ~MTX_RECURSE;
 #endif
 
+#if defined(ADAPTIVE_MUTEXES)
+       lock_delay_arg_init(&lda, &mtx_delay);
+#elif defined(KDTRACE_HOOKS)
+       lock_delay_arg_init_noadapt(&lda);
+#endif
+
 #ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -746,12 +746,12 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t
        if (SCHEDULER_STOPPED())
                return;
 
-       lock_delay_arg_init(&lda, &mtx_spin_delay);
-
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
        KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
            "spinning", "lockname:\"%s\"", m->lock_object.lo_name);
+
+       lock_delay_arg_init(&lda, &mtx_spin_delay);
 
 #ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);

Modified: head/sys/kern/kern_rwlock.c
==============================================================================
--- head/sys/kern/kern_rwlock.c Tue Nov 24 03:48:44 2020        (r367977)
+++ head/sys/kern/kern_rwlock.c Tue Nov 24 03:49:37 2020        (r367978)
@@ -948,11 +948,6 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOC
        if (SCHEDULER_STOPPED())
                return;
 
-#if defined(ADAPTIVE_RWLOCKS)
-       lock_delay_arg_init(&lda, &rw_delay);
-#elif defined(KDTRACE_HOOKS)
-       lock_delay_arg_init_noadapt(&lda);
-#endif
        if (__predict_false(v == RW_UNLOCKED))
                v = RW_READ_VALUE(rw);
 
@@ -970,6 +965,12 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOC
        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
+
+#if defined(ADAPTIVE_RWLOCKS)
+       lock_delay_arg_init(&lda, &rw_delay);
+#elif defined(KDTRACE_HOOKS)
+       lock_delay_arg_init_noadapt(&lda);
+#endif
 
 #ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);

Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c     Tue Nov 24 03:48:44 2020        (r367977)
+++ head/sys/kern/kern_sx.c     Tue Nov 24 03:49:37 2020        (r367978)
@@ -620,12 +620,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
        if (SCHEDULER_STOPPED())
                return (0);
 
-#if defined(ADAPTIVE_SX)
-       lock_delay_arg_init(&lda, &sx_delay);
-#elif defined(KDTRACE_HOOKS)
-       lock_delay_arg_init_noadapt(&lda);
-#endif
-
        if (__predict_false(x == SX_LOCK_UNLOCKED))
                x = SX_READ_VALUE(sx);
 
@@ -644,6 +638,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LO
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
+
+#if defined(ADAPTIVE_SX)
+       lock_delay_arg_init(&lda, &sx_delay);
+#elif defined(KDTRACE_HOOKS)
+       lock_delay_arg_init_noadapt(&lda);
+#endif
 
 #ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to