Author: mjg
Date: Sat Oct 21 22:40:09 2017
New Revision: 324836
URL: https://svnweb.freebsd.org/changeset/base/324836

Log:
  mtx: implement thread lock fastpath
  
  MFC after:    1 week

Modified:
  head/sys/kern/kern_mutex.c
  head/sys/sys/mutex.h

Modified: head/sys/kern/kern_mutex.c
==============================================================================
--- head/sys/kern/kern_mutex.c  Sat Oct 21 21:58:24 2017        (r324835)
+++ head/sys/kern/kern_mutex.c  Sat Oct 21 22:40:09 2017        (r324836)
@@ -791,7 +791,67 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t
 }
 #endif /* SMP */
 
+#ifdef INVARIANTS
+static void
+thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
+{
+
+       KASSERT(m->mtx_lock != MTX_DESTROYED,
+           ("thread_lock() of destroyed mutex @ %s:%d", file, line));
+       KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
+           ("thread_lock() of sleep mutex %s @ %s:%d",
+           m->lock_object.lo_name, file, line));
+       if (mtx_owned(m))
+               KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
+                   ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
+                   m->lock_object.lo_name, file, line));
+       WITNESS_CHECKORDER(&m->lock_object,
+           opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
+}
+#else
+#define thread_lock_validate(m, opts, file, line) do { } while (0)
+#endif
+
+#ifndef LOCK_PROFILING
+#if LOCK_DEBUG > 0
 void
+_thread_lock(struct thread *td, int opts, const char *file, int line)
+#else
+void
+_thread_lock(struct thread *td)
+#endif
+{
+       struct mtx *m;
+       uintptr_t tid, v;
+
+       tid = (uintptr_t)curthread;
+
+       spinlock_enter();
+       m = td->td_lock;
+       thread_lock_validate(m, 0, file, line);
+       v = MTX_READ_VALUE(m);
+       if (__predict_true(v == MTX_UNOWNED)) {
+               if (__predict_false(!_mtx_obtain_lock(m, tid)))
+                       goto slowpath_unlocked;
+       } else if (v == tid) {
+               m->mtx_recurse++;
+       } else
+               goto slowpath_unlocked;
+       if (__predict_true(m == td->td_lock)) {
+               WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
+               return;
+       }
+       if (m->mtx_recurse != 0)
+               m->mtx_recurse--;
+       else
+               _mtx_release_lock_quick(m);
+slowpath_unlocked:
+       spinlock_exit();
+       thread_lock_flags_(td, 0, 0, 0);
+}
+#endif
+
+void
 thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
 {
        struct mtx *m;
@@ -834,17 +894,7 @@ retry:
                v = MTX_UNOWNED;
                spinlock_enter();
                m = td->td_lock;
-               KASSERT(m->mtx_lock != MTX_DESTROYED,
-                   ("thread_lock() of destroyed mutex @ %s:%d", file, line));
-               KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
-                   ("thread_lock() of sleep mutex %s @ %s:%d",
-                   m->lock_object.lo_name, file, line));
-               if (mtx_owned(m))
-                       KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
-           ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
-                           m->lock_object.lo_name, file, line));
-               WITNESS_CHECKORDER(&m->lock_object,
-                   opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
+               thread_lock_validate(m, opts, file, line);
                for (;;) {
                        if (_mtx_obtain_lock_fetch(m, &v, tid))
                                break;

Modified: head/sys/sys/mutex.h
==============================================================================
--- head/sys/sys/mutex.h        Sat Oct 21 21:58:24 2017        (r324835)
+++ head/sys/sys/mutex.h        Sat Oct 21 22:40:09 2017        (r324836)
@@ -127,9 +127,23 @@ void       __mtx_assert(const volatile uintptr_t *c, int wha
            int line);
 #endif
 void   thread_lock_flags_(struct thread *, int, const char *, int);
+#if LOCK_DEBUG > 0
+void   _thread_lock(struct thread *td, int opts, const char *file, int line);
+#else
+void   _thread_lock(struct thread *);
+#endif
 
+#if defined(LOCK_PROFILING) || defined(KLD_MODULE)
#define        thread_lock(tdp)                                                \
        thread_lock_flags_((tdp), 0, __FILE__, __LINE__)
+#elif LOCK_DEBUG > 0
+#define        thread_lock(tdp)                                                \
+       _thread_lock((tdp), 0, __FILE__, __LINE__)
+#else
+#define        thread_lock(tdp)                                                \
+       _thread_lock((tdp))
+#endif
+
#define        thread_lock_flags(tdp, opt)                                     \
        thread_lock_flags_((tdp), (opt), __FILE__, __LINE__)
#define        thread_unlock(tdp)                                              \
_______________________________________________
[email protected] mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "[email protected]"

Reply via email to