Author: jhb
Date: Tue Nov  9 20:46:41 2010
New Revision: 215054
URL: http://svn.freebsd.org/changeset/base/215054

Log:
  - Remove <machine/mutex.h>.  Most of the headers were empty, and the
    contents of the ones that were not empty were stale and unused.
  - Now that <machine/mutex.h> no longer exists, there is no need to allow it
    to override various helper macros in <sys/mutex.h>.
  - Rename various helper macros for low-level operations on mutexes to live
    in the _mtx_* or __mtx_* namespaces.  While here, change the names to more
    closely match the real API functions they are backing.
  - Drop support for including <sys/mutex.h> in assembly source files.
  
  Suggested by: bde (1, 2)
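
  For illustration (not part of the commit): only internal helper macros
  are renamed here, so consumers of the mutex(9) API are unaffected.  A
  minimal caller-side sketch, where the lock name "example_mtx" is
  hypothetical:

	struct mtx example_mtx;

	mtx_init(&example_mtx, "example", NULL, MTX_DEF);
	mtx_lock(&example_mtx);		/* backed by __mtx_lock() when inlined */
	/* ... critical section ... */
	mtx_unlock(&example_mtx);	/* backed by __mtx_unlock() */
	mtx_destroy(&example_mtx);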

Deleted:
  head/sys/amd64/include/mutex.h
  head/sys/arm/include/mutex.h
  head/sys/i386/include/mutex.h
  head/sys/ia64/include/mutex.h
  head/sys/mips/include/mutex.h
  head/sys/pc98/include/mutex.h
  head/sys/powerpc/include/mutex.h
  head/sys/sparc64/include/mutex.h
  head/sys/sun4v/include/mutex.h
Modified:
  head/sys/kern/kern_mutex.c
  head/sys/sys/mutex.h

Modified: head/sys/kern/kern_mutex.c
==============================================================================
--- head/sys/kern/kern_mutex.c  Tue Nov  9 20:44:55 2010        (r215053)
+++ head/sys/kern/kern_mutex.c  Tue Nov  9 20:46:41 2010        (r215054)
@@ -200,7 +200,7 @@ _mtx_lock_flags(struct mtx *m, int opts,
        WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);
 
-       _get_sleep_lock(m, curthread, opts, file, line);
+       __mtx_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
@@ -224,7 +224,7 @@ _mtx_unlock_flags(struct mtx *m, int opt
 
        if (m->mtx_recurse == 0)
                LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
-       _rel_sleep_lock(m, curthread, opts, file, line);
+       __mtx_unlock(m, curthread, opts, file, line);
 }
 
 void
@@ -243,7 +243,7 @@ _mtx_lock_spin_flags(struct mtx *m, int 
                    m->lock_object.lo_name, file, line));
        WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);
-       _get_spin_lock(m, curthread, opts, file, line);
+       __mtx_lock_spin(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
@@ -264,7 +264,7 @@ _mtx_unlock_spin_flags(struct mtx *m, in
            line);
        mtx_assert(m, MA_OWNED);
 
-       _rel_spin_lock(m);
+       __mtx_unlock_spin(m);
 }
 
 /*
@@ -293,7 +293,7 @@ _mtx_trylock(struct mtx *m, int opts, co
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                rval = 1;
        } else
-               rval = _obtain_lock(m, (uintptr_t)curthread);
+               rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
 
        LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
        if (rval) {
@@ -355,7 +355,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
 
-       while (!_obtain_lock(m, tid)) {
+       while (!_mtx_obtain_lock(m, tid)) {
 #ifdef KDTRACE_HOOKS
                spin_cnt++;
 #endif
@@ -511,7 +511,7 @@ _mtx_lock_spin(struct mtx *m, uintptr_t 
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
 
        lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
-       while (!_obtain_lock(m, tid)) {
+       while (!_mtx_obtain_lock(m, tid)) {
 
                /* Give interrupts a chance while we spin. */
                spinlock_exit();
@@ -569,7 +569,7 @@ retry:
                            m->lock_object.lo_name, file, line));
                WITNESS_CHECKORDER(&m->lock_object,
                    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
-               while (!_obtain_lock(m, tid)) {
+               while (!_mtx_obtain_lock(m, tid)) {
 #ifdef KDTRACE_HOOKS
                        spin_cnt++;
 #endif
@@ -597,7 +597,7 @@ retry:
                }
                if (m == td->td_lock)
                        break;
-               _rel_spin_lock(m);      /* does spinlock_exit() */
+               __mtx_unlock_spin(m);   /* does spinlock_exit() */
 #ifdef KDTRACE_HOOKS
                spin_cnt++;
 #endif
@@ -673,7 +673,7 @@ _mtx_unlock_sleep(struct mtx *m, int opt
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
        MPASS(ts != NULL);
        turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
-       _release_lock_quick(m);
+       _mtx_release_lock_quick(m);
 
        /*
         * This turnstile is now no longer associated with the mutex.  We can
@@ -685,7 +685,7 @@ _mtx_unlock_sleep(struct mtx *m, int opt
 
 /*
  * All the unlocking of MTX_SPIN locks is done inline.
- * See the _rel_spin_lock() macro for the details.
+ * See the __mtx_unlock_spin() macro for the details.
  */
 
 /*

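For illustration (not part of the commit): the spin-mutex comment above
relies on the invariant that every acquire performs spinlock_enter() and
every release, including the recursed case, performs a matching
spinlock_exit().  A minimal caller-side sketch, where the lock name
"example_spin" is hypothetical:

	struct mtx example_spin;

	mtx_init(&example_spin, "example spin", NULL, MTX_SPIN);
	mtx_lock_spin(&example_spin);	/* spinlock_enter(), then the CAS fast path */
	/* ... short critical section ... */
	mtx_unlock_spin(&example_spin);	/* __mtx_unlock_spin() ends in spinlock_exit() */
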
Modified: head/sys/sys/mutex.h
==============================================================================
--- head/sys/sys/mutex.h        Tue Nov  9 20:44:55 2010        (r215053)
+++ head/sys/sys/mutex.h        Tue Nov  9 20:46:41 2010        (r215054)
@@ -32,7 +32,6 @@
 #ifndef _SYS_MUTEX_H_
 #define _SYS_MUTEX_H_
 
-#ifndef LOCORE
 #include <sys/queue.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
@@ -43,12 +42,6 @@
 #include <sys/lockstat.h>
 #include <machine/atomic.h>
 #include <machine/cpufunc.h>
-#endif /* _KERNEL_ */
-#endif /* !LOCORE */
-
-#include <machine/mutex.h>
-
-#ifdef _KERNEL
 
 /*
  * Mutex types and options passed to mtx_init().  MTX_QUIET and MTX_DUPOK
@@ -83,8 +76,6 @@
 
 #endif /* _KERNEL */
 
-#ifndef LOCORE
-
 /*
  * XXX: Friendly reminder to fix things in MP code that is presently being
  * XXX: worked on.
@@ -137,68 +128,59 @@ void      _thread_lock_flags(struct thread *,
 
 #define        mtx_recurse     lock_object.lo_data
 
-/*
- * We define our machine-independent (unoptimized) mutex micro-operations
- * here, if they are not already defined in the machine-dependent mutex.h 
- */
+/* Very simple operations on mtx_lock. */
 
 /* Try to obtain mtx_lock once. */
-#ifndef _obtain_lock
-#define _obtain_lock(mp, tid)                                          \
+#define _mtx_obtain_lock(mp, tid)                                      \
        atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid))
-#endif
 
 /* Try to release mtx_lock if it is unrecursed and uncontested. */
-#ifndef _release_lock
-#define _release_lock(mp, tid)                                         \
+#define _mtx_release_lock(mp, tid)                                     \
        atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)
-#endif
 
 /* Release mtx_lock quickly, assuming we own it. */
-#ifndef _release_lock_quick
-#define _release_lock_quick(mp)                                        \
+#define _mtx_release_lock_quick(mp)                                    \
        atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)
-#endif
 
 /*
- * Obtain a sleep lock inline, or call the "hard" function if we can't get it
- * easy.
+ * Full lock operations that are suitable to be inlined in non-debug
+ * kernels.  If the lock cannot be acquired or released trivially then
+ * the work is deferred to another function.
  */
-#ifndef _get_sleep_lock
-#define _get_sleep_lock(mp, tid, opts, file, line) do {                \
+
+/* Lock a normal mutex. */
+#define __mtx_lock(mp, tid, opts, file, line) do {                     \
        uintptr_t _tid = (uintptr_t)(tid);                              \
-       if (!_obtain_lock((mp), _tid))                                  \
+                                                                       \
+       if (!_mtx_obtain_lock((mp), _tid))                              \
                _mtx_lock_sleep((mp), _tid, (opts), (file), (line));    \
        else                                                            \
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, \
                    mp, 0, 0, (file), (line));                          \
 } while (0)
-#endif
 
 /*
- * Obtain a spin lock inline, or call the "hard" function if we can't get it
- * easy. For spinlocks, we handle recursion inline (it turns out that function
- * calls can be significantly expensive on some architectures).
- * Since spin locks are not _too_ common, inlining this code is not too big 
- * a deal.
+ * Lock a spin mutex.  For spinlocks, we handle recursion inline (it
+ * turns out that function calls can be significantly expensive on
+ * some architectures).  Since spin locks are not _too_ common,
+ * inlining this code is not too big a deal.
  */
-#ifndef _get_spin_lock
 #ifdef SMP
-#define _get_spin_lock(mp, tid, opts, file, line) do { \
+#define __mtx_lock_spin(mp, tid, opts, file, line) do {                \
        uintptr_t _tid = (uintptr_t)(tid);                              \
+                                                                       \
        spinlock_enter();                                               \
-       if (!_obtain_lock((mp), _tid)) {                                \
+       if (!_mtx_obtain_lock((mp), _tid)) {                            \
                if ((mp)->mtx_lock == _tid)                             \
                        (mp)->mtx_recurse++;                            \
-               else {                                                  \
+               else                                                    \
                        _mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
-               }                                                       \
        } else                                                          \
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, \
                    mp, 0, 0, (file), (line));                          \
 } while (0)
 #else /* SMP */
-#define _get_spin_lock(mp, tid, opts, file, line) do {                 \
+#define __mtx_lock_spin(mp, tid, opts, file, line) do {                \
        uintptr_t _tid = (uintptr_t)(tid);                              \
                                                                        \
        spinlock_enter();                                               \
@@ -206,49 +188,42 @@ void      _thread_lock_flags(struct thread *,
                (mp)->mtx_recurse++;                                    \
        else {                                                          \
                KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
-               (mp)->mtx_lock = _tid;                          \
+               (mp)->mtx_lock = _tid;                                  \
        }                                                               \
 } while (0)
 #endif /* SMP */
-#endif
 
-/*
- * Release a sleep lock inline, or call the "hard" function if we can't do it
- * easy.
- */
-#ifndef _rel_sleep_lock
-#define _rel_sleep_lock(mp, tid, opts, file, line) do {                \
+/* Unlock a normal mutex. */
+#define __mtx_unlock(mp, tid, opts, file, line) do {                   \
        uintptr_t _tid = (uintptr_t)(tid);                              \
                                                                        \
-       if (!_release_lock((mp), _tid))                                 \
+       if (!_mtx_release_lock((mp), _tid))                             \
                _mtx_unlock_sleep((mp), (opts), (file), (line));        \
 } while (0)
-#endif
 
 /*
- * For spinlocks, we can handle everything inline, as it's pretty simple and
- * a function call would be too expensive (at least on some architectures).
- * Since spin locks are not _too_ common, inlining this code is not too big 
- * a deal.
+ * Unlock a spin mutex.  For spinlocks, we can handle everything
+ * inline, as it's pretty simple and a function call would be too
+ * expensive (at least on some architectures).  Since spin locks are
+ * not _too_ common, inlining this code is not too big a deal.
  *
  * Since we always perform a spinlock_enter() when attempting to acquire a
  * spin lock, we need to always perform a matching spinlock_exit() when
  * releasing a spin lock.  This includes the recursion cases.
  */
-#ifndef _rel_spin_lock
 #ifdef SMP
-#define _rel_spin_lock(mp) do {                                        \
+#define __mtx_unlock_spin(mp) do {                                     \
        if (mtx_recursed((mp)))                                         \
                (mp)->mtx_recurse--;                                    \
        else {                                                          \
                LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_SPIN_UNLOCK_RELEASE, \
                        mp);                                            \
-               _release_lock_quick((mp));                              \
+               _mtx_release_lock_quick((mp));                          \
        }                                                               \
        spinlock_exit();                                                \
 } while (0)
 #else /* SMP */
-#define _rel_spin_lock(mp) do {                                        \
+#define __mtx_unlock_spin(mp) do {                                     \
        if (mtx_recursed((mp)))                                         \
                (mp)->mtx_recurse--;                                    \
        else {                                                          \
@@ -259,7 +234,6 @@ void        _thread_lock_flags(struct thread *,
        spinlock_exit();                                                \
 } while (0)
 #endif /* SMP */
-#endif
 
 /*
  * Exported lock manipulation interface.
@@ -336,13 +310,13 @@ extern struct mtx_pool *mtxpool_sleep;
        _mtx_unlock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE)
 #else  /* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */
 #define        mtx_lock_flags(m, opts)                                 \
-       _get_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
+       __mtx_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
 #define        mtx_unlock_flags(m, opts)                               \
-       _rel_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
+       __mtx_unlock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
 #define        mtx_lock_spin_flags(m, opts)                            \
-       _get_spin_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
+       __mtx_lock_spin((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
 #define        mtx_unlock_spin_flags(m, opts)                          \
-       _rel_spin_lock((m))
+       __mtx_unlock_spin((m))
 #endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */
 
 #define mtx_trylock_flags(m, opts)                                     \
@@ -451,5 +425,4 @@ struct mtx_args {
 #define        MTX_NETWORK_LOCK        "network driver"
 
 #endif /* _KERNEL */
-#endif /* !LOCORE */
 #endif /* _SYS_MUTEX_H_ */
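
For reference (not part of the commit): a user-space analogue of the
fast paths the renamed macros implement, written with C11 atomics.
This is a sketch under stated assumptions: the names try_obtain() and
release_quick() are hypothetical, and the unowned cookie is simplified
to 0, where the kernel uses a distinct MTX_UNOWNED value.

	#include <stdatomic.h>
	#include <stdint.h>

	#define	UNOWNED	((uintptr_t)0)	/* stand-in for the kernel's MTX_UNOWNED */

	/* Analogue of _mtx_obtain_lock(): a single acquire compare-and-set. */
	static int
	try_obtain(_Atomic uintptr_t *lock, uintptr_t tid)
	{
		uintptr_t old = UNOWNED;

		return (atomic_compare_exchange_strong_explicit(lock, &old,
		    tid, memory_order_acquire, memory_order_relaxed));
	}

	/* Analogue of _mtx_release_lock_quick(): an unconditional release store. */
	static void
	release_quick(_Atomic uintptr_t *lock)
	{

		atomic_store_explicit(lock, UNOWNED, memory_order_release);
	}

When the compare-and-set fails, __mtx_lock() defers to
_mtx_lock_sleep(); likewise, the unconditional store is only safe after
_mtx_unlock_sleep() has handed off any waiters via the turnstile, which
is why the contested unlock path is not inlined.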