wrowe 02/05/29 14:19:45
Modified: include apr_thread_mutex.h
include/arch/win32 thread_mutex.h
locks/beos thread_mutex.c
locks/netware thread_mutex.c
locks/os2 thread_mutex.c
locks/unix thread_mutex.c
locks/win32 thread_mutex.c
Log:
Split APR_THREAD_MUTEX_DEFAULT with a new APR_THREAD_MUTEX_UNNESTED
which guarantees unnested lock behavior, keep APR_THREAD_MUTEX_DEFAULT
on Win32, Netware and OS2 as nested locks, leave Unix and BeOS with
unnested locks by default.
Needs an implementation on Netware and OS2 for UNNESTED locks, for now
return APR_ENOTIMPL.
Required for absolute locks in series for Win32 on the same thread,
since CriticalSection objects are blindingly fast without contention,
but they will always nest on the same thread.
Revision Changes Path
1.11 +6 -1 apr/include/apr_thread_mutex.h
Index: apr_thread_mutex.h
===================================================================
RCS file: /home/cvs/apr/include/apr_thread_mutex.h,v
retrieving revision 1.10
retrieving revision 1.11
diff -u -r1.10 -r1.11
--- apr_thread_mutex.h 9 Apr 2002 06:56:55 -0000 1.10
+++ apr_thread_mutex.h 29 May 2002 21:19:44 -0000 1.11
@@ -79,6 +79,7 @@
#define APR_THREAD_MUTEX_DEFAULT 0x0
#define APR_THREAD_MUTEX_NESTED 0x1
+#define APR_THREAD_MUTEX_UNNESTED 0x2
/* Delayed the include to avoid a circular reference */
#include "apr_pools.h"
@@ -89,10 +90,14 @@
* stored.
* @param flags Or'ed value of:
* <PRE>
- * APR_THREAD_MUTEX_DEFAULT normal lock behavior (non-recursive).
+ * APR_THREAD_MUTEX_DEFAULT platform-optimal lock behavior.
* APR_THREAD_MUTEX_NESTED enable nested (recursive) locks.
+ * APR_THREAD_MUTEX_UNNESTED disable nested locks (non-recursive).
* </PRE>
* @param pool the pool from which to allocate the mutex.
+ * @tip Be cautious in using APR_THREAD_MUTEX_DEFAULT. While this is the
+ * most optimal mutex based on a given platform's performance characteristics,
+ * it will behave as either a nested or an unnested lock.
*/
APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
unsigned int flags,
1.5 +14 -2 apr/include/arch/win32/thread_mutex.h
Index: thread_mutex.h
===================================================================
RCS file: /home/cvs/apr/include/arch/win32/thread_mutex.h,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -r1.4 -r1.5
--- thread_mutex.h 9 Apr 2002 06:56:56 -0000 1.4
+++ thread_mutex.h 29 May 2002 21:19:44 -0000 1.5
@@ -57,9 +57,21 @@
#include "apr_pools.h"
+typedef enum thread_mutex_type {
+ thread_mutex_critical_section,
+ thread_mutex_unnested_event,
+ thread_mutex_nested_mutex
+} thread_mutex_type;
+
+/* handle applies only to unnested_event on all platforms
+ * and nested_mutex on Win9x only. Otherwise critical_section
+ * is used for NT nested mutexes providing optimal performance.
+ */
struct apr_thread_mutex_t {
- apr_pool_t *pool;
- CRITICAL_SECTION section;
+ apr_pool_t *pool;
+ thread_mutex_type type;
+ HANDLE handle;
+ CRITICAL_SECTION section;
};
#endif /* THREAD_MUTEX_H */
1.8 +4 -0 apr/locks/beos/thread_mutex.c
Index: thread_mutex.c
===================================================================
RCS file: /home/cvs/apr/locks/beos/thread_mutex.c,v
retrieving revision 1.7
retrieving revision 1.8
diff -u -r1.7 -r1.8
--- thread_mutex.c 13 Mar 2002 20:39:20 -0000 1.7
+++ thread_mutex.c 29 May 2002 21:19:44 -0000 1.8
@@ -96,6 +96,10 @@
new_m->LockCount = 0;
new_m->Lock = stat;
new_m->pool = pool;
+
+ /* Optimal default is APR_THREAD_MUTEX_UNNESTED,
+ * no additional checks required for either flag.
+ */
new_m->nested = flags & APR_THREAD_MUTEX_NESTED;
apr_pool_cleanup_register(new_m->pool, (void *)new_m,
_thread_mutex_cleanup,
1.8 +5 -1 apr/locks/netware/thread_mutex.c
Index: thread_mutex.c
===================================================================
RCS file: /home/cvs/apr/locks/netware/thread_mutex.c,v
retrieving revision 1.7
retrieving revision 1.8
diff -u -r1.7 -r1.8
--- thread_mutex.c 13 Mar 2002 20:39:20 -0000 1.7
+++ thread_mutex.c 29 May 2002 21:19:44 -0000 1.8
@@ -73,6 +73,11 @@
{
apr_thread_mutex_t *new_mutex = NULL;
+ /* XXX: Implement _UNNESTED flavor and favor _DEFAULT for performance
+ */
+ if (flags & APR_THREAD_MUTEX_UNNESTED) {
+ return APR_ENOTIMPL;
+ }
new_mutex = (apr_thread_mutex_t *)apr_pcalloc(pool,
sizeof(apr_thread_mutex_t));
if(new_mutex ==NULL) {
@@ -80,7 +85,6 @@
}
new_mutex->pool = pool;
- /* FIXME: only use recursive locks if (flags & APR_THREAD_MUTEX_NESTED)
*/
new_mutex->mutex = NXMutexAlloc(NX_MUTEX_RECURSIVE, NULL, NULL);
if(new_mutex->mutex == NULL)
1.7 +3 -0 apr/locks/os2/thread_mutex.c
Index: thread_mutex.c
===================================================================
RCS file: /home/cvs/apr/locks/os2/thread_mutex.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -r1.6 -r1.7
--- thread_mutex.c 13 Mar 2002 20:39:21 -0000 1.6
+++ thread_mutex.c 29 May 2002 21:19:44 -0000 1.7
@@ -69,6 +69,9 @@
+/* XXX: Need to respect APR_THREAD_MUTEX_[UN]NESTED flags argument
+ * or return APR_ENOTIMPL!!!
+ */
APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
unsigned int flags,
apr_pool_t *pool)
1.11 +4 -0 apr/locks/unix/thread_mutex.c
Index: thread_mutex.c
===================================================================
RCS file: /home/cvs/apr/locks/unix/thread_mutex.c,v
retrieving revision 1.10
retrieving revision 1.11
diff -u -r1.10 -r1.11
--- thread_mutex.c 28 Apr 2002 03:21:24 -0000 1.10
+++ thread_mutex.c 29 May 2002 21:19:44 -0000 1.11
@@ -89,6 +89,10 @@
}
new_mutex->pool = pool;
+
+ /* Optimal default is APR_THREAD_MUTEX_UNNESTED,
+ * no additional checks required for either flag.
+ */
new_mutex->nested = flags & APR_THREAD_MUTEX_NESTED;
if ((rv = pthread_mutexattr_init(&mattr))) {
1.10 +69 -12 apr/locks/win32/thread_mutex.c
Index: thread_mutex.c
===================================================================
RCS file: /home/cvs/apr/locks/win32/thread_mutex.c,v
retrieving revision 1.9
retrieving revision 1.10
diff -u -r1.9 -r1.10
--- thread_mutex.c 13 Mar 2002 20:39:22 -0000 1.9
+++ thread_mutex.c 29 May 2002 21:19:44 -0000 1.10
@@ -65,7 +65,14 @@
{
apr_thread_mutex_t *lock = data;
- DeleteCriticalSection(&lock->section);
+ if (lock->type == thread_mutex_critical_section) {
+ DeleteCriticalSection(&lock->section);
+ }
+ else {
+ if (!CloseHandle(lock->handle)) {
+ return apr_get_os_error();
+ }
+ }
return APR_SUCCESS;
}
@@ -76,9 +83,34 @@
(*mutex) = (apr_thread_mutex_t *)apr_palloc(pool, sizeof(**mutex));
(*mutex)->pool = pool;
- /* FIXME: Implement nested (aka recursive) locks or use a native
- * win32 implementation if available. */
- InitializeCriticalSection(&(*mutex)->section);
+
+ if (flags & APR_THREAD_MUTEX_UNNESTED) {
+ /* Use an auto-reset signaled event, ready to accept one
+ * waiting thread.
+ */
+ (*mutex)->type = thread_mutex_unnested_event;
+ (*mutex)->handle = CreateEvent(NULL, FALSE, TRUE, NULL);
+ }
+ else {
+#if APR_HAS_UNICODE_FS
+ /* Critical Sections are terrific, performance-wise, on NT.
+ * On Win9x, we cannot 'try' on a critical section, so we
+ * use a [slower] mutex object, instead.
+ */
+ IF_WIN_OS_IS_UNICODE {
+ (*mutex)->type = thread_mutex_critical_section;
+ InitializeCriticalSection(&(*mutex)->section);
+ }
+#endif
+#if APR_HAS_ANSI_FS
+ ELSE_WIN_OS_IS_ANSI {
+ (*mutex)->type = thread_mutex_nested_mutex;
+ (*mutex)->handle = CreateMutex(NULL, FALSE, NULL);
+
+ }
+#endif
+ }
+
apr_pool_cleanup_register((*mutex)->pool, (*mutex), thread_mutex_cleanup,
apr_pool_cleanup_null);
return APR_SUCCESS;
@@ -86,24 +118,49 @@
APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex)
{
- EnterCriticalSection(&mutex->section);
+ if (mutex->type == thread_mutex_critical_section) {
+ EnterCriticalSection(&mutex->section);
+ }
+ else {
+ DWORD rv = WaitForSingleObject(mutex->handle, INFINITE);
+ if ((rv != WAIT_OBJECT_0) && (rv != WAIT_ABANDONED)) {
+ return (rv == WAIT_TIMEOUT) ? APR_EBUSY : apr_get_os_error();
+ }
+ }
return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
{
- if (apr_os_level < APR_WIN_NT) {
- return APR_ENOTIMPL;
- }
- if (TryEnterCriticalSection(&mutex->section)) {
- return APR_SUCCESS;
+ if (mutex->type == thread_mutex_critical_section) {
+ if (!TryEnterCriticalSection(&mutex->section)) {
+ return APR_EBUSY;
+ }
}
- return APR_EBUSY;
+ else {
+ DWORD rv = WaitForSingleObject(mutex->handle, 0);
+ if ((rv != WAIT_OBJECT_0) && (rv != WAIT_ABANDONED)) {
+ return (rv == WAIT_TIMEOUT) ? APR_EBUSY : apr_get_os_error();
+ }
+ }
+ return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
{
- LeaveCriticalSection(&mutex->section);
+ if (mutex->type == thread_mutex_critical_section) {
+ LeaveCriticalSection(&mutex->section);
+ }
+ else if (mutex->type == thread_mutex_unnested_event) {
+ if (!SetEvent(mutex->handle)) {
+ return apr_get_os_error();
+ }
+ }
+ else if (mutex->type == thread_mutex_nested_mutex) {
+ if (!ReleaseMutex(mutex->handle)) {
+ return apr_get_os_error();
+ }
+ }
return APR_SUCCESS;
}