> I built something very similar for my company last year, and it has
> been running flawlessly in production at a few customer sites since,
> with CPU usage averaging around 50% around the clock. I even posted
> about it on the Python mailing list [1], where it got almost no
> response at the time. I never posted the code, though -- nobody
> seemed to be too interested.
I never bothered to tidy this up; the function naming (PySpecial_*) in
particular leaves something to be desired. It's not too bad, though --
it just isn't commit-ready. I'm not going to worry about that anymore,
so I'm just posting what I have. Maybe someone can make use of it.
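In short, the patch swaps the GIL's plain PyThread lock for a
"PySpecialSemaphore": one pthread mutex plus one condition variable per
waiting thread, kept in a FIFO queue, so that releasing the GIL hands
it to the thread that has been waiting longest instead of letting the
releaser win the re-acquire race. Below is a minimal standalone sketch
of that queue-of-condvars idea; the names (fair_lock_t, waiter) are
mine, not the patch's, error checking is omitted, and the real patch
additionally recycles its queue nodes through a free list.

/*
 * Standalone sketch of the FIFO handoff idea (illustrative only).
 * Each acquirer gets a queue node with its own condition variable;
 * the node at the head of the queue is the current owner, and unlock
 * wakes the next node in line, so threads get the lock in strict
 * arrival order.
 */
#include <pthread.h>
#include <stdlib.h>

struct waiter {
    pthread_cond_t cond;
    struct waiter *next;
};

typedef struct {
    pthread_mutex_t mutex;
    struct waiter *head;    /* head == ticket of the current owner */
    struct waiter *tail;
} fair_lock_t;

void fair_lock_init(fair_lock_t *l)
{
    pthread_mutex_init(&l->mutex, NULL);
    l->head = l->tail = NULL;
}

void fair_lock(fair_lock_t *l)
{
    struct waiter *w = malloc(sizeof(*w));

    pthread_cond_init(&w->cond, NULL);
    w->next = NULL;

    pthread_mutex_lock(&l->mutex);
    if (l->tail)
        l->tail->next = w;
    else
        l->head = w;                          /* queue was empty: we own it */
    l->tail = w;
    while (l->head != w)                      /* not our turn yet */
        pthread_cond_wait(&w->cond, &l->mutex);
    pthread_mutex_unlock(&l->mutex);
}

void fair_unlock(fair_lock_t *l)
{
    struct waiter *w;

    pthread_mutex_lock(&l->mutex);
    w = l->head;                              /* our own ticket */
    l->head = w->next;
    if (l->head)
        pthread_cond_signal(&l->head->cond);  /* hand off to the next waiter */
    else
        l->tail = NULL;
    pthread_cond_destroy(&w->cond);
    free(w);
    pthread_mutex_unlock(&l->mutex);
}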
--- Python-2.5.2/Include/pythread.h.scheduling 2006-06-13 17:04:24.000000000 +0200
+++ Python-2.5.2/Include/pythread.h 2008-10-16 14:46:07.000000000 +0200
@@ -40,6 +40,20 @@
PyAPI_FUNC(void *) PyThread_get_key_value(int);
PyAPI_FUNC(void) PyThread_delete_key_value(int key);
+#ifndef _POSIX_THREADS
+#error Requires POSIX threads
+#endif
+
+PyAPI_FUNC(void *) PyThread_mutex_alloc(void);
+PyAPI_FUNC(void) PyThread_mutex_free(void *);
+PyAPI_FUNC(void) PyThread_mutex_lock(void *);
+PyAPI_FUNC(void) PyThread_mutex_unlock(void *);
+
+PyAPI_FUNC(void *) PyThread_cond_alloc(void);
+PyAPI_FUNC(void) PyThread_cond_free(void *);
+PyAPI_FUNC(void) PyThread_cond_wait(void *, void *);
+PyAPI_FUNC(void) PyThread_cond_signal(void *);
+
#ifdef __cplusplus
}
#endif
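The header additions above are nothing more than opaque wrappers around
pthread_mutex_t and pthread_cond_t (the implementations follow in the
thread.c hunk below). A hypothetical caller would pair them in the
usual monitor pattern -- this sketch assumes a tree with the patch
applied, and "predicate" stands for whatever shared state the waiting
thread cares about:

/* Hypothetical usage sketch for the new wrappers (not part of the
 * patch), assuming a source tree with the patch applied. */
#include "Python.h"
#include "pythread.h"

static void *mutex;       /* created once with PyThread_mutex_alloc() */
static void *cond;        /* created once with PyThread_cond_alloc() */
static int predicate;

void example_init(void)
{
    mutex = PyThread_mutex_alloc();
    cond = PyThread_cond_alloc();
}

void example_wait(void)
{
    PyThread_mutex_lock(mutex);
    while (!predicate)                    /* re-check: wakeups may be spurious */
        PyThread_cond_wait(cond, mutex);  /* drops the mutex while blocked */
    PyThread_mutex_unlock(mutex);
}

void example_signal(void)
{
    PyThread_mutex_lock(mutex);
    predicate = 1;
    PyThread_cond_signal(cond);           /* wake one waiting thread */
    PyThread_mutex_unlock(mutex);
}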
--- Python-2.5.2/Python/thread.c.scheduling 2006-07-21 09:59:47.000000000 +0200
+++ Python-2.5.2/Python/thread.c 2008-10-16 14:46:07.000000000 +0200
@@ -155,6 +155,56 @@
#endif
*/
+void *PyThread_mutex_alloc(void)
+{
+ pthread_mutex_t *m = malloc(sizeof(pthread_mutex_t));
+ if (pthread_mutex_init(m, NULL))
+ Py_FatalError("PyThread_mutex_alloc: pthread_mutex_init failed");
+ return m;
+}
+
+void PyThread_mutex_free(void *m)
+{
+ if (pthread_mutex_destroy(m))
+ Py_FatalError("PyThread_mutex_free: pthread_mutex_destroy failed");
+ free(m);
+}
+
+void PyThread_mutex_lock(void *m)
+{
+ pthread_mutex_lock(m);
+}
+
+void PyThread_mutex_unlock(void *m)
+{
+ pthread_mutex_unlock(m);
+}
+
+void *PyThread_cond_alloc(void)
+{
+ pthread_cond_t *c = malloc(sizeof(pthread_cond_t));
+ if (pthread_cond_init(c, NULL))
+ Py_FatalError("PyThread_cond_alloc: pthread_cond_init failed");
+ return c;
+}
+
+void PyThread_cond_free(void *c)
+{
+ if (pthread_cond_destroy(c))
+ Py_FatalError("PyThread_cond_free: pthread_cond_destroy failed");
+ free(c);
+}
+
+void PyThread_cond_wait(void *c, void *m)
+{
+ pthread_cond_wait(c, m);
+}
+
+void PyThread_cond_signal(void *c)
+{
+ pthread_cond_signal(c);
+}
+
/* return the current thread stack size */
size_t
PyThread_get_stacksize(void)
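One caveat about the wrappers above: init/destroy failures are treated
as fatal, but the malloc results are never checked and the return codes
of lock/unlock/wait/signal are ignored. A slightly more defensive
version of the two allocators, as a suggestion only (this is not part
of the posted patch), could look like this:

/* Suggested hardening (not in the patch): also treat allocation
 * failure as fatal, mirroring the existing error handling style. */
void *PyThread_mutex_alloc(void)
{
    pthread_mutex_t *m = malloc(sizeof(pthread_mutex_t));
    if (m == NULL || pthread_mutex_init(m, NULL))
        Py_FatalError("PyThread_mutex_alloc: allocation or init failed");
    return m;
}

void *PyThread_cond_alloc(void)
{
    pthread_cond_t *c = malloc(sizeof(pthread_cond_t));
    if (c == NULL || pthread_cond_init(c, NULL))
        Py_FatalError("PyThread_cond_alloc: allocation or init failed");
    return c;
}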
--- Python-2.5.2/Python/ceval.c.scheduling 2008-01-23 21:09:39.000000000 +0100
+++ Python-2.5.2/Python/ceval.c 2008-10-16 14:47:07.000000000 +0200
@@ -210,7 +210,31 @@
#endif
#include "pythread.h"
-static PyThread_type_lock interpreter_lock = 0; /* This is the GIL */
+typedef void *PySpecial_cond_type;
+
+struct special_linkstruct {
+ PySpecial_cond_type wait;
+ struct special_linkstruct *queue_next, *free_next;
+ int in_use;
+};
+
+typedef void *PySpecial_lock_type;
+
+typedef struct {
+ PySpecial_lock_type the_lock;
+ struct special_linkstruct *wait_queue, *wait_last, *free_queue;
+} PySpecialSemaphore;
+
+void
+PySpecial_init(PySpecialSemaphore *s)
+{
+ s->the_lock = PyThread_mutex_alloc();
+ s->wait_queue = NULL;
+ s->wait_last = NULL;
+ s->free_queue = NULL;
+}
+
+static PySpecialSemaphore *interpreter_lock = NULL; /* This is the GIL */
static long main_thread = 0;
int
@@ -219,26 +243,100 @@
return interpreter_lock != 0;
}
+static PySpecialSemaphore *allocate_special(void)
+{
+ PySpecialSemaphore *s = malloc(sizeof(PySpecialSemaphore));
+ PySpecial_init(s);
+ return s;
+}
+
+static struct special_linkstruct *allocate_special_linkstruct(void)
+{
+ struct special_linkstruct *ls = malloc(sizeof(struct special_linkstruct));
+ ls->wait = PyThread_cond_alloc();
+ ls->queue_next = NULL;
+ ls->free_next = NULL;
+ ls->in_use = 0;
+ return ls;
+}
+
+static void PySpecial_Lock(PySpecialSemaphore *s)
+{
+ struct special_linkstruct *ls;
+
+ PyThread_mutex_lock(s->the_lock);
+
+ if (!s->free_queue)
+ s->free_queue = allocate_special_linkstruct();
+
+ ls = s->free_queue;
+ s->free_queue = ls->free_next;
+
+ if (!s->wait_queue)
+ {
+ ls->in_use = 1;
+ s->wait_queue = ls;
+ s->wait_last = ls;
+ PyThread_mutex_unlock(s->the_lock);
+ return;
+ }
+
+ assert(s->wait_queue != ls);
+ assert(s->wait_last != ls);
+ assert(s->wait_last->queue_next == NULL);
+ assert(!ls->in_use);
+ s->wait_last->queue_next = ls;
+ s->wait_last = ls;
+ ls->in_use = 1;
+
+ while (s->wait_queue != ls)
+ PyThread_cond_wait(ls->wait, s->the_lock);
+
+ PyThread_mutex_unlock(s->the_lock);
+}
+
+static void PySpecial_Unlock(PySpecialSemaphore *s)
+{
+ struct special_linkstruct *ls;
+
+ PyThread_mutex_lock(s->the_lock);
+ ls = s->wait_queue;
+ assert(ls->in_use);
+
+ s->wait_queue = ls->queue_next;
+ if (s->wait_queue)
+ {
+ ls->queue_next = NULL;
+ PyThread_cond_signal(s->wait_queue->wait);
+ }
+ ls->in_use = 0;
+
+ ls->free_next = s->free_queue;
+ s->free_queue = ls;
+
+ PyThread_mutex_unlock(s->the_lock);
+}
+
void
PyEval_InitThreads(void)
{
if (interpreter_lock)
return;
- interpreter_lock = PyThread_allocate_lock();
- PyThread_acquire_lock(interpreter_lock, 1);
+ interpreter_lock = allocate_special();
+ PySpecial_Lock(interpreter_lock);
main_thread = PyThread_get_thread_ident();
}
void
PyEval_AcquireLock(void)
{
- PyThread_acquire_lock(interpreter_lock, 1);
+ PySpecial_Lock(interpreter_lock);
}
void
PyEval_ReleaseLock(void)
{
- PyThread_release_lock(interpreter_lock);
+ PySpecial_Unlock(interpreter_lock);
}
void
@@ -248,7 +346,7 @@
Py_FatalError("PyEval_AcquireThread: NULL new thread state");
/* Check someone has called PyEval_InitThreads() to create the lock */
assert(interpreter_lock);
- PyThread_acquire_lock(interpreter_lock, 1);
+ PySpecial_Lock(interpreter_lock);
if (PyThreadState_Swap(tstate) != NULL)
Py_FatalError(
"PyEval_AcquireThread: non-NULL old thread state");
@@ -261,7 +359,7 @@
Py_FatalError("PyEval_ReleaseThread: NULL thread state");
if (PyThreadState_Swap(NULL) != tstate)
Py_FatalError("PyEval_ReleaseThread: wrong thread state");
- PyThread_release_lock(interpreter_lock);
+ PySpecial_Unlock(interpreter_lock);
}
/* This function is called from PyOS_AfterFork to ensure that newly
@@ -278,8 +376,8 @@
much error-checking. Doing this cleanly would require
adding a new function to each thread_*.h. Instead, just
create a new lock and waste a little bit of memory */
- interpreter_lock = PyThread_allocate_lock();
- PyThread_acquire_lock(interpreter_lock, 1);
+ interpreter_lock = allocate_special();
+ PySpecial_Lock(interpreter_lock);
main_thread = PyThread_get_thread_ident();
}
#endif
@@ -296,7 +394,7 @@
Py_FatalError("PyEval_SaveThread: NULL tstate");
#ifdef WITH_THREAD
if (interpreter_lock)
- PyThread_release_lock(interpreter_lock);
+ PySpecial_Unlock(interpreter_lock);
#endif
return tstate;
}
@@ -309,7 +407,7 @@
#ifdef WITH_THREAD
if (interpreter_lock) {
int err = errno;
- PyThread_acquire_lock(interpreter_lock, 1);
+ PySpecial_Lock(interpreter_lock);
errno = err;
}
#endif
@@ -818,21 +916,23 @@
_Py_Ticker = 0;
}
#ifdef WITH_THREAD
- if (interpreter_lock) {
+ if (interpreter_lock && interpreter_lock->wait_queue) {
/* Give another thread a chance */
if (PyThreadState_Swap(NULL) != tstate)
Py_FatalError("ceval: tstate mix-up");
- PyThread_release_lock(interpreter_lock);
+ PySpecial_Unlock(interpreter_lock);
/* Other threads may run now */
- PyThread_acquire_lock(interpreter_lock, 1);
+ PySpecial_Lock(interpreter_lock);
if (PyThreadState_Swap(tstate) != NULL)
Py_FatalError("ceval: orphan tstate");
- /* Check for thread interrupts */
+ }
+ if (interpreter_lock) {
+ /* Check for thread interrupts */
if (tstate->async_exc != NULL) {
x = tstate->async_exc;
tstate->async_exc = NULL;
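Two things are worth noting about the ceval.c changes. First, the
periodic check now only drops the GIL when another thread is actually
queued (interpreter_lock->wait_queue), so a single CPU-bound thread no
longer pays for a release/re-acquire round trip at every check
interval. Second, the public API is untouched: PyEval_SaveThread(),
PyEval_RestoreThread() and friends simply route through
PySpecial_Unlock()/PySpecial_Lock(), so existing extension modules keep
working unchanged. A hypothetical extension function, for illustration
only (do_blocking_io() is just a stand-in, not a real call):

/* Hypothetical extension function, assuming a patched tree.
 * Py_BEGIN/END_ALLOW_THREADS expand to PyEval_SaveThread() /
 * PyEval_RestoreThread(), which now go through PySpecial_Unlock()
 * and PySpecial_Lock() underneath. */
#include "Python.h"

extern void do_blocking_io(void);   /* stand-in for GIL-free C work */

static PyObject *
blocking_call(PyObject *self, PyObject *args)
{
    /* would be listed in the module's PyMethodDef table */
    Py_BEGIN_ALLOW_THREADS
    do_blocking_io();               /* runs without holding the GIL */
    Py_END_ALLOW_THREADS
    Py_RETURN_NONE;
}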