Author: Amaury Forgeot d'Arc <[email protected]>
Branch: remove-PYPY_NOT_MAIN_FILE
Changeset: r57129:50181ffdc2af
Date: 2012-07-21 22:58 +0200
http://bitbucket.org/pypy/pypy/changeset/50181ffdc2af/

Log:    Split thread.h into header and implementation files.

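For orientation: after this changeset, src/thread.h carries only declarations plus the OS dispatch, while the function bodies live in the new src/thread.c, compiled once as its own unit. Below is a minimal sketch of how a module picks that up, mirroring the ll_thread.py hunk further down; the import path for ExternalCompilationInfo is assumed to be the pypy.translator.tool.cbuild location of this era of the tree, and 'autopath' is the per-directory helper module that ll_thread.py already uses.

    import py
    import autopath
    from pypy.translator.tool.cbuild import ExternalCompilationInfo

    # translator/c serves both as the include dir for src/thread.h
    # (declarations only) and as the home of the new compilation unit.
    translator_c_dir = py.path.local(autopath.pypydir) / 'translator' / 'c'

    eci = ExternalCompilationInfo(
        includes = ['src/thread.h'],
        separate_module_files = [translator_c_dir / 'src' / 'thread.c'],
        include_dirs = [translator_c_dir],
    )
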
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -720,7 +720,7 @@
     global_code = '\n'.join(global_objects)
 
     prologue = ("#include <Python.h>\n"
-                "#include <src/thread.h>\n")
+                "#include <src/thread.c>\n")
     code = (prologue +
             struct_declaration_code +
             global_code +
diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py
--- a/pypy/module/thread/ll_thread.py
+++ b/pypy/module/thread/ll_thread.py
@@ -12,10 +12,13 @@
 class error(Exception):
     pass
 
+pypydir = py.path.local(autopath.pypydir)
+translator_c_dir = pypydir / 'translator' / 'c'
+
 eci = ExternalCompilationInfo(
     includes = ['src/thread.h'],
-    separate_module_sources = [''],
-    include_dirs = [str(py.path.local(autopath.pypydir).join('translator', 'c'))],
+    separate_module_files = [translator_c_dir / 'src' / 'thread.c'],
+    include_dirs = [translator_c_dir],
     export_symbols = ['RPyThreadGetIdent', 'RPyThreadLockInit',
                       'RPyThreadAcquireLock', 'RPyThreadReleaseLock',
                       'RPyGilAllocate', 'RPyGilYieldThread',
diff --git a/pypy/translator/c/genc.py b/pypy/translator/c/genc.py
--- a/pypy/translator/c/genc.py
+++ b/pypy/translator/c/genc.py
@@ -952,6 +952,7 @@
     files = [
         srcdir / 'profiling.c',
         srcdir / 'debug_print.c',
+        srcdir / 'thread.c',
     ]
     if _CYGWIN:
         files.append(srcdir / 'cygwin_wait.c')
diff --git a/pypy/translator/c/src/g_include.h b/pypy/translator/c/src/g_include.h
--- a/pypy/translator/c/src/g_include.h
+++ b/pypy/translator/c/src/g_include.h
@@ -46,7 +46,6 @@
 #  include "src/rtyper.h"
 #  include "src/debug_traceback.h"
 #  include "src/debug_alloc.h"
-#  include "src/ll_os.h"
 #  include "src/ll_strtod.h"
 #endif
 
diff --git a/pypy/translator/c/src/ll_os.h b/pypy/translator/c/src/ll_os.h
deleted file mode 100644
--- a/pypy/translator/c/src/ll_os.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/************************************************************/
- /***  C header subsection: os module                      ***/
-
-/* NOTE NOTE NOTE: This whole file is going away...
-*/
-
-static int geterrno(void)    /* XXX only for rpython.rctypes, kill me */
-{
-    return errno;
-}
diff --git a/pypy/translator/c/src/profiling.c b/pypy/translator/c/src/profiling.c
--- a/pypy/translator/c/src/profiling.c
+++ b/pypy/translator/c/src/profiling.c
@@ -2,6 +2,8 @@
 #include <stddef.h>
 #if defined(__GNUC__) && defined(__linux__)
 
+/* Linux GCC implementation */
+
 #ifndef _GNU_SOURCE
 #define _GNU_SOURCE
 #include <sched.h>
@@ -31,6 +33,9 @@
 }
 
 #elif defined(_WIN32)
+
+/* Windows implementation */
+
 #include <windows.h>
 
 DWORD_PTR base_affinity_mask;
@@ -59,6 +64,9 @@
 }
 
 #else
+
+/* Empty implementations for other platforms */
 void pypy_setup_profiling() { }
 void pypy_teardown_profiling() { }
+
 #endif
diff --git a/pypy/translator/c/src/thread.c b/pypy/translator/c/src/thread.c
new file mode 100644
--- /dev/null
+++ b/pypy/translator/c/src/thread.c
@@ -0,0 +1,9 @@
+/* Thread implementation */
+#include "src/thread.h"
+
+#ifdef _WIN32
+#include "src/thread_nt.c"
+#else
+#include "src/thread_pthread.c"
+#endif
+
diff --git a/pypy/translator/c/src/thread_nt.c b/pypy/translator/c/src/thread_nt.c
new file mode 100644
--- /dev/null
+++ b/pypy/translator/c/src/thread_nt.c
@@ -0,0 +1,244 @@
+/* Copy-and-pasted from CPython */
+
+/* This code implemented by [email protected] */
+/* Fast NonRecursiveMutex support by Yakov Markovitch, [email protected] */
+/* Eliminated some memory leaks, [email protected] */
+
+#include <windows.h>
+#include <limits.h>
+#include <process.h>
+
+
+/*
+ * Thread support.
+ */
+
+#define RPyOpaque_INITEXPR_ThreadLock  { 0, 0, NULL }
+typedef struct RPyOpaque_ThreadLock NRMUTEX, *PNRMUTEX;
+
+typedef struct {
+       void (*func)(void);
+       long id;
+       HANDLE done;
+} callobj;
+
+static long _pypythread_stacksize = 0;
+
+/*
+ * Return the thread Id instead of an handle. The Id is said to uniquely
+   identify the thread in the system
+ */
+int RPyThreadGetIdent()
+{
+  return GetCurrentThreadId();
+}
+
+static void
+bootstrap(void *call)
+{
+       callobj *obj = (callobj*)call;
+       /* copy callobj since other thread might free it before we're done */
+       void (*func)(void) = obj->func;
+
+       obj->id = RPyThreadGetIdent();
+       ReleaseSemaphore(obj->done, 1, NULL);
+       func();
+}
+
+long RPyThreadStart(void (*func)(void))
+{
+       unsigned long rv;
+       callobj obj;
+
+       obj.id = -1;    /* guilty until proved innocent */
+       obj.func = func;
+       obj.done = CreateSemaphore(NULL, 0, 1, NULL);
+       if (obj.done == NULL)
+               return -1;
+
+       rv = _beginthread(bootstrap, _pypythread_stacksize, &obj);
+       if (rv == (unsigned long)-1) {
+               /* I've seen errno == EAGAIN here, which means "there are
+                * too many threads".
+                */
+               obj.id = -1;
+       }
+       else {
+               /* wait for thread to initialize, so we can get its id */
+               WaitForSingleObject(obj.done, INFINITE);
+               assert(obj.id != -1);
+       }
+       CloseHandle((HANDLE)obj.done);
+       return obj.id;
+}
+
+/************************************************************/
+
+/* minimum/maximum thread stack sizes supported */
+#define THREAD_MIN_STACKSIZE    0x8000      /* 32kB */
+#define THREAD_MAX_STACKSIZE    0x10000000  /* 256MB */
+
+long RPyThreadGetStackSize(void)
+{
+       return _pypythread_stacksize;
+}
+
+long RPyThreadSetStackSize(long newsize)
+{
+       if (newsize == 0) {    /* set to default */
+               _pypythread_stacksize = 0;
+               return 0;
+       }
+
+       /* check the range */
+       if (newsize >= THREAD_MIN_STACKSIZE && newsize < THREAD_MAX_STACKSIZE) {
+               _pypythread_stacksize = newsize;
+               return 0;
+       }
+       return -1;
+}
+
+/************************************************************/
+
+
+BOOL InitializeNonRecursiveMutex(PNRMUTEX mutex)
+{
+       mutex->owned = -1 ;  /* No threads have entered NonRecursiveMutex */
+       mutex->thread_id = 0 ;
+       mutex->hevent = CreateEvent(NULL, FALSE, FALSE, NULL) ;
+       return mutex->hevent != NULL ;  /* TRUE if the mutex is created */
+}
+
+VOID DeleteNonRecursiveMutex(PNRMUTEX mutex)
+{
+       /* No in-use check */
+       CloseHandle(mutex->hevent) ;
+       mutex->hevent = NULL ; /* Just in case */
+}
+
+DWORD EnterNonRecursiveMutex(PNRMUTEX mutex, BOOL wait)
+{
+       /* Assume that the thread waits successfully */
+       DWORD ret ;
+
+       /* InterlockedIncrement(&mutex->owned) == 0 means that no thread currently owns the mutex */
+       if (!wait)
+       {
+               if (InterlockedCompareExchange(&mutex->owned, 0, -1) != -1)
+                       return WAIT_TIMEOUT ;
+               ret = WAIT_OBJECT_0 ;
+       }
+       else
+               ret = InterlockedIncrement(&mutex->owned) ?
+                       /* Some thread owns the mutex, let's wait... */
+                       WaitForSingleObject(mutex->hevent, INFINITE) : WAIT_OBJECT_0 ;
+
+       mutex->thread_id = GetCurrentThreadId() ; /* We own it */
+       return ret ;
+}
+
+BOOL LeaveNonRecursiveMutex(PNRMUTEX mutex)
+{
+       /* We don't own the mutex */
+       mutex->thread_id = 0 ;
+       return
+               InterlockedDecrement(&mutex->owned) < 0 ||
+               SetEvent(mutex->hevent) ; /* Other threads are waiting, wake one on them up */
+}
+
+/************************************************************/
+
+void RPyThreadAfterFork(void)
+{
+}
+
+int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock)
+{
+  return InitializeNonRecursiveMutex(lock);
+}
+
+void RPyOpaqueDealloc_ThreadLock(struct RPyOpaque_ThreadLock *lock)
+{
+       if (lock->hevent != NULL)
+               DeleteNonRecursiveMutex(lock);
+}
+
+/*
+ * Return 1 on success if the lock was acquired
+ *
+ * and 0 if the lock was not acquired. This means a 0 is returned
+ * if the lock has already been acquired by this thread!
+ */
+int RPyThreadAcquireLock(struct RPyOpaque_ThreadLock *lock, int waitflag)
+{
+       return EnterNonRecursiveMutex(lock, (waitflag != 0 ? INFINITE : 0)) == WAIT_OBJECT_0;
+}
+
+void RPyThreadReleaseLock(struct RPyOpaque_ThreadLock *lock)
+{
+       if (!LeaveNonRecursiveMutex(lock))
+               /* XXX complain? */;
+}
+
+/************************************************************/
+
+char *RPyThreadTLS_Create(RPyThreadTLS *result)
+{
+       *result = TlsAlloc();
+       if (*result == TLS_OUT_OF_INDEXES)
+               return "out of thread-local storage indexes";
+       else
+               return NULL;
+}
+
+/************************************************************/
+/* GIL code                                                 */
+/************************************************************/
+
+static volatile LONG pending_acquires = -1;
+static CRITICAL_SECTION mutex_gil;
+static HANDLE cond_gil;
+
+long RPyGilAllocate(void)
+{
+    pending_acquires = 0;
+    InitializeCriticalSection(&mutex_gil);
+    EnterCriticalSection(&mutex_gil);
+    cond_gil = CreateEvent (NULL, FALSE, FALSE, NULL);
+    return 1;
+}
+
+long RPyGilYieldThread(void)
+{
+    /* can be called even before RPyGilAllocate(), but in this case,
+       pending_acquires will be -1 */
+    if (pending_acquires <= 0)
+        return 0;
+    InterlockedIncrement(&pending_acquires);
+    PulseEvent(cond_gil);
+
+    /* hack: the three following lines do a pthread_cond_wait(), and
+       normally specifying a timeout of INFINITE would be fine.  But the
+       first and second operations are not done atomically, so there is a
+       (small) risk that PulseEvent misses the WaitForSingleObject().
+       In this case the process will just sleep a few milliseconds. */
+    LeaveCriticalSection(&mutex_gil);
+    WaitForSingleObject(cond_gil, 15);
+    EnterCriticalSection(&mutex_gil);
+
+    InterlockedDecrement(&pending_acquires);
+    return 1;
+}
+
+void RPyGilRelease(void)
+{
+    LeaveCriticalSection(&mutex_gil);
+    PulseEvent(cond_gil);
+}
+
+void RPyGilAcquire(void)
+{
+    InterlockedIncrement(&pending_acquires);
+    EnterCriticalSection(&mutex_gil);
+    InterlockedDecrement(&pending_acquires);
+}
diff --git a/pypy/translator/c/src/thread_nt.h b/pypy/translator/c/src/thread_nt.h
--- a/pypy/translator/c/src/thread_nt.h
+++ b/pypy/translator/c/src/thread_nt.h
@@ -1,13 +1,4 @@
-/* Copy-and-pasted from CPython */
-
-/* This code implemented by [email protected] */
-/* Fast NonRecursiveMutex support by Yakov Markovitch, [email protected] */
-/* Eliminated some memory leaks, [email protected] */
-
 #include <windows.h>
-#include <limits.h>
-#include <process.h>
-
 
 /*
  * Thread support.
@@ -15,24 +6,14 @@
 
 #define RPyOpaque_INITEXPR_ThreadLock  { 0, 0, NULL }
 
-typedef struct {
-       void (*func)(void);
-       long id;
-       HANDLE done;
-} callobj;
-
 typedef struct RPyOpaque_ThreadLock {
        LONG   owned ;
        DWORD  thread_id ;
        HANDLE hevent ;
-} NRMUTEX, *PNRMUTEX ;
+};
 
 /* prototypes */
 long RPyThreadStart(void (*func)(void));
-BOOL InitializeNonRecursiveMutex(PNRMUTEX mutex);
-VOID DeleteNonRecursiveMutex(PNRMUTEX mutex);
-DWORD EnterNonRecursiveMutex(PNRMUTEX mutex, BOOL wait);
-BOOL LeaveNonRecursiveMutex(PNRMUTEX mutex);
 int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock);
 void RPyOpaqueDealloc_ThreadLock(struct RPyOpaque_ThreadLock *lock);
 int RPyThreadAcquireLock(struct RPyOpaque_ThreadLock *lock, int waitflag);
@@ -40,239 +21,8 @@
 long RPyThreadGetStackSize(void);
 long RPyThreadSetStackSize(long);
 
-
-/* implementations */
-
-#ifdef PYPY_MAIN_IMPLEMENTATION_FILE
-
-static long _pypythread_stacksize = 0;
-
-/*
- * Return the thread Id instead of an handle. The Id is said to uniquely
-   identify the thread in the system
- */
-int RPyThreadGetIdent()
-{
-  return GetCurrentThreadId();
-}
-
-static void
-bootstrap(void *call)
-{
-       callobj *obj = (callobj*)call;
-       /* copy callobj since other thread might free it before we're done */
-       void (*func)(void) = obj->func;
-
-       obj->id = RPyThreadGetIdent();
-       ReleaseSemaphore(obj->done, 1, NULL);
-       func();
-}
-
-long RPyThreadStart(void (*func)(void))
-{
-       unsigned long rv;
-       callobj obj;
-
-       obj.id = -1;    /* guilty until proved innocent */
-       obj.func = func;
-       obj.done = CreateSemaphore(NULL, 0, 1, NULL);
-       if (obj.done == NULL)
-               return -1;
-
-       rv = _beginthread(bootstrap, _pypythread_stacksize, &obj);
-       if (rv == (unsigned long)-1) {
-               /* I've seen errno == EAGAIN here, which means "there are
-                * too many threads".
-                */
-               obj.id = -1;
-       }
-       else {
-               /* wait for thread to initialize, so we can get its id */
-               WaitForSingleObject(obj.done, INFINITE);
-               assert(obj.id != -1);
-       }
-       CloseHandle((HANDLE)obj.done);
-       return obj.id;
-}
-
-/************************************************************/
-
-/* minimum/maximum thread stack sizes supported */
-#define THREAD_MIN_STACKSIZE    0x8000      /* 32kB */
-#define THREAD_MAX_STACKSIZE    0x10000000  /* 256MB */
-
-long RPyThreadGetStackSize(void)
-{
-       return _pypythread_stacksize;
-}
-
-long RPyThreadSetStackSize(long newsize)
-{
-       if (newsize == 0) {    /* set to default */
-               _pypythread_stacksize = 0;
-               return 0;
-       }
-
-       /* check the range */
-       if (newsize >= THREAD_MIN_STACKSIZE && newsize < THREAD_MAX_STACKSIZE) {
-               _pypythread_stacksize = newsize;
-               return 0;
-       }
-       return -1;
-}
-
-/************************************************************/
-
-
-BOOL InitializeNonRecursiveMutex(PNRMUTEX mutex)
-{
-       mutex->owned = -1 ;  /* No threads have entered NonRecursiveMutex */
-       mutex->thread_id = 0 ;
-       mutex->hevent = CreateEvent(NULL, FALSE, FALSE, NULL) ;
-       return mutex->hevent != NULL ;  /* TRUE if the mutex is created */
-}
-
-VOID DeleteNonRecursiveMutex(PNRMUTEX mutex)
-{
-       /* No in-use check */
-       CloseHandle(mutex->hevent) ;
-       mutex->hevent = NULL ; /* Just in case */
-}
-
-DWORD EnterNonRecursiveMutex(PNRMUTEX mutex, BOOL wait)
-{
-       /* Assume that the thread waits successfully */
-       DWORD ret ;
-
-       /* InterlockedIncrement(&mutex->owned) == 0 means that no thread currently owns the mutex */
-       if (!wait)
-       {
-               if (InterlockedCompareExchange(&mutex->owned, 0, -1) != -1)
-                       return WAIT_TIMEOUT ;
-               ret = WAIT_OBJECT_0 ;
-       }
-       else
-               ret = InterlockedIncrement(&mutex->owned) ?
-                       /* Some thread owns the mutex, let's wait... */
-                       WaitForSingleObject(mutex->hevent, INFINITE) : WAIT_OBJECT_0 ;
-
-       mutex->thread_id = GetCurrentThreadId() ; /* We own it */
-       return ret ;
-}
-
-BOOL LeaveNonRecursiveMutex(PNRMUTEX mutex)
-{
-       /* We don't own the mutex */
-       mutex->thread_id = 0 ;
-       return
-               InterlockedDecrement(&mutex->owned) < 0 ||
-               SetEvent(mutex->hevent) ; /* Other threads are waiting, wake one on them up */
-}
-
-/************************************************************/
-
-void RPyThreadAfterFork(void)
-{
-}
-
-int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock)
-{
-  return InitializeNonRecursiveMutex(lock);
-}
-
-void RPyOpaqueDealloc_ThreadLock(struct RPyOpaque_ThreadLock *lock)
-{
-       if (lock->hevent != NULL)
-               DeleteNonRecursiveMutex(lock);
-}
-
-/*
- * Return 1 on success if the lock was acquired
- *
- * and 0 if the lock was not acquired. This means a 0 is returned
- * if the lock has already been acquired by this thread!
- */
-int RPyThreadAcquireLock(struct RPyOpaque_ThreadLock *lock, int waitflag)
-{
-       return EnterNonRecursiveMutex(lock, (waitflag != 0 ? INFINITE : 0)) == WAIT_OBJECT_0;
-}
-
-void RPyThreadReleaseLock(struct RPyOpaque_ThreadLock *lock)
-{
-       if (!LeaveNonRecursiveMutex(lock))
-               /* XXX complain? */;
-}
-
-/************************************************************/
-
 /* Thread-local storage */
-#define RPyThreadTLS   DWORD
 #define __thread __declspec(thread)
-
-char *RPyThreadTLS_Create(RPyThreadTLS *result)
-{
-       *result = TlsAlloc();
-       if (*result == TLS_OUT_OF_INDEXES)
-               return "out of thread-local storage indexes";
-       else
-               return NULL;
-}
-
+typedef DWORD RPyThreadTLS;
 #define RPyThreadTLS_Get(key)          TlsGetValue(key)
 #define RPyThreadTLS_Set(key, value)   TlsSetValue(key, value)
-
-
-/************************************************************/
-/* GIL code                                                 */
-/************************************************************/
-
-static volatile LONG pending_acquires = -1;
-static CRITICAL_SECTION mutex_gil;
-static HANDLE cond_gil;
-
-long RPyGilAllocate(void)
-{
-    pending_acquires = 0;
-    InitializeCriticalSection(&mutex_gil);
-    EnterCriticalSection(&mutex_gil);
-    cond_gil = CreateEvent (NULL, FALSE, FALSE, NULL);
-    return 1;
-}
-
-long RPyGilYieldThread(void)
-{
-    /* can be called even before RPyGilAllocate(), but in this case,
-       pending_acquires will be -1 */
-    if (pending_acquires <= 0)
-        return 0;
-    InterlockedIncrement(&pending_acquires);
-    PulseEvent(cond_gil);
-
-    /* hack: the three following lines do a pthread_cond_wait(), and
-       normally specifying a timeout of INFINITE would be fine.  But the
-       first and second operations are not done atomically, so there is a
-       (small) risk that PulseEvent misses the WaitForSingleObject().
-       In this case the process will just sleep a few milliseconds. */
-    LeaveCriticalSection(&mutex_gil);
-    WaitForSingleObject(cond_gil, 15);
-    EnterCriticalSection(&mutex_gil);
-
-    InterlockedDecrement(&pending_acquires);
-    return 1;
-}
-
-void RPyGilRelease(void)
-{
-    LeaveCriticalSection(&mutex_gil);
-    PulseEvent(cond_gil);
-}
-
-void RPyGilAcquire(void)
-{
-    InterlockedIncrement(&pending_acquires);
-    EnterCriticalSection(&mutex_gil);
-    InterlockedDecrement(&pending_acquires);
-}
-
-
-#endif /* PYPY_MAIN_IMPLEMENTATION_FILE */
diff --git a/pypy/translator/c/src/thread_pthread.h b/pypy/translator/c/src/thread_pthread.h
--- a/pypy/translator/c/src/thread_pthread.h
+++ b/pypy/translator/c/src/thread_pthread.h
@@ -1,4 +1,3 @@
-
 /* Posix threads interface (from CPython) */
 
 /* XXX needs to detect HAVE_BROKEN_POSIX_SEMAPHORES properly; currently
@@ -7,20 +6,7 @@
 */
 
 #include <unistd.h>   /* for the _POSIX_xxx and _POSIX_THREAD_xxx defines */
-#include <stdlib.h>
 #include <pthread.h>
-#include <signal.h>
-#include <stdio.h>
-#include <errno.h>
-#include <assert.h>
-
-/* The following is hopefully equivalent to what CPython does
-   (which is trying to compile a snippet of code using it) */
-#ifdef PTHREAD_SCOPE_SYSTEM
-#  ifndef PTHREAD_SYSTEM_SCHED_SUPPORTED
-#    define PTHREAD_SYSTEM_SCHED_SUPPORTED
-#  endif
-#endif
 
 /* The POSIX spec says that implementations supporting the sem_*
    family of functions must indicate this by defining
@@ -35,16 +21,6 @@
 #endif
 #endif
 
-#if !defined(pthread_attr_default)
-#  define pthread_attr_default ((pthread_attr_t *)NULL)
-#endif
-#if !defined(pthread_mutexattr_default)
-#  define pthread_mutexattr_default ((pthread_mutexattr_t *)NULL)
-#endif
-#if !defined(pthread_condattr_default)
-#  define pthread_condattr_default ((pthread_condattr_t *)NULL)
-#endif
-
 /* Whether or not to use semaphores directly rather than emulating them with
  * mutexes and condition variables:
  */
@@ -55,8 +31,6 @@
 #endif
 
 
-#define CHECK_STATUS(name)  if (status != 0) { perror(name); error = 1; }
-
 /********************* structs ***********/
 
 #ifdef USE_SEMAPHORES
@@ -70,7 +44,7 @@
 
 #define RPyOpaque_INITEXPR_ThreadLock  { { /* sem */ }, 0 }
 
-#else                                      /* no semaphores */
+#else /* !USE_SEMAPHORE */
 
 /* A pthread mutex isn't sufficient to model the Python lock type
    (see explanations in CPython's Python/thread_pthread.h */
@@ -88,7 +62,7 @@
                PTHREAD_COND_INITIALIZER,       \
                PTHREAD_MUTEX_INITIALIZER       \
        }
-#endif                                     /* no semaphores */
+#endif /* USE_SEMAPHORE */
 
 /* prototypes */
 
@@ -105,478 +79,7 @@
 
 /* implementations */
 
-#ifdef PYPY_MAIN_IMPLEMENTATION_FILE
-
-/* The POSIX spec requires that use of pthread_attr_setstacksize
-   be conditional on _POSIX_THREAD_ATTR_STACKSIZE being defined. */
-#ifdef _POSIX_THREAD_ATTR_STACKSIZE
-# ifndef THREAD_STACK_SIZE
-#  define THREAD_STACK_SIZE   0   /* use default stack size */
-# endif
-/* for safety, ensure a viable minimum stacksize */
-# define THREAD_STACK_MIN    0x8000  /* 32kB */
-#else  /* !_POSIX_THREAD_ATTR_STACKSIZE */
-# ifdef THREAD_STACK_SIZE
-#  error "THREAD_STACK_SIZE defined but _POSIX_THREAD_ATTR_STACKSIZE undefined"
-# endif
-#endif
-
-/* XXX This implementation is considered (to quote Tim Peters) "inherently
-   hosed" because:
-     - It does not guarantee the promise that a non-zero integer is returned.
-     - The cast to long is inherently unsafe.
-     - It is not clear that the 'volatile' (for AIX?) and ugly casting in the
-       latter return statement (for Alpha OSF/1) are any longer necessary.
-*/
-long RPyThreadGetIdent(void)
-{
-       volatile pthread_t threadid;
-       /* Jump through some hoops for Alpha OSF/1 */
-       threadid = pthread_self();
-
-#ifdef __CYGWIN__
-       /* typedef __uint32_t pthread_t; */
-       return (long) threadid;
-#else
-       if (sizeof(pthread_t) <= sizeof(long))
-               return (long) threadid;
-       else
-               return (long) *(long *) &threadid;
-#endif
-}
-
-static long _pypythread_stacksize = 0;
-
-static void *bootstrap_pthread(void *func)
-{
-  ((void(*)(void))func)();
-  return NULL;
-}
-
-long RPyThreadStart(void (*func)(void))
-{
-       pthread_t th;
-       int status;
-#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
-       pthread_attr_t attrs;
-#endif
-#if defined(THREAD_STACK_SIZE)
-       size_t tss;
-#endif
-
-#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
-       pthread_attr_init(&attrs);
-#endif
-#ifdef THREAD_STACK_SIZE
-       tss = (_pypythread_stacksize != 0) ? _pypythread_stacksize
-               : THREAD_STACK_SIZE;
-       if (tss != 0)
-               pthread_attr_setstacksize(&attrs, tss);
-#endif
-#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !defined(__FreeBSD__)
-        pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM);
-#endif
-
-       status = pthread_create(&th, 
-#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
-                                &attrs,
-#else
-                                (pthread_attr_t*)NULL,
-#endif
-                                bootstrap_pthread,
-                                (void *)func
-                                );
-
-#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
-       pthread_attr_destroy(&attrs);
-#endif
-       if (status != 0)
-            return -1;
-
-        pthread_detach(th);
-
-#ifdef __CYGWIN__
-       /* typedef __uint32_t pthread_t; */
-       return (long) th;
-#else
-       if (sizeof(pthread_t) <= sizeof(long))
-               return (long) th;
-       else
-               return (long) *(long *) &th;
-#endif
-}
-
-long RPyThreadGetStackSize(void)
-{
-       return _pypythread_stacksize;
-}
-
-long RPyThreadSetStackSize(long newsize)
-{
-#if defined(THREAD_STACK_SIZE)
-       pthread_attr_t attrs;
-       size_t tss_min;
-       int rc;
-#endif
-
-       if (newsize == 0) {    /* set to default */
-               _pypythread_stacksize = 0;
-               return 0;
-       }
-
-#if defined(THREAD_STACK_SIZE)
-# if defined(PTHREAD_STACK_MIN)
-       tss_min = PTHREAD_STACK_MIN > THREAD_STACK_MIN ? PTHREAD_STACK_MIN
-               : THREAD_STACK_MIN;
-# else
-       tss_min = THREAD_STACK_MIN;
-# endif
-       if (newsize >= tss_min) {
-               /* validate stack size by setting thread attribute */
-               if (pthread_attr_init(&attrs) == 0) {
-                       rc = pthread_attr_setstacksize(&attrs, newsize);
-                       pthread_attr_destroy(&attrs);
-                       if (rc == 0) {
-                               _pypythread_stacksize = newsize;
-                               return 0;
-                       }
-               }
-       }
-       return -1;
-#else
-       return -2;
-#endif
-}
-
-/************************************************************/
-#ifdef USE_SEMAPHORES
-/************************************************************/
-
-#include <semaphore.h>
-
-void RPyThreadAfterFork(void)
-{
-}
-
-int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock)
-{
-       int status, error = 0;
-       lock->initialized = 0;
-       status = sem_init(&lock->sem, 0, 1);
-       CHECK_STATUS("sem_init");
-       if (error)
-               return 0;
-       lock->initialized = 1;
-       return 1;
-}
-
-void RPyOpaqueDealloc_ThreadLock(struct RPyOpaque_ThreadLock *lock)
-{
-       int status, error = 0;
-       if (lock->initialized) {
-               status = sem_destroy(&lock->sem);
-               CHECK_STATUS("sem_destroy");
-               /* 'error' is ignored;
-                  CHECK_STATUS already printed an error message */
-       }
-}
-
-/*
- * As of February 2002, Cygwin thread implementations mistakenly report error
- * codes in the return value of the sem_ calls (like the pthread_ functions).
- * Correct implementations return -1 and put the code in errno. This supports
- * either.
- */
-static int
-rpythread_fix_status(int status)
-{
-       return (status == -1) ? errno : status;
-}
-
-int RPyThreadAcquireLock(struct RPyOpaque_ThreadLock *lock, int waitflag)
-{
-       int success;
-       sem_t *thelock = &lock->sem;
-       int status, error = 0;
-
-       do {
-               if (waitflag)
-                       status = rpythread_fix_status(sem_wait(thelock));
-               else
-                       status = rpythread_fix_status(sem_trywait(thelock));
-       } while (status == EINTR); /* Retry if interrupted by a signal */
-
-       if (waitflag) {
-               CHECK_STATUS("sem_wait");
-       } else if (status != EAGAIN) {
-               CHECK_STATUS("sem_trywait");
-       }
-       
-       success = (status == 0) ? 1 : 0;
-       return success;
-}
-
-void RPyThreadReleaseLock(struct RPyOpaque_ThreadLock *lock)
-{
-       sem_t *thelock = &lock->sem;
-       int status, error = 0;
-
-       status = sem_post(thelock);
-       CHECK_STATUS("sem_post");
-}
-
-/************************************************************/
-#else                                      /* no semaphores */
-/************************************************************/
-
-struct RPyOpaque_ThreadLock *alllocks;   /* doubly-linked list */
-
-void RPyThreadAfterFork(void)
-{
-       /* Mess.  We have no clue about how it works on CPython on OSX,
-          but the issue is that the state of mutexes is not really
-          preserved across a fork().  So we need to walk over all lock
-          objects here, and rebuild their mutex and condition variable.
-
-          See e.g. http://hackage.haskell.org/trac/ghc/ticket/1391 for
-          a similar bug about GHC.
-       */
-       struct RPyOpaque_ThreadLock *p = alllocks;
-       alllocks = NULL;
-       while (p) {
-               struct RPyOpaque_ThreadLock *next = p->next;
-               int was_locked = p->locked;
-               RPyThreadLockInit(p);
-               p->locked = was_locked;
-               p = next;
-       }
-}
-
-int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock)
-{
-       int status, error = 0;
-
-       lock->initialized = 0;
-       lock->locked = 0;
-
-       status = pthread_mutex_init(&lock->mut,
-                                   pthread_mutexattr_default);
-       CHECK_STATUS("pthread_mutex_init");
-
-       status = pthread_cond_init(&lock->lock_released,
-                                  pthread_condattr_default);
-       CHECK_STATUS("pthread_cond_init");
-
-       if (error)
-               return 0;
-       lock->initialized = 1;
-       /* add 'lock' in the doubly-linked list */
-       if (alllocks)
-               alllocks->prev = lock;
-       lock->next = alllocks;
-       lock->prev = NULL;
-       alllocks = lock;
-       return 1;
-}
-
-void RPyOpaqueDealloc_ThreadLock(struct RPyOpaque_ThreadLock *lock)
-{
-       int status, error = 0;
-       if (lock->initialized) {
-               /* remove 'lock' from the doubly-linked list */
-               if (lock->prev)
-                       lock->prev->next = lock->next;
-               else {
-                       assert(alllocks == lock);
-                       alllocks = lock->next;
-               }
-               if (lock->next)
-                       lock->next->prev = lock->prev;
-
-               status = pthread_mutex_destroy(&lock->mut);
-               CHECK_STATUS("pthread_mutex_destroy");
-
-               status = pthread_cond_destroy(&lock->lock_released);
-               CHECK_STATUS("pthread_cond_destroy");
-
-               /* 'error' is ignored;
-                  CHECK_STATUS already printed an error message */
-       }
-}
-
-int RPyThreadAcquireLock(struct RPyOpaque_ThreadLock *lock, int waitflag)
-{
-       int success;
-       int status, error = 0;
-
-       status = pthread_mutex_lock( &lock->mut );
-       CHECK_STATUS("pthread_mutex_lock[1]");
-       success = lock->locked == 0;
-
-       if ( !success && waitflag ) {
-               /* continue trying until we get the lock */
-
-               /* mut must be locked by me -- part of the condition
-                * protocol */
-               while ( lock->locked ) {
-                       status = pthread_cond_wait(&lock->lock_released,
-                                                  &lock->mut);
-                       CHECK_STATUS("pthread_cond_wait");
-               }
-               success = 1;
-       }
-       if (success) lock->locked = 1;
-       status = pthread_mutex_unlock( &lock->mut );
-       CHECK_STATUS("pthread_mutex_unlock[1]");
-
-       if (error) success = 0;
-       return success;
-}
-
-void RPyThreadReleaseLock(struct RPyOpaque_ThreadLock *lock)
-{
-       int status, error = 0;
-
-       status = pthread_mutex_lock( &lock->mut );
-       CHECK_STATUS("pthread_mutex_lock[3]");
-
-       lock->locked = 0;
-
-       status = pthread_mutex_unlock( &lock->mut );
-       CHECK_STATUS("pthread_mutex_unlock[3]");
-
-       /* wake up someone (anyone, if any) waiting on the lock */
-       status = pthread_cond_signal( &lock->lock_released );
-       CHECK_STATUS("pthread_cond_signal");
-}
-
-/************************************************************/
-#endif                                     /* no semaphores */
-/************************************************************/
-
-
 /* Thread-local storage */
-#define RPyThreadTLS   pthread_key_t
-
-char *RPyThreadTLS_Create(RPyThreadTLS *result)
-{
-       if (pthread_key_create(result, NULL) != 0)
-               return "out of thread-local storage keys";
-       else
-               return NULL;
-}
-
+typedef pthread_key_t RPyThreadTLS;
 #define RPyThreadTLS_Get(key)          pthread_getspecific(key)
 #define RPyThreadTLS_Set(key, value)   pthread_setspecific(key, value)
-
-
-/************************************************************/
-/* GIL code                                                 */
-/************************************************************/
-
-#ifdef __llvm__
-#  define HAS_ATOMIC_ADD
-#endif
-
-#ifdef __GNUC__
-#  if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
-#    define HAS_ATOMIC_ADD
-#  endif
-#endif
-
-#ifdef HAS_ATOMIC_ADD
-#  define atomic_add __sync_fetch_and_add
-#else
-#  if defined(__amd64__)
-#    define atomic_add(ptr, value)  asm volatile ("lock addq %0, %1"        \
-                                 : : "ri"(value), "m"(*(ptr)) : "memory")
-#  elif defined(__i386__)
-#    define atomic_add(ptr, value)  asm volatile ("lock addl %0, %1"        \
-                                 : : "ri"(value), "m"(*(ptr)) : "memory")
-#  else
-#    error "Please use gcc >= 4.1 or write a custom 'asm' for your CPU."
-#  endif
-#endif
-
-#define ASSERT_STATUS(call)                             \
-    if (call != 0) {                                    \
-        fprintf(stderr, "Fatal error: " #call "\n");    \
-        abort();                                        \
-    }
-
-static void _debug_print(const char *msg)
-{
-#if 0
-    int col = (int)pthread_self();
-    col = 31 + ((col / 8) % 8);
-    fprintf(stderr, "\033[%dm%s\033[0m", col, msg);
-#endif
-}
-
-static volatile long pending_acquires = -1;
-static pthread_mutex_t mutex_gil = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t cond_gil = PTHREAD_COND_INITIALIZER;
-
-static void assert_has_the_gil(void)
-{
-#ifdef RPY_ASSERT
-    assert(pthread_mutex_trylock(&mutex_gil) != 0);
-    assert(pending_acquires >= 0);
-#endif
-}
-
-long RPyGilAllocate(void)
-{
-    _debug_print("RPyGilAllocate\n");
-    pending_acquires = 0;
-    pthread_mutex_trylock(&mutex_gil);
-    assert_has_the_gil();
-    return 1;
-}
-
-long RPyGilYieldThread(void)
-{
-    /* can be called even before RPyGilAllocate(), but in this case,
-       pending_acquires will be -1 */
-#ifdef RPY_ASSERT
-    if (pending_acquires >= 0)
-        assert_has_the_gil();
-#endif
-    if (pending_acquires <= 0)
-        return 0;
-    atomic_add(&pending_acquires, 1L);
-    _debug_print("{");
-    ASSERT_STATUS(pthread_cond_signal(&cond_gil));
-    ASSERT_STATUS(pthread_cond_wait(&cond_gil, &mutex_gil));
-    _debug_print("}");
-    atomic_add(&pending_acquires, -1L);
-    assert_has_the_gil();
-    return 1;
-}
-
-void RPyGilRelease(void)
-{
-    _debug_print("RPyGilRelease\n");
-#ifdef RPY_ASSERT
-    assert(pending_acquires >= 0);
-#endif
-    assert_has_the_gil();
-    ASSERT_STATUS(pthread_mutex_unlock(&mutex_gil));
-    ASSERT_STATUS(pthread_cond_signal(&cond_gil));
-}
-
-void RPyGilAcquire(void)
-{
-    _debug_print("about to RPyGilAcquire...\n");
-#ifdef RPY_ASSERT
-    assert(pending_acquires >= 0);
-#endif
-    atomic_add(&pending_acquires, 1L);
-    ASSERT_STATUS(pthread_mutex_lock(&mutex_gil));
-    atomic_add(&pending_acquires, -1L);
-    assert_has_the_gil();
-    _debug_print("RPyGilAcquire\n");
-}
-
-
-#endif /* PYPY_MAIN_IMPLEMENTATION_FILE */