We cannot use pthread_rwlock for these until we have reimplemented
pthread_rwlock with gsync, so fork __libc_rwlock off for now.
---
 htl/pt-alloc.c                      | 18 +++++++++---------
 htl/pt-create.c                     |  4 ++--
 htl/pt-internal.h                   |  8 ++++----
 sysdeps/generic/ldsodefs.h          |  2 +-
 sysdeps/htl/dl-support.c            |  2 +-
 sysdeps/htl/dl-thread_gscope_wait.c |  4 ++--
 sysdeps/htl/pt-key-delete.c         |  4 ++--
 sysdeps/mach/hurd/getrandom.c       | 14 +++++++-------
 sysdeps/mach/libc-lock.h            | 12 +++++++++++-
 9 files changed, 39 insertions(+), 29 deletions(-)

diff --git a/htl/pt-alloc.c b/htl/pt-alloc.c
index 257f7250d8..53833d3f20 100644
--- a/htl/pt-alloc.c
+++ b/htl/pt-alloc.c
@@ -125,7 +125,7 @@ __pthread_alloc (struct __pthread **pthread)
     }
 
 retry:
-  __libc_rwlock_wrlock (GL (dl_pthread_threads_lock));
+  __mach_rwlock_wrlock (GL (dl_pthread_threads_lock));
 
   if (GL (dl_pthread_num_threads) < __pthread_max_threads)
     {
@@ -134,7 +134,7 @@ retry:
       new->thread = 1 + GL (dl_pthread_num_threads)++;
       GL (dl_pthread_threads)[new->thread - 1] = NULL;
 
-      __libc_rwlock_unlock (GL (dl_pthread_threads_lock));
+      __mach_rwlock_unlock (GL (dl_pthread_threads_lock));
 
       *pthread = new;
       return 0;
@@ -143,7 +143,7 @@ retry:
   else if (GL (dl_pthread_num_threads) >= PTHREAD_THREADS_MAX)
     {
       /* We have reached the limit on the number of threads per process.  */
-      __libc_rwlock_unlock (GL (dl_pthread_threads_lock));
+      __mach_rwlock_unlock (GL (dl_pthread_threads_lock));
 
       free (new);
       return EAGAIN;
@@ -155,7 +155,7 @@ retry:
      memory allocation, since that's a potentially blocking operation.  */
   max_threads = __pthread_max_threads;
 
-  __libc_rwlock_unlock (GL (dl_pthread_threads_lock));
+  __mach_rwlock_unlock (GL (dl_pthread_threads_lock));
 
   /* Allocate a new lookup table that's twice as large.  */
   new_max_threads
@@ -167,13 +167,13 @@ retry:
       return ENOMEM;
     }
 
-  __libc_rwlock_wrlock (GL (dl_pthread_threads_lock));
+  __mach_rwlock_wrlock (GL (dl_pthread_threads_lock));
 
   /* Check if nobody else has already enlarged the table.  */
   if (max_threads != __pthread_max_threads)
     {
       /* Yep, they did.  */
-      __libc_rwlock_unlock (GL (dl_pthread_threads_lock));
+      __mach_rwlock_unlock (GL (dl_pthread_threads_lock));
 
       /* Free the newly allocated table and try again to allocate a slot.  */
       free (threads);
@@ -196,7 +196,7 @@ retry:
   new->thread = 1 + GL (dl_pthread_num_threads)++;
   GL (dl_pthread_threads)[new->thread - 1] = NULL;
 
-  __libc_rwlock_unlock (GL (dl_pthread_threads_lock));
+  __mach_rwlock_unlock (GL (dl_pthread_threads_lock));
 
   free (old_threads);
 
@@ -211,7 +211,7 @@ __pthread_init_static_tls (struct link_map *map)
 {
   int i;
 
-  __libc_rwlock_wrlock (GL (dl_pthread_threads_lock));
+  __mach_rwlock_wrlock (GL (dl_pthread_threads_lock));
   for (i = 0; i < GL (dl_pthread_num_threads); ++i)
     {
       struct __pthread *t = GL (dl_pthread_threads)[i];
@@ -231,6 +231,6 @@ __pthread_init_static_tls (struct link_map *map)
       memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
              '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
     }
-  __libc_rwlock_unlock (GL (dl_pthread_threads_lock));
+  __mach_rwlock_unlock (GL (dl_pthread_threads_lock));
 }
 libc_hidden_def (__pthread_init_static_tls)
diff --git a/htl/pt-create.c b/htl/pt-create.c
index 2a43285372..a66b6ebdcf 100644
--- a/htl/pt-create.c
+++ b/htl/pt-create.c
@@ -240,9 +240,9 @@ __pthread_create_internal (struct __pthread **thread,
      could use __thread_setid, however, we only lock for reading as no
      other thread should be using this entry (we also assume that the
      store is atomic).  */
-  __libc_rwlock_rdlock (GL (dl_pthread_threads_lock));
+  __mach_rwlock_rdlock (GL (dl_pthread_threads_lock));
   GL (dl_pthread_threads)[pthread->thread - 1] = pthread;
-  __libc_rwlock_unlock (GL (dl_pthread_threads_lock));
+  __mach_rwlock_unlock (GL (dl_pthread_threads_lock));
 
   /* At this point it is possible to guess our pthread ID.  We have to
      make sure that all functions taking a pthread_t argument can
diff --git a/htl/pt-internal.h b/htl/pt-internal.h
index 5a3104830c..e662a8d951 100644
--- a/htl/pt-internal.h
+++ b/htl/pt-internal.h
@@ -178,16 +178,16 @@ libc_hidden_proto (__pthread_max_threads)
 
 #define __pthread_getid(thread) \
   ({ struct __pthread *__t = NULL;                                           \
-     __libc_rwlock_rdlock (GL (dl_pthread_threads_lock));                    \
+     __mach_rwlock_rdlock (GL (dl_pthread_threads_lock));                    \
      if (thread <= __pthread_max_threads)                                    \
        __t = GL (dl_pthread_threads)[thread - 1];                            \
-     __libc_rwlock_unlock (GL (dl_pthread_threads_lock));                    \
+     __mach_rwlock_unlock (GL (dl_pthread_threads_lock));                    \
      __t; })
 
 #define __pthread_setid(thread, pthread) \
-  __libc_rwlock_wrlock (GL (dl_pthread_threads_lock));                       \
+  __mach_rwlock_wrlock (GL (dl_pthread_threads_lock));                       \
   GL (dl_pthread_threads)[thread - 1] = pthread;                             \
-  __libc_rwlock_unlock (GL (dl_pthread_threads_lock));
+  __mach_rwlock_unlock (GL (dl_pthread_threads_lock));
 
 /* Similar to pthread_self, but returns the thread descriptor instead
    of the thread ID.  */
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index 76afc5df7d..46a7119b3a 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -478,7 +478,7 @@ struct rtld_global
 
   /* Array of __pthread structures and its lock.  */
   EXTERN struct __pthread **_dl_pthread_threads;
-  __libc_rwlock_define (EXTERN, _dl_pthread_threads_lock)
+  __mach_rwlock_define (EXTERN, _dl_pthread_threads_lock)
 #endif
 #ifdef SHARED
 };
diff --git a/sysdeps/htl/dl-support.c b/sysdeps/htl/dl-support.c
index 0c1edf2bbd..5b07a8f67b 100644
--- a/sysdeps/htl/dl-support.c
+++ b/sysdeps/htl/dl-support.c
@@ -20,4 +20,4 @@
 
 int _dl_pthread_num_threads;
 struct __pthread **_dl_pthread_threads;
-__libc_rwlock_define_initialized (, _dl_pthread_threads_lock)
+__mach_rwlock_define_initialized (, _dl_pthread_threads_lock)
diff --git a/sysdeps/htl/dl-thread_gscope_wait.c b/sysdeps/htl/dl-thread_gscope_wait.c
index 095618eac1..d8428d4491 100644
--- a/sysdeps/htl/dl-thread_gscope_wait.c
+++ b/sysdeps/htl/dl-thread_gscope_wait.c
@@ -39,7 +39,7 @@ __thread_gscope_wait (void)
   struct __pthread *t;
   int *gscope_flagp;
 
-  __libc_rwlock_rdlock (GL (dl_pthread_threads_lock));
+  __mach_rwlock_rdlock (GL (dl_pthread_threads_lock));
 
   /* Iterate over the list of threads.  */
   for (i = 0; i < GL (dl_pthread_num_threads); ++i)
@@ -63,5 +63,5 @@ __thread_gscope_wait (void)
       while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
     }
 
-  __libc_rwlock_unlock (GL (dl_pthread_threads_lock));
+  __mach_rwlock_unlock (GL (dl_pthread_threads_lock));
 }
diff --git a/sysdeps/htl/pt-key-delete.c b/sysdeps/htl/pt-key-delete.c
index 018a021ee1..88f5e33b01 100644
--- a/sysdeps/htl/pt-key-delete.c
+++ b/sysdeps/htl/pt-key-delete.c
@@ -41,7 +41,7 @@ __pthread_key_delete (pthread_key_t key)
       __pthread_key_destructors[key] = PTHREAD_KEY_INVALID;
       __pthread_key_invalid_count++;
 
-      __libc_rwlock_rdlock (GL (dl_pthread_threads_lock));
+      __mach_rwlock_rdlock (GL (dl_pthread_threads_lock));
       for (i = 0; i < GL (dl_pthread_num_threads); ++i)
        {
          struct __pthread *t;
@@ -64,7 +64,7 @@ __pthread_key_delete (pthread_key_t key)
                t->thread_specifics[key] = 0;
            }
        }
-      __libc_rwlock_unlock (GL (dl_pthread_threads_lock));
+      __mach_rwlock_unlock (GL (dl_pthread_threads_lock));
     }
 
   __pthread_mutex_unlock (&__pthread_key_lock);
diff --git a/sysdeps/mach/hurd/getrandom.c b/sysdeps/mach/hurd/getrandom.c
index 6647a0f778..a3fde4dc59 100644
--- a/sysdeps/mach/hurd/getrandom.c
+++ b/sysdeps/mach/hurd/getrandom.c
@@ -20,7 +20,7 @@
 #include <sys/random.h>
 #include <fcntl.h>
 
-__libc_rwlock_define_initialized (static, lock);
+__mach_rwlock_define_initialized (static, lock);
 static file_t random_server, random_server_nonblock,
               urandom_server, urandom_server_nonblock;
 
@@ -75,14 +75,14 @@ __getrandom (void *buffer, size_t length, unsigned int flags)
     return length;
 
 again:
-  __libc_rwlock_rdlock (lock);
+  __mach_rwlock_rdlock (lock);
   server = *cached_server;
   if (MACH_PORT_VALID (server))
     /* Attempt to read some random data using this port.  */
     err = __io_read (server, &data, &nread, -1, length);
   else
     err = MACH_SEND_INVALID_DEST;
-  __libc_rwlock_unlock (lock);
+  __mach_rwlock_unlock (lock);
 
   if (err == MACH_SEND_INVALID_DEST || err == MIG_SERVER_DIED)
     {
@@ -92,13 +92,13 @@ again:
       /* Slow path: the cached port didn't work, or there was no
          cached port in the first place.  */
 
-      __libc_rwlock_wrlock (lock);
+      __mach_rwlock_wrlock (lock);
       server = *cached_server;
       if (server != oldserver)
         {
           /* Someone else must have refetched the port while we were
              waiting for the lock. */
-          __libc_rwlock_unlock (lock);
+          __mach_rwlock_unlock (lock);
           goto again;
         }
 
@@ -111,7 +111,7 @@ again:
                                       MACH_PORT_RIGHT_SEND, &urefs);
           if (!err && urefs > 0)
             {
-              __libc_rwlock_unlock (lock);
+              __mach_rwlock_unlock (lock);
               goto again;
             }
 
@@ -121,7 +121,7 @@ again:
 
       server = *cached_server = __file_name_lookup (random_source,
                                                     open_flags, 0);
-      __libc_rwlock_unlock (lock);
+      __mach_rwlock_unlock (lock);
       if (!MACH_PORT_VALID (server))
        {
          if (errno == ENOENT)
diff --git a/sysdeps/mach/libc-lock.h b/sysdeps/mach/libc-lock.h
index 236a24ad80..ddeae544f4 100644
--- a/sysdeps/mach/libc-lock.h
+++ b/sysdeps/mach/libc-lock.h
@@ -145,7 +145,17 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
 #define __rtld_lock_unlock_recursive(NAME) \
   __libc_lock_unlock_recursive (NAME)
 
-/* XXX for now */
+/* XXX for now, waiting for a futex-based pthread_rwlock implementation */
+#define __mach_rwlock_define           __libc_lock_define
+#define __mach_rwlock_define_initialized __libc_lock_define_initialized
+#define __mach_rwlock_init             __libc_lock_init
+#define __mach_rwlock_fini             __libc_lock_fini
+#define __mach_rwlock_rdlock           __libc_lock_lock
+#define __mach_rwlock_wrlock           __libc_lock_lock
+#define __mach_rwlock_tryrdlock                __libc_lock_trylock
+#define __mach_rwlock_trywrlock                __libc_lock_trylock
+#define __mach_rwlock_unlock           __libc_lock_unlock
+
 #define __libc_rwlock_define           __libc_lock_define
 #define __libc_rwlock_define_initialized __libc_lock_define_initialized
 #define __libc_rwlock_init             __libc_lock_init
-- 
2.51.0


Reply via email to