Author: sthibaul-guest
Date: 2007-07-21 00:27:17 +0000 (Sat, 21 Jul 2007)
New Revision: 2425

Added:
   glibc-package/trunk/debian/patches/hurd-i386/local-atomic-no-multiple_threads.diff
   glibc-package/trunk/debian/patches/hurd-i386/local-gscope.diff
   glibc-package/trunk/debian/patches/hurd-i386/local-no-strerror_l.diff
   glibc-package/trunk/debian/patches/hurd-i386/submitted-lock-intern.diff
   glibc-package/trunk/debian/patches/series.hurd-i386
Modified:
   glibc-package/trunk/debian/changelog
   glibc-package/trunk/debian/patches/series
   glibc-package/trunk/debian/sysdeps/depflags.pl
Log:
  * hurd-i386/local-atomic-no-multiple_threads.diff: new patch, hurd-i386
    doesn't need the multiple_threads field.
  * hurd-i386/local-gscope.diff: new patch, backport of the CVS global scope.
  * hurd-i386/local-no-strerror_l.diff: new patch to disable non-implemented
    strerror_l().
  * hurd-i386/submitted-lock-intern.diff: new patch to fix a header inclusion.
  * sysdeps/depflags.pl: make libc0.3 depend on TLS-enabled hurd packages.


Modified: glibc-package/trunk/debian/changelog
===================================================================
--- glibc-package/trunk/debian/changelog        2007-07-20 23:40:55 UTC (rev 2424)
+++ glibc-package/trunk/debian/changelog        2007-07-21 00:27:17 UTC (rev 2425)
@@ -31,6 +31,13 @@
   * hurd-i386/local-tls-support.diff: new patch to support TLS.
   * debian/sysdeps/hurd.mk (libc_extra_config_options): Removed
     --without-__thread, --without-tls and libc_cv_z_relro=no.
+  * hurd-i386/local-atomic-no-multiple_threads.diff: new patch, hurd-i386
+    doesn't need the multiple_threads field.
+  * hurd-i386/local-gscope.diff: new patch, backport of the CVS global scope.
+  * hurd-i386/local-no-strerror_l.diff: new patch to disable non-implemented
+    strerror_l().
+  * hurd-i386/submitted-lock-intern.diff: new patch to fix a header inclusion.
+  * sysdeps/depflags.pl: make libc0.3 depend on TLS-enabled hurd packages.
 
  -- Pierre Habouzit <[EMAIL PROTECTED]>  Fri, 20 Jul 2007 19:19:54 +0200
 

Added: glibc-package/trunk/debian/patches/hurd-i386/local-atomic-no-multiple_threads.diff
===================================================================
--- glibc-package/trunk/debian/patches/hurd-i386/local-atomic-no-multiple_threads.diff                         (rev 0)
+++ glibc-package/trunk/debian/patches/hurd-i386/local-atomic-no-multiple_threads.diff  2007-07-21 00:27:17 UTC (rev 2425)
@@ -0,0 +1,249 @@
+On the Hurd we always at least start the sigthread anyway.  Let's avoid forking
+the file (which would mean having to maintain it)
+
+diff -ur glibc-2.6-orig/sysdeps/i386/i486/bits/atomic.h 
glibc-2.6/build-tree/glibc-2.6/sysdeps/i386/i486/bits/atomic.h
+--- glibc-2.6-orig/sysdeps/i386/i486/bits/atomic.h     2007-02-17 
07:38:25.000000000 +0100
++++ glibc-2.6/sysdeps/i386/i486/bits/atomic.h  2007-07-20 03:45:18.000000000 
+0200
+@@ -87,35 +87,26 @@
+ 
+ #define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+   ({ __typeof (*mem) ret;                                                   \
+-     __asm __volatile ("cmpl $0, %%gs:%P5\n\t"                                
\
+-                       "je 0f\n\t"                                            
\
+-                       "lock\n"                                               
\
+-                       "0:\tcmpxchgb %b2, %1"                               \
++     __asm __volatile ("lock\n"                                               
\
++                       "\tcmpxchgb %b2, %1"                                 \
+                      : "=a" (ret), "=m" (*mem)                              \
+-                     : "q" (newval), "m" (*mem), "0" (oldval),              \
+-                       "i" (offsetof (tcbhead_t, multiple_threads)));       \
++                     : "q" (newval), "m" (*mem), "0" (oldval));             \
+      ret; })
+ 
+ #define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+   ({ __typeof (*mem) ret;                                                   \
+-     __asm __volatile ("cmpl $0, %%gs:%P5\n\t"                                
\
+-                       "je 0f\n\t"                                            
\
+-                       "lock\n"                                               
\
+-                       "0:\tcmpxchgw %w2, %1"                               \
++     __asm __volatile ("lock\n"                                               
\
++                       "\tcmpxchgw %w2, %1"                                 \
+                      : "=a" (ret), "=m" (*mem)                              \
+-                     : "r" (newval), "m" (*mem), "0" (oldval),              \
+-                       "i" (offsetof (tcbhead_t, multiple_threads)));       \
++                     : "r" (newval), "m" (*mem), "0" (oldval));             \
+      ret; })
+ 
+ #define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+   ({ __typeof (*mem) ret;                                                   \
+-     __asm __volatile ("cmpl $0, %%gs:%P5\n\t"                                
\
+-                       "je 0f\n\t"                                            
\
+-                       "lock\n"                                               
\
+-                       "0:\tcmpxchgl %2, %1"                                \
++     __asm __volatile ("lock\n"                                               
\
++                       "\tcmpxchgl %2, %1"                                  \
+                      : "=a" (ret), "=m" (*mem)                              \
+-                     : "r" (newval), "m" (*mem), "0" (oldval),              \
+-                       "i" (offsetof (tcbhead_t, multiple_threads)));       \
++                     : "r" (newval), "m" (*mem), "0" (oldval));             \
+      ret; })
+ 
+ /* XXX We do not really need 64-bit compare-and-exchange.  At least
+@@ -148,10 +139,8 @@
+ #  define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+   ({ __typeof (*mem) ret;                                                   \
+      __asm __volatile ("xchgl %2, %%ebx\n\t"                                \
+-                     "cmpl $0, %%gs:%P7\n\t"                                \
+-                     "je 0f\n\t"                                            \
+                      "lock\n"                                               \
+-                     "0:\tcmpxchg8b %1\n\t"                                 \
++                     "\tcmpxchg8b %1\n\t"                                   \
+                      "xchgl %2, %%ebx"                                      \
+                      : "=A" (ret), "=m" (*mem)                              \
+                      : "DS" (((unsigned long long int) (newval))            \
+@@ -159,8 +148,7 @@
+                        "c" (((unsigned long long int) (newval)) >> 32),     \
+                        "m" (*mem), "a" (((unsigned long long int) (oldval)) \
+                                         & 0xffffffff),                      \
+-                       "d" (((unsigned long long int) (oldval)) >> 32),     \
+-                       "i" (offsetof (tcbhead_t, multiple_threads)));       \
++                       "d" (((unsigned long long int) (oldval)) >> 32));    \
+      ret; })
+ # else
+ #  define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+@@ -177,18 +165,15 @@
+ 
+ #  define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+   ({ __typeof (*mem) ret;                                                   \
+-     __asm __volatile ("cmpl $0, %%gs:%P7\n\t"                                
      \
+-                     "je 0f\n\t"                                            \
+-                     "lock\n"                                               \
+-                     "0:\tcmpxchg8b %1"                                     \
++     __asm __volatile ("lock\n"                                               
      \
++                     "\tcmpxchg8b %1"                               \
+                      : "=A" (ret), "=m" (*mem)                              \
+                      : "b" (((unsigned long long int) (newval))             \
+                             & 0xffffffff),                                  \
+                        "c" (((unsigned long long int) (newval)) >> 32),     \
+                        "m" (*mem), "a" (((unsigned long long int) (oldval)) \
+                                         & 0xffffffff),                      \
+-                       "d" (((unsigned long long int) (oldval)) >> 32),     \
+-                       "i" (offsetof (tcbhead_t, multiple_threads)));       \
++                       "d" (((unsigned long long int) (oldval)) >> 32));    \
+      ret; })
+ # endif
+ #endif
+@@ -223,18 +208,15 @@
+      if (sizeof (*mem) == 1)                                                \
+        __asm __volatile (lock "xaddb %b0, %1"                               \
+                        : "=q" (__result), "=m" (*mem)                       \
+-                       : "0" (__addval), "m" (*mem),                        \
+-                         "i" (offsetof (tcbhead_t, multiple_threads)));     \
++                       : "0" (__addval), "m" (*mem));                       \
+      else if (sizeof (*mem) == 2)                                           \
+        __asm __volatile (lock "xaddw %w0, %1"                               \
+                        : "=r" (__result), "=m" (*mem)                       \
+-                       : "0" (__addval), "m" (*mem),                        \
+-                         "i" (offsetof (tcbhead_t, multiple_threads)));     \
++                       : "0" (__addval), "m" (*mem));                       \
+      else if (sizeof (*mem) == 4)                                           \
+        __asm __volatile (lock "xaddl %0, %1"                                \
+                        : "=r" (__result), "=m" (*mem)                       \
+-                       : "0" (__addval), "m" (*mem),                        \
+-                         "i" (offsetof (tcbhead_t, multiple_threads)));     \
++                       : "0" (__addval), "m" (*mem));                       \
+      else                                                                   \
+        {                                                                    \
+        __typeof (mem) __memp = (mem);                                       \
+@@ -256,7 +238,7 @@
+ #endif
+ 
+ #define __arch_exchange_and_add_cprefix \
+-  "cmpl $0, %%gs:%P4\n\tje 0f\n\tlock\n0:\t"
++  "lock\n\t"
+ 
+ #define catomic_exchange_and_add(mem, value) \
+   __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, __arch_c,    
\
+@@ -272,18 +254,15 @@
+     else if (sizeof (*mem) == 1)                                            \
+       __asm __volatile (lock "addb %b1, %0"                                 \
+                       : "=m" (*mem)                                         \
+-                      : "iq" (value), "m" (*mem),                           \
+-                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
++                      : "iq" (value), "m" (*mem));                          \
+     else if (sizeof (*mem) == 2)                                            \
+       __asm __volatile (lock "addw %w1, %0"                                 \
+                       : "=m" (*mem)                                         \
+-                      : "ir" (value), "m" (*mem),                           \
+-                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
++                      : "ir" (value), "m" (*mem));                          \
+     else if (sizeof (*mem) == 4)                                            \
+       __asm __volatile (lock "addl %1, %0"                                  \
+                       : "=m" (*mem)                                         \
+-                      : "ir" (value), "m" (*mem),                           \
+-                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
++                      : "ir" (value), "m" (*mem));                          \
+     else                                                                    \
+       {                                                                       
      \
+       __typeof (value) __addval = (value);                                  \
+@@ -301,7 +280,7 @@
+   __arch_add_body (LOCK_PREFIX, __arch, mem, value)
+ 
+ #define __arch_add_cprefix \
+-  "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"
++  "lock\n\t"
+ 
+ #define catomic_add(mem, value) \
+   __arch_add_body (__arch_add_cprefix, __arch_c, mem, value)
+@@ -350,18 +329,15 @@
+     if (sizeof (*mem) == 1)                                                 \
+       __asm __volatile (lock "incb %b0"                                       
      \
+                       : "=m" (*mem)                                         \
+-                      : "m" (*mem),                                         \
+-                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
++                      : "m" (*mem));                                        \
+     else if (sizeof (*mem) == 2)                                            \
+       __asm __volatile (lock "incw %w0"                                       
      \
+                       : "=m" (*mem)                                         \
+-                      : "m" (*mem),                                         \
+-                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
++                      : "m" (*mem));                                        \
+     else if (sizeof (*mem) == 4)                                            \
+       __asm __volatile (lock "incl %0"                                        
      \
+                       : "=m" (*mem)                                         \
+-                      : "m" (*mem),                                         \
+-                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
++                      : "m" (*mem));                                        \
+     else                                                                    \
+       {                                                                       
      \
+       __typeof (mem) __memp = (mem);                                        \
+@@ -377,7 +353,7 @@
+ #define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, __arch, mem)
+ 
+ #define __arch_increment_cprefix \
+-  "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"
++  "lock\n\t"
+ 
+ #define catomic_increment(mem) \
+   __arch_increment_body (__arch_increment_cprefix, __arch_c, mem)
+@@ -407,18 +383,15 @@
+     if (sizeof (*mem) == 1)                                                 \
+       __asm __volatile (lock "decb %b0"                                       
      \
+                       : "=m" (*mem)                                         \
+-                      : "m" (*mem),                                         \
+-                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
++                      : "m" (*mem));                                        \
+     else if (sizeof (*mem) == 2)                                            \
+       __asm __volatile (lock "decw %w0"                                       
      \
+                       : "=m" (*mem)                                         \
+-                      : "m" (*mem),                                         \
+-                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
++                      : "m" (*mem));                                        \
+     else if (sizeof (*mem) == 4)                                            \
+       __asm __volatile (lock "decl %0"                                        
      \
+                       : "=m" (*mem)                                         \
+-                      : "m" (*mem),                                         \
+-                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
++                      : "m" (*mem));                                        \
+     else                                                                    \
+       {                                                                       
      \
+       __typeof (mem) __memp = (mem);                                        \
+@@ -434,7 +407,7 @@
+ #define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, __arch, mem)
+ 
+ #define __arch_decrement_cprefix \
+-  "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"
++  "lock\n\t"
+ 
+ #define catomic_decrement(mem) \
+   __arch_decrement_body (__arch_decrement_cprefix, __arch_c, mem)
+@@ -524,18 +497,15 @@
+     if (sizeof (*mem) == 1)                                                 \
+       __asm __volatile (lock "orb %b1, %0"                                  \
+                       : "=m" (*mem)                                         \
+-                      : "iq" (mask), "m" (*mem),                            \
+-                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
++                      : "iq" (mask), "m" (*mem));                           \
+     else if (sizeof (*mem) == 2)                                            \
+       __asm __volatile (lock "orw %w1, %0"                                  \
+                       : "=m" (*mem)                                         \
+-                      : "ir" (mask), "m" (*mem),                            \
+-                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
++                      : "ir" (mask), "m" (*mem));                           \
+     else if (sizeof (*mem) == 4)                                            \
+       __asm __volatile (lock "orl %1, %0"                                   \
+                       : "=m" (*mem)                                         \
+-                      : "ir" (mask), "m" (*mem),                            \
+-                        "i" (offsetof (tcbhead_t, multiple_threads)));      \
++                      : "ir" (mask), "m" (*mem));                           \
+     else                                                                    \
+       abort ();                                                               
      \
+   } while (0)
+@@ -543,6 +513,6 @@
+ #define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)
+ 
+ #define __arch_or_cprefix \
+-  "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"
++  "lock\n\t"
+ 
+ #define catomic_or(mem, mask) __arch_or_body (__arch_or_cprefix, mem, mask)

Added: glibc-package/trunk/debian/patches/hurd-i386/local-gscope.diff
===================================================================
--- glibc-package/trunk/debian/patches/hurd-i386/local-gscope.diff                              (rev 0)
+++ glibc-package/trunk/debian/patches/hurd-i386/local-gscope.diff      2007-07-21 00:27:17 UTC (rev 2425)
@@ -0,0 +1,466 @@
+mrlocks have gone from CVS head, so implementing them would be a loss of time,
+when backporting the GSCOPE_SET/RESET/WAIT can be actually done easily.
+
+diff -ur glibc-2.6-orig/elf/dl-close.c 
glibc-2.6/build-tree/glibc-2.6/elf/dl-close.c
+--- glibc-2.6-orig/elf/dl-close.c      2007-05-11 20:46:34.000000000 +0200
++++ glibc-2.6/elf/dl-close.c   2007-07-20 03:58:27.000000000 +0200
+@@ -32,6 +32,8 @@
+ #include <sys/mman.h>
+ #include <sysdep-cancel.h>
+ 
++#include "gscope.h"
++
+ 
+ /* Type of the constructor functions.  */
+ typedef void (*fini_t) (void);
+@@ -228,6 +230,7 @@
+   bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
+ #endif
+   bool unload_any = false;
++  bool scope_mem_left = false;
+   unsigned int first_loaded = ~0;
+   for (unsigned int i = 0; i < nloaded; ++i)
+     {
+@@ -400,18 +403,18 @@
+ 
+             struct r_scope_elem **old = imap->l_scope;
+ 
+-            if (RTLD_SINGLE_THREAD_P)
+-              imap->l_scope = newp;
+-            else
+-              {
+-                __rtld_mrlock_change (imap->l_scope_lock);
+-                imap->l_scope = newp;
+-                __rtld_mrlock_done (imap->l_scope_lock);
+-              }
++            imap->l_scope = newp;
+ 
+             /* No user anymore, we can free it now.  */
+             if (old != imap->l_scope_mem)
+-              free (old);
++              {
++                if (_dl_scope_free (old))
++                  /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
++                     no need to repeat it. */
++                  scope_mem_left = false;
++              }
++            else
++              scope_mem_left = true;
+ 
+             imap->l_scope_max = new_size;
+           }
+@@ -457,6 +460,20 @@
+   r->r_state = RT_DELETE;
+   _dl_debug_state ();
+ 
++  if (!RTLD_SINGLE_THREAD_P
++      && (scope_mem_left
++       || (GL(dl_scope_free_list) != NULL
++           && GL(dl_scope_free_list)->count)))
++    {
++      THREAD_GSCOPE_WAIT ();
++
++      /* Now we can free any queued old scopes.  */
++      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
++      if (fsl != NULL)
++        while (fsl->count > 0)
++        free (fsl->list[--fsl->count]);
++    }
++
+   size_t tls_free_start;
+   size_t tls_free_end;
+   tls_free_start = tls_free_end = NO_TLS_OFFSET;
+@@ -769,4 +786,8 @@
+          malloc), and in the static library it's in .bss space.  */
+       free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
+     }
++
++  void *scope_free_list = GL(dl_scope_free_list);
++  GL(dl_scope_free_list) = NULL;
++  free(scope_free_list);
+ }
+diff -ur glibc-2.6-orig/elf/dl-lookup.c 
glibc-2.6/build-tree/glibc-2.6/elf/dl-lookup.c
+--- glibc-2.6-orig/elf/dl-lookup.c     2007-01-15 21:45:53.000000000 +0100
++++ glibc-2.6/elf/dl-lookup.c  2007-07-20 03:07:43.000000000 +0200
+@@ -86,7 +86,7 @@
+ /* Add extra dependency on MAP to UNDEF_MAP.  */
+ static int
+ internal_function
+-add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
++add_dependency (struct link_map *undef_map, struct link_map *map)
+ {
+   struct link_map **list;
+   struct link_map *runp;
+@@ -99,18 +99,8 @@
+   if (undef_map == map)
+     return 0;
+ 
+-  /* Make sure nobody can unload the object while we are at it.
+-     If we hold a scope lock drop it now to avoid ABBA locking problems.  */
+-  if ((flags & DL_LOOKUP_SCOPE_LOCK) != 0 && !RTLD_SINGLE_THREAD_P)
+-    {
+-      __rtld_mrlock_unlock (undef_map->l_scope_lock);
+-
+-      __rtld_lock_lock_recursive (GL(dl_load_lock));
+-
+-      __rtld_mrlock_lock (undef_map->l_scope_lock);
+-    }
+-  else
+-    __rtld_lock_lock_recursive (GL(dl_load_lock));
++  /* Make sure nobody can unload the object while we are at it. */
++  __rtld_lock_lock_recursive (GL(dl_load_lock));
+ 
+   /* Avoid references to objects which cannot be unloaded anyway.  */
+   if (map->l_type != lt_loaded
+@@ -237,10 +227,9 @@
+ 
+   bump_num_relocations ();
+ 
+-  /* No other flag than DL_LOOKUP_ADD_DEPENDENCY and DL_LOOKUP_SCOPE_LOCK
+-     is allowed if we look up a versioned symbol.  */
+-  assert (version == NULL || (flags & ~(DL_LOOKUP_ADD_DEPENDENCY
+-                                      | DL_LOOKUP_SCOPE_LOCK)) == 0);
++  /* No other flag than DL_LOOKUP_ADD_DEPENDENCY is allowed if we look
++     up a versioned symbol.  */
++  assert (version == NULL || (flags & ~(DL_LOOKUP_ADD_DEPENDENCY)) == 0);
+ 
+   size_t i = 0;
+   if (__builtin_expect (skip_map != NULL, 0))
+@@ -350,13 +339,11 @@
+        runtime lookups.  */
+       && (flags & DL_LOOKUP_ADD_DEPENDENCY) != 0
+       /* Add UNDEF_MAP to the dependencies.  */
+-      && add_dependency (undef_map, current_value.m, flags) < 0)
++      && add_dependency (undef_map, current_value.m) < 0)
+       /* Something went wrong.  Perhaps the object we tried to reference
+        was just removed.  Try finding another definition.  */
+-      return _dl_lookup_symbol_x (undef_name, undef_map, ref,
+-                                (flags & DL_LOOKUP_SCOPE_LOCK) == 0
+-                                ? symbol_scope : undef_map->l_scope, version,
+-                                type_class, flags, skip_map);
++      return _dl_lookup_symbol_x (undef_name, undef_map, ref, symbol_scope,
++                                version, type_class, flags, skip_map);
+ 
+   /* The object is used.  */
+   current_value.m->l_used = 1;
+diff -ur glibc-2.6-orig/elf/dl-object.c 
glibc-2.6/build-tree/glibc-2.6/elf/dl-object.c
+--- glibc-2.6-orig/elf/dl-object.c     2006-10-28 01:11:41.000000000 +0200
++++ glibc-2.6/elf/dl-object.c  2007-07-20 03:07:17.000000000 +0200
+@@ -25,6 +25,8 @@
+ 
+ #include <assert.h>
+ 
++#include "gscope.h"
++
+ 
+ /* Allocate a `struct link_map' for a new object being loaded,
+    and enter it into the _dl_loaded list.  */
+@@ -85,11 +87,6 @@
+   new->l_scope = new->l_scope_mem;
+   new->l_scope_max = sizeof (new->l_scope_mem) / sizeof (new->l_scope_mem[0]);
+ 
+-  /* No need to initialize the scope lock if the initializer is zero.  */
+-#if _RTLD_MRLOCK_INITIALIZER != 0
+-  __rtld_mrlock_initialize (new->l_scope_lock);
+-#endif
+-
+   /* Counter for the scopes we have to handle.  */
+   idx = 0;
+ 
+diff -ur glibc-2.6-orig/elf/dl-open.c 
glibc-2.6/build-tree/glibc-2.6/elf/dl-open.c
+--- glibc-2.6-orig/elf/dl-open.c       2007-05-11 23:34:32.000000000 +0200
++++ glibc-2.6/elf/dl-open.c    2007-07-20 04:22:28.000000000 +0200
+@@ -32,9 +32,11 @@
+ #include <bp-sym.h>
+ #include <caller.h>
+ #include <sysdep-cancel.h>
++#include <atomic.h>
+ 
+ #include <dl-dst.h>
+ 
++#include "gscope.h"
+ 
+ extern ElfW(Addr) _dl_sysdep_start (void **start_argptr,
+                                   void (*dl_main) (const ElfW(Phdr) *phdr,
+@@ -154,6 +156,40 @@
+   return 0;
+ }
+ 
++int
++_dl_scope_free (struct r_scope_elem **old)
++{
++  struct dl_scope_free_list *fsl;
++#define DL_SCOPE_FREE_LIST_SIZE (sizeof (fsl->list) / sizeof (fsl->list[0]))
++
++  if (RTLD_SINGLE_THREAD_P)
++    free (old);
++  else if ((fsl = GL(dl_scope_free_list)) == NULL)
++    {
++      GL(dl_scope_free_list) = fsl = malloc (sizeof (*fsl));
++      if (fsl == NULL)
++      {
++        THREAD_GSCOPE_WAIT ();
++        free (old);
++        return 1;
++      }
++      else
++      {
++        fsl->list[0] = old;
++        fsl->count = 1;
++      }
++    }
++  else if (fsl->count < DL_SCOPE_FREE_LIST_SIZE)
++    fsl->list[fsl->count++] = old;
++  else
++    {
++      THREAD_GSCOPE_WAIT ();
++      while (fsl->count > 0)
++      free (fsl->list[--fsl->count]);
++      return 1;
++    }
++  return 0;
++}
+ 
+ static void
+ dl_open_worker (void *a)
+@@ -418,17 +454,10 @@
+             memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
+             struct r_scope_elem **old = imap->l_scope;
+ 
+-            if (RTLD_SINGLE_THREAD_P)
+-              imap->l_scope = newp;
+-            else
+-              {
+-                __rtld_mrlock_change (imap->l_scope_lock);
+-                imap->l_scope = newp;
+-                __rtld_mrlock_done (imap->l_scope_lock);
+-              }
++            imap->l_scope = newp;
+ 
+             if (old != imap->l_scope_mem)
+-              free (old);
++              _dl_scope_free (old);
+ 
+             imap->l_scope_max = new_size;
+           }
+diff -ur glibc-2.6-orig/elf/dl-runtime.c 
glibc-2.6/build-tree/glibc-2.6/elf/dl-runtime.c
+--- glibc-2.6-orig/elf/dl-runtime.c    2007-01-15 21:46:54.000000000 +0100
++++ glibc-2.6/elf/dl-runtime.c 2007-07-20 03:07:14.000000000 +0200
+@@ -27,6 +27,8 @@
+ #include <sysdep-cancel.h>
+ #include "dynamic-link.h"
+ 
++#include "gscope.h"
++
+ #if (!defined ELF_MACHINE_NO_RELA && !defined ELF_MACHINE_PLT_REL) \
+     || ELF_MACHINE_NO_REL
+ # define PLTREL  ElfW(Rela)
+@@ -97,17 +99,15 @@
+        not necessary for objects which cannot be unloaded or when
+        we are not using any threads (yet).  */
+       int flags = DL_LOOKUP_ADD_DEPENDENCY;
+-      if (l->l_type == lt_loaded && !RTLD_SINGLE_THREAD_P)
+-      {
+-        __rtld_mrlock_lock (l->l_scope_lock);
+-        flags |= DL_LOOKUP_SCOPE_LOCK;
+-      }
++      if (!RTLD_SINGLE_THREAD_P)
++      THREAD_GSCOPE_SET_FLAG ();
+ 
+       result = _dl_lookup_symbol_x (strtab + sym->st_name, l, &sym, 
l->l_scope,
+                                   version, ELF_RTYPE_CLASS_PLT, flags, NULL);
+ 
+-      if ((flags & DL_LOOKUP_SCOPE_LOCK) != 0)
+-      __rtld_mrlock_unlock (l->l_scope_lock);
++      /* We are done with the global scope.  */
++      if (!RTLD_SINGLE_THREAD_P)
++      THREAD_GSCOPE_RESET_FLAG ();
+ 
+       /* Currently result contains the base load address (or link map)
+        of the object that defines sym.  Now add in the symbol
+@@ -191,18 +191,16 @@
+            not necessary for objects which cannot be unloaded or when
+            we are not using any threads (yet).  */
+         int flags = DL_LOOKUP_ADD_DEPENDENCY;
+-        if (l->l_type == lt_loaded && !RTLD_SINGLE_THREAD_P)
+-          {
+-            __rtld_mrlock_lock (l->l_scope_lock);
+-            flags |= DL_LOOKUP_SCOPE_LOCK;
+-          }
++        if (!RTLD_SINGLE_THREAD_P)
++          THREAD_GSCOPE_SET_FLAG ();
+ 
+         result = _dl_lookup_symbol_x (strtab + refsym->st_name, l,
+                                       &defsym, l->l_scope, version,
+                                       ELF_RTYPE_CLASS_PLT, flags, NULL);
+ 
+-        if ((flags & DL_LOOKUP_SCOPE_LOCK) != 0)
+-          __rtld_mrlock_unlock (l->l_scope_lock);
++        /* We are done with the global scope.  */
++        if (!RTLD_SINGLE_THREAD_P)
++          THREAD_GSCOPE_RESET_FLAG ();
+ 
+         /* Currently result contains the base load address (or link map)
+            of the object that defines sym.  Now add in the symbol
+diff -ur glibc-2.6-orig/elf/dl-support.c 
glibc-2.6/build-tree/glibc-2.6/elf/dl-support.c
+--- glibc-2.6-orig/elf/dl-support.c    2007-07-19 23:45:07.000000000 +0200
++++ glibc-2.6/elf/dl-support.c 2007-07-20 04:33:59.000000000 +0200
+@@ -33,6 +33,8 @@
+ #include <unsecvars.h>
+ #include <hp-timing.h>
+ 
++#include "gscope.h"
++
+ extern char *__progname;
+ char **_dl_argv = &__progname;        /* This is checked for some error 
messages.  */
+ 
+@@ -137,6 +139,8 @@
+ int (*_dl_make_stack_executable_hook) (void **) internal_function
+   = _dl_make_stack_executable;
+ 
++int volatile _dl_thread_gscope_count;
++struct dl_scope_free_list *_dl_scope_free_list;
+ 
+ #ifdef NEED_DL_SYSINFO
+ /* Needed for improved syscall handling on at least x86/Linux.  */
+diff -ur glibc-2.6-orig/elf/dl-sym.c 
glibc-2.6/build-tree/glibc-2.6/elf/dl-sym.c
+--- glibc-2.6-orig/elf/dl-sym.c        2007-01-15 21:47:44.000000000 +0100
++++ glibc-2.6/elf/dl-sym.c     2007-07-20 03:07:15.000000000 +0200
+@@ -28,6 +28,8 @@
+ #include <sysdep-cancel.h>
+ #include <dl-tls.h>
+ 
++#include "gscope.h"
++
+ 
+ #ifdef SHARED
+ /* Systems which do not have tls_index also probably have to define
+@@ -113,29 +115,29 @@
+        the initial binary.  And then the more complex part
+        where the object is dynamically loaded and the scope
+        array can change.  */
+-      if (match->l_type != lt_loaded || RTLD_SINGLE_THREAD_P)
++      if (RTLD_SINGLE_THREAD_P)
+       result = GLRO(dl_lookup_symbol_x) (name, match, &ref,
+                                          match->l_scope, vers, 0,
+                                          flags | DL_LOOKUP_ADD_DEPENDENCY,
+                                          NULL);
+       else
+       {
+-        __rtld_mrlock_lock (match->l_scope_lock);
+-
+         struct call_dl_lookup_args args;
+         args.name = name;
+         args.map = match;
+         args.vers = vers;
+-        args.flags = flags | DL_LOOKUP_ADD_DEPENDENCY | DL_LOOKUP_SCOPE_LOCK;
++        args.flags = flags | DL_LOOKUP_ADD_DEPENDENCY;
+         args.refp = &ref;
+ 
++        THREAD_GSCOPE_SET_FLAG ();
++
+         const char *objname;
+         const char *errstring = NULL;
+         bool malloced;
+         int err = GLRO(dl_catch_error) (&objname, &errstring, &malloced,
+                                         call_dl_lookup, &args);
+ 
+-        __rtld_mrlock_unlock (match->l_scope_lock);
++        THREAD_GSCOPE_RESET_FLAG ();
+ 
+         if (__builtin_expect (errstring != NULL, 0))
+           {
+diff -ur glibc-2.6-orig/sysdeps/generic/ldsodefs.h 
glibc-2.6/build-tree/glibc-2.6/sysdeps/generic/ldsodefs.h
+--- glibc-2.6-orig/sysdeps/generic/ldsodefs.h  2007-07-19 23:45:14.000000000 
+0200
++++ glibc-2.6/sysdeps/generic/ldsodefs.h       2007-07-20 04:41:36.000000000 
+0200
+@@ -38,7 +38,6 @@
+ #include <bits/libc-lock.h>
+ #include <hp-timing.h>
+ #include <tls.h>
+-#include <rtld-lowlevel.h>
+ 
+ __BEGIN_DECLS
+ 
+@@ -486,6 +485,12 @@
+ 
+   EXTERN void (*_dl_init_static_tls) (struct link_map *);
+ 
++  EXTERN struct dl_scope_free_list
++  {
++    size_t count;
++    struct r_scope_elem **list[50];
++  } *_dl_scope_free_list;
++  EXTERN volatile int _dl_thread_gscope_count;
+ #ifdef SHARED
+ };
+ # define __rtld_global_attribute__
+@@ -838,9 +843,7 @@
+     DL_LOOKUP_ADD_DEPENDENCY = 1,
+     /* Return most recent version instead of default version for
+        unversioned lookup.  */
+-    DL_LOOKUP_RETURN_NEWEST = 2,
+-    /* Set if the scopr lock in the UNDEF_MAP is taken.  */
+-    DL_LOOKUP_SCOPE_LOCK = 4
++    DL_LOOKUP_RETURN_NEWEST = 2
+   };
+ 
+ /* Lookup versioned symbol.  */
+@@ -1048,6 +1051,11 @@
+                      Lmid_t nsid, int argc, char *argv[], char *env[])
+      attribute_hidden;
+ 
++/* Free or queue for freeing scope OLD.  If other threads might be 
++   in the middle of _dl_fixup, _dl_profile_fixup or dl*sym using the 
++   old scope, OLD can't be freed until no thread is using it.  */ 
++extern int _dl_scope_free (struct r_scope_elem **old) attribute_hidden; 
++
+ /* Add module to slot information data.  */
+ extern void _dl_add_to_slotinfo (struct link_map  *l) attribute_hidden;
+ 
+--- glibc-2.6-orig/include/link.h      2007-05-11 06:38:05.000000000 +0000
++++ glibc-2.6/include/link.h   2007-07-20 17:14:35.000000000 +0000
+@@ -44,7 +44,6 @@
+ #include <dl-lookupcfg.h>
+ #include <tls.h>
+ #include <bits/libc-lock.h>
+-#include <rtld-lowlevel.h>
+ 
+ 
+ /* Some internal data structures of the dynamic linker used in the
+@@ -220,8 +219,6 @@
+     /* This is an array defining the lookup scope for this link map.
+        There are initially at most three different scope lists.  */
+     struct r_scope_elem **l_scope;
+-    /* We need to protect using the SCOPEREC.  */
+-    __rtld_mrlock_define (, l_scope_lock)
+ 
+     /* A similar array, this time only with the local scope.  This is
+        used occasionally.  */
+--- /dev/null  2005-07-17 23:39:37.000000000 +0000
++++ glibc-2.6/elf/gscope.h     2007-07-19 23:00:57.000000000 +0000
+@@ -0,0 +1,14 @@
++#ifndef _GSCOPE_H
++#define _GSCOPE_H 1
++#include <mach/mach_traps.h>
++/* Temporary poor-man's global scope switch support: just busy-waits */
++#define THREAD_GSCOPE_SET_FLAG() \
++      asm volatile ("lock incl %0":"=m"(GL(dl_thread_gscope_count)))
++#define THREAD_GSCOPE_RESET_FLAG() \
++      asm volatile ("lock decl %0":"=m"(GL(dl_thread_gscope_count)))
++#define THREAD_GSCOPE_WAIT() \
++  while (GL(dl_thread_gscope_count)) { \
++    __swtch_pri (0); \
++  }
++#endif
+--- /dev/null  2007-07-20 20:36:08.980000000 +0200
++++ glibc-2.6/sysdeps/mach/hurd/sysdep-cancel.h        2007-07-20 
01:24:22.000000000 +0200
+@@ -0,0 +1,9 @@
++#include <sysdep.h>
++
++/* Always multi-thread (since there's at least the sig handler), but no
++   handling enabled.  */
++#define SINGLE_THREAD_P (0)
++#define RTLD_SINGLE_THREAD_P (0)
++#define LIBC_CANCEL_ASYNC()   0 /* Just a dummy value.  */
++#define LIBC_CANCEL_RESET(val)        ((void)(val)) /* Nothing, but evaluate 
it.  */
++#define LIBC_CANCEL_HANDLED() /* Nothing.  */

Added: glibc-package/trunk/debian/patches/hurd-i386/local-no-strerror_l.diff
===================================================================
--- glibc-package/trunk/debian/patches/hurd-i386/local-no-strerror_l.diff       
                        (rev 0)
+++ glibc-package/trunk/debian/patches/hurd-i386/local-no-strerror_l.diff       
2007-07-21 00:27:17 UTC (rev 2425)
@@ -0,0 +1,32 @@
+Needs porting. Since this is new with 2.6, just disable it for now; would
+someone please take the time to port it.
+
+diff -ur glibc-2.6-orig/string/strerror_l.c 
glibc-2.6/build-tree/glibc-2.6/string/strerror_l.c
+--- glibc-2.6-orig/string/strerror_l.c 2007-03-17 18:09:07.000000000 +0100
++++ glibc-2.6/string/strerror_l.c      2007-07-20 05:05:40.000000000 +0200
+@@ -23,6 +23,8 @@
+ #include <string.h>
+ #include <sys/param.h>
+ 
++#if 0
++Needs porting
+ 
+ static __thread char *last_value;
+ 
+@@ -70,3 +72,4 @@
+ text_set_element (__libc_subfreeres, strerror_thread_freeres);
+ # endif
+ #endif
++#endif
+--- glibc-2.6-orig/string/string.h     2007-02-01 16:08:52.000000000 +0000
++++ glibc-2.6/string/string.h  2007-07-20 23:50:59.000000000 +0000
+@@ -285,7 +285,8 @@
+ 
+ #ifdef __USE_GNU
+ /* Translate error number to string according to the locale L.  */
+-extern char *strerror_l (int __errnum, __locale_t __l) __THROW;
++/* Not yet ported to the Hurd.  */
++/* extern char *strerror_l (int __errnum, __locale_t __l) __THROW; */
+ #endif
+ 
+ 

Added: glibc-package/trunk/debian/patches/hurd-i386/submitted-lock-intern.diff
===================================================================
--- glibc-package/trunk/debian/patches/hurd-i386/submitted-lock-intern.diff     
                        (rev 0)
+++ glibc-package/trunk/debian/patches/hurd-i386/submitted-lock-intern.diff     
2007-07-21 00:27:17 UTC (rev 2425)
@@ -0,0 +1,11 @@
+diff -ur glibc-2.6-orig/mach/lock-intern.h 
glibc-2.6/build-tree/glibc-2.6/mach/lock-intern.h
+--- glibc-2.6-orig/mach/lock-intern.h  2007-06-10 21:38:14.000000000 +0000
++++ glibc-2.6/mach/lock-intern.h       2007-06-10 21:40:47.000000000 +0000
+@@ -19,6 +19,7 @@
+ #ifndef _LOCK_INTERN_H
+ #define       _LOCK_INTERN_H
+ 
++#include <sys/cdefs.h>
+ #include <machine-lock.h>
+ 
+ #ifndef _EXTERN_INLINE

Modified: glibc-package/trunk/debian/patches/series
===================================================================
--- glibc-package/trunk/debian/patches/series   2007-07-20 23:40:55 UTC (rev 
2424)
+++ glibc-package/trunk/debian/patches/series   2007-07-21 00:27:17 UTC (rev 
2425)
@@ -68,6 +68,7 @@
 hurd-i386/submitted-sysvshm.diff 
 hurd-i386/submitted-trivial.diff -p0
 hurd-i386/submitted-ioctl-unsigned-size_t.diff -p0
+hurd-i386/submitted-lock-intern.diff
 
 i386/local-biarch.diff 
 i386/local-cmov.diff -p0

Added: glibc-package/trunk/debian/patches/series.hurd-i386
===================================================================
--- glibc-package/trunk/debian/patches/series.hurd-i386                         
(rev 0)
+++ glibc-package/trunk/debian/patches/series.hurd-i386 2007-07-21 00:27:17 UTC 
(rev 2425)
@@ -0,0 +1,3 @@
+hurd-i386/local-gscope.diff
+hurd-i386/local-atomic-no-multiple_threads.diff
+hurd-i386/local-no-strerror_l.diff

Modified: glibc-package/trunk/debian/sysdeps/depflags.pl
===================================================================
--- glibc-package/trunk/debian/sysdeps/depflags.pl      2007-07-20 23:40:55 UTC 
(rev 2424)
+++ glibc-package/trunk/debian/sysdeps/depflags.pl      2007-07-21 00:27:17 UTC 
(rev 2425)
@@ -24,7 +24,7 @@
     push @{$libc_dev_c{'Conflicts'}}, 'glibc2-dev';
     push @{$libc_c{'Replaces'}}, 'glibc2';
     push @{$libc_c{'Conflicts'}}, 'glibc2';
-    push @{$libc_c{'Depends'}}, 'hurd (>= 20010718-1)';
+    push @{$libc_c{'Depends'}}, 'hurd (>= 20070606-1+SVN)';
 }
 if ($DEB_HOST_ARCH_OS eq "linux") {
     push @{$libc_c{'Suggests'}}, 'locales';


-- 
To UNSUBSCRIBE, email to [EMAIL PROTECTED]
with a subject of "unsubscribe". Trouble? Contact [EMAIL PROTECTED]

Reply via email to