Instead of maintaining a pointer into the `sos_memory' array, maintain an
index indicating the next free position. When atomic operations are
available, the allocation then boils down to a single fetch-and-add
operation.
---
include/libunwind_i.h | 6 ++++--
src/mi/flush_cache.c | 2 +-
src/mi/mempool.c | 35 ++++++++++-------------------------
3 files changed, 15 insertions(+), 28 deletions(-)
diff --git a/include/libunwind_i.h b/include/libunwind_i.h
index 91439b1..493cb12 100644
--- a/include/libunwind_i.h
+++ b/include/libunwind_i.h
@@ -141,11 +141,12 @@ cmpxchg_ptr (void *addr, void *old, void *new)
return AO_compare_and_swap(u.aop, (AO_t) old, (AO_t) new);
}
# define fetch_and_add1(_ptr) AO_fetch_and_add1(_ptr)
+# define fetch_and_add(_ptr, value) AO_fetch_and_add(_ptr, value)
/* GCC 3.2.0 on HP-UX crashes on cmpxchg_ptr() */
# if !(defined(__hpux) && __GNUC__ == 3 && __GNUC_MINOR__ == 2)
# define HAVE_CMPXCHG
# endif
-# define HAVE_FETCH_AND_ADD1
+# define HAVE_FETCH_AND_ADD
#else
# ifdef HAVE_IA64INTRIN_H
# include <ia64intrin.h>
@@ -163,8 +164,9 @@ cmpxchg_ptr (void *addr, void *old, void *new)
return __sync_bool_compare_and_swap(u.vlp, (long) old, (long) new);
}
# define fetch_and_add1(_ptr) __sync_fetch_and_add(_ptr, 1)
+# define fetch_and_add(_ptr, value) __sync_fetch_and_add(_ptr, value)
# define HAVE_CMPXCHG
-# define HAVE_FETCH_AND_ADD1
+# define HAVE_FETCH_AND_ADD
# endif
#endif
#define atomic_read(ptr) (*(ptr))
diff --git a/src/mi/flush_cache.c b/src/mi/flush_cache.c
index c5650ba..2e88fa8 100644
--- a/src/mi/flush_cache.c
+++ b/src/mi/flush_cache.c
@@ -50,7 +50,7 @@ unw_flush_cache (unw_addr_space_t as, unw_word_t lo,
unw_word_t hi)
unw_flush_cache() is allowed to flush more than the requested
range. */
-#ifdef HAVE_FETCH_AND_ADD1
+#ifdef HAVE_FETCH_AND_ADD
fetch_and_add1 (&as->cache_generation);
#else
# warning unw_flush_cache(): need a way to atomically increment an integer.
diff --git a/src/mi/mempool.c b/src/mi/mempool.c
index f00e29b..adef6c5 100644
--- a/src/mi/mempool.c
+++ b/src/mi/mempool.c
@@ -40,48 +40,33 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. */
#endif
static char sos_memory[SOS_MEMORY_SIZE] __attribute__((aligned(MAX_ALIGN)));
-static char *sos_memp;
+static size_t sos_memory_freepos;
static size_t pg_size;
HIDDEN void *
sos_alloc (size_t size)
{
- char *mem;
-
-#ifdef HAVE_CMPXCHG
- char *old_mem;
+ size_t pos;
size = UNW_ALIGN(size, MAX_ALIGN);
- if (!sos_memp)
- cmpxchg_ptr (&sos_memp, 0, sos_memory);
- do
- {
- old_mem = sos_memp;
- mem = (char *) UNW_ALIGN((unsigned long) old_mem, MAX_ALIGN);
- mem += size;
- assert (mem < sos_memory + sizeof (sos_memory));
- }
- while (!cmpxchg_ptr (&sos_memp, old_mem, mem));
+#ifdef HAVE_FETCH_AND_ADD
+ pos = fetch_and_add (&sos_memory_freepos, size);
#else
static define_lock (sos_lock);
intrmask_t saved_mask;
- size = UNW_ALIGN(size, MAX_ALIGN);
-
lock_acquire (&sos_lock, saved_mask);
{
- if (!sos_memp)
- sos_memp = sos_memory;
-
- mem = (char *) UNW_ALIGN((unsigned long) sos_memp, MAX_ALIGN);
- mem += size;
- assert (mem < sos_memory + sizeof (sos_memory));
- sos_memp = mem;
+ pos = sos_memory_freepos;
+ sos_memory_freepos += size;
}
lock_release (&sos_lock, saved_mask);
#endif
- return mem;
+
+ assert ((pos+size) <= SOS_MEMORY_SIZE);
+
+ return &sos_memory[pos];
}
/* Must be called while holding the mempool lock. */
--
1.7.9.5
_______________________________________________
Libunwind-devel mailing list
[email protected]
https://lists.nongnu.org/mailman/listinfo/libunwind-devel