raster pushed a commit to branch master.

http://git.enlightenment.org/core/efl.git/commit/?id=7787b16e20bef12967b8a6bd74d6d8e6516a8066

commit 7787b16e20bef12967b8a6bd74d6d8e6516a8066
Author: Carsten Haitzler (Rasterman) <ras...@rasterman.com>
Date:   Tue Dec 3 09:18:35 2019 +0000

    eina - threadqueue - revert series of commits that moved to mempools
    
    Revert "eina: remove no longer used function 
_eina_thread_queue_msg_block_real_free"
    This reverts commit 695b44526c968787374fd421327422a6eea710a7.
    
    Revert "eina/threadqueue: use mempool_del for hash free function"
    This reverts commit b0cb3b935a8faf2d67bae38a54683946cb01d0b9.
    
    Revert "eina_thread_queue: use normal mempools for block allocation"
    This reverts commit 14ae3e3dec7866e74f2990dca417eac44da41058.
    
    Why? Threadqueue is a highly performance-sensitive API.
    _eina_thread_queue_msg_block_new() may be called quite often. Doing a
    hash lookup just to find a mempool handle to then allocate from is
    not what was there before and is far more costly: we have to compute
    a hash, rummage through the hash table, and hunt for an environment
    variable too. That has a real performance impact. The original code
    kept a pool of spare blocks that *MAY* be of different sizes (they
    are not always the same size, so using a mempool is actually wrong
    and stops threadqueue from being able to send larger messages at
    all). If you send large messages, larger blocks are allocated and put
    in this pool. In almost all cases the first item in the pool is big
    enough, so there is no hunting: the code pulls out the first block,
    resets the fields that need it and returns that block. Only if a
    bigger block is needed does it hunt. Needing such big blocks is rare,
    so I never tried to optimize that path (but it could be done with an
    array of sizes to make the walk to find the right-sized element
    cheap, if the need arises).
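    
    As a rough standalone sketch of that fast path (plain C, illustrative
    only, not the actual EFL code; the real version lives in
    _eina_thread_queue_msg_block_new() in the diff below):
    
        #include <stdlib.h>
        #include <pthread.h>
        
        typedef struct _Block Block;
        struct _Block
        {
           Block *next;
           int    size;
           /* message payload follows the header */
        };
        
        static Block *pool = NULL; /* spare blocks, recycled on free */
        static int pool_count = 0;
        static pthread_spinlock_t pool_lock; /* pthread_spin_init() at setup */
        
        static Block *
        block_get(int size)
        {
           Block *blk;
        
           /* Fast path: the first spare block is almost always big enough. */
           pthread_spin_lock(&pool_lock);
           blk = pool;
           if ((blk) && (blk->size >= size))
             {
                pool = blk->next; /* unlink: no hash, no mempool lookup */
                pool_count--;
                pthread_spin_unlock(&pool_lock);
                blk->next = NULL;
                return blk;
             }
           pthread_spin_unlock(&pool_lock);
           /* Slow path: allocate a fresh block of the requested size. */
           blk = malloc(sizeof(Block) + size);
           if (!blk) return NULL;
           blk->next = NULL;
           blk->size = size;
           return blk;
        }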
    
    Performance dropped quite a lot. On aarch64 the above mempool usage
    dropped the message rate from 1037251 msg/sec to 610316. On x86 it
    was even worse: it dropped from 2815775 msg/sec to 378653.
    
    So backing this out makes the message rate 7.4 times faster on x86
    and 1.7 times faster on aarch64.
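    
    (Those factors fall out of the numbers above: 2815775 / 378653 is
    about 7.4 on x86, and 1037251 / 610316 is about 1.7 on aarch64.)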
    
    So moving to a mempool was simply wrong (the size is not always the
    same). It also left a 64k mempool sitting around for thread queue
    blocks even if we only sent messages sporadically, as opposed to a
    single 4k block. Backing this out saves memory too: most of the time
    only 1 or 2 4k blocks are kept around, not a 64k mempool.
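    
    (The 64k figure is presumably where the reverted code below leads:
    the mempool was created with 16 items per pool (see the
    eina_mempool_add() call with a count of 16), and a minimum-size block
    is roughly 4k, so one pool is about 16 x 4k = 64k.)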
    
    The above patch and its follow-on patches were done without
    accounting for the performance implications. There were good reasons
    for what I originally did: this code was highly tuned, to the point
    that I used atomics instead of locks specifically to cut down
    contention overhead. Beware when you change something like this, as
    there may be steep performance implications. Being 7.4 times faster
    by going back to what was there is a great example.
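    
    To illustrate the atomics point (a simplified sketch, not the actual
    EFL code, which selects between the two approaches with an ATOMIC
    ifdef as the diff below shows):
    
        #include <pthread.h>
        
        /* Lock-based refcount: every bump pays for a lock and unlock,
         * plus any contention on the lock itself. */
        static int ref_locked;
        static pthread_spinlock_t ref_lock; /* pthread_spin_init() at setup */
        
        static void
        ref_up_locked(void)
        {
           pthread_spin_lock(&ref_lock);
           ref_locked++;
           pthread_spin_unlock(&ref_lock);
        }
        
        /* Atomic refcount: the same bump as one atomic instruction, with
         * no separate lock to contend on. */
        static int ref_atomic;
        
        static void
        ref_up_atomic(void)
        {
           __atomic_add_fetch(&ref_atomic, 1, __ATOMIC_RELAXED);
        }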
---
 src/lib/eina/eina_thread_queue.c | 103 +++++++++++++++++++++++++++++----------
 1 file changed, 76 insertions(+), 27 deletions(-)

diff --git a/src/lib/eina/eina_thread_queue.c b/src/lib/eina/eina_thread_queue.c
index d6ba62df26..531800dac7 100644
--- a/src/lib/eina/eina_thread_queue.c
+++ b/src/lib/eina/eina_thread_queue.c
@@ -74,7 +74,9 @@ struct _Eina_Thread_Queue_Msg_Block
 // avoid reallocation via malloc/free etc. to avoid free memory pages and
 // pressure on the malloc subsystem
 static int _eina_thread_queue_log_dom = -1;
+static int _eina_thread_queue_block_pool_count = 0;
 static Eina_Spinlock _eina_thread_queue_block_pool_lock;
+static Eina_Thread_Queue_Msg_Block *_eina_thread_queue_block_pool = NULL;
 
 #ifdef ERR
 # undef ERR
@@ -86,51 +88,57 @@ static Eina_Spinlock _eina_thread_queue_block_pool_lock;
 #endif
 #define DBG(...) EINA_LOG_DOM_DBG(_eina_thread_queue_log_dom, __VA_ARGS__)
 
-static Eina_Hash *mempools;
-
 // api's to get message blocks from the pool or put them back in
 static Eina_Thread_Queue_Msg_Block *
 _eina_thread_queue_msg_block_new(int size)
 {
    Eina_Thread_Queue_Msg_Block *blk;
-   Eina_Mempool *mp;
-   size_t mp_size = sizeof(Eina_Thread_Queue_Msg_Block) - sizeof(Eina_Thread_Queue_Msg) + size;
 
    eina_spinlock_take(&(_eina_thread_queue_block_pool_lock));
-   mp = eina_hash_find(mempools, &size);
-   if (!mp)
+   if (_eina_thread_queue_block_pool)
      {
-        const char *choice = getenv("EINA_MEMPOOL");
-        if ((!choice) || (!choice[0]))
-          choice = "chained_mempool";
-        mp = eina_mempool_add(choice, "Eina_Thread_Queue_Msg_Block", NULL, mp_size, 16);
-        eina_hash_add(mempools, &size, mp);
+        blk = _eina_thread_queue_block_pool;
+        if (blk->size >= size)
+          {
+             blk->first = 0;
+             blk->last = 0;
+             blk->ref = 0;
+             blk->full = 0;
+             _eina_thread_queue_block_pool = blk->next;
+             blk->next = NULL;
+             _eina_thread_queue_block_pool_count--;
+             eina_spinlock_release(&(_eina_thread_queue_block_pool_lock));
+             return blk;
+          }
+        blk = NULL;
      }
    eina_spinlock_release(&(_eina_thread_queue_block_pool_lock));
 
-   blk = eina_mempool_calloc(mp, mp_size);
+   blk = malloc(sizeof(Eina_Thread_Queue_Msg_Block) -
+                sizeof(Eina_Thread_Queue_Msg) +
+                size);
    if (!blk)
      {
         ERR("Thread queue block buffer of size %i allocation failed", size);
         return NULL;
      }
+   blk->next = NULL;
 #ifndef ATOMIC
    eina_spinlock_new(&(blk->lock_ref));
    eina_spinlock_new(&(blk->lock_first));
 #endif
    eina_lock_new(&(blk->lock_non_0_ref));
    blk->size = size;
+   blk->first = 0;
+   blk->last = 0;
+   blk->ref = 0;
+   blk->full = 0;
    return blk;
 }
 
 static void
-_eina_thread_queue_msg_block_free(Eina_Thread_Queue_Msg_Block *blk)
+_eina_thread_queue_msg_block_real_free(Eina_Thread_Queue_Msg_Block *blk)
 {
-   Eina_Mempool *mp;
-
-   eina_spinlock_take(&(_eina_thread_queue_block_pool_lock));
-   mp = eina_hash_find(mempools, &blk->size);
-   eina_spinlock_release(&(_eina_thread_queue_block_pool_lock));
    eina_lock_take(&(blk->lock_non_0_ref));
    eina_lock_release(&(blk->lock_non_0_ref));
    eina_lock_free(&(blk->lock_non_0_ref));
@@ -142,7 +150,29 @@ _eina_thread_queue_msg_block_free(Eina_Thread_Queue_Msg_Block *blk)
    eina_spinlock_release(&(blk->lock_first));
    eina_spinlock_free(&(blk->lock_first));
 #endif
-   eina_mempool_free(mp, blk);
+   free(blk);
+}
+
+static void
+_eina_thread_queue_msg_block_free(Eina_Thread_Queue_Msg_Block *blk)
+{
+   if (blk->size == MIN_SIZE)
+     {
+        eina_spinlock_take(&(_eina_thread_queue_block_pool_lock));
+        if (_eina_thread_queue_block_pool_count < 20)
+          {
+             _eina_thread_queue_block_pool_count++;
+             blk->next = _eina_thread_queue_block_pool;
+             _eina_thread_queue_block_pool = blk;
+             eina_spinlock_release(&(_eina_thread_queue_block_pool_lock));
+          }
+        else
+          {
+             eina_spinlock_release(&(_eina_thread_queue_block_pool_lock));
+             _eina_thread_queue_msg_block_real_free(blk);
+          }
+     }
+   else _eina_thread_queue_msg_block_real_free(blk);
 }
 
 static Eina_Bool
@@ -154,6 +184,21 @@ _eina_thread_queue_msg_block_pool_init(void)
 static void
 _eina_thread_queue_msg_block_pool_shutdown(void)
 {
+   eina_spinlock_take(&(_eina_thread_queue_block_pool_lock));
+   while (_eina_thread_queue_block_pool)
+     {
+        Eina_Thread_Queue_Msg_Block *blk, *blknext;
+
+        for (;;)
+          {
+             blk = _eina_thread_queue_block_pool;
+             if (!blk) break;
+             blknext = blk->next;
+             _eina_thread_queue_msg_block_real_free(blk);
+             _eina_thread_queue_block_pool = blknext;
+          }
+     }
+   eina_spinlock_release(&(_eina_thread_queue_block_pool_lock));
    eina_spinlock_free(&_eina_thread_queue_block_pool_lock);
 }
 
@@ -186,15 +231,19 @@ _eina_thread_queue_msg_alloc(Eina_Thread_Queue *thq, int size, Eina_Thread_Queue
    size = ((size + 7) >> 3) << 3;
    if (!thq->data)
      {
-        size = MAX(size, MIN_SIZE);
-        thq->data = _eina_thread_queue_msg_block_new(size);
+        if (size < MIN_SIZE)
+          thq->data = _eina_thread_queue_msg_block_new(MIN_SIZE);
+        else
+          thq->data = _eina_thread_queue_msg_block_new(size);
         thq->last = thq->data;
      }
    blk = thq->last;
    if (blk->full)
      {
-        size = MAX(size, MIN_SIZE);
-        blk->next = _eina_thread_queue_msg_block_new(size);
+        if (size < MIN_SIZE)
+          blk->next = _eina_thread_queue_msg_block_new(MIN_SIZE);
+        else
+          blk->next = _eina_thread_queue_msg_block_new(size);
         blk = blk->next;
         thq->last = blk;
      }
@@ -206,8 +255,10 @@ _eina_thread_queue_msg_alloc(Eina_Thread_Queue *thq, int size, Eina_Thread_Queue
      }
    else
      {
-        size = MAX(size, MIN_SIZE);
-        blk->next = _eina_thread_queue_msg_block_new(size);
+        if (size < MIN_SIZE)
+          blk->next = _eina_thread_queue_msg_block_new(MIN_SIZE);
+        else
+          blk->next = _eina_thread_queue_msg_block_new(size);
         blk = blk->next;
         thq->last = blk;
         blk->last += size;
@@ -335,7 +386,6 @@ eina_thread_queue_init(void)
         ERR("Cannot init thread queue block pool spinlock");
         return EINA_FALSE;
      }
-   mempools = eina_hash_int32_new((Eina_Free_Cb)eina_mempool_del);
    return EINA_TRUE;
 }
 
@@ -344,7 +394,6 @@ eina_thread_queue_shutdown(void)
 {
    _eina_thread_queue_msg_block_pool_shutdown();
    eina_log_domain_unregister(_eina_thread_queue_log_dom);
-   eina_hash_free(mempools);
    return EINA_TRUE;
 }
 
