To avoid excessive memory overhead, defer allocation of each pool's
ring until odp_pool_create() is called. This keeps pool memory
consumption proportional to the number of pools actually in use
rather than to the architected maximum number of pools
(ODP_CONFIG_POOLS).

This patch addresses Bug https://bugs.linaro.org/show_bug.cgi?id=2765

Signed-off-by: Bill Fischofer <bill.fischo...@linaro.org>
---
 platform/linux-generic/include/odp_pool_internal.h |  3 ++-
 platform/linux-generic/odp_pool.c                  | 23 +++++++++++++++++-----
 2 files changed, 20 insertions(+), 6 deletions(-)
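Note for reviewers: below is a minimal standalone sketch of the
deferred-allocation pattern described in the commit message, not part
of the patch itself. All names here are illustrative, and plain
malloc()/free() stand in for the odp_shm_reserve()/odp_shm_addr()/
odp_shm_free() calls used in the actual change.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NUM_POOLS 64                 /* stands in for ODP_CONFIG_POOLS */

/* A deliberately large ring, standing in for pool_ring_t. */
typedef struct {
	uint32_t data[64 * 1024];
} fake_ring_t;

typedef struct {
	int          reserved;
	fake_ring_t *ring;           /* pointer only; allocated on demand */
} fake_pool_t;

/* The static pool table holds one pointer per pool instead of a full
 * embedded ring, so it stays small regardless of NUM_POOLS. */
static fake_pool_t pool_tbl[NUM_POOLS];

static fake_pool_t *pool_reserve(void)
{
	for (int i = 0; i < NUM_POOLS; i++) {
		fake_pool_t *pool = &pool_tbl[i];

		if (!pool->reserved) {
			pool->reserved = 1;
			/* Deferred allocation: the ring is created only
			 * when the pool is actually taken into use. */
			pool->ring = malloc(sizeof(fake_ring_t));
			if (!pool->ring) {
				pool->reserved = 0;
				return NULL;
			}
			return pool;
		}
	}
	return NULL;
}

static void pool_destroy(fake_pool_t *pool)
{
	free(pool->ring);            /* ring memory returned at destroy time */
	pool->ring = NULL;
	pool->reserved = 0;
}

int main(void)
{
	fake_pool_t *pool = pool_reserve();

	printf("pool table: %zu bytes, one ring: %zu bytes\n",
	       sizeof(pool_tbl), sizeof(fake_ring_t));

	if (pool)
		pool_destroy(pool);
	return 0;
}

The patch applies the same idea using shared memory: the ring is
reserved in reserve_pool() and freed in odp_pool_destroy().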

diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index 5d7b817..4915bda 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -69,7 +69,8 @@ typedef struct pool_t {
 
        pool_cache_t     local_cache[ODP_THREAD_COUNT_MAX];
 
-       pool_ring_t      ring;
+       odp_shm_t        ring_shm;
+       pool_ring_t     *ring;
 
 } pool_t;
 
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index 4be3827..c1f479d 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -143,7 +143,7 @@ static void flush_cache(pool_cache_t *cache, pool_t *pool)
        uint32_t mask;
        uint32_t cache_num, i, data;
 
-       ring = &pool->ring.hdr;
+       ring = &pool->ring->hdr;
        mask = pool->ring_mask;
        cache_num = cache->num;
 
@@ -172,6 +172,7 @@ static pool_t *reserve_pool(void)
 {
        int i;
        pool_t *pool;
+       char ring_name[ODP_POOL_NAME_LEN];
 
        for (i = 0; i < ODP_CONFIG_POOLS; i++) {
                pool = pool_entry(i);
@@ -180,6 +181,16 @@ static pool_t *reserve_pool(void)
                if (pool->reserved == 0) {
                        pool->reserved = 1;
                        UNLOCK(&pool->lock);
+                       sprintf(ring_name, "_odp_pool_ring_%d", i);
+                       pool->ring_shm =
+                               odp_shm_reserve(ring_name,
+                                               sizeof(pool_ring_t),
+                                               ODP_CACHE_LINE_SIZE, 0);
+                       if (pool->ring_shm == ODP_SHM_INVALID) {
+                               ODP_ERR("Unable to alloc pool ring %d\n", i);
+                               break;
+                       }
+                       pool->ring = odp_shm_addr(pool->ring_shm);
                        return pool;
                }
                UNLOCK(&pool->lock);
@@ -214,7 +225,7 @@ static void init_buffers(pool_t *pool)
        int type;
        uint32_t seg_size;
 
-       ring = &pool->ring.hdr;
+       ring = &pool->ring->hdr;
        mask = pool->ring_mask;
        type = pool->params.type;
 
@@ -411,7 +422,7 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
                pool->uarea_base_addr = odp_shm_addr(pool->uarea_shm);
        }
 
-       ring_init(&pool->ring.hdr);
+       ring_init(&pool->ring->hdr);
        init_buffers(pool);
 
        return pool->pool_hdl;
@@ -533,6 +544,8 @@ int odp_pool_destroy(odp_pool_t pool_hdl)
                odp_shm_free(pool->uarea_shm);
 
        pool->reserved = 0;
+       odp_shm_free(pool->ring_shm);
+       pool->ring = NULL;
        UNLOCK(&pool->lock);
 
        return 0;
@@ -589,7 +602,7 @@ int buffer_alloc_multi(pool_t *pool, odp_buffer_t buf[],
        pool_cache_t *cache;
        uint32_t cache_num, num_ch, num_deq, burst;
 
-       ring  = &pool->ring.hdr;
+       ring  = &pool->ring->hdr;
        mask  = pool->ring_mask;
        cache = local.cache[pool->pool_idx];
 
@@ -668,7 +681,7 @@ static inline void buffer_free_to_pool(uint32_t pool_id,
 
        cache = local.cache[pool_id];
        pool  = pool_entry(pool_id);
-       ring  = &pool->ring.hdr;
+       ring  = &pool->ring->hdr;
        mask  = pool->ring_mask;
 
        /* Special case of a very large free. Move directly to
-- 
2.7.4
