The mempool local cache and debug statistics are sized and indexed
per lcore, which assumes a single thread per lcore. Size these arrays
by RTE_MAX_THREAD instead and index them with rte_linear_thread_id()
rather than rte_lcore_id(), so that every thread gets its own slot.

Signed-off-by: Cunming Liang <cunming.liang@intel.com>
---
 lib/librte_mempool/rte_mempool.h | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 3314651..bf4117b 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -159,13 +159,13 @@ struct rte_mempool {
        unsigned private_data_size;      /**< Size of private data. */

 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
-       /** Per-lcore local cache. */
-       struct rte_mempool_cache local_cache[RTE_MAX_LCORE];
+       /** Per-lthread local cache. */
+       struct rte_mempool_cache local_cache[RTE_MAX_THREAD];
 #endif

 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-       /** Per-lcore statistics. */
-       struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
+       /** Per-lthread statistics. */
+       struct rte_mempool_debug_stats stats[RTE_MAX_THREAD];
 #endif

        /* Address translation support, starts from next cache line. */
@@ -199,9 +199,9 @@ struct rte_mempool {
  */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 #define __MEMPOOL_STAT_ADD(mp, name, n) do {                   \
-               unsigned __lcore_id = rte_lcore_id();           \
-               mp->stats[__lcore_id].name##_objs += n;         \
-               mp->stats[__lcore_id].name##_bulk += 1;         \
+       unsigned __thread_id = rte_linear_thread_id();  \
+       mp->stats[__thread_id].name##_objs += n;        \
+       mp->stats[__thread_id].name##_bulk += 1;        \
        } while(0)
 #else
 #define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
@@ -758,7 +758,7 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
        struct rte_mempool_cache *cache;
        uint32_t index;
        void **cache_objs;
-       unsigned lcore_id = rte_lcore_id();
+       unsigned tid = rte_linear_thread_id();
        uint32_t cache_size = mp->cache_size;
        uint32_t flushthresh = mp->cache_flushthresh;
 #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
@@ -775,7 +775,7 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
        if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
                goto ring_enqueue;

-       cache = &mp->local_cache[lcore_id];
+       cache = &mp->local_cache[tid];
        cache_objs = &cache->objs[cache->len];

        /*
@@ -948,14 +948,14 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
        struct rte_mempool_cache *cache;
        uint32_t index, len;
        void **cache_objs;
-       unsigned lcore_id = rte_lcore_id();
+       unsigned tid = rte_linear_thread_id();
        uint32_t cache_size = mp->cache_size;

        /* cache is not enabled or single consumer */
        if (unlikely(cache_size == 0 || is_mc == 0 || n >= cache_size))
                goto ring_dequeue;

-       cache = &mp->local_cache[lcore_id];
+       cache = &mp->local_cache[tid];
        cache_objs = cache->objs;

        /* Can this be satisfied from the cache? */
-- 
1.8.1.4
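
Note for readers without the rest of the series: rte_linear_thread_id()
and RTE_MAX_THREAD are introduced by other patches and are not defined
in this hunk. Below is a minimal, hypothetical sketch of what such a
helper could look like, just to make the indexing change above concrete;
the atomic slot allocator and the TLS caching are illustrative
assumptions, and a real lthread scheduler would more likely keep the id
in each lthread's own context rather than in pthread-local storage.

/*
 * Hypothetical sketch, not part of this patch: hand every thread a
 * stable index in [0, RTE_MAX_THREAD) on first use.  The names
 * RTE_MAX_THREAD and rte_linear_thread_id() mirror the patch; the
 * allocation scheme is an assumption.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

#define RTE_MAX_THREAD 256              /* assumed slot bound */

static atomic_uint next_tid;            /* next free slot, never reused */
static _Thread_local unsigned linear_tid = (unsigned)-1;

static unsigned
rte_linear_thread_id(void)
{
        if (linear_tid == (unsigned)-1) {
                unsigned id = atomic_fetch_add(&next_tid, 1);
                assert(id < RTE_MAX_THREAD);    /* sketch: no slot reuse */
                linear_tid = id;
        }
        return linear_tid;
}

int
main(void)
{
        /* The id is usable as the local_cache[]/stats[] subscript. */
        printf("linear thread id: %u\n", rte_linear_thread_id());
        return 0;
}

The property the patch relies on is that the returned id is stable per
thread and unique across threads, so two threads multiplexed on one
lcore index different local_cache[]/stats[] slots and the single-writer
assumption behind each per-slot cache still holds.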
