The new rte_stack library is derived from the stack mempool handler, so
this commit removes the now-duplicated code and simplifies the handler by
migrating it to the new rte_stack API.

Signed-off-by: Gage Eads <gage.e...@intel.com>
Reviewed-by: Olivier Matz <olivier.m...@6wind.com>
---
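As context for the migration, here is a minimal, self-contained sketch of
the rte_stack calls the handler now relies on (rte_stack_create/push/pop/
count/free). It assumes the EAL is already initialized; the stack name,
sizes, and object values are illustrative only, not taken from the patch.
Like the handler, out-of-tree code would need ALLOW_EXPERIMENTAL_API
defined while rte_stack is still experimental.

        #include <stdio.h>
        #include <errno.h>

        #include <rte_errno.h>
        #include <rte_stack.h>

        /* Illustrative only: create a stack, push/pop a few pointers, free it. */
        static int
        stack_demo(int socket_id)
        {
                void *objs[4] = { (void *)0x1, (void *)0x2, (void *)0x3, (void *)0x4 };
                void *popped[4];
                struct rte_stack *s;

                /* Same call stack_alloc() makes; flags == 0 selects the
                 * default (lock-protected) stack implementation.
                 */
                s = rte_stack_create("demo_stack", 64, socket_id, 0);
                if (s == NULL)
                        return -rte_errno;

                /* Push and pop are all-or-nothing: they return n on success
                 * and 0 on failure, which is why the handler maps a zero
                 * return to -ENOBUFS.
                 */
                if (rte_stack_push(s, objs, 4) != 4 ||
                    rte_stack_pop(s, popped, 4) != 4) {
                        rte_stack_free(s);
                        return -ENOBUFS;
                }

                printf("objects remaining: %u\n", rte_stack_count(s));

                rte_stack_free(s);
                return 0;
        }
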
 MAINTAINERS                               |  2 +-
 drivers/mempool/stack/Makefile            |  3 +-
 drivers/mempool/stack/meson.build         |  6 +-
 drivers/mempool/stack/rte_mempool_stack.c | 93 +++++++++----------------------
 4 files changed, 33 insertions(+), 71 deletions(-)

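For completeness, a hedged sketch of how an application would exercise this
handler after the migration: create an empty mempool, select the ops that
rte_mempool_stack.c registers under the name "stack", and populate the pool.
The pool name, object count, and element size below are illustrative, not
from the patch.

        #include <rte_mempool.h>

        /* Illustrative only: build a mempool backed by the stack ops. */
        static struct rte_mempool *
        make_stack_pool(int socket_id)
        {
                struct rte_mempool *mp;

                /* 2048 objects of 64 bytes, no per-lcore cache. */
                mp = rte_mempool_create_empty("demo_pool", 2048, 64, 0, 0,
                                              socket_id, 0);
                if (mp == NULL)
                        return NULL;

                /* Route enqueue/dequeue through the "stack" mempool ops. */
                if (rte_mempool_set_ops_byname(mp, "stack", NULL) < 0 ||
                    rte_mempool_populate_default(mp) < 0) {
                        rte_mempool_free(mp);
                        return NULL;
                }

                return mp;
        }
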
diff --git a/MAINTAINERS b/MAINTAINERS
index 5fca30823..4e088d2bd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -282,7 +282,6 @@ M: Andrew Rybchenko <arybche...@solarflare.com>
 F: lib/librte_mempool/
 F: drivers/mempool/Makefile
 F: drivers/mempool/ring/
-F: drivers/mempool/stack/
 F: doc/guides/prog_guide/mempool_lib.rst
 F: app/test/test_mempool*
 F: app/test/test_func_reentrancy.c
@@ -410,6 +409,7 @@ M: Gage Eads <gage.e...@intel.com>
 M: Olivier Matz <olivier.m...@6wind.com>
 F: lib/librte_stack/
 F: doc/guides/prog_guide/stack_lib.rst
+F: drivers/mempool/stack/
 
 
 Memory Pool Drivers
diff --git a/drivers/mempool/stack/Makefile b/drivers/mempool/stack/Makefile
index 0444aedad..1681a62bc 100644
--- a/drivers/mempool/stack/Makefile
+++ b/drivers/mempool/stack/Makefile
@@ -10,10 +10,11 @@ LIB = librte_mempool_stack.a
 
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
 
 # Headers
 CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
-LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+LDLIBS += -lrte_eal -lrte_mempool -lrte_stack
 
 EXPORT_MAP := rte_mempool_stack_version.map
 
diff --git a/drivers/mempool/stack/meson.build b/drivers/mempool/stack/meson.build
index b75a3bb56..03e369a41 100644
--- a/drivers/mempool/stack/meson.build
+++ b/drivers/mempool/stack/meson.build
@@ -1,4 +1,8 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
+# Copyright(c) 2017-2019 Intel Corporation
+
+allow_experimental_apis = true
 
 sources = files('rte_mempool_stack.c')
+
+deps += ['stack']
diff --git a/drivers/mempool/stack/rte_mempool_stack.c b/drivers/mempool/stack/rte_mempool_stack.c
index e6d504af5..25ccdb9af 100644
--- a/drivers/mempool/stack/rte_mempool_stack.c
+++ b/drivers/mempool/stack/rte_mempool_stack.c
@@ -1,39 +1,29 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016 Intel Corporation
+ * Copyright(c) 2016-2019 Intel Corporation
  */
 
 #include <stdio.h>
 #include <rte_mempool.h>
-#include <rte_malloc.h>
-
-struct rte_mempool_stack {
-       rte_spinlock_t sl;
-
-       uint32_t size;
-       uint32_t len;
-       void *objs[];
-};
+#include <rte_stack.h>
 
 static int
 stack_alloc(struct rte_mempool *mp)
 {
-       struct rte_mempool_stack *s;
-       unsigned n = mp->size;
-       int size = sizeof(*s) + (n+16)*sizeof(void *);
-
-       /* Allocate our local memory structure */
-       s = rte_zmalloc_socket("mempool-stack",
-                       size,
-                       RTE_CACHE_LINE_SIZE,
-                       mp->socket_id);
-       if (s == NULL) {
-               RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
-               return -ENOMEM;
+       char name[RTE_STACK_NAMESIZE];
+       struct rte_stack *s;
+       int ret;
+
+       ret = snprintf(name, sizeof(name),
+                      RTE_MEMPOOL_MZ_FORMAT, mp->name);
+       if (ret < 0 || ret >= (int)sizeof(name)) {
+               rte_errno = ENAMETOOLONG;
+               return -rte_errno;
        }
 
-       rte_spinlock_init(&s->sl);
+       s = rte_stack_create(name, mp->size, mp->socket_id, 0);
+       if (s == NULL)
+               return -rte_errno;
 
-       s->size = n;
        mp->pool_data = s;
 
        return 0;
@@ -41,69 +31,36 @@ stack_alloc(struct rte_mempool *mp)
 
 static int
 stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
-               unsigned n)
+             unsigned int n)
 {
-       struct rte_mempool_stack *s = mp->pool_data;
-       void **cache_objs;
-       unsigned index;
-
-       rte_spinlock_lock(&s->sl);
-       cache_objs = &s->objs[s->len];
-
-       /* Is there sufficient space in the stack ? */
-       if ((s->len + n) > s->size) {
-               rte_spinlock_unlock(&s->sl);
-               return -ENOBUFS;
-       }
-
-       /* Add elements back into the cache */
-       for (index = 0; index < n; ++index, obj_table++)
-               cache_objs[index] = *obj_table;
-
-       s->len += n;
+       struct rte_stack *s = mp->pool_data;
 
-       rte_spinlock_unlock(&s->sl);
-       return 0;
+       return rte_stack_push(s, obj_table, n) == 0 ? -ENOBUFS : 0;
 }
 
 static int
 stack_dequeue(struct rte_mempool *mp, void **obj_table,
-               unsigned n)
+             unsigned int n)
 {
-       struct rte_mempool_stack *s = mp->pool_data;
-       void **cache_objs;
-       unsigned index, len;
-
-       rte_spinlock_lock(&s->sl);
-
-       if (unlikely(n > s->len)) {
-               rte_spinlock_unlock(&s->sl);
-               return -ENOENT;
-       }
+       struct rte_stack *s = mp->pool_data;
 
-       cache_objs = s->objs;
-
-       for (index = 0, len = s->len - 1; index < n;
-                       ++index, len--, obj_table++)
-               *obj_table = cache_objs[len];
-
-       s->len -= n;
-       rte_spinlock_unlock(&s->sl);
-       return 0;
+       return rte_stack_pop(s, obj_table, n) == 0 ? -ENOBUFS : 0;
 }
 
 static unsigned
 stack_get_count(const struct rte_mempool *mp)
 {
-       struct rte_mempool_stack *s = mp->pool_data;
+       struct rte_stack *s = mp->pool_data;
 
-       return s->len;
+       return rte_stack_count(s);
 }
 
 static void
 stack_free(struct rte_mempool *mp)
 {
-       rte_free((void *)(mp->pool_data));
+       struct rte_stack *s = mp->pool_data;
+
+       rte_stack_free(s);
 }
 
 static struct rte_mempool_ops ops_stack = {
-- 
2.13.6
