The patch adds 'rte_epoll_wait' and 'rte_epoll_ctl' for async event wakeup.
It defines 'struct rte_epoll_event' as the event param.
The 'op' argument takes the same enum values as epoll_ctl does.
An epoll event can carry raw user data and can register a callback
which is executed during wakeup.
Signed-off-by: Cunming Liang
---
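
A quick usage sketch of the new API (not part of the patch; the
'wait_for_wakeup' and 'my_wakeup_cb' names and the eventfd setup are
illustrative, and the rte_epoll_event field names follow the
rte_interrupts.h hunk of this series, which is not quoted below):

    #include <string.h>
    #include <unistd.h>
    #include <sys/epoll.h>
    #include <sys/eventfd.h>
    #include <rte_interrupts.h>

    /* executed from inside rte_epoll_wait() during wakeup */
    static void
    my_wakeup_cb(int fd, void *arg)
    {
            uint64_t v;

            (void)arg;
            if (read(fd, &v, sizeof(v)) < 0)  /* drain the eventfd */
                    return;
    }

    static int
    wait_for_wakeup(void)
    {
            int efd = eventfd(0, EFD_NONBLOCK);
            struct rte_epoll_event ev;
            struct rte_epoll_event out[1];

            if (efd < 0)
                    return -1;

            memset(&ev, 0, sizeof(ev));
            ev.epdata.event = EPOLLIN;
            ev.epdata.cb_fun = my_wakeup_cb;

            /* RTE_EPOLL_PER_THREAD selects the per-lcore epoll fd */
            if (rte_epoll_ctl(RTE_EPOLL_PER_THREAD, EPOLL_CTL_ADD,
                              efd, &ev) < 0)
                    return -1;

            /* blocks until another thread write()s to efd; the
             * registered callback runs before this call returns */
            return rte_epoll_wait(RTE_EPOLL_PER_THREAD, out, 1, -1);
    }

Tearing the registration down again would be rte_epoll_ctl(epfd,
EPOLL_CTL_DEL, efd, &ev), which per the v8 change below is safe even
while another thread is executing the wakeup callback.
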
v9 changes
- rework on coding style
v8 changes
- support deleting an event safely during wakeup execution
- add EINTR handling during epoll_wait
v7 changes
- split v6[4/8] into two patches: one for the epoll event (this one),
another for rx intr (next patch)
- introduce the rte_epoll_event definition
- add rte_epoll_wait/ctl as a more generic RTE epoll API
v6 changes
- split rte_intr_wait_rx_pkt into two functions, wait and set.
- rewrite rte_intr_rx_wait/rte_intr_rx_set to remove queue visibility from eal.
- rte_intr_rx_wait to support multiplexing.
- allow epfd as input to support flexible event fd combinations.
lib/librte_eal/linuxapp/eal/eal_interrupts.c | 137 +
.../linuxapp/eal/include/exec-env/rte_interrupts.h | 82 +++-
lib/librte_eal/linuxapp/eal/rte_eal_version.map | 3 +
3 files changed, 219 insertions(+), 3 deletions(-)
diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
index 3a84b3c..2f56000 100644
--- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
@@ -69,6 +69,8 @@
#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
+static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */
+
/**
* union for pipe fds.
*/
@@ -895,3 +897,138 @@ rte_eal_intr_init(void)
return -ret;
}
+static int
+eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
+ struct rte_epoll_event *events)
+{
+ unsigned int i, count = 0;
+ struct rte_epoll_event *rev;
+
+ for (i = 0; i < n; i++) {
+ rev = evs[i].data.ptr;
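+ /* claim the event: moving status from VALID to EXEC keeps a
+  * concurrent delete from freeing the event while its callback runs
+  */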
+ if (!rev || !rte_atomic32_cmpset(&rev->status, RTE_EPOLL_VALID,
+         RTE_EPOLL_EXEC))
+ continue;
+
+ events[count].status = RTE_EPOLL_VALID;
+ events[count].fd = rev->fd;
+ events[count].epfd = rev->epfd;
+ events[count].epdata.event = rev->epdata.event;
+ events[count].epdata.data = rev->epdata.data;
+ if (rev->epdata.cb_fun)
+ rev->epdata.cb_fun(rev->fd,
+ rev->epdata.cb_arg);
+
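+ /* keep the compiler from reordering the event copy-out above
+  * past the status store below
+  */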
+ rte_compiler_barrier();
+ rev->status = RTE_EPOLL_VALID;
+ count++;
+ }
+ return count;
+}
+
+static inline int
+eal_init_tls_epfd(void)
+{
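+ /* the size hint is ignored by kernels since Linux 2.6.8,
+  * but must be > 0
+  */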
+ int pfd = epoll_create(255);
+ if (pfd < 0) {
+ RTE_LOG(ERR, EAL,
+ "Cannot create epoll instance\n");
+ return -1;
+ }
+ return pfd;
+}
+
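+/* return the per-thread epoll instance, creating it on first use */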
+int
+rte_intr_tls_epfd(void)
+{
+ if (RTE_PER_LCORE(_epfd) == -1)
+ RTE_PER_LCORE(_epfd) = eal_init_tls_epfd();
+
+ return RTE_PER_LCORE(_epfd);
+}
+
+int
+rte_epoll_wait(int epfd, struct rte_epoll_event *events,
+ int maxevents, int timeout)
+{
+ struct epoll_event evs[maxevents];
+ int rc;
+
+ if (!events) {
+ RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
+ return -1;
+ }
+
+ /* using per thread epoll fd */
+ if (epfd == RTE_EPOLL_PER_THREAD)
+ epfd = rte_intr_tls_epfd();
+
+ while (1) {
+ rc = epoll_wait(epfd, evs, maxevents, timeout);
+ if (likely(rc > 0)) {
+ /* epoll_wait has at least one fd ready to read */
+ rc = eal_epoll_process_event(evs, rc, events);
+ break;
+ } else if (rc < 0) {
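+ /* interrupted by a signal; retry the wait */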
+ if (errno == EINTR)
+ continue;
+ /* epoll_wait fail */
+ RTE_LOG(ERR, EAL, "epoll_wait returns with fail %s\n",
+ strerror(errno));
+ rc = -1;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static inline void
+eal_epoll_data_safe_free(struct rte_epoll_event *ev)
+{
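+ /* wait until any in-flight callback (status EXEC) has finished,
+  * then move the event to INVALID so it can be reused or freed safely
+  */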
+ while (!rte_atomic32_cmpset(&ev->status, RTE_EPOLL_VALID,
+         RTE_EPOLL_INVALID))
+ while (ev->status != RTE_EPOLL_VALID)
+ rte_pause();
+ memset(&ev->epdata, 0, sizeof(ev->epdata));
+ ev->fd = -1;
+ ev->epfd = -1;
+}
+
+int
+rte_epoll_ctl(int epfd, int op, int fd,
+ struct rte_epoll_event *event)
+{
+ struct epoll_event ev;
+
+ if (!event) {
+ RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
+ return -1;
+ }
+
+