Oops, I realized the last patch did a write(2) while holding a pthread_mutex.
I've never used pthread mutexes, but I suspect that's a bad idea: write() can
block, and blocking while holding the mutex would stall every other thread
waiting on it. The attached updated patch delays the write() until after the
pthread_mutex_unlock().
Sorry about the confusion, rick
----- Original Message -----
> > > I don't know anything about gluster's poll implementation so I may
> > > be totally wrong, but would it be possible to use an eventfd (or a
> > > pipe if eventfd is not supported) to signal the need to add more
> > > file descriptors to the poll call?
> > >
> > >
> > > The poll call should listen on this new fd. When we need to change
> > > the fd list, we should simply write to the eventfd or pipe from
> > > another thread. This will cause the poll call to return and we will
> > > be able to change the fd list without needing a short poll timeout or
> > > having to decide on any trade-off.
> >
> >
> > That's a nice idea. Based on my understanding of why timeouts are being
> > used, this approach can work.
>
> The own-thread code which preceded the current poll implementation did
> something similar, using a pipe fd to be woken up for new *outgoing*
> messages. That code still exists, and might provide some insight into
> how to do this for the current poll code.
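
For anyone who wants to play with the breaker-pipe idea outside of Gluster,
here's a minimal standalone sketch of the pattern the patch below uses. The
names and structure are made up for illustration (it's not Gluster code), and
most error handling is elided:

/* Build with: cc -pthread sketch.c */
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int breaker[2];  /* [0] is polled; writers poke [1] */

/* Any thread that changes the fd list just writes one byte. */
static void *
waker (void *arg)
{
        (void) arg;
        sleep (1);
        (void) write (breaker[1], "X", 1);
        return NULL;
}

int
main (void)
{
        struct pollfd ufds[1];
        pthread_t     tid;
        char          junk;

        if (pipe (breaker) != 0)
                return 1;
        /* Non-blocking read end, so draining the pipe never blocks. */
        fcntl (breaker[0], F_SETFL, O_NONBLOCK);

        ufds[0].fd     = breaker[0];
        ufds[0].events = POLLIN;

        pthread_create (&tid, NULL, waker, NULL);

        /* An infinite timeout is safe now: writers wake us on demand. */
        while (poll (ufds, 1, -1) >= 0) {
                if (ufds[0].revents & POLLIN) {
                        while (read (breaker[0], &junk, 1) > 0)
                                ;  /* drain */
                        printf ("woken up; rebuild the pollfd list here\n");
                        break;
                }
        }
        pthread_join (tid, NULL);
        return 0;
}

On Linux an eventfd(2) would do the same job with a single descriptor, but a
pipe works everywhere poll() does. The updated patch: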
--- glusterfs-3.7.6/libglusterfs/src/event-poll.c.sav 2016-01-06 15:58:03.522286000 -0500
+++ glusterfs-3.7.6/libglusterfs/src/event-poll.c 2016-01-08 18:14:57.658652000 -0500
@@ -180,6 +180,15 @@ event_pool_new_poll (int count, int even
return event_pool;
}
+static void
+event_pool_changed (struct event_pool *event_pool)
+{
+ /* Wake up poll() by writing a byte into the breaker pipe; if the
+  * pipe is already full, a wakeup is already pending anyway. */
+ if (event_pool->breaker[1] >= 0)
+ (void) write(event_pool->breaker[1], "X", 1);
+}
+
static int
event_register_poll (struct event_pool *event_pool, int fd,
@@ -187,6 +196,7 @@ event_register_poll (struct event_pool *
void *data, int poll_in, int poll_out)
{
int idx = -1;
+ int changed = 0;
GF_VALIDATE_OR_GOTO ("event", event_pool, out);
@@ -245,10 +255,14 @@ event_register_poll (struct event_pool *
}
event_pool->changed = 1;
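+ /* Remember the change locally; the breaker write happens after unlock. */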
+ changed = 1;
}
unlock:
pthread_mutex_unlock (&event_pool->mutex);
+ if (changed != 0)
+ event_pool_changed(event_pool);
out:
return idx;
@@ -259,6 +272,7 @@ static int
event_unregister_poll (struct event_pool *event_pool, int fd, int idx_hint)
{
int idx = -1;
+ int changed = 0;
GF_VALIDATE_OR_GOTO ("event", event_pool, out);
@@ -276,9 +290,12 @@ event_unregister_poll (struct event_pool
event_pool->reg[idx] = event_pool->reg[--event_pool->used];
event_pool->changed = 1;
+ changed = 1;
}
unlock:
pthread_mutex_unlock (&event_pool->mutex);
+ if (changed != 0)
+ event_pool_changed(event_pool);
out:
return idx;
@@ -304,6 +321,7 @@ event_select_on_poll (struct event_pool
int poll_in, int poll_out)
{
int idx = -1;
+ int changed = 0;
GF_VALIDATE_OR_GOTO ("event", event_pool, out);
@@ -349,11 +367,15 @@ event_select_on_poll (struct event_pool
break;
}
- if (poll_in + poll_out > -2)
+ if (poll_in + poll_out > -2) {
event_pool->changed = 1;
+ changed = 1;
+ }
}
unlock:
pthread_mutex_unlock (&event_pool->mutex);
+ if (changed != 0)
+ event_pool_changed(event_pool);
out:
return idx;
@@ -448,6 +470,7 @@ event_dispatch_poll (struct event_pool *
int size = 0;
int i = 0;
int ret = -1;
+ char x; /* scratch byte for draining the breaker pipe */
GF_VALIDATE_OR_GOTO ("event", event_pool, out);
@@ -472,7 +495,9 @@ event_dispatch_poll (struct event_pool *
size = event_dispatch_poll_resize (event_pool, ufds, size);
ufds = event_pool->evcache;
- ret = poll (ufds, size, 1);
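+ /* The breaker pipe now signals fd list changes, so poll() can
+  * block indefinitely instead of ticking every millisecond. */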
+ ret = poll (ufds, size, -1);
if (ret == 0)
/* timeout */
@@ -482,7 +505,15 @@ event_dispatch_poll (struct event_pool *
/* sys call */
continue;
- for (i = 0; i < size; i++) {
+ if (ufds[0].revents != 0 && event_pool->breaker[0] >= 0) {
+ /* Just read all the junk in the breaker pipe. */
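+ /* (The breaker fds are assumed non-blocking, so this loop terminates.) */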
+ while (read(event_pool->breaker[0], &x, 1) > 0)
+ ;
+ }
+
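+ /* ufds[0] is the breaker read end, handled above; data fds start at 1. */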
+ for (i = 1; i < size; i++) {
if (!ufds[i].revents)
continue;