On Wed, 14 Nov 2018 10:25:32 -0800 Davidlohr Bueso <d...@stgolabs.net> wrote:

> There is no reason to rearm the waitqueue upon every
> fetch_events retry (i.e. when events are found but send_events()
> fails). If nothing else, this saves four lock operations per
> retry, and furthermore reduces the scope of the lock even
> further.
> 
> ..
>
> --- a/fs/eventpoll.c
> +++ b/fs/eventpoll.c
> @@ -1749,6 +1749,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
>  {
>       int res = 0, eavail, timed_out = 0;
>       u64 slack = 0;
> +     bool waiter = false;
>       wait_queue_entry_t wait;
>       ktime_t expires, *to = NULL;
>  
> @@ -1786,6 +1787,15 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
>       if (eavail)
>               goto send_events;
>  
> +     if (!waiter) {
> +             waiter = true;
> +             init_waitqueue_entry(&wait, current);
> +
> +             spin_lock_irq(&ep->wq.lock);
> +             __add_wait_queue_exclusive(&ep->wq, &wait);
> +             spin_unlock_irq(&ep->wq.lock);
> +     }
> +
>       /*
>        * Busy poll timed out.  Drop NAPI ID for now, we can add
>        * it back in when we have moved a socket with a valid NAPI
> @@ -1798,10 +1808,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
>        * We need to sleep here, and we will be wake up by
>        * ep_poll_callback() when events will become available.
>        */
> -     init_waitqueue_entry(&wait, current);
> -     spin_lock_irq(&ep->wq.lock);
> -     __add_wait_queue_exclusive(&ep->wq, &wait);
> -     spin_unlock_irq(&ep->wq.lock);

Why was this moved to before the ep_reset_busy_poll_napi_id() call? 
That movement placed the code ahead of the block comment which serves
to explain its function.
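
For reference, a condensed sketch of the ordering I'd expect in the
fetch_events path (elided lines marked with "...", not the literal
source):

fetch_events:
	...
	if (eavail)
		goto send_events;

	/* Busy poll timed out.  Drop NAPI ID for now, ... */
	ep_reset_busy_poll_napi_id(ep);

	/*
	 * No available events to return to the caller; arm the waiter
	 * once and sleep until ep_poll_callback() wakes us.
	 */
	if (!waiter) {
		waiter = true;
		init_waitqueue_entry(&wait, current);

		spin_lock_irq(&ep->wq.lock);
		__add_wait_queue_exclusive(&ep->wq, &wait);
		spin_unlock_irq(&ep->wq.lock);
	}

	for (;;) {
		...
	}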


How about this?  It also fixes that comment and reflows it to use 80 cols.

--- a/fs/eventpoll.c~fs-epoll-deal-with-wait_queue-only-once-fix
+++ a/fs/eventpoll.c
@@ -1787,15 +1787,6 @@ fetch_events:
        if (eavail)
                goto send_events;
 
-       if (!waiter) {
-               waiter = true;
-               init_waitqueue_entry(&wait, current);
-
-               spin_lock_irq(&ep->wq.lock);
-               __add_wait_queue_exclusive(&ep->wq, &wait);
-               spin_unlock_irq(&ep->wq.lock);
-       }
-
        /*
         * Busy poll timed out.  Drop NAPI ID for now, we can add
         * it back in when we have moved a socket with a valid NAPI
@@ -1804,10 +1795,18 @@ fetch_events:
        ep_reset_busy_poll_napi_id(ep);
 
        /*
-        * We don't have any available event to return to the caller.
-        * We need to sleep here, and we will be wake up by
-        * ep_poll_callback() when events will become available.
+        * We don't have any available event to return to the caller.  We need
+        * to sleep here, and we will be woken by ep_poll_callback() when events
+        * become available.
         */
+       if (!waiter) {
+               waiter = true;
+               init_waitqueue_entry(&wait, current);
+
+               spin_lock_irq(&ep->wq.lock);
+               __add_wait_queue_exclusive(&ep->wq, &wait);
+               spin_unlock_irq(&ep->wq.lock);
+       }
 
        for (;;) {
                /*
_
