A very naive select/poll busy-poll support.
Add busy-polling to sock_poll().
When poll/select have nothing to report, call the low-level
sock_poll() again until we are out of time or we find something.
Right now we poll every socket once; this is suboptimal,
but it improves latency when the number of sockets polled is not large.

Signed-off-by: Alexander Duyck <[email protected]>
Signed-off-by: Jesse Brandeburg <[email protected]>
Tested-by: Willem de Bruijn <[email protected]>
Signed-off-by: Eliezer Tamir <[email protected]>
---

 fs/select.c  |    7 +++++++
 net/socket.c |   10 +++++++++-
 2 files changed, 16 insertions(+), 1 deletions(-)

diff --git a/fs/select.c b/fs/select.c
index 8c1c96c..f116bf0 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -27,6 +27,7 @@
 #include <linux/rcupdate.h>
 #include <linux/hrtimer.h>
 #include <linux/sched/rt.h>
+#include <net/ll_poll.h>
 
 #include <asm/uaccess.h>
 
@@ -400,6 +401,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
        poll_table *wait;
        int retval, i, timed_out = 0;
        unsigned long slack = 0;
+       cycles_t ll_time = ll_end_time();
 
        rcu_read_lock();
        retval = max_select_fd(n, fds);
@@ -486,6 +488,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
                        break;
                }
 
+               if (can_poll_ll(ll_time))
+                       continue;
                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
@@ -750,6 +754,7 @@ static int do_poll(unsigned int nfds,  struct poll_list *list,
        ktime_t expire, *to = NULL;
        int timed_out = 0, count = 0;
        unsigned long slack = 0;
+       cycles_t ll_time = ll_end_time();
 
        /* Optimise the no-wait case */
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
@@ -795,6 +800,8 @@ static int do_poll(unsigned int nfds,  struct poll_list *list,
                if (count || timed_out)
                        break;
 
+               if (can_poll_ll(ll_time))
+                       continue;
                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
diff --git a/net/socket.c b/net/socket.c
index 721f4e7..c34dad0 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1148,13 +1148,21 @@ EXPORT_SYMBOL(sock_create_lite);
 /* No kernel lock held - perfect */
 static unsigned int sock_poll(struct file *file, poll_table *wait)
 {
+       unsigned int poll_result;
        struct socket *sock;
 
        /*
         *      We can't return errors to poll, so it's either yes or no.
         */
        sock = file->private_data;
-       return sock->ops->poll(file, sock, wait);
+
+       poll_result = sock->ops->poll(file, sock, wait);
+
+       if (wait && !(poll_result & wait->_key) &&
+               sk_valid_ll(sock->sk) && sk_poll_ll(sock->sk, 1))
+                       poll_result = sock->ops->poll(file, sock, NULL);
+
+       return poll_result;
 }
 
 static int sock_mmap(struct file *file, struct vm_area_struct *vma)


------------------------------------------------------------------------------
How ServiceNow helps IT people transform IT departments:
1. A cloud service to automate IT design, transition and operations
2. Dashboards that offer high-level views of enterprise services
3. A single system of record for all IT processes
http://p.sf.net/sfu/servicenow-d2d-j
_______________________________________________
E1000-devel mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/e1000-devel
To learn more about Intel® Ethernet, visit 
http://communities.intel.com/community/wired

Reply via email to