[RFC PATCH 2/3] use hrtimer in select and pselect

2007-03-04 Thread Arnd Bergmann
This changes the select and pselect system calls to use the
new schedule_timeout_hr function. Since many applications use
select() rather than nanosleep() for sleeping, this gives them
a higher-resolution sleep as well.
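
For reference, the userspace pattern that benefits is the common
"select() as a sleep" idiom shown below; this is a standalone
illustration only, not part of the patch. Without hrtimers the
kernel rounds such timeouts up to whole jiffies, so a request for a
few hundred microseconds can end up sleeping for several milliseconds.

	#include <stdio.h>
	#include <sys/select.h>
	#include <sys/time.h>

	int main(void)
	{
		struct timeval before, after;
		struct timeval tv = { .tv_sec = 0, .tv_usec = 100 };

		gettimeofday(&before, NULL);
		/* no fds at all: select() is used purely as a sleep */
		select(0, NULL, NULL, NULL, &tv);
		gettimeofday(&after, NULL);

		printf("slept %ld us\n",
		       (after.tv_sec - before.tv_sec) * 1000000L +
		       (after.tv_usec - before.tv_usec));
		return 0;
	}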

BUG: the same needs to be done for the compat syscalls; the
current patch breaks the build on 64-bit machines.
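
A rough, untested sketch of what the compat side would need, assuming
compat_core_sys_select() in fs/compat.c is converted to take a
ktime_t pointer just like core_sys_select() in the patch below
(hypothetical code, not part of this patch):

	asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
			compat_ulong_t __user *outp, compat_ulong_t __user *exp,
			struct compat_timeval __user *tvp)
	{
		struct compat_timeval tv;
		ktime_t timeout, *timeoutp = NULL;
		int ret;

		if (tvp) {
			if (copy_from_user(&tv, tvp, sizeof(tv)))
				return -EFAULT;
			if (tv.tv_sec < 0 || tv.tv_usec < 0)
				return -EINVAL;
			/* compat_timeval has 32-bit fields, so tv_sec cannot
			 * exceed MAX_INT64_SECONDS; no overflow check needed */
			timeout = ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
			timeoutp = &timeout;
		}

		ret = compat_core_sys_select(n, inp, outp, exp, timeoutp);

		/* copying the remaining time back to *tvp is omitted here */
		return ret;
	}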

Signed-off-by: Arnd Bergmann <[EMAIL PROTECTED]>

Index: linux-cg/fs/select.c
===================================================================
--- linux-cg.orig/fs/select.c
+++ linux-cg/fs/select.c
@@ -189,7 +189,7 @@ get_max:
 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
 #define POLLEX_SET (POLLPRI)
 
-int do_select(int n, fd_set_bits *fds, s64 *timeout)
+int do_select(int n, fd_set_bits *fds, ktime_t *timeout)
 {
struct poll_wqueues table;
poll_table *wait;
@@ -205,12 +205,11 @@ int do_select(int n, fd_set_bits *fds, s
 
poll_initwait(&table);
wait = &table.pt;
-   if (!*timeout)
+   if (timeout && !timeout->tv64)
wait = NULL;
retval = 0;
for (;;) {
unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
-   long __timeout;
 
set_current_state(TASK_INTERRUPTIBLE);
 
@@ -266,27 +265,19 @@ int do_select(int n, fd_set_bits *fds, s
*rexp = res_ex;
}
wait = NULL;
-   if (retval || !*timeout || signal_pending(current))
+   if (retval || (timeout && !timeout->tv64)
+   || signal_pending(current))
break;
if(table.error) {
retval = table.error;
break;
}
 
-   if (*timeout < 0) {
+   if (!timeout || timeout->tv64 < 0)
/* Wait indefinitely */
-   __timeout = MAX_SCHEDULE_TIMEOUT;
-   } else if (unlikely(*timeout >= (s64)MAX_SCHEDULE_TIMEOUT - 1)) {
-   /* Wait for longer than MAX_SCHEDULE_TIMEOUT. Do it in a loop */
-   __timeout = MAX_SCHEDULE_TIMEOUT - 1;
-   *timeout -= __timeout;
-   } else {
-   __timeout = *timeout;
-   *timeout = 0;
-   }
-   __timeout = schedule_timeout(__timeout);
-   if (*timeout >= 0)
-   *timeout += __timeout;
+   schedule();
+   else
+   *timeout = schedule_timeout_hr(*timeout);
}
__set_current_state(TASK_RUNNING);
 
@@ -307,7 +298,7 @@ int do_select(int n, fd_set_bits *fds, s
((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
 
 static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
-  fd_set __user *exp, s64 *timeout)
+  fd_set __user *exp, ktime_t *timeout)
 {
fd_set_bits fds;
void *bits;
@@ -384,7 +375,7 @@ out_nofds:
 asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, struct timeval __user *tvp)
 {
-   s64 timeout = -1;
+   ktime_t timeout, *timeoutp = NULL;
struct timeval tv;
int ret;
 
@@ -395,24 +386,20 @@ asmlinkage long sys_select(int n, fd_set
if (tv.tv_sec < 0 || tv.tv_usec < 0)
return -EINVAL;
 
+   timeout = timeval_to_ktime(tv);
/* Cast to u64 to make GCC stop complaining */
-   if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS)
-   timeout = -1;   /* infinite */
-   else {
-   timeout = ROUND_UP(tv.tv_usec, USEC_PER_SEC/HZ);
-   timeout += tv.tv_sec * HZ;
-   }
+   if ((u64)tv.tv_sec < (u64)MAX_INT64_SECONDS)
+   timeoutp = &timeout;
}
 
-   ret = core_sys_select(n, inp, outp, exp, &timeout);
+   ret = core_sys_select(n, inp, outp, exp, timeoutp);
 
if (tvp) {
struct timeval rtv;
 
if (current->personality & STICKY_TIMEOUTS)
goto sticky;
-   rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
-   rtv.tv_sec = timeout;
+   rtv = ktime_to_timeval(timeout);
if (timeval_compare(&rtv, &tv) >= 0)
rtv = tv;
if (copy_to_user(tvp, &rtv, sizeof(rtv))) {
@@ -438,7 +425,7 @@ asmlinkage long sys_pselect7(int n, fd_s
fd_set __user *exp, struct timespec __user *tsp,
const sigset_t __user *sigmask, size_t sigsetsize)
 {
-   s64 timeout = MAX_SCHEDULE_TIMEOUT;
+   ktime_t timeout, *timeoutp = NULL;
sigset_t ksigmask, sigsaved;
struct timespec ts;
int ret;
@@ -450,13 +437,11 @@ asmlinkage long sys_pselect7(int n, fd_s
if (ts.tv_sec < 0 || 
