Re: [PATCH 2.6.21-rc3-mm2 4/4] sys_futex64 : allows 64bit futexes

2007-03-15 Thread Ulrich Drepper
Andrew Morton wrote:
> Well OK.  But that doesn't actually explain why 64-bit mutexes are needed.
> It just says they are required.

I can show you the code but it's not easy to understand.  For
complicated synchronization objects like rwlocks the state information is more than
just locked or not.  Currently we have to use internal locks, modify the
state information, and then release it.  This is terribly inefficient
when many threads are used.  With 64bit futexes the state information
can be kept in the futex object and we don't need an internal lock,
hence the speed-up.

-- 
➧ Ulrich Drepper ➧ Red Hat, Inc. ➧ 444 Castro St ➧ Mountain View, CA ❖



signature.asc
Description: OpenPGP digital signature


Re: [PATCH 2.6.21-rc3-mm2 4/4] sys_futex64 : allows 64bit futexes

2007-03-15 Thread Andrew Morton
On Thu, 15 Mar 2007 12:12:11 -0700 Ulrich Drepper <[EMAIL PROTECTED]> wrote:

> Andrew Morton wrote:
> > Why do we want 64-bit futexes?
> 
> I sent this to you already on 1/12/2007:
> 
> http://udrepper.livejournal.com/13123.html
> 

Well OK.  But that doesn't actually explain why 64-bit mutexes are needed.
It just says they are required.
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH 2.6.21-rc3-mm2 4/4] sys_futex64 : allows 64bit futexes

2007-03-15 Thread Ulrich Drepper
Andrew Morton wrote:
> Why do we want 64-bit futexes?

I sent this to you already on 1/12/2007:

http://udrepper.livejournal.com/13123.html

-- 
➧ Ulrich Drepper ➧ Red Hat, Inc. ➧ 444 Castro St ➧ Mountain View, CA ❖



signature.asc
Description: OpenPGP digital signature


Re: [PATCH 2.6.21-rc3-mm2 4/4] sys_futex64 : allows 64bit futexes

2007-03-15 Thread Andrew Morton
> On Tue, 13 Mar 2007 10:52:07 +0100 [EMAIL PROTECTED] wrote:
> This last patch is an adaptation of the sys_futex64 syscall provided in -rt
> patch (originally written by Ingo). It allows the use of 64bit futex.
> 
> I have re-worked most of the code to avoid the duplication of the code.
> 
> It does not provide the functionality for all architectures (only for x64 for 
> now).

What a lot of code.

Why do we want 64-bit futexes?
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH 2.6.21-rc3-mm2 4/4] sys_futex64 : allows 64bit futexes

2007-03-13 Thread Pierre . Peiffer
This last patch is an adaptation of the sys_futex64 syscall provided in -rt
patch (originally written by Ingo). It allows the use of 64bit futex.

I have re-worked most of the code to avoid the duplication of the code.

It does not provide the functionality for all architectures (only for x64 for 
now).

Signed-off-by: Pierre Peiffer <[EMAIL PROTECTED]>

---
 include/asm-x86_64/futex.h  |  113 
 include/asm-x86_64/unistd.h |4 
 include/linux/futex.h   |7 -
 include/linux/syscalls.h|3 
 kernel/futex.c  |  248 +++-
 kernel/futex_compat.c   |3 
 kernel/sys_ni.c |1 
 7 files changed, 301 insertions(+), 78 deletions(-)

Index: b/include/asm-x86_64/futex.h
===
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -41,6 +41,39 @@
  "=&r" (tem)   \
: "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
 
+#define __futex_atomic_op1_64(insn, ret, oldval, uaddr, oparg) \
+  __asm__ __volatile ( \
+"1:" insn "\n" \
+"2:.section .fixup,\"ax\"\n\
+3: movq%3, %1\n\
+   jmp 2b\n\
+   .previous\n\
+   .section __ex_table,\"a\"\n\
+   .align  8\n\
+   .quad   1b,3b\n\
+   .previous"  \
+   : "=r" (oldval), "=r" (ret), "=m" (*uaddr)  \
+   : "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0))
+
+#define __futex_atomic_op2_64(insn, ret, oldval, uaddr, oparg) \
+  __asm__ __volatile ( \
+"1:movq%2, %0\n\
+   movq%0, %3\n"   \
+   insn "\n"   \
+"2:" LOCK_PREFIX "cmpxchgq %3, %2\n\
+   jnz 1b\n\
+3: .section .fixup,\"ax\"\n\
+4: movq%5, %1\n\
+   jmp 3b\n\
+   .previous\n\
+   .section __ex_table,\"a\"\n\
+   .align  8\n\
+   .quad   1b,4b,2b,4b\n\
+   .previous"  \
+   : "=&a" (oldval), "=&r" (ret), "=m" (*uaddr),   \
+ "=&r" (tem)   \
+   : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
+
 static inline int
 futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 {
@@ -95,6 +128,60 @@ futex_atomic_op_inuser (int encoded_op, 
 }
 
 static inline int
+futex_atomic_op_inuser64 (int encoded_op, u64 __user *uaddr)
+{
+   int op = (encoded_op >> 28) & 7;
+   int cmp = (encoded_op >> 24) & 15;
+   u64 oparg = (encoded_op << 8) >> 20;
+   u64 cmparg = (encoded_op << 20) >> 20;
+   u64 oldval = 0, ret, tem;
+
+   if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+   oparg = 1 << oparg;
+
+   if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u64)))
+   return -EFAULT;
+
+   inc_preempt_count();
+
+   switch (op) {
+   case FUTEX_OP_SET:
+   __futex_atomic_op1_64("xchgq %0, %2", ret, oldval, uaddr, 
oparg);
+   break;
+   case FUTEX_OP_ADD:
+   __futex_atomic_op1_64(LOCK_PREFIX "xaddq %0, %2", ret, oldval,
+  uaddr, oparg);
+   break;
+   case FUTEX_OP_OR:
+   __futex_atomic_op2_64("orq %4, %3", ret, oldval, uaddr, oparg);
+   break;
+   case FUTEX_OP_ANDN:
+   __futex_atomic_op2_64("andq %4, %3", ret, oldval, uaddr, 
~oparg);
+   break;
+   case FUTEX_OP_XOR:
+   __futex_atomic_op2_64("xorq %4, %3", ret, oldval, uaddr, oparg);
+   break;
+   default:
+   ret = -ENOSYS;
+   }
+
+   dec_preempt_count();
+
+   if (!ret) {
+   switch (cmp) {
+   case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+   case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+   case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+   case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+   case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+   case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+   default: ret = -ENOSYS;
+   }
+   }
+   return ret;
+}
+
+static inline int
 futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
@@ -121,5 +208,31 @@ futex_atomic_cmpxchg_inatomic(int __user
return oldval;
 }
 
+static inline u64
+futex_atomic_cmpxchg_inatomic64(u64 __user *uaddr, u64 oldval, u64 newval)
+{
+   if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u64)))
+   return -EFAULT;
+
+   __asm__ __volatile__(
+   "1: " LOCK_PREFIX "cmpx