This last patch is an adaptation of the sys_futex64 syscall provided in the -rt
patch (originally written by Ingo). It allows the use of 64-bit futexes.
I have re-worked most of the code to avoid code duplication.
It does not provide the functionality for all architectures (only for x86_64
for now).
Signed-off-by: Pierre Peiffer <[EMAIL PROTECTED]>
---
include/asm-x86_64/futex.h | 113
include/asm-x86_64/unistd.h |4
include/linux/futex.h |7 -
include/linux/syscalls.h|3
kernel/futex.c | 248 +++-
kernel/futex_compat.c |3
kernel/sys_ni.c |1
7 files changed, 301 insertions(+), 78 deletions(-)
Index: b/include/asm-x86_64/futex.h
===
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -41,6 +41,39 @@
"=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
+/*
+ * __futex_atomic_op1_64 - 64-bit single-instruction futex op.
+ * Runs "insn" (e.g. xchgq / lock xaddq) once against the user word at
+ * *uaddr; %0 carries oparg in and the old value out.  A fault on the
+ * access jumps to the .fixup stub (label 3), which stores -EFAULT into
+ * "ret" and resumes at label 2.  The __ex_table entry (.quad 1b,3b)
+ * wires the faulting instruction to that stub.
+ * NOTE(review): unlike the 32-bit __futex_atomic_op1 above, "ret" is
+ * never initialized on the success path here except via the "1" (0)
+ * input constraint tying it to 0 — verify the constraint list matches.
+ */
+#define __futex_atomic_op1_64(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile ( \
+"1:" insn "\n" \
+"2:.section .fixup,\"ax\"\n\
+3: movq%3, %1\n\
+ jmp 2b\n\
+ .previous\n\
+ .section __ex_table,\"a\"\n\
+ .align 8\n\
+ .quad 1b,3b\n\
+ .previous" \
+ : "=r" (oldval), "=r" (ret), "=m" (*uaddr) \
+ : "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0))
+
+/*
+ * __futex_atomic_op2_64 - 64-bit read-modify-write futex op via a
+ * cmpxchg loop.  Loads the user word into %0 (rax, per "=&a"), copies
+ * it into the scratch register "tem" (%3), applies "insn" (orq/andq/
+ * xorq with oparg), then "lock cmpxchgq" publishes the result only if
+ * *uaddr still equals the value read at label 1; otherwise it retries.
+ * A fault at either the load (1) or the cmpxchg (2) jumps to fixup
+ * label 4, which stores -EFAULT into "ret" and resumes at label 3.
+ * Both faulting sites are registered in __ex_table (.quad 1b,4b,2b,4b).
+ * On success, %0 holds the pre-operation value ("oldval").
+ */
+#define __futex_atomic_op2_64(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile ( \
+"1:movq%2, %0\n\
+ movq%0, %3\n" \
+ insn "\n" \
+"2:" LOCK_PREFIX "cmpxchgq %3, %2\n\
+ jnz 1b\n\
+3: .section .fixup,\"ax\"\n\
+4: movq%5, %1\n\
+ jmp 3b\n\
+ .previous\n\
+ .section __ex_table,\"a\"\n\
+ .align 8\n\
+ .quad 1b,4b,2b,4b\n\
+ .previous" \
+ : "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \
+ "=&r" (tem) \
+ : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
+
static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
@@ -95,6 +128,60 @@ futex_atomic_op_inuser (int encoded_op,
}
static inline int
+/*
+ * futex_atomic_op_inuser64 - decode and run a FUTEX_OP on a 64-bit
+ * user word, then evaluate the encoded comparison against the value
+ * the word held before the operation.
+ *
+ * encoded_op packs: op (bits 28-31, incl. FUTEX_OP_OPARG_SHIFT flag),
+ * cmp (bits 24-27), oparg (bits 12-23, sign-extended), cmparg
+ * (bits 0-11, sign-extended) — same layout as the 32-bit variant.
+ *
+ * Returns the (0/1) comparison result on success, -EFAULT if uaddr is
+ * not writable, or -ENOSYS for an unknown op/cmp code.  Runs with
+ * preemption disabled so the fault-retry loop stays bounded.
+ */
+futex_atomic_op_inuser64 (int encoded_op, u64 __user *uaddr)
+{
+	int op = (encoded_op >> 28) & 7;
+	int cmp = (encoded_op >> 24) & 15;
+	u64 oparg = (encoded_op << 8) >> 20;	/* sign-extending extract */
+	u64 cmparg = (encoded_op << 20) >> 20;	/* sign-extending extract */
+	u64 oldval = 0, ret, tem;
+
+	/*
+	 * 1ULL, not 1: shifting the int constant 1 by oparg >= 32 is
+	 * undefined behavior and cannot produce the upper-half bits a
+	 * 64-bit futex word needs.  Mask to the shift width as the
+	 * 32-bit path does with (oparg & 31).
+	 */
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+		oparg = 1ULL << (oparg & 63);
+
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u64)))
+		return -EFAULT;
+
+	inc_preempt_count();
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op1_64("xchgq %0, %2", ret, oldval, uaddr,
+				      oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op1_64(LOCK_PREFIX "xaddq %0, %2", ret, oldval,
+				      uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op2_64("orq %4, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op2_64("andq %4, %3", ret, oldval, uaddr,
+				      ~oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op2_64("xorq %4, %3", ret, oldval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	dec_preempt_count();
+
+	/* Only evaluate the comparison if the atomic op itself succeeded. */
+	if (!ret) {
+		switch (cmp) {
+		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+		default: ret = -ENOSYS;
+		}
+	}
+	return ret;
+}
+
+
+static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
@@ -121,5 +208,31 @@ futex_atomic_cmpxchg_inatomic(int __user
return oldval;
}
+static inline u64
+futex_atomic_cmpxchg_inatomic64(u64 __user *uaddr, u64 oldval, u64 newval)
+{
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u64)))
+ return -EFAULT;
+
+ __asm__ __volatile__(
+ "1: " LOCK_PREFIX "cmpx