kernel-ark/include/asm-s390/futex.h
Heiko Carstens d8ad075ef6 [S390] don't call handle_mm_fault() if in an atomic context.
There are several places in the futex code where a spin_lock is held
while user space accesses still happen. Deadlocks are avoided by
increasing the preempt count; the pagefault handler then does not take
any locks but immediately searches the fixup tables instead.
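
For illustration, a minimal sketch of the pattern described above (not part
of this patch; __get_user() and the variable names are only an example):
disabling page faults raises the preempt count, so a fault inside the user
access is resolved through the exception fixup tables and reported as
-EFAULT instead of ending up in handle_mm_fault():

	pagefault_disable();		/* raises the preempt count */
	ret = __get_user(val, uaddr);	/* fault -> fixup -> -EFAULT */
	pagefault_enable();
	if (ret) {
		/* caller drops its spin_lock, faults the page in, retries */
	}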

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
2007-01-09 10:18:50 +01:00

#ifndef _ASM_S390_FUTEX_H
#define _ASM_S390_FUTEX_H

#ifdef __KERNEL__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>

static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	pagefault_disable();
	ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval);
	pagefault_enable();

	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
		default: ret = -ENOSYS;
		}
	}
	return ret;
}

static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr,
						int oldval, int newval)
{
	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval);
}

#endif /* __KERNEL__ */
#endif /* _ASM_S390_FUTEX_H */