Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex.c          38
-rw-r--r--  kernel/futex_compat.c    9
2 files changed, 43 insertions(+), 4 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index c21f667c63f6..06968cd79200 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -60,6 +60,8 @@
#include "rtmutex_common.h"
+int __read_mostly futex_cmpxchg_enabled;
+
#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
/*
@@ -469,6 +471,8 @@ void exit_pi_state_list(struct task_struct *curr)
struct futex_hash_bucket *hb;
union futex_key key;
+ if (!futex_cmpxchg_enabled)
+ return;
/*
* We are a ZOMBIE and nobody can enqueue itself on
* pi_state_list anymore, but we have to be careful
@@ -1870,6 +1874,8 @@ asmlinkage long
sys_set_robust_list(struct robust_list_head __user *head,
size_t len)
{
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
/*
* The kernel knows only one size for now:
*/
@@ -1894,6 +1900,9 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
struct robust_list_head __user *head;
unsigned long ret;
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
+
if (!pid)
head = current->robust_list;
else {
@@ -1997,6 +2006,9 @@ void exit_robust_list(struct task_struct *curr)
unsigned long futex_offset;
int rc;
+ if (!futex_cmpxchg_enabled)
+ return;
+
/*
* Fetch the list head (which was registered earlier, via
* sys_set_robust_list()):
@@ -2051,7 +2063,7 @@ void exit_robust_list(struct task_struct *curr)
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3)
{
- int ret;
+ int ret = -ENOSYS;
int cmd = op & FUTEX_CMD_MASK;
struct rw_semaphore *fshared = NULL;
@@ -2083,13 +2095,16 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
break;
case FUTEX_LOCK_PI:
- ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
+ if (futex_cmpxchg_enabled)
+ ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
break;
case FUTEX_UNLOCK_PI:
- ret = futex_unlock_pi(uaddr, fshared);
+ if (futex_cmpxchg_enabled)
+ ret = futex_unlock_pi(uaddr, fshared);
break;
case FUTEX_TRYLOCK_PI:
- ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
+ if (futex_cmpxchg_enabled)
+ ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
break;
default:
ret = -ENOSYS;
@@ -2145,8 +2160,23 @@ static struct file_system_type futex_fs_type = {
static int __init init(void)
{
+ u32 curval;
int i;
+ /*
+ * This will fail and we want it. Some arch implementations do
+ * runtime detection of the futex_atomic_cmpxchg_inatomic()
+ * functionality. We want to know that before we call in any
+ * of the complex code paths. Also we want to prevent
+ * registration of robust lists in that case. NULL is
+ * guaranteed to fault and we get -EFAULT on functional
+ * implementation, the non functional ones will return
+ * -ENOSYS.
+ */
+ curval = cmpxchg_futex_value_locked(NULL, 0, 0);
+ if (curval == -EFAULT)
+ futex_cmpxchg_enabled = 1;
+
for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
spin_lock_init(&futex_queues[i].lock);
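
The boot-time probe above depends on the architecture's futex_atomic_cmpxchg_inatomic() returning -ENOSYS when the primitive is not implemented. As a rough illustration only (this is not part of the patch; the signature follows the asm-generic convention of that era), a non-functional stub looks roughly like:

static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
	/*
	 * No atomic cmpxchg on user memory: the boot-time probe in
	 * init() sees this -ENOSYS and leaves futex_cmpxchg_enabled
	 * at 0, so PI and robust futex operations are rejected early.
	 */
	return -ENOSYS;
}

A functional implementation, by contrast, actually accesses uaddr, so the NULL pointer passed by the probe faults and -EFAULT comes back, which is what flips futex_cmpxchg_enabled to 1.
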
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 7d5e4b016f39..ff90f049f8f6 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -54,6 +54,9 @@ void compat_exit_robust_list(struct task_struct *curr)
compat_long_t futex_offset;
int rc;
+ if (!futex_cmpxchg_enabled)
+ return;
+
/*
* Fetch the list head (which was registered earlier, via
* sys_set_robust_list()):
@@ -115,6 +118,9 @@ asmlinkage long
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
compat_size_t len)
{
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
+
if (unlikely(len != sizeof(*head)))
return -EINVAL;
@@ -130,6 +136,9 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
struct compat_robust_list_head __user *head;
unsigned long ret;
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
+
if (!pid)
head = current->compat_robust_list;
else {
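
Because sys_set_robust_list() now returns -ENOSYS before its size check when the cmpxchg primitive is missing, userspace can probe for robust/PI futex support. A minimal sketch of such a test program (hypothetical, not part of this patch):

#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/*
	 * Pass a deliberately wrong length: a kernel with a working
	 * futex_atomic_cmpxchg_inatomic() reaches the size check and
	 * fails with EINVAL, while a kernel without it now bails out
	 * earlier with ENOSYS.
	 */
	if (syscall(SYS_set_robust_list, NULL, 0UL) == -1 &&
	    errno == ENOSYS)
		printf("robust list / PI futexes: not supported\n");
	else
		printf("futex cmpxchg support present\n");
	return 0;
}
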