[PATCH] lightweight robust futexes updates
- fix: initialize the robust list(s) to NULL in copy_process.
- doc update
- cleanup: rename _inuser to _inatomic
- __user cleanups and other small cleanups

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Ulrich Drepper <drepper@redhat.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Parent: 8fdd6c6df7
Commit: 8f17d3a504

@@ -142,8 +142,6 @@ On insertion:
     of the 'lock word', to the linked list starting at 'head', and
  4) clear the 'list_op_pending' word.
 
-XXX I am particularly unsure of the following -pj XXX
-
 On removal:
  1) set the 'list_op_pending' word to the address of the 'lock word'
     to be removed,

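For readers following the ABI text in the hunk above, a minimal user-space sketch of the protocol may help. Only struct robust_list, struct robust_list_head and the set_robust_list() syscall come from the kernel ABI; struct robust_lock and the helper names are hypothetical, the sketch shows only the uncontended fast path, and exactly what 'list_op_pending' should point at is defined by the full ABI document (the sketch records the list entry).

/*
 * Illustrative sketch, not part of the patch: per-thread registration
 * and the insertion steps described above.
 */
#include <linux/futex.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

struct robust_lock {
	struct robust_list list;	/* linkage walked by the kernel on thread exit */
	int lock_word;			/* holds the owner TID plus status bits */
};

static __thread struct robust_list_head head;

static void robust_list_register(void)
{
	head.list.next = &head.list;	/* empty circular list */
	/* offset from a list entry to its lock word */
	head.futex_offset = offsetof(struct robust_lock, lock_word);
	head.list_op_pending = NULL;
	syscall(SYS_set_robust_list, &head, sizeof(head));	/* once per thread */
}

static void robust_lock_acquire(struct robust_lock *lk, pid_t tid)
{
	/* 1) note the operation in progress in 'list_op_pending' */
	head.list_op_pending = &lk->list;
	/* 2) acquire the lock word (uncontended fast path only; a real
	 *    implementation would FUTEX_WAIT on contention) */
	while (__sync_val_compare_and_swap(&lk->lock_word, 0, tid) != 0)
		;
	/* 3) link the entry into the list starting at 'head' */
	lk->list.next = head.list.next;
	head.list.next = &lk->list;
	/* 4) clear the 'list_op_pending' word */
	head.list_op_pending = NULL;
}
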
@@ -213,6 +213,6 @@ robust-mutex testcases.
 All other architectures should build just fine too - but they wont have
 the new syscalls yet.
 
-Architectures need to implement the new futex_atomic_cmpxchg_inuser()
+Architectures need to implement the new futex_atomic_cmpxchg_inatomic()
 inline function before writing up the syscalls (that function returns
 -ENOSYS right now).

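Since the documentation hunk above only names the new primitive, here is a semantics-only model in plain C: compare the word at uaddr with oldval, store newval if they match, and return whatever value was observed. This is an illustration, not kernel code; the real per-architecture futex_atomic_cmpxchg_inatomic() must additionally run with pagefaults disabled and survive faults on the user address, which is exactly the part each architecture has to provide (and why the generic fallback in the next hunks simply returns -ENOSYS).

/* Semantics-only model (userspace C, GCC __sync builtin); not the
 * in-kernel implementation. */
static inline int futex_cmpxchg_model(int *uaddr, int oldval, int newval)
{
	/* atomically: if (*uaddr == oldval) *uaddr = newval;
	 * always return the value that was found at *uaddr */
	return __sync_val_compare_and_swap(uaddr, oldval, newval);
}

handle_futex_death() further down relies on that returned old value: if it does not match what was read earlier, the lock word changed under us and the operation is retried.
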
@@ -10,7 +10,7 @@
 extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr);
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
 	return -ENOSYS;
 }

@@ -50,7 +50,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
 	return -ENOSYS;
 }

|
@ -105,7 +105,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
|
|||
}
|
||||
|
||||
static inline int
|
||||
futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
|
||||
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
|
||||
{
|
||||
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
|
||||
return -EFAULT;
|
||||
|
|
|
@@ -100,7 +100,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
 	return -ENOSYS;
 }

@@ -82,7 +82,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
 	return -ENOSYS;
 }

@@ -95,7 +95,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;

@@ -100,7 +100,7 @@ long do_futex(unsigned long uaddr, int op, int val,
 		unsigned long timeout, unsigned long uaddr2, int val2,
 		int val3);
 
-extern int handle_futex_death(unsigned int *uaddr, struct task_struct *curr);
+extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr);
 
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);

@@ -1061,7 +1061,10 @@ static task_t *copy_process(unsigned long clone_flags,
 	 * Clear TID on mm_release()?
 	 */
 	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
-
+	p->robust_list = NULL;
+#ifdef CONFIG_COMPAT
+	p->compat_robust_list = NULL;
+#endif
 	/*
 	 * sigaltstack should be cleared when sharing the same VM
 	 */

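The NULL initialization above matters because copy_process() starts from a copy of the parent's task_struct: without it, a new child or thread would inherit the parent's robust_list pointer and the kernel could walk a list the new task never registered. The current registration can be inspected from user space with get_robust_list(); the snippet below is only an illustration (it assumes SYS_get_robust_list is present in your headers, and a threaded libc may already have registered a list of its own for each thread).

/* Illustration only: query the calling task's registered robust list.
 * What it prints depends on whether the C library has already called
 * set_robust_list() for this thread. */
#include <linux/futex.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct robust_list_head *head = NULL;
	size_t len = 0;

	/* pid 0 means "the calling thread" */
	if (syscall(SYS_get_robust_list, 0, &head, &len) == 0)
		printf("robust_list head=%p len=%zu\n", (void *)head, len);
	return 0;
}
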
@@ -913,15 +913,15 @@ err_unlock:
  * Process a futex-list entry, check whether it's owned by the
  * dying task, and do notification if so:
  */
-int handle_futex_death(unsigned int *uaddr, struct task_struct *curr)
+int handle_futex_death(u32 __user *uaddr, struct task_struct *curr)
 {
-	unsigned int futex_val;
+	u32 uval;
 
-repeat:
-	if (get_user(futex_val, uaddr))
+retry:
+	if (get_user(uval, uaddr))
 		return -1;
 
-	if ((futex_val & FUTEX_TID_MASK) == curr->pid) {
+	if ((uval & FUTEX_TID_MASK) == curr->pid) {
 		/*
 		 * Ok, this dying thread is truly holding a futex
 		 * of interest. Set the OWNER_DIED bit atomically

@@ -932,12 +932,11 @@ repeat:
 		 * thread-death.) The rest of the cleanup is done in
 		 * userspace.
 		 */
-		if (futex_atomic_cmpxchg_inuser(uaddr, futex_val,
-				futex_val | FUTEX_OWNER_DIED) !=
-					futex_val)
-			goto repeat;
+		if (futex_atomic_cmpxchg_inatomic(uaddr, uval,
+					uval | FUTEX_OWNER_DIED) != uval)
+			goto retry;
 
-		if (futex_val & FUTEX_WAITERS)
+		if (uval & FUTEX_WAITERS)
 			futex_wake((unsigned long)uaddr, 1);
 	}
 	return 0;

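"The rest of the cleanup is done in userspace" refers to the owner-died handling that robust mutexes expose to applications: the next locker is told the previous owner died and gets a chance to repair the protected state. A minimal sketch of that user-space half using the standard POSIX robust-mutex calls (nothing in it is specific to this patch):

/* Sketch of the user-space half: when the kernel has set FUTEX_OWNER_DIED
 * on the lock word, the next pthread_mutex_lock() on a robust mutex
 * reports EOWNERDEAD instead of blocking forever. */
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t m;

static void robust_mutex_init(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);
}

static void lock_with_recovery(void)
{
	if (pthread_mutex_lock(&m) == EOWNERDEAD) {
		/* previous owner died holding the lock: repair the protected
		 * state here, then mark the mutex usable again */
		pthread_mutex_consistent(&m);
	}
	/* ... critical section ... */
	pthread_mutex_unlock(&m);
}
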
@@ -985,7 +984,6 @@ void exit_robust_list(struct task_struct *curr)
 		if (handle_futex_death((void *)entry + futex_offset,
 					curr))
 			return;
-
 		/*
 		 * Fetch the next entry in the list:
 		 */

@@ -121,9 +121,9 @@ err_unlock:
 	return ret;
 }
 
-asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, int val,
+asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
 		struct compat_timespec __user *utime, u32 __user *uaddr2,
-		int val3)
+		u32 val3)
 {
 	struct timespec t;
 	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;

@@ -137,6 +137,5 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, int val,
 	if (op >= FUTEX_REQUEUE)
 		val2 = (int) (unsigned long) utime;
 
-	return do_futex((unsigned long)uaddr, op, val, timeout,
-		(unsigned long)uaddr2, val2, val3);
+	return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
 }