move exit_task_work() past exit_files() et.al.
... and get rid of PF_EXITING check in task_work_add().

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent: 67d1214551
Commit: ed3e694d78
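For orientation, here is a minimal, hypothetical sketch of a task_work_add() caller against the interface shown in the diff below (my_cleanup() and queue_cleanup() are made-up names, not part of this commit). With the PF_EXITING check gone, it is the caller's job to make sure the target task has not already passed exit_task_work(), e.g. by only queueing work to current or to a task it has otherwise pinned before exit:

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

/* hypothetical callback: invoked from task_work_run() in the target task's context */
static void my_cleanup(struct callback_head *head)
{
	kfree(head);
}

/* hypothetical helper: queue my_cleanup() to run when 'task' heads back to userspace */
static int queue_cleanup(struct task_struct *task)
{
	struct callback_head *work = kmalloc(sizeof(*work), GFP_KERNEL);

	if (!work)
		return -ENOMEM;
	work->func = my_cleanup;
	/* notify=true: raise TIF_NOTIFY_RESUME via set_notify_resume() so the work runs promptly */
	return task_work_add(task, work, true);
}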
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -953,14 +953,11 @@ void do_exit(long code)
 	exit_signals(tsk);  /* sets PF_EXITING */
 	/*
 	 * tsk->flags are checked in the futex code to protect against
-	 * an exiting task cleaning up the robust pi futexes, and in
-	 * task_work_add() to avoid the race with exit_task_work().
+	 * an exiting task cleaning up the robust pi futexes.
 	 */
 	smp_mb();
 	raw_spin_unlock_wait(&tsk->pi_lock);
 
-	exit_task_work(tsk);
-
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, task_pid_nr(current),
@@ -995,6 +992,7 @@ void do_exit(long code)
 	exit_shm(tsk);
 	exit_files(tsk);
 	exit_fs(tsk);
+	exit_task_work(tsk);
 	check_stack_usage();
 	exit_thread();
 
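Taken together, the two hunks above leave the tail of do_exit() looking roughly like this (condensed from the hunks, not a verbatim quote). Any work queued while exit_files()/exit_fs() are running is still picked up, because exit_task_work() now comes after them:

	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	exit_task_work(tsk);	/* moved here; was right after raw_spin_unlock_wait(&tsk->pi_lock) */
	check_stack_usage();
	exit_thread();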
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -5,34 +5,26 @@
 int
 task_work_add(struct task_struct *task, struct callback_head *twork, bool notify)
 {
+	struct callback_head *last, *first;
 	unsigned long flags;
-	int err = -ESRCH;
 
-#ifndef TIF_NOTIFY_RESUME
-	if (notify)
-		return -ENOTSUPP;
-#endif
 	/*
-	 * We must not insert the new work if the task has already passed
-	 * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait()
-	 * and check PF_EXITING under pi_lock.
+	 * Not inserting the new work if the task has already passed
+	 * exit_task_work() is the responisbility of callers.
	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	if (likely(!(task->flags & PF_EXITING))) {
-		struct callback_head *last = task->task_works;
-		struct callback_head *first = last ? last->next : twork;
-		twork->next = first;
-		if (last)
-			last->next = twork;
-		task->task_works = twork;
-		err = 0;
-	}
+	last = task->task_works;
+	first = last ? last->next : twork;
+	twork->next = first;
+	if (last)
+		last->next = twork;
+	task->task_works = twork;
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	/* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
-	if (likely(!err) && notify)
+	if (notify)
 		set_notify_resume(task);
-	return err;
+	return 0;
 }
 
 struct callback_head *
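The insertion in task_work_add() maintains a circular singly linked list: task->task_works points at the most recently queued entry, and that entry's ->next points back at the oldest one, which is why "first" is computed as last ? last->next : twork. A stand-alone user-space sketch of the same pointer manipulation (all names here are made up for illustration):

#include <stdio.h>

struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *);
};

/* 'works' mimics task->task_works: it points at the newest entry,
 * and newest->next points at the oldest entry of the ring. */
static struct callback_head *works;

static void add_work(struct callback_head *twork)
{
	struct callback_head *last = works;
	struct callback_head *first = last ? last->next : twork;

	twork->next = first;		/* new entry closes the ring back to the oldest */
	if (last)
		last->next = twork;	/* old newest now points at the new entry */
	works = twork;			/* head pointer tracks the newest entry */
}

static void hello(struct callback_head *cb)
{
	printf("callback %p ran\n", (void *)cb);
}

int main(void)
{
	struct callback_head a = { .func = hello }, b = { .func = hello };

	add_work(&a);
	add_work(&b);

	/* walk the ring exactly once, oldest entry first */
	struct callback_head *start = works->next, *p = start;
	do {
		p->func(p);
		p = p->next;
	} while (p != start);
	return 0;
}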