cgroup: simplify double-check locking in cgroup_attach_proc
To keep the complexity of the double-check locking in one place, move the thread_group_leader() check up into attach_task_by_pid(). This allows us to use a goto instead of returning -EAGAIN. While at it, convert a couple of returns to gotos, and use RCU for the !pid case as well, in order to simplify the logic.

Changes in V2 (https://lkml.org/lkml/2011/12/22/86, suggested by Tejun Heo):
* Use a goto instead of returning -EAGAIN

Signed-off-by: Mandeep Singh Baines <msb@chromium.org>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: containers@lists.linux-foundation.org
Cc: cgroups@vger.kernel.org
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Menage <paul@paulmenage.org>
This commit is contained in:
Parent
245282557c
Commit
b78949ebfb
|
@ -2104,19 +2104,6 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
|
|||
|
||||
/* prevent changes to the threadgroup list while we take a snapshot. */
|
||||
read_lock(&tasklist_lock);
|
||||
if (!thread_group_leader(leader)) {
|
||||
/*
|
||||
* a race with de_thread from another thread's exec() may strip
|
||||
* us of our leadership, making while_each_thread unsafe to use
|
||||
* on this task. if this happens, there is no choice but to
|
||||
* throw this task away and try again (from cgroup_procs_write);
|
||||
* this is "double-double-toil-and-trouble-check locking".
|
||||
*/
|
||||
read_unlock(&tasklist_lock);
|
||||
retval = -EAGAIN;
|
||||
goto out_free_group_list;
|
||||
}
|
||||
|
||||
tsk = leader;
|
||||
i = 0;
|
||||
do {
|
||||
|
@ -2245,22 +2232,14 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
|
|||
if (!cgroup_lock_live_group(cgrp))
|
||||
return -ENODEV;
|
||||
|
||||
retry_find_task:
|
||||
rcu_read_lock();
|
||||
if (pid) {
|
||||
rcu_read_lock();
|
||||
tsk = find_task_by_vpid(pid);
|
||||
if (!tsk) {
|
||||
rcu_read_unlock();
|
||||
cgroup_unlock();
|
||||
return -ESRCH;
|
||||
}
|
||||
if (threadgroup) {
|
||||
/*
|
||||
* RCU protects this access, since tsk was found in the
|
||||
* tid map. a race with de_thread may cause group_leader
|
||||
* to stop being the leader, but cgroup_attach_proc will
|
||||
* detect it later.
|
||||
*/
|
||||
tsk = tsk->group_leader;
|
||||
ret= -ESRCH;
|
||||
goto out_unlock_cgroup;
|
||||
}
|
||||
/*
|
||||
* even if we're attaching all tasks in the thread group, we
|
||||
|
@ -2271,29 +2250,38 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
|
|||
cred->euid != tcred->uid &&
|
||||
cred->euid != tcred->suid) {
|
||||
rcu_read_unlock();
|
||||
cgroup_unlock();
|
||||
return -EACCES;
|
||||
ret = -EACCES;
|
||||
goto out_unlock_cgroup;
|
||||
}
|
||||
get_task_struct(tsk);
|
||||
rcu_read_unlock();
|
||||
} else {
|
||||
if (threadgroup)
|
||||
tsk = current->group_leader;
|
||||
else
|
||||
tsk = current;
|
||||
get_task_struct(tsk);
|
||||
}
|
||||
|
||||
threadgroup_lock(tsk);
|
||||
} else
|
||||
tsk = current;
|
||||
|
||||
if (threadgroup)
|
||||
ret = cgroup_attach_proc(cgrp, tsk);
|
||||
else
|
||||
ret = cgroup_attach_task(cgrp, tsk);
|
||||
tsk = tsk->group_leader;
|
||||
get_task_struct(tsk);
|
||||
rcu_read_unlock();
|
||||
|
||||
threadgroup_lock(tsk);
|
||||
if (threadgroup) {
|
||||
if (!thread_group_leader(tsk)) {
|
||||
/*
|
||||
* a race with de_thread from another thread's exec()
|
||||
* may strip us of our leadership, if this happens,
|
||||
* there is no choice but to throw this task away and
|
||||
* try again; this is
|
||||
* "double-double-toil-and-trouble-check locking".
|
||||
*/
|
||||
threadgroup_unlock(tsk);
|
||||
put_task_struct(tsk);
|
||||
goto retry_find_task;
|
||||
}
|
||||
ret = cgroup_attach_proc(cgrp, tsk);
|
||||
} else
|
||||
ret = cgroup_attach_task(cgrp, tsk);
|
||||
threadgroup_unlock(tsk);
|
||||
|
||||
put_task_struct(tsk);
|
||||
out_unlock_cgroup:
|
||||
cgroup_unlock();
|
||||
return ret;
|
||||
}
|
||||
|
@ -2305,16 +2293,7 @@ static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
|
|||
|
||||
static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
|
||||
{
|
||||
int ret;
|
||||
do {
|
||||
/*
|
||||
* attach_proc fails with -EAGAIN if threadgroup leadership
|
||||
* changes in the middle of the operation, in which case we need
|
||||
* to find the task_struct for the new leader and start over.
|
||||
*/
|
||||
ret = attach_task_by_pid(cgrp, tgid, true);
|
||||
} while (ret == -EAGAIN);
|
||||
return ret;
|
||||
return attach_task_by_pid(cgrp, tgid, true);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
Loading…
Reference in a new issue