sched: Fix race in task_group()

Stefan reported a crash on a kernel before a3e5d1091c ("sched:
Don't call task_group() too many times in set_task_rq()"); he
found the cause to be that the multiple task_group()
invocations in set_task_rq() returned different values.

Looking at all that, I found a lack of serialization and plainly
wrong comments.

The below tries to fix it using an extra pointer which is
updated under the appropriate scheduler locks. It's not pretty,
but I can't really see another way given how all the cgroup
stuff works.
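
For illustration only (not part of the patch): before a3e5d1091c,
set_task_rq() did a separate task_group() lookup for each use,
roughly as sketched below, so without serialization a concurrent
cgroup move could make the lookups disagree and leave ->se and ->rt
parented to different groups.

/*
 * Illustrative sketch of the pre-a3e5d1091c shape of set_task_rq().
 * Each task_group() call did its own cgroup lookup, so a concurrent
 * move could make lookup #1 and lookup #2 return different groups.
 */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED)
	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];	/* lookup #1 */
	p->se.parent = task_group(p)->se[cpu];		/* lookup #2 */
#endif

#if defined(CONFIG_RT_GROUP_SCHED)
	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];	/* lookup #3 */
	p->rt.parent = task_group(p)->rt_se[cpu];	/* lookup #4 */
#endif
}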

Reported-and-tested-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1340364965.18025.71.camel@twins
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Peter Zijlstra 2012-06-22 13:36:05 +02:00, committed by Ingo Molnar
Parent 88b8dac0a1
Commit 8323f26ce3
4 changed files with 33 additions and 16 deletions

--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h

@@ -123,6 +123,15 @@ extern struct group_info init_groups;
 
 extern struct cred init_cred;
 
+extern struct task_group root_task_group;
+
+#ifdef CONFIG_CGROUP_SCHED
+# define INIT_CGROUP_SCHED(tsk)					\
+	.sched_task_group = &root_task_group,
+#else
+# define INIT_CGROUP_SCHED(tsk)
+#endif
+
 #ifdef CONFIG_PERF_EVENTS
 # define INIT_PERF_EVENTS(tsk)					\
 	.perf_event_mutex =						\
@@ -161,6 +170,7 @@ extern struct cred init_cred;
 	},								\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
 	INIT_PUSHABLE_TASKS(tsk)					\
+	INIT_CGROUP_SCHED(tsk)						\
 	.ptraced	= LIST_HEAD_INIT(tsk.ptraced),			\
 	.ptrace_entry	= LIST_HEAD_INIT(tsk.ptrace_entry),		\
 	.real_parent	= &tsk,						\

--- a/include/linux/sched.h
+++ b/include/linux/sched.h

@@ -1245,6 +1245,9 @@ struct task_struct {
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
+#ifdef CONFIG_CGROUP_SCHED
+	struct task_group *sched_task_group;
+#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	/* list of struct preempt_notifier: */
@@ -2724,7 +2727,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
 extern long sched_group_rt_period(struct task_group *tg);
 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
-#endif
+#endif /* CONFIG_CGROUP_SCHED */
 
 extern int task_can_switch_user(struct user_struct *up,
 					struct task_struct *tsk);

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c

@@ -1096,7 +1096,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
 	 *
 	 * sched_move_task() holds both and thus holding either pins the cgroup,
-	 * see set_task_rq().
+	 * see task_group().
 	 *
 	 * Furthermore, all task_rq users should acquire both locks, see
 	 * task_rq_lock().
@@ -7658,6 +7658,7 @@ void sched_destroy_group(struct task_group *tg)
  */
 void sched_move_task(struct task_struct *tsk)
 {
+	struct task_group *tg;
 	int on_rq, running;
 	unsigned long flags;
 	struct rq *rq;
@@ -7672,6 +7673,12 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
 
+	tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+				lockdep_is_held(&tsk->sighand->siglock)),
+			  struct task_group, css);
+	tg = autogroup_task_group(tsk, tg);
+	tsk->sched_task_group = tg;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_move_group)
 		tsk->sched_class->task_move_group(tsk, on_rq);
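
For context, an illustrative sketch (not part of the patch): the
lockdep_is_held(&tsk->sighand->siglock) term in the lookup above covers
the autogroup path, which reaches sched_move_task() for each thread
while holding siglock, roughly like autogroup_move_group() in
kernel/sched/auto_group.c:

/*
 * Simplified sketch of autogroup_move_group(); details elided.
 * sched_move_task() is called with ->siglock held, which is why the
 * lockdep expression in the new lookup accepts that lock.
 */
static void autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
	unsigned long flags;
	struct task_struct *t;

	spin_lock_irqsave(&p->sighand->siglock, flags);
	/* ... p->signal->autogroup is switched to ag here ... */
	t = p;
	do {
		sched_move_task(t);	/* runs under siglock */
	} while_each_thread(p, t);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
}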

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h

@@ -538,22 +538,19 @@ extern int group_balance_cpu(struct sched_group *sg);
 /*
  * Return the group to which this tasks belongs.
  *
- * We use task_subsys_state_check() and extend the RCU verification with
- * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
- * task it moves into the cgroup. Therefore by holding either of those locks,
- * we pin the task to the current cgroup.
+ * We cannot use task_subsys_state() and friends because the cgroup
+ * subsystem changes that value before the cgroup_subsys::attach() method
+ * is called, therefore we cannot pin it and might observe the wrong value.
+ *
+ * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
+ * core changes this before calling sched_move_task().
+ *
+ * Instead we use a 'copy' which is updated from sched_move_task() while
+ * holding both task_struct::pi_lock and rq::lock.
  */
 static inline struct task_group *task_group(struct task_struct *p)
 {
-	struct task_group *tg;
-	struct cgroup_subsys_state *css;
-
-	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-			lockdep_is_held(&p->pi_lock) ||
-			lockdep_is_held(&task_rq(p)->lock));
-	tg = container_of(css, struct task_group, css);
-
-	return autogroup_task_group(p, tg);
+	return p->sched_task_group;
 }
 
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
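
For illustration, a hypothetical reader (example_use_task_group() is
not in the tree): holding either of the locks that sched_move_task()
takes via task_rq_lock() pins p->sched_task_group, so repeated
task_group() calls, e.g. from set_task_rq(), now agree:

/*
 * Hypothetical example: task_rq_lock() takes both p->pi_lock and
 * rq->lock, the same locks sched_move_task() holds while updating
 * p->sched_task_group, so the group read here cannot change until
 * we unlock.
 */
static void example_use_task_group(struct task_struct *p)
{
	unsigned long flags;
	struct task_group *tg;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);
	tg = task_group(p);	/* == p->sched_task_group, stable */
	/* ... safe to dereference tg->cfs_rq[cpu_of(rq)] etc. ... */
	task_rq_unlock(rq, p, &flags);
}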