sched: normalize_rt_tasks(): Don't use _irqsave for tasklist_lock, use task_rq_lock()

1. read_lock(tasklist_lock) does not need to disable irqs.

2. ->mm != NULL is a common mistake, use PF_KTHREAD.

3. The second ->mm check can be simply removed.

4. task_rq_lock() looks better than raw_spin_lock(&p->pi_lock) +
   __task_rq_lock().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20140921193338.GA28621@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Oleg Nesterov 2014-09-21 21:33:38 +02:00 committed by Ingo Molnar
Parent 8651c65844
Commit 3472eaa1f1
1 changed file with 6 additions and 10 deletions

Browse file

@@ -7220,12 +7220,12 @@ void normalize_rt_tasks(void)
unsigned long flags;
struct rq *rq;
read_lock_irqsave(&tasklist_lock, flags);
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
/*
* Only normalize user tasks:
*/
if (!p->mm)
if (p->flags & PF_KTHREAD)
continue;
p->se.exec_start = 0;
@@ -7240,20 +7240,16 @@ void normalize_rt_tasks(void)
* Renice negative nice level userspace
* tasks back to 0:
*/
if (task_nice(p) < 0 && p->mm)
if (task_nice(p) < 0)
set_user_nice(p, 0);
continue;
}
raw_spin_lock(&p->pi_lock);
rq = __task_rq_lock(p);
rq = task_rq_lock(p, &flags);
normalize_task(rq, p);
__task_rq_unlock(rq);
raw_spin_unlock(&p->pi_lock);
task_rq_unlock(rq, p, &flags);
}
read_unlock_irqrestore(&tasklist_lock, flags);
read_unlock(&tasklist_lock);
}
#endif /* CONFIG_MAGIC_SYSRQ */