2011-10-25 12:00:11 +04:00
|
|
|
#include "sched.h"
|
|
|
|
|
2007-07-09 20:51:58 +04:00
|
|
|
/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched/fair.c)
 */
|
|
|
|
|
2008-01-25 23:08:09 +03:00
|
|
|
#ifdef CONFIG_SMP
/*
 * Wake-up CPU selection for the idle task: there is exactly one idle
 * task per CPU and it never moves, so always report the CPU the task
 * is already on, ignoring the sd_flag/flags hints.
 */
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
|
sched: Fix wrong rq's runnable_avg update with rt tasks
The current update of the rq's load can be erroneous when RT
tasks are involved.
The update of the load of a rq that becomes idle, is done only
if the avg_idle is less than sysctl_sched_migration_cost. If RT
tasks and short idle duration alternate, the runnable_avg will
not be updated correctly and the time will be accounted as idle
time when a CFS task wakes up.
A new idle_enter function is called when the next task is the
idle function so the elapsed time will be accounted as run time
in the load of the rq, whatever the average idle time is. The
function update_rq_runnable_avg is removed from idle_balance.
When a RT task is scheduled on an idle CPU, the update of the
rq's load is not done when the rq exit idle state because CFS's
functions are not called. Then, the idle_balance, which is
called just before entering the idle function, updates the rq's
load and makes the assumption that the elapsed time since the
last update, was only running time.
As a consequence, the rq's load of a CPU that only runs a
periodic RT task, is close to LOAD_AVG_MAX whatever the running
duration of the RT task is.
A new idle_exit function is called when the prev task is the
idle function so the elapsed time will be accounted as idle time
in the rq's load.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: linaro-kernel@lists.linaro.org
Cc: peterz@infradead.org
Cc: pjt@google.com
Cc: fweisbec@gmail.com
Cc: efault@gmx.de
Link: http://lkml.kernel.org/r/1366302867-5055-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2013-04-18 20:34:26 +04:00
|
|
|
|
|
|
|
/*
 * Called just before schedule() switches away from the idle task.
 * Folds the elapsed idle period into the rq's load tracking as idle
 * time (idle_exit_fair), so CPUs running only RT tasks don't have
 * their idle time misaccounted as runnable time, and resets the
 * last-tick bookkeeping (rq_last_tick_reset).
 */
static void pre_schedule_idle(struct rq *rq, struct task_struct *prev)
{
	/* Account the time we spent idle in the rq's runnable average. */
	idle_exit_fair(rq);
	rq_last_tick_reset(rq);
}
|
|
|
|
|
|
|
|
/*
 * Called after the context switch into the idle task (triggered by
 * pick_next_task_idle() setting rq->post_schedule).  Marks the start
 * of an idle period in the rq's load tracking so the following
 * interval is accounted correctly.
 */
static void post_schedule_idle(struct rq *rq)
{
	idle_enter_fair(rq);
}
|
2008-01-25 23:08:09 +03:00
|
|
|
#endif /* CONFIG_SMP */
|
2007-07-09 20:51:58 +04:00
|
|
|
/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	/* Any wakeup preempts the idle task; just flag a reschedule. */
	resched_task(rq->idle);
}
|
|
|
|
|
2007-08-09 13:16:48 +04:00
|
|
|
/*
 * Lowest-priority pick: runs only when every other scheduling class
 * returned no task.  Bumps the go-idle schedstat and hands back this
 * CPU's dedicated idle task.
 */
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);
#ifdef CONFIG_SMP
	/* Trigger the post schedule to do an idle_enter for CFS */
	rq->post_schedule = 1;
#endif
	return rq->idle;
}
|
|
|
|
|
|
|
|
/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	/*
	 * Drop rq->lock (and re-enable IRQs) around the diagnostics so
	 * printk()/dump_stack() don't run with the runqueue locked; the
	 * lock is retaken before returning to the scheduler core.
	 */
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}
|
|
|
|
|
2007-08-09 13:16:49 +04:00
|
|
|
/* Nothing to save when the idle task is switched out. */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
|
|
|
|
|
2008-01-25 23:08:29 +03:00
|
|
|
/* The idle task needs no per-tick timeslice/vruntime bookkeeping. */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}
|
|
|
|
|
2007-10-15 19:00:08 +04:00
|
|
|
/* No state to set up when the idle task becomes rq->curr. */
static void set_curr_task_idle(struct rq *rq)
{
}
|
|
|
|
|
2011-01-17 19:03:27 +03:00
|
|
|
/*
 * A task can never be moved into the idle class (the idle task is
 * created in it and stays there), so reaching this is a kernel bug.
 */
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}
|
|
|
|
|
2011-01-17 19:03:27 +03:00
|
|
|
/*
 * The idle task's priority can never change, so reaching this is a
 * kernel bug.
 */
static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}
|
|
|
|
|
2010-01-14 06:21:52 +03:00
|
|
|
/*
 * sched_rr_get_interval() hook: the idle task has no round-robin
 * timeslice, so report 0.
 */
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}
|
|
|
|
|
2007-07-09 20:51:58 +04:00
|
|
|
/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 * it sits at the bottom of the class hierarchy and most hooks are
 * no-ops or BUG() traps, since the idle task is never enqueued,
 * migrated or re-prioritized.
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
	/* idle load-accounting hooks around the context switch: */
	.pre_schedule		= pre_schedule_idle,
	.post_schedule		= post_schedule_idle,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
};
|