2011-10-25 12:00:11 +04:00
|
|
|
#include "sched.h"
|
|
|
|
|
2010-09-22 15:53:15 +04:00
|
|
|
/*
|
|
|
|
* stop-task scheduling class.
|
|
|
|
*
|
|
|
|
* The stop task is the highest priority task in the system, it preempts
|
|
|
|
* everything and will be preempted by nothing.
|
|
|
|
*
|
|
|
|
* See kernel/stop_machine.c
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
static int
|
2013-10-07 14:29:16 +04:00
|
|
|
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
|
2010-09-22 15:53:15 +04:00
|
|
|
{
|
|
|
|
return task_cpu(p); /* stop tasks as never migrate */
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
|
|
|
|
/*
 * Preemption check: intentionally a no-op.  The stop task is the
 * highest-priority task in the system, so nothing can preempt it.
 */
static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}
|
|
|
|
|
2012-02-11 09:05:00 +04:00
|
|
|
/*
 * Pick the per-CPU stop task if it is runnable on this runqueue.
 *
 * Returns rq->stop when it exists and is queued, otherwise NULL so
 * the core scheduler falls through to the next lower class.  @prev is
 * put back only once we know we are returning a task, as the
 * pick_next_task() protocol requires.
 */
static struct task_struct *
pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	struct task_struct *stop = rq->stop;

	/* Nothing for this class to do unless the stop task is queued. */
	if (!stop || !task_on_rq_queued(stop))
		return NULL;

	put_prev_task(rq, prev);

	/* Open a fresh exec-time accounting period for the stop task. */
	stop->se.exec_start = rq_clock_task(rq);

	return stop;
}
|
|
|
|
|
|
|
|
/*
 * Enqueue the stop task.  It is never kept on a run list — it is
 * reached directly through rq->stop — so only the runqueue's
 * nr_running count needs updating.
 */
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
}
|
|
|
|
|
|
|
|
/*
 * Dequeue the stop task: mirror of enqueue_task_stop(), just drop
 * the runqueue's nr_running count.
 */
static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
}
|
|
|
|
|
|
|
|
/*
 * A stop task has nothing to yield to — it outranks every other task
 * in the system — so reaching this callback is a kernel bug.
 */
static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}
|
|
|
|
|
|
|
|
/*
 * Account the CPU time the stop task consumed while it was current.
 *
 * Computes the runtime delta since se.exec_start, folds it into the
 * schedstats exec_max, sum_exec_runtime, group runtime and cpuacct
 * accounting, then restarts the accounting period.
 */
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	/* The clock can apparently step backwards; clamp the delta to 0. */
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	/* Track the longest single stretch of execution (schedstats). */
	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	/* Re-arm the accounting window before charging the cgroup. */
	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);
}
|
|
|
|
|
|
|
|
/*
 * Scheduler tick for a running stop task: nothing to do — the stop
 * task has no timeslice and cannot be preempted.
 */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}
|
|
|
|
|
|
|
|
/*
 * Called when the stop task becomes rq->curr: restart its
 * execution-time accounting period.
 */
static void set_curr_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;

	stop->se.exec_start = rq_clock_task(rq);
}
|
|
|
|
|
2011-01-17 19:03:27 +03:00
|
|
|
/*
 * No task may ever be switched into the stop class; hitting this
 * callback is a kernel bug.
 */
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* its impossible to change to this class */
}
|
|
|
|
|
2011-01-17 19:03:27 +03:00
|
|
|
/*
 * The stop task has no adjustable priority, so a priority change is
 * impossible by construction; reaching this callback is a kernel bug.
 */
static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!?, what priority? */
}
|
|
|
|
|
|
|
|
/*
 * Round-robin timeslice query: stop tasks have no timeslice,
 * report 0.
 */
static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}
|
|
|
|
|
2014-11-24 01:04:52 +03:00
|
|
|
/*
 * Mandatory ->update_curr() hook: a no-op here, since runtime for the
 * stop task is accounted in put_prev_task_stop() instead.
 */
static void update_curr_stop(struct rq *rq)
{
}
|
|
|
|
|
2010-09-22 15:53:15 +04:00
|
|
|
/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
const struct sched_class stop_sched_class = {
	/* Highest-priority class: sits above even SCHED_DEADLINE. */
	.next			= &dl_sched_class,

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.check_preempt_curr	= check_preempt_curr_stop,

	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_stop,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.set_curr_task		= set_curr_task_stop,
	.task_tick		= task_tick_stop,

	.get_rr_interval	= get_rr_interval_stop,

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
	.update_curr		= update_curr_stop,
};
|