nohz: Restart nohz full tick from irq exit

Restart the tick when necessary from the irq exit path. It makes nohz
full more flexible, simplifies the related IPIs and doesn't bring
significant overhead on irq exit.

In a longer term view, it will allow us to piggyback the nohz kick
on the scheduler IPI in the future instead of sending a dedicated IPI
that often doubles the scheduler IPI on task wakeup. This will require
more changes though including careful review of resched_curr() callers
to include nohz full needs.

Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
This commit is contained in:
Frederic Weisbecker 2015-05-27 19:22:08 +02:00
Родитель 5944935943
Коммит 73738a95d0
2 изменённых файлов: 10 добавлений и 32 удалений

Просмотреть файл

@@ -147,7 +147,6 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
cpumask_or(mask, mask, tick_nohz_full_mask);
}
extern void __tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
extern void tick_nohz_full_kick_all(void);
@@ -156,7 +155,6 @@ extern void __tick_nohz_task_switch(struct task_struct *tsk);
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
static inline void __tick_nohz_full_check(void) { }
static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void tick_nohz_full_kick(void) { }
static inline void tick_nohz_full_kick_all(void) { }
@@ -190,12 +188,6 @@ static inline void housekeeping_affine(struct task_struct *t)
#endif
}
/*
 * Re-evaluate the tick on this CPU, but only when nohz full is enabled
 * (tick_nohz_full_enabled() is a constant-false stub otherwise, so the
 * call compiles away on !NO_HZ_FULL kernels).
 * NOTE(review): this wrapper is removed by this commit — the restart now
 * happens from tick_nohz_irq_exit() instead.
 */
static inline void tick_nohz_full_check(void)
{
if (tick_nohz_full_enabled())
__tick_nohz_full_check();
}
static inline void tick_nohz_task_switch(struct task_struct *tsk)
{
if (tick_nohz_full_enabled())

Просмотреть файл

@@ -197,25 +197,9 @@ static bool can_stop_full_tick(void)
return true;
}
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
/*
* Re-evaluate the need for the tick on the current CPU
* and restart it if necessary.
*/
/*
 * Re-evaluate the need for the tick on the current CPU and restart it if
 * a new tick dependency arose while it was stopped.
 * NOTE(review): deleted by this commit; tick_nohz_full_update_tick() now
 * performs this check from the irq exit path.
 */
void __tick_nohz_full_check(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
/* Only full-dynticks CPUs can have stopped their tick here. */
if (tick_nohz_full_cpu(smp_processor_id())) {
/* Tick is stopped but stopping is no longer allowed: restart it. */
if (ts->tick_stopped && !can_stop_full_tick())
tick_nohz_restart_sched_tick(ts, ktime_get());
}
}
static void nohz_full_kick_work_func(struct irq_work *work)
{
__tick_nohz_full_check();
/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
@@ -250,7 +234,7 @@ void tick_nohz_full_kick_cpu(int cpu)
static void nohz_full_kick_ipi(void *info)
{
__tick_nohz_full_check();
/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}
/*
@@ -703,7 +687,9 @@ out:
return tick;
}
static void tick_nohz_full_stop_tick(struct tick_sched *ts)
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
#ifdef CONFIG_NO_HZ_FULL
int cpu = smp_processor_id();
@@ -714,10 +700,10 @@ static void tick_nohz_full_stop_tick(struct tick_sched *ts)
if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
return;
if (!can_stop_full_tick())
return;
tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
if (can_stop_full_tick())
tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
else if (ts->tick_stopped)
tick_nohz_restart_sched_tick(ts, ktime_get());
#endif
}
@@ -847,7 +833,7 @@ void tick_nohz_irq_exit(void)
if (ts->inidle)
__tick_nohz_idle_enter(ts);
else
tick_nohz_full_stop_tick(ts);
tick_nohz_full_update_tick(ts);
}
/**