clockevents: Fix cpu_down() race for hrtimer based broadcasting
It was found when doing a hotplug stress test on POWER, that the machine
either hit softlockups or rcu_sched stall warnings. The issue was traced
to commit:

  7cba160ad7 ("powernv/cpuidle: Redesign idle states management")

which exposed the cpu_down() race with hrtimer based broadcast mode:

  5d1638acb9 ("tick: Introduce hrtimer based broadcast")

The race is the following:

Assume CPU1 is the CPU which holds the hrtimer broadcasting duty before
it is taken down.

        CPU0                                    CPU1

        cpu_down()                              take_cpu_down()
                                                disable_interrupts()

        cpu_die()

         while (CPU1 != CPU_DEAD) {
                msleep(100);
                switch_to_idle();
                stop_cpu_timer();
                schedule_broadcast();
         }

        tick_cleanup_cpu_dead()
                take_over_broadcast()

So after CPU1 disabled interrupts it cannot handle the broadcast hrtimer
anymore, so CPU0 will be stuck forever.

Fix this by explicitly taking over broadcast duty before cpu_die().

This is a temporary workaround. What we really want is a callback in the
clockevent device which allows us to do that from the dying CPU by
pushing the hrtimer onto a different cpu. That might involve an IPI and
is definitely more complex than this immediate fix.

Changelog was picked up from:

        https://lkml.org/lkml/2015/2/16/213

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Preeti U. Murthy <preeti@linux.vnet.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: mpe@ellerman.id.au
Cc: nicolas.pitre@linaro.org
Cc: peterz@infradead.org
Cc: rjw@rjwysocki.net
Fixes: http://linuxppc.10917.n7.nabble.com/offlining-cpus-breakage-td88619.html
Link: http://lkml.kernel.org/r/20150330092410.24979.59887.stgit@preeti.in.ibm.com
[ Merged it to the latest timer tree, renamed the callback, tidied up the changelog. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: 9eed56e889
Commit: 345527b1ed
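For orientation, the sketch below condenses the resulting teardown order on the control CPU from the hunks that follow. It is an illustration only, not a buildable excerpt of kernel/cpu.c: everything except the calls relevant to the broadcast hand-over has been elided.

/*
 * Illustrative sketch only (condensed from the kernel/cpu.c hunks below,
 * with unrelated steps elided): the control CPU pulls the hrtimer
 * broadcast duty off the dying CPU *before* __cpu_die(), so it never
 * waits on a broadcast wakeup that nobody can deliver anymore.
 */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        /* ... take_cpu_down() has already run on the target CPU ... */

        /* Wait for the target CPU to settle in its idle loop. */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* Take over the hrtimer broadcast duty from the dying CPU. */
        hotplug_cpu__broadcast_tick_pull(cpu);

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* ... CPU_DEAD notification etc. ... */
        return 0;
}

Note also that the new hotplug_cpu__broadcast_tick_pull() takes tick_broadcast_lock itself (see the tick-broadcast.c hunk below): unlike the old broadcast_move_bc(), it is no longer invoked from tick_shutdown_broadcast_oneshot() with that lock already held.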
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -36,6 +36,12 @@ extern void tick_irq_enter(void);
 static inline void tick_irq_enter(void) { }
 #endif
 
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
+#else
+static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
+#endif
+
 #ifdef CONFIG_NO_HZ_COMMON
 extern int tick_nohz_tick_stopped(void);
 extern void tick_nohz_idle_enter(void);
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -20,6 +20,7 @@
 #include <linux/gfp.h>
 #include <linux/suspend.h>
 #include <linux/lockdep.h>
+#include <linux/tick.h>
 #include <trace/events/power.h>
 
 #include "smpboot.h"
@@ -411,6 +412,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	while (!idle_cpu(cpu))
 		cpu_relax();
 
+	hotplug_cpu__broadcast_tick_pull(cpu);
 	/* This actually kills the CPU. */
 	__cpu_die(cpu);
 
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -680,14 +680,19 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
 	clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
 }
 
-static void broadcast_move_bc(int deadcpu)
+void hotplug_cpu__broadcast_tick_pull(int deadcpu)
 {
-	struct clock_event_device *bc = tick_broadcast_device.evtdev;
+	struct clock_event_device *bc;
+	unsigned long flags;
 
-	if (!bc || !broadcast_needs_cpu(bc, deadcpu))
-		return;
-	/* This moves the broadcast assignment to this cpu */
-	clockevents_program_event(bc, bc->next_event, 1);
+	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
+	bc = tick_broadcast_device.evtdev;
+
+	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
+		/* This moves the broadcast assignment to this CPU: */
+		clockevents_program_event(bc, bc->next_event, 1);
+	}
+	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
 /*
@@ -924,8 +929,6 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
 	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
 	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
 
-	broadcast_move_bc(cpu);
-
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 