cpufreq: powernv: Fix hardlockup due to synchronous smp_call in timer interrupt
gpstate_timer_handler() uses a synchronous smp_call to set the pstate
on the requested core. Since the handler runs from the timer softirq on
the interrupt-exit path, waiting there for an IPI to complete can
deadlock with an smp_call_function_many() already in flight in the
interrupted context, causing the hard lockup below:
smp_call_function_single+0x110/0x180 (unreliable)
smp_call_function_any+0x180/0x250
gpstate_timer_handler+0x1e8/0x580
call_timer_fn+0x50/0x1c0
expire_timers+0x138/0x1f0
run_timer_softirq+0x1e8/0x270
__do_softirq+0x158/0x3e4
irq_exit+0xe8/0x120
timer_interrupt+0x9c/0xe0
decrementer_common+0x114/0x120
-- interrupt: 901 at doorbell_global_ipi+0x34/0x50
LR = arch_send_call_function_ipi_mask+0x120/0x130
arch_send_call_function_ipi_mask+0x4c/0x130
smp_call_function_many+0x340/0x450
pmdp_invalidate+0x98/0xe0
change_huge_pmd+0xe0/0x270
change_protection_range+0xb88/0xe40
mprotect_fixup+0x140/0x340
SyS_mprotect+0x1b4/0x350
system_call+0x58/0x6c
One way to avoid this is to remove the smp-call. We can ensure that the
timer always runs on one of the policy->cpus: if the timer gets
migrated to a cpu outside the policy, re-queue it back on policy->cpus.
This way we can get rid of the smp-call that was being used to set the
pstate on the policy->cpus, as sketched below.
Fixes: 7bc54b652f ("timers, cpufreq/powernv: Initialize the gpstate timer as pinned")
Cc: stable@vger.kernel.org # v4.8+
Reported-by: Nicholas Piggin <npiggin@gmail.com>
Reported-by: Pridhiviraj Paidipeddi <ppaidipe@linux.vnet.ibm.com>
Signed-off-by: Shilpasri G Bhat <shilpa.bhat@linux.vnet.ibm.com>
Acked-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
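
For readers outside the kernel tree, here is a minimal, self-contained sketch of the re-queue pattern described above. It is illustrative only: struct example_gpstate and example_gpstate_timer_handler() are stand-ins for the driver's global_pstate_info and gpstate_timer_handler(), and the actual pstate write is elided.

#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

/* Illustrative stand-in for the driver's per-policy gpstate bookkeeping. */
struct example_gpstate {
	spinlock_t lock;
	struct timer_list timer;	/* pinned timer */
	struct cpufreq_policy *policy;
};

static void example_gpstate_timer_handler(struct timer_list *t)
{
	struct example_gpstate *gps = from_timer(gps, t, timer);
	struct cpufreq_policy *policy = gps->policy;

	if (!spin_trylock(&gps->lock))
		return;

	/*
	 * The timer is pinned, but CPU hot-unplug can still leave it on a
	 * CPU outside policy->cpus. Instead of a synchronous smp_call,
	 * re-arm the timer on one of the policy CPUs and bail out; the
	 * work happens on the next expiry.
	 */
	if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
		gps->timer.expires = jiffies + msecs_to_jiffies(1);
		add_timer_on(&gps->timer, cpumask_first(policy->cpus));
		spin_unlock(&gps->lock);
		return;
	}

	/*
	 * Past this point we are guaranteed to be on a policy CPU, so the
	 * pstate can be written directly (set_pstate(&freq_data) in the
	 * driver) with no IPI and no waiting in softirq context.
	 */

	spin_unlock(&gps->lock);
}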
Parent: ac61c11566
Commit: c0f7f5b6c6
@@ -679,6 +679,16 @@ void gpstate_timer_handler(struct timer_list *t)
 
 	if (!spin_trylock(&gpstates->gpstate_lock))
 		return;
+	/*
+	 * If the timer has migrated to the different cpu then bring
+	 * it back to one of the policy->cpus
+	 */
+	if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
+		gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
+		add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
+		spin_unlock(&gpstates->gpstate_lock);
+		return;
+	}
 
 	/*
 	 * If PMCR was last updated was using fast_switch then
@@ -718,10 +728,8 @@ void gpstate_timer_handler(struct timer_list *t)
 	if (gpstate_idx != gpstates->last_lpstate_idx)
 		queue_gpstate_timer(gpstates);
 
+	set_pstate(&freq_data);
 	spin_unlock(&gpstates->gpstate_lock);
-
-	/* Timer may get migrated to a different cpu on cpu hot unplug */
-	smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
 }
 
 /*