cpu/hotplug: Rework sparse_irq locking in bringup_cpu()

There is no harm in holding the sparse_irq lock until the upcoming CPU
completes its startup in cpuhp_online_idle(). This allows removing the
cpu_online() synchronization from architecture code.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Tested-by: Guilherme G. Piccoli <gpiccoli@igalia.com> # Steam Deck
Link: https://lore.kernel.org/r/20230512205256.263722880@linutronix.de
Parent: c8b7fb09d1
Commit: 22b612e2d5
 kernel/cpu.c | 34
@@ -558,7 +558,7 @@ static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
 	return ret;
 }
 
-static int bringup_wait_for_ap(unsigned int cpu)
+static int bringup_wait_for_ap_online(unsigned int cpu)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 
@@ -579,15 +579,12 @@ static int bringup_wait_for_ap(unsigned int cpu)
 	 */
 	if (!cpu_smt_allowed(cpu))
 		return -ECANCELED;
-
-	if (st->target <= CPUHP_AP_ONLINE_IDLE)
-		return 0;
-
-	return cpuhp_kick_ap(cpu, st, st->target);
+	return 0;
 }
 
 static int bringup_cpu(unsigned int cpu)
 {
+	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	struct task_struct *idle = idle_thread_get(cpu);
 	int ret;
 
@@ -600,16 +597,33 @@ static int bringup_cpu(unsigned int cpu)
 	/*
 	 * Some architectures have to walk the irq descriptors to
 	 * setup the vector space for the cpu which comes online.
-	 * Prevent irq alloc/free across the bringup.
+	 *
+	 * Prevent irq alloc/free across the bringup by acquiring the
+	 * sparse irq lock. Hold it until the upcoming CPU completes the
+	 * startup in cpuhp_online_idle() which allows to avoid
+	 * intermediate synchronization points in the architecture code.
 	 */
 	irq_lock_sparse();
 
 	/* Arch-specific enabling code. */
 	ret = __cpu_up(cpu, idle);
-	irq_unlock_sparse();
 	if (ret)
-		return ret;
-	return bringup_wait_for_ap(cpu);
+		goto out_unlock;
+
+	ret = bringup_wait_for_ap_online(cpu);
+	if (ret)
+		goto out_unlock;
+
+	irq_unlock_sparse();
+
+	if (st->target <= CPUHP_AP_ONLINE_IDLE)
+		return 0;
+
+	return cpuhp_kick_ap(cpu, st, st->target);
+
+out_unlock:
+	irq_unlock_sparse();
+	return ret;
 }
 
 static int finish_cpu(unsigned int cpu)
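
For quick reading, the net result of the hunks above is the following condensed form of the reworked bringup_cpu(). This is a sketch assembled from the diff itself (same functions and locking order, with added explanatory comments), not a verbatim copy of kernel/cpu.c; code outside the shown hunks is elided.

static int bringup_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Hold the sparse irq lock across the whole bringup ... */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret)
		goto out_unlock;

	/* ... until the new CPU has reached CPUHP_AP_ONLINE_IDLE. */
	ret = bringup_wait_for_ap_online(cpu);
	if (ret)
		goto out_unlock;

	irq_unlock_sparse();

	/* Done if the requested target is the idle online state. */
	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	/* Otherwise keep driving the CPU towards st->target. */
	return cpuhp_kick_ap(cpu, st, st->target);

out_unlock:
	irq_unlock_sparse();
	return ret;
}

Because the lock is now released only after bringup_wait_for_ap_online() returns, architectures no longer need their own cpu_online() synchronization during __cpu_up(), which is the point of the change.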