Merge tag 'smp-urgent-2022-04-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull SMP fixes from Thomas Gleixner:
 "Two fixes for the SMP core:

   - Make the warning condition in flush_smp_call_function_queue()
     correct, which checked a just emptied list head for being empty
     instead of validating that there was no pending entry on the
     offlined CPU at all.

   - The @cpu member of struct cpuhp_cpu_state is initialized when the
     CPU hotplug thread for the upcoming CPU is created. That's too
     late because the creation of the thread can fail and then the
     following rollback operates on CPU0. Get rid of the CPU member and
     hand the CPU number to the involved functions directly"

* tag 'smp-urgent-2022-04-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  cpu/hotplug: Remove the 'cpu' member of cpuhp_cpu_state
  smp: Fix offline cpu check in flush_smp_call_function_queue()

Commit: 0e59732ed6

 kernel/cpu.c | 36

--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -71,7 +71,6 @@ struct cpuhp_cpu_state {
 	bool			rollback;
 	bool			single;
 	bool			bringup;
-	int			cpu;
 	struct hlist_node	*node;
 	struct hlist_node	*last;
 	enum cpuhp_state	cb_state;
@@ -475,7 +474,7 @@ static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
 #endif
 
 static inline enum cpuhp_state
-cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
 {
 	enum cpuhp_state prev_state = st->state;
 	bool bringup = st->state < target;
@@ -486,14 +485,15 @@ cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
 	st->target = target;
 	st->single = false;
 	st->bringup = bringup;
-	if (cpu_dying(st->cpu) != !bringup)
-		set_cpu_dying(st->cpu, !bringup);
+	if (cpu_dying(cpu) != !bringup)
+		set_cpu_dying(cpu, !bringup);
 
 	return prev_state;
 }
 
 static inline void
-cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
+cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
+		  enum cpuhp_state prev_state)
 {
 	bool bringup = !st->bringup;
 
@@ -520,8 +520,8 @@ cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
 	}
 
 	st->bringup = bringup;
-	if (cpu_dying(st->cpu) != !bringup)
-		set_cpu_dying(st->cpu, !bringup);
+	if (cpu_dying(cpu) != !bringup)
+		set_cpu_dying(cpu, !bringup);
 }
 
 /* Regular hotplug invocation of the AP hotplug thread */
@@ -541,15 +541,16 @@ static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
 	wait_for_ap_thread(st, st->bringup);
 }
 
-static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
+			 enum cpuhp_state target)
 {
 	enum cpuhp_state prev_state;
 	int ret;
 
-	prev_state = cpuhp_set_state(st, target);
+	prev_state = cpuhp_set_state(cpu, st, target);
 	__cpuhp_kick_ap(st);
 	if ((ret = st->result)) {
-		cpuhp_reset_state(st, prev_state);
+		cpuhp_reset_state(cpu, st, prev_state);
 		__cpuhp_kick_ap(st);
 	}
 
@@ -581,7 +582,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
 	if (st->target <= CPUHP_AP_ONLINE_IDLE)
 		return 0;
 
-	return cpuhp_kick_ap(st, st->target);
+	return cpuhp_kick_ap(cpu, st, st->target);
 }
 
 static int bringup_cpu(unsigned int cpu)
@@ -704,7 +705,7 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 			 ret, cpu, cpuhp_get_step(st->state)->name,
 			 st->state);
 
-		cpuhp_reset_state(st, prev_state);
+		cpuhp_reset_state(cpu, st, prev_state);
 		if (can_rollback_cpu(st))
 			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
 							    prev_state));
@@ -721,7 +722,6 @@ static void cpuhp_create(unsigned int cpu)
 
 	init_completion(&st->done_up);
 	init_completion(&st->done_down);
-	st->cpu = cpu;
 }
 
 static int cpuhp_should_run(unsigned int cpu)
@@ -875,7 +875,7 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
 	cpuhp_lock_release(true);
 
 	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
-	ret = cpuhp_kick_ap(st, st->target);
+	ret = cpuhp_kick_ap(cpu, st, st->target);
 	trace_cpuhp_exit(cpu, st->state, prev_state, ret);
 
 	return ret;
@@ -1107,7 +1107,7 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 			 ret, cpu, cpuhp_get_step(st->state)->name,
 			 st->state);
 
-		cpuhp_reset_state(st, prev_state);
+		cpuhp_reset_state(cpu, st, prev_state);
 
 		if (st->state < prev_state)
 			WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
@@ -1134,7 +1134,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 
 	cpuhp_tasks_frozen = tasks_frozen;
 
-	prev_state = cpuhp_set_state(st, target);
+	prev_state = cpuhp_set_state(cpu, st, target);
 	/*
 	 * If the current CPU state is in the range of the AP hotplug thread,
 	 * then we need to kick the thread.
@@ -1165,7 +1165,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 	ret = cpuhp_down_callbacks(cpu, st, target);
 	if (ret && st->state < prev_state) {
 		if (st->state == CPUHP_TEARDOWN_CPU) {
-			cpuhp_reset_state(st, prev_state);
+			cpuhp_reset_state(cpu, st, prev_state);
 			__cpuhp_kick_ap(st);
 		} else {
 			WARN(1, "DEAD callback error for CPU%d", cpu);
@@ -1352,7 +1352,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 
 	cpuhp_tasks_frozen = tasks_frozen;
 
-	cpuhp_set_state(st, target);
+	cpuhp_set_state(cpu, st, target);
 	/*
 	 * If the current CPU state is in the range of the AP hotplug thread,
 	 * then we need to kick the thread once more.
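
The kernel/cpu.c changes above all follow from one ordering problem: st->cpu was only assigned in cpuhp_create(), i.e. once the hotplug thread for the incoming CPU had been set up, so a rollback taken after an earlier failure read a still zero-initialized member and flipped the dying state of CPU0 instead of the CPU actually being brought up. Below is a minimal userspace sketch of that hazard, not kernel code; struct state, per_cpu_state, rollback_old() and rollback_new() are invented names, and only the before/after calling convention mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct state {
	int  cpu;	/* analogue of the removed cpuhp_cpu_state::cpu member */
	bool dying;
};

/* Zero-initialized at start-up, like per-CPU data before any hotplug ran. */
static struct state per_cpu_state[NR_CPUS];

/* Old convention: trusts st->cpu, which may never have been set. */
static void rollback_old(struct state *st)
{
	per_cpu_state[st->cpu].dying = false;	/* hits CPU0 while st->cpu is still 0 */
}

/* New convention: the caller passes the CPU number it is operating on. */
static void rollback_new(int cpu, struct state *st)
{
	(void)st;				/* state still carried, cpu no longer read from it */
	per_cpu_state[cpu].dying = false;
}

int main(void)
{
	int cpu = 3;
	struct state *st = &per_cpu_state[cpu];

	/* Bringup of CPU3 fails before the step that would have set st->cpu. */
	st->dying = true;

	rollback_old(st);	/* clears CPU0; CPU3 is still marked dying */
	printf("after old rollback: cpu3 dying=%d\n", per_cpu_state[cpu].dying);

	rollback_new(cpu, st);	/* clears the CPU that actually failed */
	printf("after new rollback: cpu3 dying=%d\n", per_cpu_state[cpu].dying);

	return 0;
}

With the old convention the rollback has to trust whatever happens to be in st->cpu; with the new one the caller, which always knows which CPU it is operating on, supplies the number, so a partially initialized state struct can no longer redirect the rollback to CPU0.
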
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -579,7 +579,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 
 	/* There shouldn't be any pending callbacks on an offline CPU. */
 	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
-		     !warned && !llist_empty(head))) {
+		     !warned && entry != NULL)) {
 		warned = true;
 		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
 