- Revert an attempt to not spread IRQ threads on isolated CPUs which has
  a bunch of problems.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmAfyw4ACgkQEsHwGGHe
 VUotPg/+NcDvVH7OBd6dkhTpH/B+VdUneDze2mfayX4O7/VIyQBPB4pyaphiTyiO
 BnbYZpd9ZM77eQKRLUGWMx38AcaO7ttwP/vxRha6Ic9+wIglXsTX3/cO5AJvPVun
 OtMvyR/Ej8cUCXCIJDLRgjxyOSd1PF7wHs8ZTsNAcTexxzUbdD8a/G5Pyq5xTpVF
 XBwhJt5Q2gwZSzauYobQh/E65nBIoFX0hRYlIjXSe92dLAQP/9Q4qJPExqJ4UHyj
 j7oVuErOpCPNBoe8I9QUlMTQ9KykXvaIc80KQ1VwoTN9lIni/yANot8aIjaQiE7l
 JDr8fTwIs1i9k9h1OYKKpmcFWjICSz+xm/NFfn6Z1VtT8Ftn6S70iFqm4mND/q0O
 ciSdU8DIqf15lTzYlsgrAqY2XnpTXmr2XsFkSBW2zqchh5tRCgrgGCAMM2LOoNWe
 V3TAU9BiAnXgYK68CEUyi2wfeXHo+MeD1YijTthUkAEs8Z2m27kF8rq/t7UQUnoj
 Z/gIQvRGr2ZB7bnSI96VKk9tOOyq77vbLz+enN7a8d2goJKU39YNUl+R4Dq9Cwg2
 Yu7ryDcX3GcYjJUlw8KBUmW8vUijh9LuYbvbIHyQSt+VFQLihHM6CLbdJ+bnesUn
 kcwFrXz4oDCxgApKcp60Wsm7KAAhgQF3D1Wfjyp2IyGNlXwhPD0=
 =Z/Pv
 -----END PGP SIGNATURE-----

Merge tag 'sched_urgent_for_v5.11_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Borislav Petkov:
 "Revert an attempt to not spread IRQ threads on isolated CPUs which has
  a bunch of problems"

* tag 'sched_urgent_for_v5.11_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  Revert "lib: Restrict cpumask_local_spread to houskeeping CPUs"
Linus Torvalds 2021-02-07 10:03:43 -08:00
Parents: 814daadbf0 2452483d95
Commit: 6fed85df5d
1 changed file with 5 additions and 11 deletions


lib/cpumask.c
@@ -6,7 +6,6 @@
 #include <linux/export.h>
 #include <linux/memblock.h>
 #include <linux/numa.h>
-#include <linux/sched/isolation.h>
 
 /**
  * cpumask_next - get the next cpu in a cpumask
@@ -206,27 +205,22 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
  */
 unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	int cpu, hk_flags;
-	const struct cpumask *mask;
+	int cpu;
 
-	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
-	mask = housekeeping_cpumask(hk_flags);
 	/* Wrap: we always want a cpu. */
-	i %= cpumask_weight(mask);
+	i %= num_online_cpus();
 
 	if (node == NUMA_NO_NODE) {
-		for_each_cpu(cpu, mask) {
+		for_each_cpu(cpu, cpu_online_mask)
 			if (i-- == 0)
 				return cpu;
-		}
 	} else {
 		/* NUMA first. */
-		for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
+		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
 			if (i-- == 0)
 				return cpu;
-		}
 
-		for_each_cpu(cpu, mask) {
+		for_each_cpu(cpu, cpu_online_mask) {
 			/* Skip NUMA nodes, done above. */
 			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
 				continue;
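
To make the restored iteration order concrete, here is a small userspace model of the selection logic above (toy arrays stand in for cpu_online_mask and cpumask_of_node(); a sketch for illustration only, not kernel code):

#include <stdio.h>

#define NR_CPUS 8

/* Toy stand-ins for cpu_online_mask and cpumask_of_node(): 8 online
 * CPUs, with CPUs 0-3 on node 0 and CPUs 4-7 on node 1. */
static const int cpu_online[NR_CPUS] = { 1, 1, 1, 1, 1, 1, 1, 1 };
static const int node_of[NR_CPUS]    = { 0, 0, 0, 0, 1, 1, 1, 1 };

static unsigned int num_online(void)
{
	unsigned int n = 0;

	for (int c = 0; c < NR_CPUS; c++)
		n += cpu_online[c];
	return n;
}

/* Mirrors the restored logic: wrap i so a CPU is always found, walk
 * the requested node's CPUs first, then the remaining online CPUs. */
static int local_spread(unsigned int i, int node)
{
	i %= num_online();	/* Wrap: we always want a cpu. */

	if (node < 0) {		/* NUMA_NO_NODE */
		for (int c = 0; c < NR_CPUS; c++)
			if (cpu_online[c] && i-- == 0)
				return c;
	} else {
		for (int c = 0; c < NR_CPUS; c++)	/* NUMA first. */
			if (cpu_online[c] && node_of[c] == node && i-- == 0)
				return c;
		for (int c = 0; c < NR_CPUS; c++)	/* then the rest */
			if (cpu_online[c] && node_of[c] != node && i-- == 0)
				return c;
	}
	return -1;	/* unreachable while any CPU is online */
}

int main(void)
{
	/* Queues 0..7 on node 1: picks CPUs 4,5,6,7 first, then 0,1,2,3. */
	for (unsigned int q = 0; q < 8; q++)
		printf("queue %u -> cpu %d\n", q, local_spread(q, 1));
	return 0;
}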