Merge tag 'irq-core-2022-01-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
 "Updates for the interrupt subsystem:

  Core:

   - Provide a new interface for affinity hints that separates the hint
     from the actual affinity change, which had become a hidden property
     of the existing interface

   - Fix up the in-tree usage of the affinity hint interfaces

  Drivers:

   - No new irqchip drivers!

   - Fix GICv3 redistributor table reservation with RT across kexec

   - Fix GICv4.1 redistributor view of the VPE table across kexec

   - Add support for extra interrupts on spear-shirq

   - Make obtaining some interrupts optional for the Renesas drivers

   - Various cleanups and bug fixes"

* tag 'irq-core-2022-01-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
  irqchip/renesas-intc-irqpin: Use platform_get_irq_optional() to get the interrupt
  irqchip/renesas-irqc: Use platform_get_irq_optional() to get the interrupt
  irqchip/gic-v4: Disable redistributors' view of the VPE table at boot time
  irqchip/ingenic-tcu: Use correctly sized arguments for bit field
  irqchip/gic-v2m: Add const to of_device_id
  irqchip/imx-gpcv2: Mark imx_gpcv2_instance with __ro_after_init
  irqchip/spear-shirq: Add support for IRQ 0..6
  irqchip/gic-v3-its: Limit memreserve cpuhp state lifetime
  irqchip/gic-v3-its: Postpone LPI pending table freeing and memreserve
  irqchip/gic-v3-its: Give the percpu rdist struct its own flags field
  net/mlx4: Use irq_update_affinity_hint()
  net/mlx5: Use irq_set_affinity_and_hint()
  hinic: Use irq_set_affinity_and_hint()
  scsi: lpfc: Use irq_set_affinity()
  mailbox: Use irq_update_affinity_hint()
  ixgbe: Use irq_update_affinity_hint()
  be2net: Use irq_update_affinity_hint()
  enic: Use irq_update_affinity_hint()
  RDMA/irdma: Use irq_update_affinity_hint()
  scsi: mpt3sas: Use irq_set_affinity_and_hint()
  ...
Linus Torvalds 2022-01-13 08:53:45 -08:00
Parents: 455e73a07f 67d50b5f91
Commit: 147cc5838c
26 changed files with 228 additions and 86 deletions
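
The core change splits what irq_set_affinity_hint() used to do implicitly: publishing a hint (exposed to user space via /proc/irq/<n>/affinity_hint and consumed by tools such as irqbalance) and actually moving the interrupt. A minimal sketch of how callers map onto the new helpers; the function and variable names below are illustrative and not taken from any driver in this series:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Illustrative only: "vec" stands for some MSI-X vector a driver owns. */
static void example_publish_hint_only(unsigned int vec, const struct cpumask *mask)
{
	/*
	 * Old world: irq_set_affinity_hint(vec, mask) published the hint
	 * and silently moved the interrupt to "mask".
	 *
	 * New world: publish the hint for user space, leave the affinity alone.
	 */
	irq_update_affinity_hint(vec, mask);
}

static void example_pin_and_hint(unsigned int vec, const struct cpumask *mask)
{
	/* Explicitly ask for both the hint and the affinity change. */
	irq_set_affinity_and_hint(vec, mask);
}

static void example_teardown(unsigned int vec, void *dev_id)
{
	/* As before, the hint has to be cleared before free_irq(). */
	irq_update_affinity_hint(vec, NULL);
	free_irq(vec, dev_id);
}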

View file

@@ -550,7 +550,7 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
 	struct irdma_sc_dev *dev = &rf->sc_dev;
 	dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
-	irq_set_affinity_hint(msix_vec->irq, NULL);
+	irq_update_affinity_hint(msix_vec->irq, NULL);
 	free_irq(msix_vec->irq, dev_id);
 }
@@ -1100,7 +1100,7 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
 	}
 	cpumask_clear(&msix_vec->mask);
 	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
-	irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
+	irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
 	if (status) {
 		ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
 		return IRDMA_ERR_CFG;

View file

@@ -405,7 +405,7 @@ err_free_v2m:
 	return ret;
 }
-static struct of_device_id gicv2m_device_id[] = {
+static const struct of_device_id gicv2m_device_id[] = {
 	{ .compatible = "arm,gic-v2m-frame", },
 	{},
 };

View file

@@ -46,6 +46,10 @@
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
 #define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)
+#define RD_LOCAL_LPI_ENABLED			BIT(0)
+#define RD_LOCAL_PENDTABLE_PREALLOCATED		BIT(1)
+#define RD_LOCAL_MEMRESERVE_DONE		BIT(2)
 static u32 lpi_id_bits;
 /*
@@ -3044,7 +3048,7 @@ static void its_cpu_init_lpis(void)
 	phys_addr_t paddr;
 	u64 val, tmp;
-	if (gic_data_rdist()->lpi_enabled)
+	if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)
 		return;
 	val = readl_relaxed(rbase + GICR_CTLR);
@@ -3063,15 +3067,13 @@ static void its_cpu_init_lpis(void)
 		paddr &= GENMASK_ULL(51, 16);
 		WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
-		its_free_pending_table(gic_data_rdist()->pend_page);
-		gic_data_rdist()->pend_page = NULL;
+		gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
 		goto out;
 	}
 	pend_page = gic_data_rdist()->pend_page;
 	paddr = page_to_phys(pend_page);
-	WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
 	/* set PROPBASE */
 	val = (gic_rdists->prop_table_pa |
@@ -3158,10 +3160,11 @@ static void its_cpu_init_lpis(void)
 	/* Make sure the GIC has seen the above */
 	dsb(sy);
 out:
-	gic_data_rdist()->lpi_enabled = true;
+	gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
 	pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
 		smp_processor_id(),
-		gic_data_rdist()->pend_page ? "allocated" : "reserved",
+		gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
+		"reserved" : "allocated",
 		&paddr);
 }
@@ -5138,7 +5141,7 @@ static int redist_disable_lpis(void)
 	 *
 	 * If running with preallocated tables, there is nothing to do.
 	 */
-	if (gic_data_rdist()->lpi_enabled ||
+	if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
 	    (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
 		return 0;
@@ -5200,6 +5203,51 @@ int its_cpu_init(void)
 	return 0;
 }
+static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
+{
+	cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
+	gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
+}
+
+static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
+		    rdist_memreserve_cpuhp_cleanup_workfn);
+
+static int its_cpu_memreserve_lpi(unsigned int cpu)
+{
+	struct page *pend_page;
+	int ret = 0;
+
+	/* This gets to run exactly once per CPU */
+	if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
+		return 0;
+
+	pend_page = gic_data_rdist()->pend_page;
+	if (WARN_ON(!pend_page)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	/*
+	 * If the pending table was pre-programmed, free the memory we
+	 * preemptively allocated. Otherwise, reserve that memory for
+	 * later kexecs.
+	 */
+	if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
+		its_free_pending_table(pend_page);
+		gic_data_rdist()->pend_page = NULL;
+	} else {
+		phys_addr_t paddr = page_to_phys(pend_page);
+
+		WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
+	}
+
+out:
+	/* Last CPU being brought up gets to issue the cleanup */
+	if (cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
+		schedule_work(&rdist_memreserve_cpuhp_cleanup_work);
+
+	gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
+
+	return ret;
+}
+
 static const struct of_device_id its_device_id[] = {
 	{	.compatible	= "arm,gic-v3-its",	},
 	{},
@@ -5383,6 +5431,26 @@ static void __init its_acpi_probe(void)
 static void __init its_acpi_probe(void) { }
 #endif
+int __init its_lpi_memreserve_init(void)
+{
+	int state;
+
+	if (!efi_enabled(EFI_CONFIG_TABLES))
+		return 0;
+
+	gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
+	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+				  "irqchip/arm/gicv3/memreserve:online",
+				  its_cpu_memreserve_lpi,
+				  NULL);
+	if (state < 0)
+		return state;
+
+	gic_rdists->cpuhp_memreserve_state = state;
+
+	return 0;
+}
+
 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
 		    struct irq_domain *parent_domain)
 {
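
The memreserve change above defers the LPI pending-table reservation to a dynamically allocated CPU hotplug state, so it runs once on every CPU and the state unregisters itself after the last CPU has come up. Outside of this driver, the same dynamic-state pattern looks roughly like the sketch below; the names and the pr_info() are placeholders for illustration:

#include <linux/cpuhotplug.h>
#include <linux/printk.h>

static int example_state = CPUHP_INVALID;

static int example_cpu_online(unsigned int cpu)
{
	/* Runs on each CPU as it comes online (and on already-online CPUs). */
	pr_info("example: cpu %u online\n", cpu);
	return 0;
}

static int __init example_hp_init(void)
{
	int state;

	/* CPUHP_AP_ONLINE_DYN hands back a dynamically allocated state number. */
	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "example/demo:online",
				  example_cpu_online,
				  NULL);		/* no teardown callback */
	if (state < 0)
		return state;

	example_state = state;
	return 0;
}

static void example_hp_exit(void)
{
	/* Remove the state without invoking teardown callbacks on each CPU. */
	if (example_state >= 0)
		cpuhp_remove_state_nocalls(example_state);
}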

View file

@@ -920,6 +920,22 @@ static int __gic_update_rdist_properties(struct redist_region *region,
 {
 	u64 typer = gic_read_typer(ptr + GICR_TYPER);
+	/* Boot-time cleanip */
+	if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
+		u64 val;
+
+		/* Deactivate any present vPE */
+		val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
+		if (val & GICR_VPENDBASER_Valid)
+			gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
+					      ptr + SZ_128K + GICR_VPENDBASER);
+
+		/* Mark the VPE table as invalid */
+		val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
+		val &= ~GICR_VPROPBASER_4_1_VALID;
+		gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
+	}
+
 	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
 	/* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
@@ -1802,6 +1818,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
 	if (gic_dist_supports_lpis()) {
 		its_init(handle, &gic_data.rdists, gic_data.domain);
 		its_cpu_init();
+		its_lpi_memreserve_init();
 	} else {
 		if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
 			gicv2m_init(handle, gic_data.domain);

View file

@@ -26,7 +26,7 @@ struct gpcv2_irqchip_data {
 	u32			cpu2wakeup;
 };
-static struct gpcv2_irqchip_data *imx_gpcv2_instance;
+static struct gpcv2_irqchip_data *imx_gpcv2_instance __ro_after_init;
 static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i)
 {

View file

@@ -28,6 +28,7 @@ static void ingenic_tcu_intc_cascade(struct irq_desc *desc)
 	struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
 	struct regmap *map = gc->private;
 	uint32_t irq_reg, irq_mask;
+	unsigned long bits;
 	unsigned int i;
 	regmap_read(map, TCU_REG_TFR, &irq_reg);
@@ -36,8 +37,9 @@ static void ingenic_tcu_intc_cascade(struct irq_desc *desc)
 	chained_irq_enter(irq_chip, desc);
 	irq_reg &= ~irq_mask;
+	bits = irq_reg;
-	for_each_set_bit(i, (unsigned long *)&irq_reg, 32)
+	for_each_set_bit(i, &bits, 32)
 		generic_handle_domain_irq(domain, i);
 	chained_irq_exit(irq_chip, desc);
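
The change above is needed because for_each_set_bit() operates on arrays of unsigned long: casting the address of a 32-bit register value makes the helper load a full long, which on 64-bit kernels reads past the u32 variable (and is flagged by KASAN). A minimal sketch of the safe pattern, with an invented register value:

#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/types.h>

static void example_dispatch(u32 pending)
{
	unsigned long bits = pending;	/* widen to the type the bitmap helpers expect */
	unsigned int i;

	/* Safe: "bits" really is an unsigned long, so the word-sized load is in bounds. */
	for_each_set_bit(i, &bits, 32)
		pr_info("bit %u set\n", i);
}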

View file

@@ -375,7 +375,6 @@ static int intc_irqpin_probe(struct platform_device *pdev)
 	struct intc_irqpin_priv *p;
 	struct intc_irqpin_iomem *i;
 	struct resource *io[INTC_IRQPIN_REG_NR];
-	struct resource *irq;
 	struct irq_chip *irq_chip;
 	void (*enable_fn)(struct irq_data *d);
 	void (*disable_fn)(struct irq_data *d);
@@ -418,12 +417,14 @@ static int intc_irqpin_probe(struct platform_device *pdev)
 	/* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */
 	for (k = 0; k < INTC_IRQPIN_MAX; k++) {
-		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
-		if (!irq)
+		ret = platform_get_irq_optional(pdev, k);
+		if (ret == -ENXIO)
 			break;
+		if (ret < 0)
+			goto err0;
 		p->irq[k].p = p;
-		p->irq[k].requested_irq = irq->start;
+		p->irq[k].requested_irq = ret;
 	}
 	nirqs = k;

View file

@@ -126,7 +126,6 @@ static int irqc_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	const char *name = dev_name(dev);
 	struct irqc_priv *p;
-	struct resource *irq;
 	int ret;
 	int k;
@@ -142,13 +141,15 @@ static int irqc_probe(struct platform_device *pdev)
 	/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
 	for (k = 0; k < IRQC_IRQ_MAX; k++) {
-		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
-		if (!irq)
+		ret = platform_get_irq_optional(pdev, k);
+		if (ret == -ENXIO)
 			break;
+		if (ret < 0)
+			goto err_runtime_pm_disable;
 		p->irq[k].p = p;
 		p->irq[k].hw_irq = k;
-		p->irq[k].requested_irq = irq->start;
+		p->irq[k].requested_irq = ret;
 	}
 	p->number_of_irqs = k;
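
Both Renesas conversions follow the same shape: probe as many interrupts as the device actually provides, treat -ENXIO as the end of the list, and propagate any other error (for example -EPROBE_DEFER). Stripped of driver specifics, the loop looks roughly like this; the bound and names are placeholders:

#include <linux/errno.h>
#include <linux/platform_device.h>

#define EXAMPLE_IRQ_MAX	8	/* placeholder upper bound */

static int example_count_irqs(struct platform_device *pdev, int *irqs)
{
	int k, ret;

	for (k = 0; k < EXAMPLE_IRQ_MAX; k++) {
		ret = platform_get_irq_optional(pdev, k);
		if (ret == -ENXIO)
			break;		/* ran out of interrupts: not an error */
		if (ret < 0)
			return ret;	/* real failure, e.g. -EPROBE_DEFER */
		irqs[k] = ret;
	}

	return k;			/* number of interrupts found */
}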

View file

@@ -149,6 +149,8 @@ static struct spear_shirq spear320_shirq_ras3 = {
 	.offset		= 0,
 	.nr_irqs	= 7,
 	.mask		= ((0x1 << 7) - 1) << 0,
+	.irq_chip	= &dummy_irq_chip,
+	.status_reg	= SPEAR320_INT_STS_MASK_REG,
 };
 static struct spear_shirq spear320_shirq_ras1 = {

View file

@@ -1298,7 +1298,7 @@ static int flexrm_startup(struct mbox_chan *chan)
 	val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
 	cpumask_set_cpu((ring->num / val) % num_online_cpus(),
 			&ring->irq_aff_hint);
-	ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
+	ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint);
 	if (ret) {
 		dev_err(ring->mbox->dev,
 			"failed to set IRQ affinity hint for ring%d\n",
@@ -1425,7 +1425,7 @@ static void flexrm_shutdown(struct mbox_chan *chan)
 	/* Release IRQ */
 	if (ring->irq_requested) {
-		irq_set_affinity_hint(ring->irq, NULL);
+		irq_update_affinity_hint(ring->irq, NULL);
 		free_irq(ring->irq, ring);
 		ring->irq_requested = false;
 	}

View file

@@ -150,10 +150,10 @@ static void enic_set_affinity_hint(struct enic *enic)
 		    !cpumask_available(enic->msix[i].affinity_mask) ||
 		    cpumask_empty(enic->msix[i].affinity_mask))
 			continue;
-		err = irq_set_affinity_hint(enic->msix_entry[i].vector,
-					    enic->msix[i].affinity_mask);
+		err = irq_update_affinity_hint(enic->msix_entry[i].vector,
+					       enic->msix[i].affinity_mask);
 		if (err)
-			netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
+			netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
 				    err);
 	}
@@ -173,7 +173,7 @@ static void enic_unset_affinity_hint(struct enic *enic)
 	int i;
 	for (i = 0; i < enic->intr_count; i++)
-		irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
+		irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);
 }
 static int enic_udp_tunnel_set_port(struct net_device *netdev,

View file

@@ -3491,7 +3491,7 @@ static int be_msix_register(struct be_adapter *adapter)
 		if (status)
 			goto err_msix;
-		irq_set_affinity_hint(vec, eqo->affinity_mask);
+		irq_update_affinity_hint(vec, eqo->affinity_mask);
 	}
 	return 0;
@@ -3552,7 +3552,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
 	/* MSIx */
 	for_all_evt_queues(adapter, eqo, i) {
 		vec = be_msix_vec_get(adapter, eqo);
-		irq_set_affinity_hint(vec, NULL);
+		irq_update_affinity_hint(vec, NULL);
 		free_irq(vec, eqo);
 	}

View file

@@ -548,7 +548,7 @@ static int rx_request_irq(struct hinic_rxq *rxq)
 		goto err_req_irq;
 	cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
-	err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
+	err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask);
 	if (err)
 		goto err_irq_affinity;
@@ -565,7 +565,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
 {
 	struct hinic_rq *rq = rxq->rq;
-	irq_set_affinity_hint(rq->irq, NULL);
+	irq_update_affinity_hint(rq->irq, NULL);
 	free_irq(rq->irq, rxq);
 	rx_del_napi(rxq);
 }

View file

@@ -3915,10 +3915,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 		 *
 		 * get_cpu_mask returns a static constant mask with
 		 * a permanent lifetime so it's ok to pass to
-		 * irq_set_affinity_hint without making a copy.
+		 * irq_update_affinity_hint without making a copy.
 		 */
 		cpu = cpumask_local_spread(q_vector->v_idx, -1);
-		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
 	}
 	vsi->irqs_ready = true;
@@ -3929,7 +3929,7 @@ free_queue_irqs:
 		vector--;
 		irq_num = pf->msix_entries[base + vector].vector;
 		irq_set_affinity_notifier(irq_num, NULL);
-		irq_set_affinity_hint(irq_num, NULL);
+		irq_update_affinity_hint(irq_num, NULL);
 		free_irq(irq_num, &vsi->q_vectors[vector]);
 	}
 	return err;
@@ -4750,7 +4750,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
 		/* clear the affinity notifier in the IRQ descriptor */
 		irq_set_affinity_notifier(irq_num, NULL);
 		/* remove our suggested affinity mask for this IRQ */
-		irq_set_affinity_hint(irq_num, NULL);
+		irq_update_affinity_hint(irq_num, NULL);
 		synchronize_irq(irq_num);
 		free_irq(irq_num, vsi->q_vectors[i]);

View file

@@ -492,10 +492,10 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
 		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
 		/* Spread the IRQ affinity hints across online CPUs. Note that
 		 * get_cpu_mask returns a mask with a permanent lifetime so
-		 * it's safe to use as a hint for irq_set_affinity_hint.
+		 * it's safe to use as a hint for irq_update_affinity_hint.
 		 */
 		cpu = cpumask_local_spread(q_vector->v_idx, -1);
-		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
 	}
 	return 0;
@@ -505,7 +505,7 @@ free_queue_irqs:
 		vector--;
 		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
 		irq_set_affinity_notifier(irq_num, NULL);
-		irq_set_affinity_hint(irq_num, NULL);
+		irq_update_affinity_hint(irq_num, NULL);
 		free_irq(irq_num, &adapter->q_vectors[vector]);
 	}
 	return err;
@@ -557,7 +557,7 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
 	for (vector = 0; vector < q_vectors; vector++) {
 		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
 		irq_set_affinity_notifier(irq_num, NULL);
-		irq_set_affinity_hint(irq_num, NULL);
+		irq_update_affinity_hint(irq_num, NULL);
 		free_irq(irq_num, &adapter->q_vectors[vector]);
 	}
 }

View file

@@ -3247,8 +3247,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 		/* If Flow Director is enabled, set interrupt affinity */
 		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
 			/* assign the mask for this irq */
-			irq_set_affinity_hint(entry->vector,
-					      &q_vector->affinity_mask);
+			irq_update_affinity_hint(entry->vector,
+						 &q_vector->affinity_mask);
 		}
 	}
@@ -3264,8 +3264,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 free_queue_irqs:
 	while (vector) {
 		vector--;
-		irq_set_affinity_hint(adapter->msix_entries[vector].vector,
-				      NULL);
+		irq_update_affinity_hint(adapter->msix_entries[vector].vector,
+					 NULL);
 		free_irq(adapter->msix_entries[vector].vector,
 			 adapter->q_vector[vector]);
 	}
@@ -3398,7 +3398,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 			continue;
 		/* clear the affinity_mask in the IRQ descriptor */
-		irq_set_affinity_hint(entry->vector, NULL);
+		irq_update_affinity_hint(entry->vector, NULL);
 		free_irq(entry->vector, q_vector);
 	}

View file

@@ -244,9 +244,9 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
 	    cpumask_empty(eq->affinity_mask))
 		return;
-	hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+	hint_err = irq_update_affinity_hint(eq->irq, eq->affinity_mask);
 	if (hint_err)
-		mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+		mlx4_warn(dev, "irq_update_affinity_hint failed, err %d\n", hint_err);
 }
 #endif
@@ -1123,9 +1123,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
 	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
 		if (eq_table->eq[i].have_irq) {
 			free_cpumask_var(eq_table->eq[i].affinity_mask);
-#if defined(CONFIG_SMP)
-			irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
-#endif
+			irq_update_affinity_hint(eq_table->eq[i].irq, NULL);
 			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
 			eq_table->eq[i].have_irq = 0;
 		}

View file

@@ -129,11 +129,11 @@ static void irq_release(struct mlx5_irq *irq)
 	struct mlx5_irq_pool *pool = irq->pool;
 	xa_erase(&pool->irqs, irq->index);
-	/* free_irq requires that affinity and rmap will be cleared
+	/* free_irq requires that affinity_hint and rmap will be cleared
 	 * before calling it. This is why there is asymmetry with set_rmap
 	 * which should be called after alloc_irq but before request_irq.
 	 */
-	irq_set_affinity_hint(irq->irqn, NULL);
+	irq_update_affinity_hint(irq->irqn, NULL);
 	free_cpumask_var(irq->mask);
 	free_irq(irq->irqn, &irq->nh);
 	kfree(irq);
@@ -238,7 +238,7 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	}
 	if (affinity) {
 		cpumask_copy(irq->mask, affinity);
-		irq_set_affinity_hint(irq->irqn, irq->mask);
+		irq_set_affinity_and_hint(irq->irqn, irq->mask);
 	}
 	irq->pool = pool;
 	irq->refcount = 1;
@@ -251,7 +251,7 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	}
 	return irq;
 err_xa:
-	irq_set_affinity_hint(irq->irqn, NULL);
+	irq_update_affinity_hint(irq->irqn, NULL);
 	free_cpumask_var(irq->mask);
 err_cpumask:
 	free_irq(irq->irqn, &irq->nh);

View file

@@ -12709,7 +12709,7 @@ lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
 	cpumask_clear(&eqhdl->aff_mask);
 	cpumask_set_cpu(cpu, &eqhdl->aff_mask);
 	irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
-	irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+	irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
 }
 /**
@@ -12998,7 +12998,6 @@ cfg_fail_out:
 	for (--index; index >= 0; index--) {
 		eqhdl = lpfc_get_eq_hdl(index);
 		lpfc_irq_clear_aff(eqhdl);
-		irq_set_affinity_hint(eqhdl->irq, NULL);
 		free_irq(eqhdl->irq, eqhdl);
 	}
@@ -13159,7 +13158,6 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
 		for (index = 0; index < phba->cfg_irq_chann; index++) {
 			eqhdl = lpfc_get_eq_hdl(index);
 			lpfc_irq_clear_aff(eqhdl);
-			irq_set_affinity_hint(eqhdl->irq, NULL);
 			free_irq(eqhdl->irq, eqhdl);
 		}
 	} else {

View file

@@ -5720,7 +5720,7 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
 				"Failed to register IRQ for vector %d.\n", i);
 			for (j = 0; j < i; j++) {
 				if (j < instance->low_latency_index_start)
-					irq_set_affinity_hint(
+					irq_update_affinity_hint(
 						pci_irq_vector(pdev, j), NULL);
 				free_irq(pci_irq_vector(pdev, j),
 					 &instance->irq_context[j]);
@@ -5763,7 +5763,7 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
 	if (instance->msix_vectors)
 		for (i = 0; i < instance->msix_vectors; i++) {
 			if (i < instance->low_latency_index_start)
-				irq_set_affinity_hint(
+				irq_update_affinity_hint(
 					pci_irq_vector(instance->pdev, i), NULL);
 			free_irq(pci_irq_vector(instance->pdev, i),
 				 &instance->irq_context[i]);
@@ -5894,22 +5894,25 @@ int megasas_get_device_list(struct megasas_instance *instance)
 }
 /**
- * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues
- * @instance:		Adapter soft state
- * return:		void
+ * megasas_set_high_iops_queue_affinity_and_hint - Set affinity and hint
+ *			for high IOPS queues
+ * @instance:		Adapter soft state
+ * return:		void
 */
static inline void
-megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
+megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance)
{
 	int i;
-	int local_numa_node;
+	unsigned int irq;
+	const struct cpumask *mask;
 	if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
-		local_numa_node = dev_to_node(&instance->pdev->dev);
-		for (i = 0; i < instance->low_latency_index_start; i++)
-			irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
-				cpumask_of_node(local_numa_node));
+		mask = cpumask_of_node(dev_to_node(&instance->pdev->dev));
+		for (i = 0; i < instance->low_latency_index_start; i++) {
+			irq = pci_irq_vector(instance->pdev, i);
+			irq_set_affinity_and_hint(irq, mask);
+		}
 	}
}
@@ -5998,7 +6001,7 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
 		instance->msix_vectors = 0;
 	if (instance->smp_affinity_enable)
-		megasas_set_high_iops_queue_affinity_hint(instance);
+		megasas_set_high_iops_queue_affinity_and_hint(instance);
}
 /**

View file

@@ -3086,6 +3086,7 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 void
 mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
 {
+	unsigned int irq;
 	struct adapter_reply_queue *reply_q, *next;
 	if (list_empty(&ioc->reply_queue_list))
@@ -3098,9 +3099,10 @@ mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
 			continue;
 		}
-		if (ioc->smp_affinity_enable)
-			irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
-			    reply_q->msix_index), NULL);
+		if (ioc->smp_affinity_enable) {
+			irq = pci_irq_vector(ioc->pdev, reply_q->msix_index);
+			irq_update_affinity_hint(irq, NULL);
+		}
 		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
 			 reply_q);
 		kfree(reply_q);
@@ -3167,18 +3169,15 @@ out:
 * @ioc: per adapter object
 *
 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
- *
- * It would nice if we could call irq_set_affinity, however it is not
- * an exported symbol
 */
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
-	unsigned int cpu, nr_cpus, nr_msix, index = 0;
+	unsigned int cpu, nr_cpus, nr_msix, index = 0, irq;
 	struct adapter_reply_queue *reply_q;
-	int local_numa_node;
 	int iopoll_q_count = ioc->reply_queue_count -
 	    ioc->iopoll_q_start_index;
+	const struct cpumask *mask;
 	if (!_base_is_controller_msix_enabled(ioc))
 		return;
@@ -3201,11 +3200,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
 	 * corresponding to high iops queues.
 	 */
 	if (ioc->high_iops_queues) {
-		local_numa_node = dev_to_node(&ioc->pdev->dev);
+		mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev));
 		for (index = 0; index < ioc->high_iops_queues;
 		     index++) {
-			irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
-			    index), cpumask_of_node(local_numa_node));
+			irq = pci_irq_vector(ioc->pdev, index);
+			irq_set_affinity_and_hint(irq, mask);
 		}
 	}

View file

@@ -329,7 +329,46 @@ extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
 extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
-extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
+				     bool setaffinity);
+
+/**
+ * irq_update_affinity_hint - Update the affinity hint
+ * @irq:	Interrupt to update
+ * @m:		cpumask pointer (NULL to clear the hint)
+ *
+ * Updates the affinity hint, but does not change the affinity of the interrupt.
+ */
+static inline int
+irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+	return __irq_apply_affinity_hint(irq, m, false);
+}
+
+/**
+ * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
+ *			       cpumask to the interrupt
+ * @irq:	Interrupt to update
+ * @m:		cpumask pointer (NULL to clear the hint)
+ *
+ * Updates the affinity hint and if @m is not NULL it applies it as the
+ * affinity of that interrupt.
+ */
+static inline int
+irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
+{
+	return __irq_apply_affinity_hint(irq, m, true);
+}
+
+/*
+ * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
+ * instead.
+ */
+static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+	return irq_set_affinity_and_hint(irq, m);
+}
+
 extern int irq_update_affinity_desc(unsigned int irq,
 				    struct irq_affinity_desc *affinity);
@@ -361,6 +400,18 @@ static inline int irq_can_set_affinity(unsigned int irq)
 static inline int irq_select_affinity(unsigned int irq) { return 0; }
+static inline int irq_update_affinity_hint(unsigned int irq,
+					   const struct cpumask *m)
+{
+	return -EINVAL;
+}
+
+static inline int irq_set_affinity_and_hint(unsigned int irq,
+					    const struct cpumask *m)
+{
+	return -EINVAL;
+}
+
 static inline int irq_set_affinity_hint(unsigned int irq,
 					const struct cpumask *m)
 {

View file

@@ -615,7 +615,7 @@ struct rdists {
 		void __iomem	*rd_base;
 		struct page	*pend_page;
 		phys_addr_t	phys_base;
-		bool		lpi_enabled;
+		u64		flags;
 		cpumask_t	*vpe_table_mask;
 		void		*vpe_l1_base;
 	} __percpu		*rdist;
@@ -624,6 +624,7 @@ struct rdists {
 	u64			flags;
 	u32			gicd_typer;
 	u32			gicd_typer2;
+	int			cpuhp_memreserve_state;
 	bool			has_vlpis;
 	bool			has_rvpeid;
 	bool			has_direct_lpi;
@@ -632,6 +633,7 @@ struct rdists {
 struct irq_domain;
 struct fwnode_handle;
+int __init its_lpi_memreserve_init(void);
 int its_cpu_init(void);
 int its_init(struct fwnode_handle *handle, struct rdists *rdists,
 	     struct irq_domain *domain);

View file

@@ -131,7 +131,7 @@ struct irq_domain_ops {
 #endif
 };
-extern struct irq_domain_ops irq_generic_chip_ops;
+extern const struct irq_domain_ops irq_generic_chip_ops;
 struct irq_domain_chip_generic;

View file

@@ -451,7 +451,7 @@ static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
 }
-struct irq_domain_ops irq_generic_chip_ops = {
+const struct irq_domain_ops irq_generic_chip_ops = {
 	.map	= irq_map_generic_chip,
 	.unmap	= irq_unmap_generic_chip,
 	.xlate	= irq_domain_xlate_onetwocell,

View file

@@ -486,7 +486,8 @@ int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
 }
 EXPORT_SYMBOL_GPL(irq_force_affinity);
-int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
+			      bool setaffinity)
 {
 	unsigned long flags;
 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
@@ -495,12 +496,11 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 		return -EINVAL;
 	desc->affinity_hint = m;
 	irq_put_desc_unlock(desc, flags);
-	/* set the initial affinity to prevent every interrupt being on CPU0 */
-	if (m)
+	if (m && setaffinity)
 		__irq_set_affinity(irq, m, false);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
 static void irq_affinity_notify(struct work_struct *work)
 {