Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
 "The irq department delivers:

   - new core infrastructure to allow better management of multi-queue
     devices (interrupt spreading, node aware descriptor allocation ...)

   - a new interrupt flow handler to support the newfangled Intel VMD
     devices.

   - yet another new interrupt controller driver.

   - a series of fixes from Ben Dooks addressing sparse warnings, missing
     includes, missing static declarations, etc.

   - a fix for the error handling in the hierarchical domain allocation
     code.

   - the usual pile of small updates to core and driver code"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (46 commits)
  genirq: Fix missing irq allocation affinity hint
  irqdomain: Fix irq_domain_alloc_irqs_recursive() error handling
  irq/Documentation: Correct result of echoing 5 to smp_affinity
  MAINTAINERS: Remove Jiang Liu from irq domains
  genirq/msi: Fix broken debug output
  genirq: Add a helper to spread an affinity mask for MSI/MSI-X vectors
  genirq/msi: Make use of affinity aware allocations
  genirq: Use affinity hint in irqdesc allocation
  genirq: Add affinity hint to irq allocation
  genirq: Introduce IRQD_AFFINITY_MANAGED flag
  genirq/msi: Remove unused MSI_FLAG_IDENTITY_MAP
  irqchip/s3c24xx: Fixup IO accessors for big endian
  irqchip/exynos-combiner: Fix usage of __raw IO
  irqdomain: Fix disposal of mappings for interrupt hierarchies
  irqchip/aspeed-vic: Add irq controller for Aspeed
  doc/devicetree: Add Aspeed VIC bindings
  x86/PCI/VMD: Use untracked irq handler
  genirq: Add untracked irq handler
  irqchip/mips-gic: Populate irq_domain names
  irqchip/gicv3-its: Implement two-level(indirect) device table support
  ...
Linus Torvalds 2016-07-25 21:35:03 -07:00
Parent: 55392c4c06 eb0dc47ab6
Commit: e65805251f
44 changed files: 1269 additions and 324 deletions
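As context for the affinity work in this merge, here is a minimal, hedged sketch of how a driver might consume the new irq_create_affinity_mask() helper (the caller below is hypothetical; only the helper itself comes from this series):

	#include <linux/interrupt.h>
	#include <linux/slab.h>

	static int example_spread_vectors(unsigned int want_vecs)
	{
		unsigned int nvecs = want_vecs;
		struct cpumask *mask;

		/* may shrink nvecs to the number of CPUs the mask covers */
		mask = irq_create_affinity_mask(&nvecs);

		/*
		 * ... hand 'mask' down as the affinity hint when allocating
		 * the MSI/MSI-X descriptors (see the msi changes below) ...
		 */

		kfree(mask);	/* the core copies the hint during allocation */
		return nvecs;
	}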


@@ -21,6 +21,7 @@ Main node required properties:
"arm,pl390"
"arm,tc11mp-gic"
"brcm,brahma-b15-gic"
"nvidia,tegra210-agic"
"qcom,msm-8660-qgic"
"qcom,msm-qgic2"
- interrupt-controller : Identifies the node as an interrupt controller
@@ -68,7 +69,7 @@ Optional
"ic_clk" (for "arm,arm11mp-gic")
"PERIPHCLKEN" (for "arm,cortex-a15-gic")
"PERIPHCLK", "PERIPHCLKEN" (for "arm,cortex-a9-gic")
"clk" (for "arm,gic-400")
"clk" (for "arm,gic-400" and "nvidia,tegra210")
"gclk" (for "arm,pl390")
- power-domains : A phandle and PM domain specifier as defined by bindings of


@@ -0,0 +1,22 @@
Aspeed Vectored Interrupt Controller
These bindings are for the Aspeed AST2400 interrupt controller register layout.
The SoC has a legacy register layout, but this driver does not support that
mode of operation.
Required properties:
- compatible : should be "aspeed,ast2400-vic".
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value shall be 1.
Example:
vic: interrupt-controller@1e6c0080 {
compatible = "aspeed,ast2400-vic";
interrupt-controller;
#interrupt-cells = <1>;
reg = <0x1e6c0080 0x80>;
};


@@ -725,7 +725,7 @@ IRQ, you can set it by doing:
> echo 1 > /proc/irq/10/smp_affinity
This means that only the first CPU will handle the IRQ, but you can also echo
5 which means that only the first and fourth CPU can handle the IRQ.
5 which means that only the first and third CPU can handle the IRQ.
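Mask 5 is binary 101, i.e. bits 0 and 2 set, which selects the first and third CPU:

> echo 5 > /proc/irq/10/smp_affinity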
The contents of each smp_affinity file is the same by default:


@@ -6235,7 +6235,6 @@ F:	Documentation/devicetree/bindings/interrupt-controller/
F: drivers/irqchip/
IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
M: Jiang Liu <jiang.liu@linux.intel.com>
M: Marc Zyngier <marc.zyngier@arm.com>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core


@@ -242,7 +242,7 @@ unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
int irq;
irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL);
irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL, NULL);
if (irq <= 0)
goto out;


@@ -981,7 +981,7 @@ static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi,
return __irq_domain_alloc_irqs(domain, irq, 1,
ioapic_alloc_attr_node(info),
info, legacy);
info, legacy, NULL);
}
/*
@@ -1014,7 +1014,8 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain,
info->ioapic_pin))
return -ENOMEM;
} else {
irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true);
irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true,
NULL);
if (irq >= 0) {
irq_data = irq_domain_get_irq_data(domain, irq);
data = irq_data->chip_data;


@@ -195,7 +195,7 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
vmdirq->virq = virq;
irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip,
vmdirq, handle_simple_irq, vmd, NULL);
vmdirq, handle_untracked_irq, vmd, NULL);
return 0;
}


@@ -8,6 +8,12 @@ config ARM_GIC
select IRQ_DOMAIN_HIERARCHY
select MULTI_IRQ_HANDLER
config ARM_GIC_PM
bool
depends on PM
select ARM_GIC
select PM_CLK
config ARM_GIC_MAX_NR
int
default 2 if ARCH_REALVIEW


@@ -24,6 +24,7 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
obj-$(CONFIG_REALVIEW_DT) += irq-gic-realview.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
@@ -69,3 +70,4 @@ obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o


@@ -55,14 +55,14 @@ static void combiner_mask_irq(struct irq_data *data)
{
u32 mask = 1 << (data->hwirq % 32);
__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}
static void combiner_unmask_irq(struct irq_data *data)
{
u32 mask = 1 << (data->hwirq % 32);
__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}
static void combiner_handle_cascade_irq(struct irq_desc *desc)
@@ -75,7 +75,7 @@ static void combiner_handle_cascade_irq(struct irq_desc *desc)
chained_irq_enter(chip, desc);
spin_lock(&irq_controller_lock);
status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
spin_unlock(&irq_controller_lock);
status &= chip_data->irq_mask;
@@ -135,7 +135,7 @@ static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
combiner_data->parent_irq = irq;
/* Disable all interrupts */
__raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}
static int combiner_irq_domain_xlate(struct irq_domain *d,
@@ -218,7 +218,7 @@ static int combiner_suspend(void)
for (i = 0; i < max_nr; i++)
combiner_data[i].pm_save =
__raw_readl(combiner_data[i].base + COMBINER_ENABLE_SET);
readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);
return 0;
}
@@ -235,9 +235,9 @@ static void combiner_resume(void)
int i;
for (i = 0; i < max_nr; i++) {
__raw_writel(combiner_data[i].irq_mask,
writel_relaxed(combiner_data[i].irq_mask,
combiner_data[i].base + COMBINER_ENABLE_CLEAR);
__raw_writel(combiner_data[i].pm_save,
writel_relaxed(combiner_data[i].pm_save,
combiner_data[i].base + COMBINER_ENABLE_SET);
}
}


@@ -541,7 +541,7 @@ static void armada_370_xp_mpic_resume(void)
writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
struct syscore_ops armada_370_xp_mpic_syscore_ops = {
static struct syscore_ops armada_370_xp_mpic_syscore_ops = {
.suspend = armada_370_xp_mpic_suspend,
.resume = armada_370_xp_mpic_resume,
};


@@ -0,0 +1,230 @@
/*
* Copyright (C) 2015 - Ben Herrenschmidt, IBM Corp.
*
* Driver for Aspeed "new" VIC as found in SoC generation 3 and later
*
* Based on irq-vic.c:
*
* Copyright (C) 1999 - 2003 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <asm/exception.h>
#include <asm/irq.h>
/* These definitions correspond to the "new mapping" of the
* register set that interleaves "high" and "low". The offsets
* below are for the "low" register, add 4 to get to the high one
*/
#define AVIC_IRQ_STATUS 0x00
#define AVIC_FIQ_STATUS 0x08
#define AVIC_RAW_STATUS 0x10
#define AVIC_INT_SELECT 0x18
#define AVIC_INT_ENABLE 0x20
#define AVIC_INT_ENABLE_CLR 0x28
#define AVIC_INT_TRIGGER 0x30
#define AVIC_INT_TRIGGER_CLR 0x38
#define AVIC_INT_SENSE 0x40
#define AVIC_INT_DUAL_EDGE 0x48
#define AVIC_INT_EVENT 0x50
#define AVIC_EDGE_CLR 0x58
#define AVIC_EDGE_STATUS 0x60
#define NUM_IRQS 64
struct aspeed_vic {
void __iomem *base;
u32 edge_sources[2];
struct irq_domain *dom;
};
static struct aspeed_vic *system_avic;
static void vic_init_hw(struct aspeed_vic *vic)
{
u32 sense;
/* Disable all interrupts */
writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR);
writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR + 4);
/* Make sure no soft trigger is on */
writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR);
writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR + 4);
/* Set everything to be IRQ */
writel(0, vic->base + AVIC_INT_SELECT);
writel(0, vic->base + AVIC_INT_SELECT + 4);
/* Some interrupts have a programmable high/low level trigger
* (4 GPIO direct inputs), for now we assume this was configured
* by firmware. We read which ones are edge now.
*/
sense = readl(vic->base + AVIC_INT_SENSE);
vic->edge_sources[0] = ~sense;
sense = readl(vic->base + AVIC_INT_SENSE + 4);
vic->edge_sources[1] = ~sense;
/* Clear edge detection latches */
writel(0xffffffff, vic->base + AVIC_EDGE_CLR);
writel(0xffffffff, vic->base + AVIC_EDGE_CLR + 4);
}
static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
{
struct aspeed_vic *vic = system_avic;
u32 stat, irq;
for (;;) {
irq = 0;
stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS);
if (!stat) {
stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS + 4);
irq = 32;
}
if (stat == 0)
break;
irq += ffs(stat) - 1;
handle_domain_irq(vic->dom, irq, regs);
}
}
static void avic_ack_irq(struct irq_data *d)
{
struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
unsigned int sidx = d->hwirq >> 5;
unsigned int sbit = 1u << (d->hwirq & 0x1f);
/* Clear edge latch for edge interrupts, nop for level */
if (vic->edge_sources[sidx] & sbit)
writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4);
}
static void avic_mask_irq(struct irq_data *d)
{
struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
unsigned int sidx = d->hwirq >> 5;
unsigned int sbit = 1u << (d->hwirq & 0x1f);
writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4);
}
static void avic_unmask_irq(struct irq_data *d)
{
struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
unsigned int sidx = d->hwirq >> 5;
unsigned int sbit = 1u << (d->hwirq & 0x1f);
writel(sbit, vic->base + AVIC_INT_ENABLE + sidx * 4);
}
/* For level irq, faster than going through a nop "ack" and mask */
static void avic_mask_ack_irq(struct irq_data *d)
{
struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
unsigned int sidx = d->hwirq >> 5;
unsigned int sbit = 1u << (d->hwirq & 0x1f);
/* First mask */
writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4);
/* Then clear edge latch for edge interrupts */
if (vic->edge_sources[sidx] & sbit)
writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4);
}
static struct irq_chip avic_chip = {
.name = "AVIC",
.irq_ack = avic_ack_irq,
.irq_mask = avic_mask_irq,
.irq_unmask = avic_unmask_irq,
.irq_mask_ack = avic_mask_ack_irq,
};
static int avic_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct aspeed_vic *vic = d->host_data;
unsigned int sidx = hwirq >> 5;
unsigned int sbit = 1u << (hwirq & 0x1f);
/* Check if interrupt exists */
if (sidx > 1)
return -EPERM;
if (vic->edge_sources[sidx] & sbit)
irq_set_chip_and_handler(irq, &avic_chip, handle_edge_irq);
else
irq_set_chip_and_handler(irq, &avic_chip, handle_level_irq);
irq_set_chip_data(irq, vic);
irq_set_probe(irq);
return 0;
}
static struct irq_domain_ops avic_dom_ops = {
.map = avic_map,
.xlate = irq_domain_xlate_onetwocell,
};
static int __init avic_of_init(struct device_node *node,
struct device_node *parent)
{
void __iomem *regs;
struct aspeed_vic *vic;
if (WARN(parent, "non-root Aspeed VIC not supported"))
return -EINVAL;
if (WARN(system_avic, "duplicate Aspeed VIC not supported"))
return -EINVAL;
regs = of_iomap(node, 0);
if (WARN_ON(!regs))
return -EIO;
vic = kzalloc(sizeof(struct aspeed_vic), GFP_KERNEL);
if (WARN_ON(!vic)) {
iounmap(regs);
return -ENOMEM;
}
vic->base = regs;
/* Initialize sources, all masked */
vic_init_hw(vic);
/* Ready to receive interrupts */
system_avic = vic;
set_handle_irq(avic_handle_irq);
/* Register our domain */
vic->dom = irq_domain_add_simple(node, NUM_IRQS, 0,
&avic_dom_ops, vic);
return 0;
}
IRQCHIP_DECLARE(aspeed_new_vic, "aspeed,ast2400-vic", avic_of_init);


@@ -52,7 +52,6 @@
#include <linux/irqdomain.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
/* Put the bank and irq (32 bits) into the hwirq */
#define MAKE_HWIRQ(b, n) ((b << 5) | (n))
@@ -242,7 +241,7 @@ static void __exception_irq_entry bcm2835_handle_irq(
u32 hwirq;
while ((hwirq = get_next_armctrl_hwirq()) != ~0)
handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
handle_domain_irq(intc.domain, hwirq, regs);
}
static void bcm2836_chained_handle_irq(struct irq_desc *desc)


@@ -180,7 +180,7 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
} else if (stat) {
u32 hwirq = ffs(stat) - 1;
handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
handle_domain_irq(intc.domain, hwirq, regs);
}
}
@@ -224,8 +224,8 @@ static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
};
#ifdef CONFIG_ARM
int __init bcm2836_smp_boot_secondary(unsigned int cpu,
struct task_struct *idle)
static int __init bcm2836_smp_boot_secondary(unsigned int cpu,
struct task_struct *idle)
{
unsigned long secondary_startup_phys =
(unsigned long)virt_to_phys((void *)secondary_startup);


@@ -215,7 +215,7 @@ static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn,
return 0;
}
int __init bcm7120_l2_intc_probe(struct device_node *dn,
static int __init bcm7120_l2_intc_probe(struct device_node *dn,
struct device_node *parent,
int (*iomap_regs_fn)(struct device_node *,
struct bcm7120_l2_intc_data *),
@@ -339,15 +339,15 @@ out_unmap:
return ret;
}
int __init bcm7120_l2_intc_probe_7120(struct device_node *dn,
struct device_node *parent)
static int __init bcm7120_l2_intc_probe_7120(struct device_node *dn,
struct device_node *parent)
{
return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_7120,
"BCM7120 L2");
}
int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
struct device_node *parent)
static int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
struct device_node *parent)
{
return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_3380,
"BCM3380 L2");


@@ -112,8 +112,8 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
irq_gc_unlock(gc);
}
int __init brcmstb_l2_intc_of_init(struct device_node *np,
struct device_node *parent)
static int __init brcmstb_l2_intc_of_init(struct device_node *np,
struct device_node *parent)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct brcmstb_l2_intc_data *data;


@@ -90,8 +90,8 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
return ret;
}
void __init gic_dist_config(void __iomem *base, int gic_irqs,
void (*sync_access)(void))
void gic_dist_config(void __iomem *base, int gic_irqs,
void (*sync_access)(void))
{
unsigned int i;


@@ -0,0 +1,184 @@
/*
* Copyright (C) 2016 NVIDIA CORPORATION, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
struct gic_clk_data {
unsigned int num_clocks;
const char *const *clocks;
};
static int gic_runtime_resume(struct device *dev)
{
struct gic_chip_data *gic = dev_get_drvdata(dev);
int ret;
ret = pm_clk_resume(dev);
if (ret)
return ret;
/*
* On the very first resume, the pointer to the driver data
* will be NULL and this is intentional, because we do not
* want to restore the GIC on the very first resume. So if
* the pointer is not valid just return.
*/
if (!gic)
return 0;
gic_dist_restore(gic);
gic_cpu_restore(gic);
return 0;
}
static int gic_runtime_suspend(struct device *dev)
{
struct gic_chip_data *gic = dev_get_drvdata(dev);
gic_dist_save(gic);
gic_cpu_save(gic);
return pm_clk_suspend(dev);
}
static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data)
{
struct clk *clk;
unsigned int i;
int ret;
if (!dev || !data)
return -EINVAL;
ret = pm_clk_create(dev);
if (ret)
return ret;
for (i = 0; i < data->num_clocks; i++) {
clk = of_clk_get_by_name(dev->of_node, data->clocks[i]);
if (IS_ERR(clk)) {
dev_err(dev, "failed to get clock %s\n",
data->clocks[i]);
ret = PTR_ERR(clk);
goto error;
}
ret = pm_clk_add_clk(dev, clk);
if (ret) {
dev_err(dev, "failed to add clock at index %d\n", i);
clk_put(clk);
goto error;
}
}
return 0;
error:
pm_clk_destroy(dev);
return ret;
}
static int gic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gic_clk_data *data;
struct gic_chip_data *gic;
int ret, irq;
data = of_device_get_match_data(&pdev->dev);
if (!data) {
dev_err(&pdev->dev, "no device match found\n");
return -ENODEV;
}
irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq) {
dev_err(dev, "no parent interrupt found!\n");
return -EINVAL;
}
ret = gic_get_clocks(dev, data);
if (ret)
goto irq_dispose;
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto rpm_disable;
ret = gic_of_init_child(dev, &gic, irq);
if (ret)
goto rpm_put;
platform_set_drvdata(pdev, gic);
pm_runtime_put(dev);
dev_info(dev, "GIC IRQ controller registered\n");
return 0;
rpm_put:
pm_runtime_put_sync(dev);
rpm_disable:
pm_runtime_disable(dev);
pm_clk_destroy(dev);
irq_dispose:
irq_dispose_mapping(irq);
return ret;
}
static const struct dev_pm_ops gic_pm_ops = {
SET_RUNTIME_PM_OPS(gic_runtime_suspend,
gic_runtime_resume, NULL)
};
static const char * const gic400_clocks[] = {
"clk",
};
static const struct gic_clk_data gic400_data = {
.num_clocks = ARRAY_SIZE(gic400_clocks),
.clocks = gic400_clocks,
};
static const struct of_device_id gic_match[] = {
{ .compatible = "nvidia,tegra210-agic", .data = &gic400_data },
{},
};
MODULE_DEVICE_TABLE(of, gic_match);
static struct platform_driver gic_driver = {
.probe = gic_probe,
.driver = {
.name = "gic",
.of_match_table = gic_match,
.pm = &gic_pm_ops,
}
};
builtin_platform_driver(gic_driver);


@@ -24,6 +24,7 @@
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irqchip/arm-gic.h>
/*
* MSI_TYPER:


@@ -56,13 +56,14 @@ struct its_collection {
};
/*
* The ITS_BASER structure - contains memory information and cached
* value of BASER register configuration.
* The ITS_BASER structure - contains memory information, cached
* value of BASER register configuration and ITS page size.
*/
struct its_baser {
void *base;
u64 val;
u32 order;
u32 psz;
};
/*
@@ -824,6 +825,182 @@ static const char *its_base_type_string[] = {
[GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
};
static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
u32 idx = baser - its->tables;
return readq_relaxed(its->base + GITS_BASER + (idx << 3));
}
static void its_write_baser(struct its_node *its, struct its_baser *baser,
u64 val)
{
u32 idx = baser - its->tables;
writeq_relaxed(val, its->base + GITS_BASER + (idx << 3));
baser->val = its_read_baser(its, baser);
}
static int its_setup_baser(struct its_node *its, struct its_baser *baser,
u64 cache, u64 shr, u32 psz, u32 order,
bool indirect)
{
u64 val = its_read_baser(its, baser);
u64 esz = GITS_BASER_ENTRY_SIZE(val);
u64 type = GITS_BASER_TYPE(val);
u32 alloc_pages;
void *base;
u64 tmp;
retry_alloc_baser:
alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
if (alloc_pages > GITS_BASER_PAGES_MAX) {
pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
&its->phys_base, its_base_type_string[type],
alloc_pages, GITS_BASER_PAGES_MAX);
alloc_pages = GITS_BASER_PAGES_MAX;
order = get_order(GITS_BASER_PAGES_MAX * psz);
}
base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!base)
return -ENOMEM;
retry_baser:
val = (virt_to_phys(base) |
(type << GITS_BASER_TYPE_SHIFT) |
((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
cache |
shr |
GITS_BASER_VALID);
val |= indirect ? GITS_BASER_INDIRECT : 0x0;
switch (psz) {
case SZ_4K:
val |= GITS_BASER_PAGE_SIZE_4K;
break;
case SZ_16K:
val |= GITS_BASER_PAGE_SIZE_16K;
break;
case SZ_64K:
val |= GITS_BASER_PAGE_SIZE_64K;
break;
}
its_write_baser(its, baser, val);
tmp = baser->val;
if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
/*
* Shareability didn't stick. Just use
* whatever the read reported, which is likely
* to be the only thing this redistributor
* supports. If that's zero, make it
* non-cacheable as well.
*/
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
if (!shr) {
cache = GITS_BASER_nC;
__flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
}
goto retry_baser;
}
if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
/*
* Page size didn't stick. Let's try a smaller
* size and retry. If we reach 4K, then
* something is horribly wrong...
*/
free_pages((unsigned long)base, order);
baser->base = NULL;
switch (psz) {
case SZ_16K:
psz = SZ_4K;
goto retry_alloc_baser;
case SZ_64K:
psz = SZ_16K;
goto retry_alloc_baser;
}
}
if (val != tmp) {
pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n",
&its->phys_base, its_base_type_string[type],
(unsigned long) val, (unsigned long) tmp);
free_pages((unsigned long)base, order);
return -ENXIO;
}
baser->order = order;
baser->base = base;
baser->psz = psz;
tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp),
its_base_type_string[type],
(unsigned long)virt_to_phys(base),
indirect ? "indirect" : "flat", (int)esz,
psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
return 0;
}
static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser,
u32 psz, u32 *order)
{
u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
u64 val = GITS_BASER_InnerShareable | GITS_BASER_WaWb;
u32 ids = its->device_ids;
u32 new_order = *order;
bool indirect = false;
/* No need to enable Indirection if memory requirement < (psz*2)bytes */
if ((esz << ids) > (psz * 2)) {
/*
* Find out whether hw supports a single or two-level table by
* reading bit at offset '62' after writing '1' to it.
*/
its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
indirect = !!(baser->val & GITS_BASER_INDIRECT);
if (indirect) {
/*
* The size of a lvl2 table is equal to the ITS page size,
* 'psz'. To size the lvl1 table, subtract from 'ids' (the
* width reported by the ITS hardware) the ID bits resolved
* by one lvl2 table; the lvl1 table then needs (1 << ids)
* entries of GITS_LVL1_ENTRY_SIZE bytes each.
*/
ids -= ilog2(psz / esz);
esz = GITS_LVL1_ENTRY_SIZE;
}
}
/*
* Allocate as many entries as required to fit the
* range of device IDs that the ITS can grok... The ID
* space being incredibly sparse, this results in a
* massive waste of memory if the two-level device table
* feature is not supported by the hardware.
*/
new_order = max_t(u32, get_order(esz << ids), new_order);
if (new_order >= MAX_ORDER) {
new_order = MAX_ORDER - 1;
ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz);
pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
&its->phys_base, its->device_ids, ids);
}
*order = new_order;
return indirect;
}
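To make the threshold above concrete (assumed example numbers): with an 8-byte device table entry (esz = 8), 20 device ID bits (ids = 20) and 64K ITS pages (psz = SZ_64K), a flat table would need esz << ids = 8MB, far beyond the psz * 2 = 128KB cutoff, so indirection is attempted. Each 64K lvl2 page then resolves psz / esz = 8192 IDs, ids shrinks by ilog2(8192) = 13 to 7, and the lvl1 table needs only (1 << 7) * GITS_LVL1_ENTRY_SIZE = 1KB.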
static void its_free_tables(struct its_node *its)
{
int i;
@@ -837,167 +1014,52 @@ static void its_free_tables(struct its_node *its)
}
}
static int its_alloc_tables(const char *node_name, struct its_node *its)
static int its_alloc_tables(struct its_node *its)
{
int err;
int i;
int psz = SZ_64K;
u64 typer = readq_relaxed(its->base + GITS_TYPER);
u32 ids = GITS_TYPER_DEVBITS(typer);
u64 shr = GITS_BASER_InnerShareable;
u64 cache;
u64 typer;
u32 ids;
u64 cache = GITS_BASER_WaWb;
u32 psz = SZ_64K;
int err, i;
if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
/*
* erratum 22375: only alloc 8MB table size
* erratum 24313: ignore memory access type
*/
cache = 0;
ids = 0x14; /* 20 bits, 8MB */
} else {
cache = GITS_BASER_WaWb;
typer = readq_relaxed(its->base + GITS_TYPER);
ids = GITS_TYPER_DEVBITS(typer);
* erratum 22375: only alloc 8MB table size
* erratum 24313: ignore memory access type
*/
cache = GITS_BASER_nCnB;
ids = 0x14; /* 20 bits, 8MB */
}
its->device_ids = ids;
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
struct its_baser *baser = its->tables + i;
u64 val = its_read_baser(its, baser);
u64 type = GITS_BASER_TYPE(val);
u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
int order = get_order(psz);
int alloc_pages;
u64 tmp;
void *base;
u32 order = get_order(psz);
bool indirect = false;
if (type == GITS_BASER_TYPE_NONE)
continue;
/*
* Allocate as many entries as required to fit the
* range of device IDs that the ITS can grok... The ID
* space being incredibly sparse, this results in a
* massive waste of memory.
*
* For other tables, only allocate a single page.
*/
if (type == GITS_BASER_TYPE_DEVICE) {
/*
* 'order' was initialized earlier to the default page
* granule of the ITS. We can't have an allocation
* smaller than that. If the requested allocation
* is smaller, round up to the default page granule.
*/
order = max(get_order((1UL << ids) * entry_size),
order);
if (order >= MAX_ORDER) {
order = MAX_ORDER - 1;
pr_warn("%s: Device Table too large, reduce its page order to %u\n",
node_name, order);
}
if (type == GITS_BASER_TYPE_DEVICE)
indirect = its_parse_baser_device(its, baser, psz, &order);
err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
if (err < 0) {
its_free_tables(its);
return err;
}
retry_alloc_baser:
alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
if (alloc_pages > GITS_BASER_PAGES_MAX) {
alloc_pages = GITS_BASER_PAGES_MAX;
order = get_order(GITS_BASER_PAGES_MAX * psz);
pr_warn("%s: Device Table too large, reduce its page order to %u (%u pages)\n",
node_name, order, alloc_pages);
}
base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!base) {
err = -ENOMEM;
goto out_free;
}
its->tables[i].base = base;
its->tables[i].order = order;
retry_baser:
val = (virt_to_phys(base) |
(type << GITS_BASER_TYPE_SHIFT) |
((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
cache |
shr |
GITS_BASER_VALID);
switch (psz) {
case SZ_4K:
val |= GITS_BASER_PAGE_SIZE_4K;
break;
case SZ_16K:
val |= GITS_BASER_PAGE_SIZE_16K;
break;
case SZ_64K:
val |= GITS_BASER_PAGE_SIZE_64K;
break;
}
val |= alloc_pages - 1;
its->tables[i].val = val;
writeq_relaxed(val, its->base + GITS_BASER + i * 8);
tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
/*
* Shareability didn't stick. Just use
* whatever the read reported, which is likely
* to be the only thing this redistributor
* supports. If that's zero, make it
* non-cacheable as well.
*/
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
if (!shr) {
cache = GITS_BASER_nC;
__flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
}
goto retry_baser;
}
if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
/*
* Page size didn't stick. Let's try a smaller
* size and retry. If we reach 4K, then
* something is horribly wrong...
*/
free_pages((unsigned long)base, order);
its->tables[i].base = NULL;
switch (psz) {
case SZ_16K:
psz = SZ_4K;
goto retry_alloc_baser;
case SZ_64K:
psz = SZ_16K;
goto retry_alloc_baser;
}
}
if (val != tmp) {
pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
node_name, i,
(unsigned long) val, (unsigned long) tmp);
err = -ENXIO;
goto out_free;
}
pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
(int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
its_base_type_string[type],
(unsigned long)virt_to_phys(base),
psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
/* Update settings which will be used for next BASERn */
psz = baser->psz;
cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
}
return 0;
out_free:
its_free_tables(its);
return err;
}
static int its_alloc_collections(struct its_node *its)
@@ -1185,10 +1247,57 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
return NULL;
}
static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
{
struct its_baser *baser;
struct page *page;
u32 esz, idx;
__le64 *table;
baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
/* Don't allow device id that exceeds ITS hardware limit */
if (!baser)
return (ilog2(dev_id) < its->device_ids);
/* Don't allow device id that exceeds single, flat table limit */
esz = GITS_BASER_ENTRY_SIZE(baser->val);
if (!(baser->val & GITS_BASER_INDIRECT))
return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
/* Compute 1st level table index & check if that exceeds table limit */
idx = dev_id >> ilog2(baser->psz / esz);
if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
return false;
table = baser->base;
/* Allocate memory for 2nd level table */
if (!table[idx]) {
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
if (!page)
return false;
/* Flush Lvl2 table to PoC if hw doesn't support coherency */
if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
__flush_dcache_area(page_address(page), baser->psz);
table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
__flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE);
/* Ensure updated table contents are visible to ITS hardware */
dsb(sy);
}
return true;
}
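A worked example of the lookup above, with assumed numbers: for baser->psz = SZ_64K and esz = 8, one lvl2 page maps psz / esz = 8192 device IDs, so dev_id = 20000 selects lvl1 slot idx = 20000 >> 13 = 2. If table[2] is empty, a zeroed 64K page is allocated and its physical address, OR'ed with GITS_BASER_VALID, is written into that lvl1 entry.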
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int nvecs)
{
struct its_baser *baser;
struct its_device *dev;
unsigned long *lpi_map;
unsigned long flags;
@@ -1199,14 +1308,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int nr_ites;
int sz;
baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
/* Don't allow 'dev_id' that exceeds single, flat table limit */
if (baser) {
if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) /
GITS_BASER_ENTRY_SIZE(baser->val)))
return NULL;
} else if (ilog2(dev_id) >= its->device_ids)
if (!its_alloc_device_table(its, dev_id))
return NULL;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1569,7 +1671,7 @@ static int __init its_probe(struct device_node *node,
its_enable_quirks(its);
err = its_alloc_tables(node->full_name, its);
err = its_alloc_tables(its);
if (err)
goto out_free_cmd;


@@ -75,7 +75,7 @@ struct gic_chip_data {
void __iomem *raw_dist_base;
void __iomem *raw_cpu_base;
u32 percpu_offset;
#ifdef CONFIG_CPU_PM
#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
@@ -449,7 +449,7 @@ static void gic_cpu_if_up(struct gic_chip_data *gic)
}
static void __init gic_dist_init(struct gic_chip_data *gic)
static void gic_dist_init(struct gic_chip_data *gic)
{
unsigned int i;
u32 cpumask;
@@ -528,14 +528,14 @@ int gic_cpu_if_down(unsigned int gic_nr)
return 0;
}
#ifdef CONFIG_CPU_PM
#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
/*
* Saves the GIC distributor registers during suspend or idle. Must be called
* with interrupts disabled but before powering down the GIC. After calling
* this function, no interrupts will be delivered by the GIC, and another
* platform-specific wakeup source must be enabled.
*/
static void gic_dist_save(struct gic_chip_data *gic)
void gic_dist_save(struct gic_chip_data *gic)
{
unsigned int gic_irqs;
void __iomem *dist_base;
@@ -574,7 +574,7 @@ static void gic_dist_save(struct gic_chip_data *gic)
* handled normally, but any edge interrupts that occurred will not be seen by
* the GIC and need to be handled by the platform-specific wakeup source.
*/
static void gic_dist_restore(struct gic_chip_data *gic)
void gic_dist_restore(struct gic_chip_data *gic)
{
unsigned int gic_irqs;
unsigned int i;
@@ -620,7 +620,7 @@ static void gic_dist_restore(struct gic_chip_data *gic)
writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}
static void gic_cpu_save(struct gic_chip_data *gic)
void gic_cpu_save(struct gic_chip_data *gic)
{
int i;
u32 *ptr;
@@ -650,7 +650,7 @@ static void gic_cpu_save(struct gic_chip_data *gic)
}
static void gic_cpu_restore(struct gic_chip_data *gic)
void gic_cpu_restore(struct gic_chip_data *gic)
{
int i;
u32 *ptr;
@@ -727,7 +727,7 @@ static struct notifier_block gic_notifier_block = {
.notifier_call = gic_notifier,
};
static int __init gic_pm_init(struct gic_chip_data *gic)
static int gic_pm_init(struct gic_chip_data *gic)
{
gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
sizeof(u32));
@@ -757,7 +757,7 @@ free_ppi_enable:
return -ENOMEM;
}
#else
static int __init gic_pm_init(struct gic_chip_data *gic)
static int gic_pm_init(struct gic_chip_data *gic)
{
return 0;
}
@@ -1032,32 +1032,31 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
.unmap = gic_irq_domain_unmap,
};
static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
struct fwnode_handle *handle)
static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
const char *name, bool use_eoimode1)
{
irq_hw_number_t hwirq_base;
int gic_irqs, irq_base, i, ret;
if (WARN_ON(!gic || gic->domain))
return -EINVAL;
/* Initialize irq_chip */
gic->chip = gic_chip;
gic->chip.name = name;
gic->chip.parent_device = dev;
if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
if (use_eoimode1) {
gic->chip.irq_mask = gic_eoimode1_mask_irq;
gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
gic->chip.name = kasprintf(GFP_KERNEL, "GICv2");
} else {
gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d",
(int)(gic - &gic_data[0]));
}
#ifdef CONFIG_SMP
if (gic == &gic_data[0])
gic->chip.irq_set_affinity = gic_set_affinity;
#endif
}
static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
struct fwnode_handle *handle)
{
irq_hw_number_t hwirq_base;
int gic_irqs, irq_base, ret;
if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
/* Franken-GIC without banked registers... */
@@ -1138,23 +1137,6 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
goto error;
}
if (gic == &gic_data[0]) {
/*
* Initialize the CPU interface map to all CPUs.
* It will be refined as each CPU probes its ID.
* This is only necessary for the primary GIC.
*/
for (i = 0; i < NR_GIC_CPU_IF; i++)
gic_cpu_map[i] = 0xff;
#ifdef CONFIG_SMP
set_smp_cross_call(gic_raise_softirq);
register_cpu_notifier(&gic_cpu_notifier);
#endif
set_handle_irq(gic_handle_irq);
if (static_key_true(&supports_deactivate))
pr_info("GIC: Using split EOI/Deactivate mode\n");
}
gic_dist_init(gic);
ret = gic_cpu_init(gic);
if (ret)
@@ -1172,7 +1154,47 @@ error:
free_percpu(gic->cpu_base.percpu_base);
}
kfree(gic->chip.name);
return ret;
}
static int __init __gic_init_bases(struct gic_chip_data *gic,
int irq_start,
struct fwnode_handle *handle)
{
char *name;
int i, ret;
if (WARN_ON(!gic || gic->domain))
return -EINVAL;
if (gic == &gic_data[0]) {
/*
* Initialize the CPU interface map to all CPUs.
* It will be refined as each CPU probes its ID.
* This is only necessary for the primary GIC.
*/
for (i = 0; i < NR_GIC_CPU_IF; i++)
gic_cpu_map[i] = 0xff;
#ifdef CONFIG_SMP
set_smp_cross_call(gic_raise_softirq);
register_cpu_notifier(&gic_cpu_notifier);
#endif
set_handle_irq(gic_handle_irq);
if (static_key_true(&supports_deactivate))
pr_info("GIC: Using split EOI/Deactivate mode\n");
}
if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
name = kasprintf(GFP_KERNEL, "GICv2");
gic_init_chip(gic, NULL, name, true);
} else {
name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic-&gic_data[0]));
gic_init_chip(gic, NULL, name, false);
}
ret = gic_init_bases(gic, irq_start, handle);
if (ret)
kfree(name);
return ret;
}
@@ -1250,7 +1272,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
return true;
}
static int __init gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
{
if (!gic || !node)
return -EINVAL;
@@ -1274,6 +1296,34 @@ error:
return -ENOMEM;
}
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
{
int ret;
if (!dev || !dev->of_node || !gic || !irq)
return -EINVAL;
*gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
if (!*gic)
return -ENOMEM;
gic_init_chip(*gic, dev, dev->of_node->name, false);
ret = gic_of_setup(*gic, dev->of_node);
if (ret)
return ret;
ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode);
if (ret) {
gic_teardown(*gic);
return ret;
}
irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);
return 0;
}
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
int ret;
@@ -1353,7 +1403,11 @@ IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
#else
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
{
return -ENOTSUPP;
}
#endif
#ifdef CONFIG_ACPI


@@ -1042,12 +1042,14 @@ static void __init __gic_init(unsigned long gic_base_addr,
&gic_irq_domain_ops, NULL);
if (!gic_irq_domain)
panic("Failed to add GIC IRQ domain");
gic_irq_domain->name = "mips-gic-irq";
gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
node, &gic_dev_domain_ops, NULL);
if (!gic_dev_domain)
panic("Failed to add GIC DEV domain");
gic_dev_domain->name = "mips-gic-dev";
gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
IRQ_DOMAIN_FLAG_IPI_PER_CPU,
@@ -1056,6 +1058,7 @@
if (!gic_ipi_domain)
panic("Failed to add GIC IPI domain");
gic_ipi_domain->name = "mips-gic-ipi";
gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;
if (node &&


@@ -23,6 +23,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqchip/irq-omap-intc.h>
/* Define these here for now until we drop all board-files */
#define OMAP24XX_IC_BASE 0x480fe000
#define OMAP34XX_IC_BASE 0x48200000


@@ -92,9 +92,9 @@ static void s3c_irq_mask(struct irq_data *data)
unsigned long mask;
unsigned int irqno;
mask = __raw_readl(intc->reg_mask);
mask = readl_relaxed(intc->reg_mask);
mask |= (1UL << irq_data->offset);
__raw_writel(mask, intc->reg_mask);
writel_relaxed(mask, intc->reg_mask);
if (parent_intc) {
parent_data = &parent_intc->irqs[irq_data->parent_irq];
@@ -119,9 +119,9 @@ static void s3c_irq_unmask(struct irq_data *data)
unsigned long mask;
unsigned int irqno;
mask = __raw_readl(intc->reg_mask);
mask = readl_relaxed(intc->reg_mask);
mask &= ~(1UL << irq_data->offset);
__raw_writel(mask, intc->reg_mask);
writel_relaxed(mask, intc->reg_mask);
if (parent_intc) {
irqno = irq_find_mapping(parent_intc->domain,
@@ -136,9 +136,9 @@ static inline void s3c_irq_ack(struct irq_data *data)
struct s3c_irq_intc *intc = irq_data->intc;
unsigned long bitval = 1UL << irq_data->offset;
__raw_writel(bitval, intc->reg_pending);
writel_relaxed(bitval, intc->reg_pending);
if (intc->reg_intpnd)
__raw_writel(bitval, intc->reg_intpnd);
writel_relaxed(bitval, intc->reg_intpnd);
}
static int s3c_irq_type(struct irq_data *data, unsigned int type)
@@ -172,9 +172,9 @@ static int s3c_irqext_type_set(void __iomem *gpcon_reg,
unsigned long newvalue = 0, value;
/* Set the GPIO to external interrupt mode */
value = __raw_readl(gpcon_reg);
value = readl_relaxed(gpcon_reg);
value = (value & ~(3 << gpcon_offset)) | (0x02 << gpcon_offset);
__raw_writel(value, gpcon_reg);
writel_relaxed(value, gpcon_reg);
/* Set the external interrupt to pointed trigger type */
switch (type)
@@ -208,9 +208,9 @@ static int s3c_irqext_type_set(void __iomem *gpcon_reg,
return -EINVAL;
}
value = __raw_readl(extint_reg);
value = readl_relaxed(extint_reg);
value = (value & ~(7 << extint_offset)) | (newvalue << extint_offset);
__raw_writel(value, extint_reg);
writel_relaxed(value, extint_reg);
return 0;
}
@@ -315,8 +315,8 @@ static void s3c_irq_demux(struct irq_desc *desc)
chained_irq_enter(chip, desc);
src = __raw_readl(sub_intc->reg_pending);
msk = __raw_readl(sub_intc->reg_mask);
src = readl_relaxed(sub_intc->reg_pending);
msk = readl_relaxed(sub_intc->reg_mask);
src &= ~msk;
src &= irq_data->sub_bits;
@@ -337,7 +337,7 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc,
int pnd;
int offset;
pnd = __raw_readl(intc->reg_intpnd);
pnd = readl_relaxed(intc->reg_intpnd);
if (!pnd)
return false;
@@ -352,7 +352,7 @@ *
*
* Thanks to Klaus, Shannon, et al for helping to debug this problem
*/
offset = __raw_readl(intc->reg_intpnd + 4);
offset = readl_relaxed(intc->reg_intpnd + 4);
/* Find the bit manually, when the offset is wrong.
* The pending register only ever contains the one bit of the next
@@ -406,7 +406,7 @@ int s3c24xx_set_fiq(unsigned int irq, bool on)
intmod = 0;
}
__raw_writel(intmod, S3C2410_INTMOD);
writel_relaxed(intmod, S3C2410_INTMOD);
return 0;
}
@@ -508,14 +508,14 @@ static void s3c24xx_clear_intc(struct s3c_irq_intc *intc)
last = 0;
for (i = 0; i < 4; i++) {
pend = __raw_readl(reg_source);
pend = readl_relaxed(reg_source);
if (pend == 0 || pend == last)
break;
__raw_writel(pend, intc->reg_pending);
writel_relaxed(pend, intc->reg_pending);
if (intc->reg_intpnd)
__raw_writel(pend, intc->reg_intpnd);
writel_relaxed(pend, intc->reg_intpnd);
pr_info("irq: clearing pending status %08x\n", (int)pend);
last = pend;


@@ -29,6 +29,11 @@
static struct irq_domain *sirfsoc_irqdomain;
static void __iomem *sirfsoc_irq_get_regbase(void)
{
return (void __iomem __force *)sirfsoc_irqdomain->host_data;
}
static __init void sirfsoc_alloc_gc(void __iomem *base)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
@@ -53,7 +58,7 @@ static __init void sirfsoc_alloc_gc(void __iomem *base)
static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
{
void __iomem *base = sirfsoc_irqdomain->host_data;
void __iomem *base = sirfsoc_irq_get_regbase();
u32 irqstat;
irqstat = readl_relaxed(base + SIRFSOC_INIT_IRQ_ID);
@@ -94,7 +99,7 @@ static struct sirfsoc_irq_status sirfsoc_irq_st;
static int sirfsoc_irq_suspend(void)
{
void __iomem *base = sirfsoc_irqdomain->host_data;
void __iomem *base = sirfsoc_irq_get_regbase();
sirfsoc_irq_st.mask0 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK0);
sirfsoc_irq_st.mask1 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK1);
@@ -106,7 +111,7 @@ static int sirfsoc_irq_suspend(void)
static void sirfsoc_irq_resume(void)
{
void __iomem *base = sirfsoc_irqdomain->host_data;
void __iomem *base = sirfsoc_irq_get_regbase();
writel_relaxed(sirfsoc_irq_st.mask0, base + SIRFSOC_INT_RISC_MASK0);
writel_relaxed(sirfsoc_irq_st.mask1, base + SIRFSOC_INT_RISC_MASK1);


@@ -90,7 +90,7 @@ static struct tegra_ictlr_info *lic;
static inline void tegra_ictlr_write_mask(struct irq_data *d, unsigned long reg)
{
void __iomem *base = d->chip_data;
void __iomem *base = (void __iomem __force *)d->chip_data;
u32 mask;
mask = BIT(d->hwirq % 32);
@@ -266,7 +266,7 @@ static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
&tegra_ictlr_chip,
info->base[ictlr]);
(void __force *)info->base[ictlr]);
}
parent_fwspec = *fwspec;


@@ -167,7 +167,7 @@ static int vic_suspend(void)
return 0;
}
struct syscore_ops vic_syscore_ops = {
static struct syscore_ops vic_syscore_ops = {
.suspend = vic_suspend,
.resume = vic_resume,
};
@@ -517,7 +517,8 @@ int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq,
EXPORT_SYMBOL_GPL(vic_init_cascaded);
#ifdef CONFIG_OF
int __init vic_of_init(struct device_node *node, struct device_node *parent)
static int __init vic_of_init(struct device_node *node,
struct device_node *parent)
{
void __iomem *regs;
u32 interrupt_mask = ~0;


@@ -278,6 +278,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs);
#else /* CONFIG_SMP */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -308,6 +310,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
return 0;
}
static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
{
*nr_vecs = 1;
return NULL;
}
#endif /* CONFIG_SMP */
/*


@@ -197,6 +197,7 @@ struct irq_data {
* IRQD_IRQ_INPROGRESS - In progress state of the interrupt
* IRQD_WAKEUP_ARMED - Wakeup mode armed
* IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU
* IRQD_AFFINITY_MANAGED - Affinity is auto-managed by the kernel
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -212,6 +213,7 @@ enum {
IRQD_IRQ_INPROGRESS = (1 << 18),
IRQD_WAKEUP_ARMED = (1 << 19),
IRQD_FORWARDED_TO_VCPU = (1 << 20),
IRQD_AFFINITY_MANAGED = (1 << 21),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -305,6 +307,11 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
}
static inline bool irqd_affinity_is_managed(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
}
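A hedged sketch of the policy this accessor enables (compare irq_can_set_affinity_usr() further down; the helper name here is made up):

	static bool example_usr_may_set_affinity(struct irq_data *d)
	{
		/* kernel-managed affinities must not be rewritten from userspace */
		return !irqd_affinity_is_managed(d);
	}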
#undef __irqd_to_state
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -315,6 +322,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
/**
* struct irq_chip - hardware interrupt chip descriptor
*
* @parent_device: pointer to parent device for irqchip
* @name: name for /proc/interrupts
* @irq_startup: start up the interrupt (defaults to ->enable if NULL)
* @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
@@ -354,6 +362,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @flags: chip specific flags
*/
struct irq_chip {
struct device *parent_device;
const char *name;
unsigned int (*irq_startup)(struct irq_data *data);
void (*irq_shutdown)(struct irq_data *data);
@@ -482,12 +491,15 @@ extern void handle_fasteoi_irq(struct irq_desc *desc);
extern void handle_edge_irq(struct irq_desc *desc);
extern void handle_edge_eoi_irq(struct irq_desc *desc);
extern void handle_simple_irq(struct irq_desc *desc);
extern void handle_untracked_irq(struct irq_desc *desc);
extern void handle_percpu_irq(struct irq_desc *desc);
extern void handle_percpu_devid_irq(struct irq_desc *desc);
extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
@@ -701,11 +713,11 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
struct module *owner);
struct module *owner, const struct cpumask *affinity);
/* use macros to avoid needing export.h for THIS_MODULE */
#define irq_alloc_descs(irq, from, cnt, node) \
__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE)
__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
#define irq_alloc_desc(node) \
irq_alloc_descs(-1, 0, 1, node)
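With the new final argument, a caller that wants its descriptors spread according to a pre-computed mask might do something like the following (hypothetical call site; the real users are the MSI paths in this series):

	int virq = __irq_alloc_descs(-1, 0, nvecs, dev_to_node(dev),
				     THIS_MODULE, affinity_mask);
	if (virq < 0)
		return virq;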


@@ -204,6 +204,7 @@
#define GITS_BASER_NR_REGS 8
#define GITS_BASER_VALID (1UL << 63)
#define GITS_BASER_INDIRECT (1UL << 62)
#define GITS_BASER_nCnB (0UL << 59)
#define GITS_BASER_nC (1UL << 59)
#define GITS_BASER_RaWt (2UL << 59)
@@ -228,6 +229,7 @@
#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
#define GITS_BASER_PAGES_MAX 256
#define GITS_BASER_PAGES_SHIFT (0)
#define GITS_BASER_TYPE_NONE 0
#define GITS_BASER_TYPE_DEVICE 1
@@ -238,6 +240,8 @@
#define GITS_BASER_TYPE_RESERVED6 6
#define GITS_BASER_TYPE_RESERVED7 7
#define GITS_LVL1_ENTRY_SIZE (8UL)
/*
* ITS commands
*/


@@ -101,9 +101,14 @@
#include <linux/irqdomain.h>
struct device_node;
struct gic_chip_data;
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
int gic_cpu_if_down(unsigned int gic_nr);
void gic_cpu_save(struct gic_chip_data *gic);
void gic_cpu_restore(struct gic_chip_data *gic);
void gic_dist_save(struct gic_chip_data *gic);
void gic_dist_restore(struct gic_chip_data *gic);
/*
* Subdrivers that need some preparatory work can initialize their
@@ -111,6 +116,12 @@ int gic_cpu_if_down(unsigned int gic_nr);
*/
int gic_of_init(struct device_node *node, struct device_node *parent);
/*
* Initialises and registers a non-root or child GIC chip. Memory for
* the gic_chip_data structure is dynamically allocated.
*/
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
/*
* Legacy platforms not converted to DT yet must use this to init
* their GIC


@@ -39,6 +39,7 @@ struct irq_domain;
struct of_device_id;
struct irq_chip;
struct irq_data;
struct cpumask;
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
@@ -217,7 +218,8 @@ extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
irq_hw_number_t hwirq, int node);
irq_hw_number_t hwirq, int node,
const struct cpumask *affinity);
static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
{
@@ -389,7 +391,7 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par
extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
bool realloc);
bool realloc, const struct cpumask *affinity);
extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
extern void irq_domain_activate_irq(struct irq_data *irq_data);
extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
@@ -397,7 +399,8 @@ extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
unsigned int nr_irqs, int node, void *arg)
{
return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false);
return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false,
NULL);
}
extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
@@ -452,6 +455,9 @@ static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
return -1;
}
static inline void irq_domain_free_irqs(unsigned int virq,
unsigned int nr_irqs) { }
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
return false;


@@ -47,6 +47,7 @@ struct fsl_mc_msi_desc {
* @nvec_used: The number of vectors used
* @dev: Pointer to the device which uses this descriptor
* @msg: The last set MSI message cached for reuse
* @affinity: Optional pointer to a cpu affinity mask for this descriptor
*
* @masked: [PCI MSI/X] Mask bits
* @is_msix: [PCI MSI/X] True if MSI-X
@@ -67,6 +68,7 @@ struct msi_desc {
unsigned int nvec_used;
struct device *dev;
struct msi_msg msg;
const struct cpumask *affinity;
union {
/* PCI MSI/X specific data */
@@ -264,12 +266,10 @@ enum {
* callbacks.
*/
MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
/* Build identity map between hwirq and irq */
MSI_FLAG_IDENTITY_MAP = (1 << 2),
/* Support multiple PCI MSI interrupts */
MSI_FLAG_MULTI_PCI_MSI = (1 << 3),
MSI_FLAG_MULTI_PCI_MSI = (1 << 2),
/* Support PCI MSIX interrupts */
MSI_FLAG_PCI_MSIX = (1 << 4),
MSI_FLAG_PCI_MSIX = (1 << 3),
};
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,


@@ -9,3 +9,4 @@ obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
obj-$(CONFIG_PM_SLEEP) += pm.o
obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
obj-$(CONFIG_SMP) += affinity.o

kernel/irq/affinity.c (new file, 61 lines)

@@ -0,0 +1,61 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
static int get_first_sibling(unsigned int cpu)
{
unsigned int ret;
ret = cpumask_first(topology_sibling_cpumask(cpu));
if (ret < nr_cpu_ids)
return ret;
return cpu;
}
/*
* Take a map of online CPUs and the number of available interrupt vectors
* and generate an output cpumask suitable for spreading MSI/MSI-X vectors
* so that they are distributed as well as possible around the CPUs. If
* more vectors than CPUs are available we'll map one to each CPU,
* otherwise we map one to the first sibling of each core.
*
* If there are more vectors than CPUs we will still only have one bit
* set per CPU, but interrupt code will keep on assigning the vectors from
* the start of the bitmap until we run out of vectors.
*/
struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
{
struct cpumask *affinity_mask;
unsigned int max_vecs = *nr_vecs;
if (max_vecs == 1)
return NULL;
affinity_mask = kzalloc(cpumask_size(), GFP_KERNEL);
if (!affinity_mask) {
*nr_vecs = 1;
return NULL;
}
if (max_vecs >= num_online_cpus()) {
cpumask_copy(affinity_mask, cpu_online_mask);
*nr_vecs = num_online_cpus();
} else {
unsigned int vecs = 0, cpu;
for_each_online_cpu(cpu) {
if (cpu == get_first_sibling(cpu)) {
cpumask_set_cpu(cpu, affinity_mask);
vecs++;
}
if (--max_vecs == 0)
break;
}
*nr_vecs = vecs;
}
return affinity_mask;
}
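As an illustration of the two regimes, take a hypothetical 4-CPU machine with hyperthread pairs 0/1 and 2/3: asking for 8 vectors copies cpu_online_mask and rewrites *nr_vecs to 4, while asking for 2 vectors sets only the first sibling of each core (CPUs 0 and 2) and returns *nr_vecs = 2.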


@@ -426,6 +426,49 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
/**
* handle_untracked_irq - Simple and software-decoded IRQs.
* @desc: the interrupt description structure for this irq
*
* Untracked interrupts are sent from a demultiplexing interrupt
* handler when the demultiplexer does not know which device in its
* multiplexed irq domain generated the interrupt. IRQs handled
* through here are not subjected to stats tracking, randomness, or
* spurious interrupt detection.
*
* Note: Like handle_simple_irq, the caller is expected to handle
* the ack, clear, mask and unmask issues if necessary.
*/
void handle_untracked_irq(struct irq_desc *desc)
{
unsigned int flags = 0;
raw_spin_lock(&desc->lock);
if (!irq_may_run(desc))
goto out_unlock;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
goto out_unlock;
}
desc->istate &= ~IRQS_PENDING;
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock(&desc->lock);
__handle_irq_event_percpu(desc, &flags);
raw_spin_lock(&desc->lock);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
out_unlock:
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);
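
/*
 * Hedged usage sketch -- not from this commit, all demo_* names are
 * made up: a demultiplexing irqchip driver would typically install
 * the new flow handler when mapping child interrupts whose
 * originating device it cannot identify.
 */
static struct irq_chip demo_chip = {
	.name = "demo-demux",
};

static int demo_irq_map(struct irq_domain *d, unsigned int virq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &demo_chip, handle_untracked_irq);
	return 0;
}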
/*
* Called unconditionally from handle_level_irq() and only for oneshot
* interrupts from handle_fasteoi_irq()
@ -1093,3 +1136,43 @@ int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
return 0;
}
/**
* irq_chip_pm_get - Enable power for an IRQ chip
* @data: Pointer to interrupt specific data
*
* Enable the power to the IRQ chip referenced by the interrupt data
* structure.
*/
int irq_chip_pm_get(struct irq_data *data)
{
int retval;
if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
retval = pm_runtime_get_sync(data->chip->parent_device);
if (retval < 0) {
pm_runtime_put_noidle(data->chip->parent_device);
return retval;
}
}
return 0;
}
/**
* irq_chip_pm_put - Disable power for an IRQ chip
* @data: Pointer to interrupt specific data
*
* Disable the power to the IRQ chip referenced by the interrupt data
* structure. Note that power will only be disabled once this
* function has been called for all IRQs that have called irq_chip_pm_get().
*/
int irq_chip_pm_put(struct irq_data *data)
{
int retval = 0;
if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
retval = pm_runtime_put(data->chip->parent_device);
return (retval < 0) ? retval : 0;
}
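
/*
 * The intended pairing, mirrored by the setup_irq()/request_irq()
 * changes later in this diff (sketch only):
 *
 *	ret = irq_chip_pm_get(&desc->irq_data);
 *	if (ret < 0)
 *		return ret;
 *	ret = __setup_irq(irq, desc, action);
 *	if (ret)
 *		irq_chip_pm_put(&desc->irq_data);
 *
 * The references are runtime PM counted, so the parent device only
 * powers down after the last irq_chip_pm_put().
 */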

View file

@ -132,10 +132,10 @@ void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
wake_up_process(action->thread);
}
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags)
{
irqreturn_t retval = IRQ_NONE;
unsigned int flags = 0, irq = desc->irq_data.irq;
unsigned int irq = desc->irq_data.irq;
struct irqaction *action;
for_each_action_of_desc(desc, action) {
@ -164,7 +164,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
/* Fall through to add to randomness */
case IRQ_HANDLED:
flags |= action->flags;
*flags |= action->flags;
break;
default:
@ -174,7 +174,17 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
retval |= res;
}
add_interrupt_randomness(irq, flags);
return retval;
}
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
irqreturn_t retval;
unsigned int flags = 0;
retval = __handle_irq_event_percpu(desc, &flags);
add_interrupt_randomness(desc->irq_data.irq, flags);
if (!noirqdebug)
note_interrupt(desc, retval);
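
/*
 * Splitting __handle_irq_event_percpu() out of this wrapper lets
 * handle_untracked_irq() run the actions directly while skipping
 * add_interrupt_randomness() and note_interrupt(), which stay here.
 */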

View file

@ -7,6 +7,7 @@
*/
#include <linux/irqdesc.h>
#include <linux/kernel_stat.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_SPARSE_IRQ
# define IRQ_BITMAP_BITS (NR_IRQS + 8196)
@ -83,6 +84,7 @@ extern void irq_mark_irq(unsigned int irq);
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);
@ -105,6 +107,8 @@ static inline void unregister_handler_proc(unsigned int irq,
struct irqaction *action) { }
#endif
extern bool irq_can_set_affinity_usr(unsigned int irq);
extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
extern void irq_set_thread_affinity(struct irq_desc *desc);

View file

@ -76,14 +76,14 @@ int irq_reserve_ipi(struct irq_domain *domain,
}
}
virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE);
virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL);
if (virq <= 0) {
pr_warn("Can't reserve IPI, failed to alloc descs\n");
return -ENOMEM;
}
virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
(void *) dest, true);
(void *) dest, true, NULL);
if (virq <= 0) {
pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");

View file

@ -68,9 +68,13 @@ static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
return 0;
}
static void desc_smp_init(struct irq_desc *desc, int node)
static void desc_smp_init(struct irq_desc *desc, int node,
const struct cpumask *affinity)
{
cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
if (!affinity)
affinity = irq_default_affinity;
cpumask_copy(desc->irq_common_data.affinity, affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_clear(desc->pending_mask);
#endif
@ -82,11 +86,12 @@ static void desc_smp_init(struct irq_desc *desc, int node)
#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
struct module *owner)
const struct cpumask *affinity, struct module *owner)
{
int cpu;
@ -107,7 +112,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
desc->owner = owner;
for_each_possible_cpu(cpu)
*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
desc_smp_init(desc, node);
desc_smp_init(desc, node, affinity);
}
int nr_irqs = NR_IRQS;
@ -158,7 +163,9 @@ void irq_unlock_sparse(void)
mutex_unlock(&sparse_irq_lock);
}
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
const struct cpumask *affinity,
struct module *owner)
{
struct irq_desc *desc;
gfp_t gfp = GFP_KERNEL;
@ -178,7 +185,8 @@ static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_rcu_head(&desc->rcu);
desc_set_defaults(irq, desc, node, owner);
desc_set_defaults(irq, desc, node, affinity, owner);
irqd_set(&desc->irq_data, flags);
return desc;
@ -223,13 +231,32 @@ static void free_desc(unsigned int irq)
}
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
struct module *owner)
const struct cpumask *affinity, struct module *owner)
{
const struct cpumask *mask = NULL;
struct irq_desc *desc;
int i;
unsigned int flags;
int i, cpu = -1;
if (affinity && cpumask_empty(affinity))
return -EINVAL;
flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
for (i = 0; i < cnt; i++) {
desc = alloc_desc(start + i, node, owner);
if (affinity) {
cpu = cpumask_next(cpu, affinity);
if (cpu >= nr_cpu_ids)
cpu = cpumask_first(affinity);
node = cpu_to_node(cpu);
/*
* For single allocations we use the caller-provided
* mask, otherwise we use the mask of the target cpu.
*/
mask = cnt == 1 ? affinity : cpumask_of(cpu);
}
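/*
 * Worked example (illustrative): with affinity = {CPU2, CPU5} and
 * cnt = 3, the descriptors land on the nodes of CPU2, CPU5 and,
 * wrapping around, CPU2 again.
 */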
desc = alloc_desc(start + i, node, flags, mask, owner);
if (!desc)
goto err;
mutex_lock(&sparse_irq_lock);
@ -277,7 +304,7 @@ int __init early_irq_init(void)
nr_irqs = initcnt;
for (i = 0; i < initcnt; i++) {
desc = alloc_desc(i, node, NULL);
desc = alloc_desc(i, node, 0, NULL, NULL);
set_bit(i, allocated_irqs);
irq_insert_desc(i, desc);
}
@ -311,7 +338,7 @@ int __init early_irq_init(void)
alloc_masks(&desc[i], GFP_KERNEL, node);
raw_spin_lock_init(&desc[i].lock);
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
desc_set_defaults(i, &desc[i], node, NULL);
desc_set_defaults(i, &desc[i], node, NULL, NULL);
}
return arch_early_irq_init();
}
@ -328,11 +355,12 @@ static void free_desc(unsigned int irq)
unsigned long flags;
raw_spin_lock_irqsave(&desc->lock, flags);
desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
const struct cpumask *affinity,
struct module *owner)
{
u32 i;
@ -453,12 +481,15 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
* @cnt: Number of consecutive irqs to allocate.
* @node: Preferred node on which the irq descriptor should be allocated
* @owner: Owning module (can be NULL)
* @affinity: Optional pointer to an affinity mask which hints where the
* irq descriptors should be allocated and which default
* affinities to use
*
* Returns the first irq number or error code
*/
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
struct module *owner)
struct module *owner, const struct cpumask *affinity)
{
int start, ret;
@ -494,7 +525,7 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
bitmap_set(allocated_irqs, start, cnt);
mutex_unlock(&sparse_irq_lock);
return alloc_descs(start, cnt, node, owner);
return alloc_descs(start, cnt, node, affinity, owner);
err:
mutex_unlock(&sparse_irq_lock);
@ -512,7 +543,7 @@ EXPORT_SYMBOL_GPL(__irq_alloc_descs);
*/
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);
int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);
if (irq < 0)
return 0;

View file

@ -481,7 +481,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
}
/* Allocate a virtual interrupt number */
virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node));
virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
if (virq <= 0) {
pr_debug("-> virq allocation failed\n");
return 0;
@ -567,6 +567,7 @@ static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
struct irq_domain *domain;
struct irq_data *irq_data;
irq_hw_number_t hwirq;
unsigned int type = IRQ_TYPE_NONE;
int virq;
@ -588,15 +589,46 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
if (irq_domain_translate(domain, fwspec, &hwirq, &type))
return 0;
if (irq_domain_is_hierarchy(domain)) {
/*
* WARN if the irqchip returns a type with bits
* outside the sense mask set, and clear these bits.
*/
if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
type &= IRQ_TYPE_SENSE_MASK;
/*
* If we've already configured this interrupt,
* don't do it again, or hell will break loose.
*/
virq = irq_find_mapping(domain, hwirq);
if (virq) {
/*
* If we've already configured this interrupt,
* don't do it again, or hell will break loose.
* If the trigger type is not specified or matches the
* current trigger type, then we are done, so return the
* interrupt number.
*/
virq = irq_find_mapping(domain, hwirq);
if (virq)
if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
return virq;
/*
* If the trigger type has not been set yet, then set
* it now and return the interrupt number.
*/
if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
irq_data = irq_get_irq_data(virq);
if (!irq_data)
return 0;
irqd_set_trigger_type(irq_data, type);
return virq;
}
pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
return 0;
}
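/*
 * Illustrative failure case: if one consumer mapped this hwirq as
 * IRQ_TYPE_LEVEL_HIGH and a second now asks for IRQ_TYPE_EDGE_RISING,
 * the second mapping warns and fails instead of silently
 * reprogramming the trigger behind the first user's back.
 */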
if (irq_domain_is_hierarchy(domain)) {
virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
if (virq <= 0)
return 0;
@ -607,10 +639,18 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
return virq;
}
/* Set type if specified and different than the current one */
if (type != IRQ_TYPE_NONE &&
type != irq_get_trigger_type(virq))
irq_set_irq_type(virq, type);
irq_data = irq_get_irq_data(virq);
if (!irq_data) {
if (irq_domain_is_hierarchy(domain))
irq_domain_free_irqs(virq, 1);
else
irq_dispose_mapping(virq);
return 0;
}
/* Store trigger type */
irqd_set_trigger_type(irq_data, type);
return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
@ -640,8 +680,12 @@ void irq_dispose_mapping(unsigned int virq)
if (WARN_ON(domain == NULL))
return;
irq_domain_disassociate(domain, virq);
irq_free_desc(virq);
if (irq_domain_is_hierarchy(domain)) {
irq_domain_free_irqs(virq, 1);
} else {
irq_domain_disassociate(domain, virq);
irq_free_desc(virq);
}
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
@ -835,19 +879,23 @@ const struct irq_domain_ops irq_domain_simple_ops = {
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
int node)
int node, const struct cpumask *affinity)
{
unsigned int hint;
if (virq >= 0) {
virq = irq_alloc_descs(virq, virq, cnt, node);
virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
affinity);
} else {
hint = hwirq % nr_irqs;
if (hint == 0)
hint++;
virq = irq_alloc_descs_from(hint, cnt, node);
if (virq <= 0 && hint > 1)
virq = irq_alloc_descs_from(1, cnt, node);
virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
affinity);
if (virq <= 0 && hint > 1) {
virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
affinity);
}
}
return virq;
@ -1144,8 +1192,10 @@ int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
if (recursive)
ret = irq_domain_alloc_irqs_recursive(parent, irq_base,
nr_irqs, arg);
if (ret >= 0)
ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
if (ret < 0)
return ret;
ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
if (ret < 0 && recursive)
irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs);
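/*
 * This reordering is the error-handling fix from the merge log: a
 * parent allocation failure now returns immediately, so the unwind
 * above only runs when ops->alloc() itself fails after the parent
 * allocation succeeded.
 */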
@ -1160,6 +1210,7 @@ int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
* @node: NUMA node id for memory allocation
* @arg: domain specific argument
* @realloc: IRQ descriptors have already been allocated if true
* @affinity: Optional irq affinity mask for multiqueue devices
*
* Allocate IRQ numbers and initialize all data structures to support
* hierarchy IRQ domains.
@ -1175,7 +1226,7 @@ int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
*/
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
bool realloc)
bool realloc, const struct cpumask *affinity)
{
int i, ret, virq;
@ -1193,7 +1244,8 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
if (realloc && irq_base >= 0) {
virq = irq_base;
} else {
virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node);
virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
affinity);
if (virq < 0) {
pr_debug("cannot allocate IRQ(base %d, count %d)\n",
irq_base, nr_irqs);

View file

@ -115,12 +115,12 @@ EXPORT_SYMBOL(synchronize_irq);
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;
static int __irq_can_set_affinity(struct irq_desc *desc)
static bool __irq_can_set_affinity(struct irq_desc *desc)
{
if (!desc || !irqd_can_balance(&desc->irq_data) ||
!desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
return 0;
return 1;
return false;
return true;
}
/**
@ -133,6 +133,21 @@ int irq_can_set_affinity(unsigned int irq)
return __irq_can_set_affinity(irq_to_desc(irq));
}
/**
* irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
* @irq: Interrupt to check
*
* Like irq_can_set_affinity() above, but additionally checks for the
* AFFINITY_MANAGED flag.
*/
bool irq_can_set_affinity_usr(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
return __irq_can_set_affinity(desc) &&
!irqd_affinity_is_managed(&desc->irq_data);
}
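
/*
 * This is the gate used by the /proc/irq/<N>/smp_affinity write path
 * (see the proc.c hunk below): managed-affinity interrupts now reject
 * userspace affinity changes with -EIO.
 */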
/**
* irq_set_thread_affinity - Notify irq threads to adjust affinity
* @desc: irq descriptor whose affinity changed
@ -338,10 +353,11 @@ static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
return 0;
/*
* Preserve a userspace affinity setup, but make sure that
* one of the targets is online.
* Preserve the managed affinity setting and a userspace affinity
* setup, but make sure that one of the targets is online.
*/
if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
if (irqd_affinity_is_managed(&desc->irq_data) ||
irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
if (cpumask_intersects(desc->irq_common_data.affinity,
cpu_online_mask))
set = desc->irq_common_data.affinity;
@ -1116,6 +1132,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
new->irq = irq;
/*
* If the trigger type is not specified by the caller,
* then use the default for this interrupt.
*/
if (!(new->flags & IRQF_TRIGGER_MASK))
new->flags |= irqd_get_trigger_type(&desc->irq_data);
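
/*
 * Illustrative effect (hypothetical caller, not in this diff): a
 * request_irq() call that passes no IRQF_TRIGGER_* bits now inherits
 * whatever trigger type firmware or devicetree already configured
 * for this line, instead of leaving it unspecified.
 */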
/*
* Check whether the interrupt nests into another interrupt
* thread.
@ -1409,10 +1432,18 @@ int setup_irq(unsigned int irq, struct irqaction *act)
if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return -EINVAL;
retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0)
return retval;
chip_bus_lock(desc);
retval = __setup_irq(irq, desc, act);
chip_bus_sync_unlock(desc);
if (retval)
irq_chip_pm_put(&desc->irq_data);
return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
@ -1506,6 +1537,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
}
}
irq_chip_pm_put(&desc->irq_data);
module_put(desc->owner);
kfree(action->secondary);
return action;
@ -1648,11 +1680,16 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
action->name = devname;
action->dev_id = dev_id;
retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0)
return retval;
chip_bus_lock(desc);
retval = __setup_irq(irq, desc, action);
chip_bus_sync_unlock(desc);
if (retval) {
irq_chip_pm_put(&desc->irq_data);
kfree(action->secondary);
kfree(action);
}
@ -1730,7 +1767,14 @@ void enable_percpu_irq(unsigned int irq, unsigned int type)
if (!desc)
return;
/*
* If the trigger type is not specified by the caller, then
* use the default for this interrupt.
*/
type &= IRQ_TYPE_SENSE_MASK;
if (type == IRQ_TYPE_NONE)
type = irqd_get_trigger_type(&desc->irq_data);
if (type != IRQ_TYPE_NONE) {
int ret;
@ -1822,6 +1866,7 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_
unregister_handler_proc(irq, action);
irq_chip_pm_put(&desc->irq_data);
module_put(desc->owner);
return action;
@ -1884,10 +1929,18 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act)
if (!desc || !irq_settings_is_per_cpu_devid(desc))
return -EINVAL;
retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0)
return retval;
chip_bus_lock(desc);
retval = __setup_irq(irq, desc, act);
chip_bus_sync_unlock(desc);
if (retval)
irq_chip_pm_put(&desc->irq_data);
return retval;
}
@ -1931,12 +1984,18 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
action->name = devname;
action->percpu_dev_id = dev_id;
retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0)
return retval;
chip_bus_lock(desc);
retval = __setup_irq(irq, desc, action);
chip_bus_sync_unlock(desc);
if (retval)
if (retval) {
irq_chip_pm_put(&desc->irq_data);
kfree(action);
}
return retval;
}

View file

@ -324,7 +324,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
struct msi_domain_ops *ops = info->ops;
msi_alloc_info_t arg;
struct msi_desc *desc;
int i, ret, virq = -1;
int i, ret, virq;
ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
if (ret)
@ -332,13 +332,10 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
for_each_msi_entry(desc, dev) {
ops->set_desc(&arg, desc);
if (info->flags & MSI_FLAG_IDENTITY_MAP)
virq = (int)ops->get_hwirq(info, &arg);
else
virq = -1;
virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used,
dev_to_node(dev), &arg, false);
virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
dev_to_node(dev), &arg, false,
desc->affinity);
if (virq < 0) {
ret = -ENOSPC;
if (ops->handle_error)
@ -356,6 +353,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
ops->msi_finish(&arg, 0);
for_each_msi_entry(desc, dev) {
virq = desc->irq;
if (desc->nvec_used == 1)
dev_dbg(dev, "irq %d for MSI\n", virq);
else

View file

@ -96,7 +96,7 @@ static ssize_t write_irq_affinity(int type, struct file *file,
cpumask_var_t new_value;
int err;
if (!irq_can_set_affinity(irq) || no_irq_affinity)
if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
return -EIO;
if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
@ -311,7 +311,6 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
!name_unique(irq, action))
return;
memset(name, 0, MAX_NAMELEN);
snprintf(name, MAX_NAMELEN, "%s", action->name);
/* create /proc/irq/1234/handler/ */
@ -340,7 +339,6 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
if (desc->dir)
goto out_unlock;
memset(name, 0, MAX_NAMELEN);
sprintf(name, "%d", irq);
/* create /proc/irq/1234 */
@ -386,7 +384,6 @@ void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
#endif
remove_proc_entry("spurious", desc->dir);
memset(name, 0, MAX_NAMELEN);
sprintf(name, "%u", irq);
remove_proc_entry(name, root_irq_dir);
}
@ -421,12 +418,8 @@ void init_irq_proc(void)
/*
* Create entries for all existing IRQs.
*/
for_each_irq_desc(irq, desc) {
if (!desc)
continue;
for_each_irq_desc(irq, desc)
register_irq_proc(irq, desc);
}
}
#ifdef CONFIG_GENERIC_IRQ_SHOW