Merge branch 'remotes/lorenzo/pci/mmio-dma-ranges'

- Consolidate DT "dma-ranges" parsing and convert all host drivers to use
    shared parsing (Rob Herring)

* remotes/lorenzo/pci/mmio-dma-ranges:
  PCI: Make devm_of_pci_get_host_bridge_resources() static
  PCI: rcar: Use inbound resources for setup
  PCI: iproc: Use inbound resources for setup
  PCI: xgene: Use inbound resources for setup
  PCI: v3-semi: Use inbound resources for setup
  PCI: ftpci100: Use inbound resources for setup
  PCI: of: Add inbound resource parsing to helpers
  PCI: versatile: Enable COMPILE_TEST
  PCI: versatile: Remove usage of PHYS_OFFSET
  PCI: versatile: Use pci_parse_request_of_pci_ranges()
  PCI: xilinx-nwl: Use pci_parse_request_of_pci_ranges()
  PCI: xilinx: Use pci_parse_request_of_pci_ranges()
  PCI: xgene: Use pci_parse_request_of_pci_ranges()
  PCI: v3-semi: Use pci_parse_request_of_pci_ranges()
  PCI: rockchip: Drop storing driver private outbound resource data
  PCI: rockchip: Use pci_parse_request_of_pci_ranges()
  PCI: mobiveil: Use pci_parse_request_of_pci_ranges()
  PCI: mediatek: Use pci_parse_request_of_pci_ranges()
  PCI: iproc: Use pci_parse_request_of_pci_ranges()
  PCI: faraday: Use pci_parse_request_of_pci_ranges()
  PCI: dwc: Use pci_parse_request_of_pci_ranges()
  PCI: altera: Use pci_parse_request_of_pci_ranges()
  PCI: aardvark: Use pci_parse_request_of_pci_ranges()
  PCI: Export pci_parse_request_of_pci_ranges()
  resource: Add a resource_list_first_type helper

# Conflicts:
#	drivers/pci/controller/pcie-rcar.c
Author: Bjorn Helgaas, 2019-11-28 08:54:53 -06:00
Parents: d8ddab6363 3b55809cf9
Commit: 7bd4c4a7b0
23 changed files with 272 additions and 604 deletions
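
All of the driver conversions below converge on the reworked pci_parse_request_of_pci_ranges() helper, which now takes a third list for the inbound "dma-ranges" resources and can optionally hand back the bus resource. The following is a minimal sketch of that shared probe pattern, assuming <linux/pci.h>, <linux/of_pci.h> and <linux/platform_device.h>; the foo_* names and foo_pci_ops are placeholders, not code from this series:

struct foo_pcie {				/* hypothetical driver private data */
	unsigned char root_bus_nr;
};

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct foo_pcie *pcie;
	struct resource *bus;
	int ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;
	pcie = pci_host_bridge_priv(bridge);

	/*
	 * Parse "ranges" into bridge->windows and "dma-ranges" into
	 * bridge->dma_ranges; the helper also requests the resources,
	 * maps the I/O window and returns the bus resource.
	 */
	ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
					      &bridge->dma_ranges, &bus);
	if (ret)
		return ret;

	pcie->root_bus_nr = bus->start;

	/* hardware-specific outbound/inbound window programming goes here */

	bridge->dev.parent = dev;
	bridge->sysdata = pcie;
	bridge->busnr = bus->start;
	bridge->ops = &foo_pci_ops;		/* placeholder config accessors */
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		return ret;

	pci_bus_add_devices(bridge->bus);
	return 0;
}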


@ -107,7 +107,7 @@ config PCI_V3_SEMI
config PCI_VERSATILE
bool "ARM Versatile PB PCI controller"
depends on ARCH_VERSATILE
depends on ARCH_VERSATILE || COMPILE_TEST
config PCIE_IPROC
tristate


@ -179,7 +179,7 @@ static int cdns_pcie_host_init(struct device *dev,
int err;
/* Parse our PCI ranges and request their resources */
err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range);
if (err)
return err;


@ -321,7 +321,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win, *tmp;
struct resource_entry *win;
struct pci_bus *child;
struct pci_host_bridge *bridge;
struct resource *cfg_res;
@ -344,31 +344,20 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (!bridge)
return -ENOMEM;
ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
&bridge->windows, &pp->io_base);
if (ret)
return ret;
ret = devm_request_pci_bus_resources(dev, &bridge->windows);
ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
&bridge->dma_ranges, NULL);
if (ret)
return ret;
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
resource_list_for_each_entry(win, &bridge->windows) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
ret = devm_pci_remap_iospace(dev, win->res,
pp->io_base);
if (ret) {
dev_warn(dev, "Error %d: failed to map resource %pR\n",
ret, win->res);
resource_list_destroy_entry(win);
} else {
pp->io = win->res;
pp->io->name = "I/O";
pp->io_size = resource_size(pp->io);
pp->io_bus_addr = pp->io->start - win->offset;
}
pp->io = win->res;
pp->io->name = "I/O";
pp->io_size = resource_size(pp->io);
pp->io_bus_addr = pp->io->start - win->offset;
pp->io_base = pci_pio_to_address(pp->io->start);
break;
case IORESOURCE_MEM:
pp->mem = win->res;


@ -190,7 +190,6 @@
struct advk_pcie {
struct platform_device *pdev;
void __iomem *base;
struct list_head resources;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
struct irq_domain *msi_domain;
@ -953,63 +952,11 @@ static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
{
int err, res_valid = 0;
struct device *dev = &pcie->pdev->dev;
struct resource_entry *win, *tmp;
resource_size_t iobase;
INIT_LIST_HEAD(&pcie->resources);
err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
&pcie->resources, &iobase);
if (err)
return err;
err = devm_request_pci_bus_resources(dev, &pcie->resources);
if (err)
goto out_release_res;
resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
err = devm_pci_remap_iospace(dev, res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
resource_list_destroy_entry(win);
}
break;
case IORESOURCE_MEM:
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
break;
case IORESOURCE_BUS:
pcie->root_bus_nr = res->start;
break;
}
}
if (!res_valid) {
dev_err(dev, "non-prefetchable memory resource required\n");
err = -EINVAL;
goto out_release_res;
}
return 0;
out_release_res:
pci_free_resource_list(&pcie->resources);
return err;
}
static int advk_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
struct resource *res;
struct resource *res, *bus;
struct pci_host_bridge *bridge;
int ret, irq;
@ -1034,11 +981,13 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
ret = advk_pcie_parse_request_of_pci_ranges(pcie);
ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
&bridge->dma_ranges, &bus);
if (ret) {
dev_err(dev, "Failed to parse resources\n");
return ret;
}
pcie->root_bus_nr = bus->start;
advk_pcie_setup_hw(pcie);
@ -1057,7 +1006,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = 0;


@ -375,12 +375,11 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
return 0;
}
static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
struct device_node *np)
static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p)
{
struct of_pci_range range;
struct of_pci_range_parser parser;
struct device *dev = p->dev;
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(p);
struct resource_entry *entry;
u32 confreg[3] = {
FARADAY_PCI_MEM1_BASE_SIZE,
FARADAY_PCI_MEM2_BASE_SIZE,
@ -389,19 +388,13 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
int i = 0;
u32 val;
if (of_pci_dma_range_parser_init(&parser, np)) {
dev_err(dev, "missing dma-ranges property\n");
return -EINVAL;
}
/*
* Get the dma-ranges from the device tree
*/
for_each_of_pci_range(&parser, &range) {
u64 end = range.pci_addr + range.size - 1;
resource_list_for_each_entry(entry, &bridge->dma_ranges) {
u64 pci_addr = entry->res->start - entry->offset;
u64 end = entry->res->end - entry->offset;
int ret;
ret = faraday_res_to_memcfg(range.pci_addr, range.size, &val);
ret = faraday_res_to_memcfg(pci_addr,
resource_size(entry->res), &val);
if (ret) {
dev_err(dev,
"DMA range %d: illegal MEM resource size\n", i);
@ -409,7 +402,7 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
}
dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n",
i + 1, range.pci_addr, end, val);
i + 1, pci_addr, end, val);
if (i <= 2) {
faraday_raw_pci_write_config(p, 0, 0, confreg[i],
4, val);
@ -430,10 +423,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
const struct faraday_pci_variant *variant =
of_device_get_match_data(dev);
struct resource *regs;
resource_size_t io_base;
struct resource_entry *win;
struct faraday_pci *p;
struct resource *mem;
struct resource *io;
struct pci_host_bridge *host;
struct clk *clk;
@ -441,7 +432,6 @@ static int faraday_pci_probe(struct platform_device *pdev)
unsigned char cur_bus_speed = PCI_SPEED_33MHz;
int ret;
u32 val;
LIST_HEAD(res);
host = devm_pci_alloc_host_bridge(dev, sizeof(*p));
if (!host)
@ -480,44 +470,21 @@ static int faraday_pci_probe(struct platform_device *pdev)
if (IS_ERR(p->base))
return PTR_ERR(p->base);
ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
&res, &io_base);
ret = pci_parse_request_of_pci_ranges(dev, &host->windows,
&host->dma_ranges, NULL);
if (ret)
return ret;
ret = devm_request_pci_bus_resources(dev, &res);
if (ret)
return ret;
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry(win, &res) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
io = win->res;
io->name = "Gemini PCI I/O";
if (!faraday_res_to_memcfg(io->start - win->offset,
resource_size(io), &val)) {
/* setup I/O space size */
writel(val, p->base + PCI_IOSIZE);
} else {
dev_err(dev, "illegal IO mem size\n");
return -EINVAL;
}
ret = devm_pci_remap_iospace(dev, io, io_base);
if (ret) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
ret, io);
continue;
}
break;
case IORESOURCE_MEM:
mem = win->res;
mem->name = "Gemini PCI MEM";
break;
case IORESOURCE_BUS:
break;
default:
break;
win = resource_list_first_type(&host->windows, IORESOURCE_IO);
if (win) {
io = win->res;
if (!faraday_res_to_memcfg(io->start - win->offset,
resource_size(io), &val)) {
/* setup I/O space size */
writel(val, p->base + PCI_IOSIZE);
} else {
dev_err(dev, "illegal IO mem size\n");
return -EINVAL;
}
}
@ -565,11 +532,10 @@ static int faraday_pci_probe(struct platform_device *pdev)
cur_bus_speed = PCI_SPEED_66MHz;
}
ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node);
ret = faraday_pci_parse_map_dma_ranges(p);
if (ret)
return ret;
list_splice_init(&res, &host->windows);
ret = pci_scan_root_bus_bridge(host);
if (ret) {
dev_err(dev, "failed to scan host: %d\n", ret);
@ -581,7 +547,6 @@ static int faraday_pci_probe(struct platform_device *pdev)
pci_bus_assign_resources(p->bus);
pci_bus_add_devices(p->bus);
pci_free_resource_list(&res);
return 0;
}


@ -27,7 +27,7 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
struct pci_config_window *cfg;
/* Parse our PCI ranges and request their resources */
err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range);
if (err)
return ERR_PTR(err);


@ -241,10 +241,8 @@ struct v3_pci {
void __iomem *config_base;
struct pci_bus *bus;
u32 config_mem;
u32 io_mem;
u32 non_pre_mem;
u32 pre_mem;
phys_addr_t io_bus_addr;
phys_addr_t non_pre_bus_addr;
phys_addr_t pre_bus_addr;
struct regmap *map;
@ -520,35 +518,22 @@ static int v3_integrator_init(struct v3_pci *v3)
}
static int v3_pci_setup_resource(struct v3_pci *v3,
resource_size_t io_base,
struct pci_host_bridge *host,
struct resource_entry *win)
{
struct device *dev = v3->dev;
struct resource *mem;
struct resource *io;
int ret;
switch (resource_type(win->res)) {
case IORESOURCE_IO:
io = win->res;
io->name = "V3 PCI I/O";
v3->io_mem = io_base;
v3->io_bus_addr = io->start - win->offset;
dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
io, &v3->io_bus_addr);
ret = devm_pci_remap_iospace(dev, io, io_base);
if (ret) {
dev_warn(dev,
"error %d: failed to map resource %pR\n",
ret, io);
return ret;
}
/* Setup window 2 - PCI I/O */
writel(v3_addr_to_lb_base2(v3->io_mem) |
writel(v3_addr_to_lb_base2(pci_pio_to_address(io->start)) |
V3_LB_BASE2_ENABLE,
v3->base + V3_LB_BASE2);
writew(v3_addr_to_lb_map2(v3->io_bus_addr),
writew(v3_addr_to_lb_map2(io->start - win->offset),
v3->base + V3_LB_MAP2);
break;
case IORESOURCE_MEM:
@ -613,28 +598,30 @@ static int v3_pci_setup_resource(struct v3_pci *v3,
}
static int v3_get_dma_range_config(struct v3_pci *v3,
struct of_pci_range *range,
struct resource_entry *entry,
u32 *pci_base, u32 *pci_map)
{
struct device *dev = v3->dev;
u64 cpu_end = range->cpu_addr + range->size - 1;
u64 pci_end = range->pci_addr + range->size - 1;
u64 cpu_addr = entry->res->start;
u64 cpu_end = entry->res->end;
u64 pci_end = cpu_end - entry->offset;
u64 pci_addr = entry->res->start - entry->offset;
u32 val;
if (range->pci_addr & ~V3_PCI_BASE_M_ADR_BASE) {
if (pci_addr & ~V3_PCI_BASE_M_ADR_BASE) {
dev_err(dev, "illegal range, only PCI bits 31..20 allowed\n");
return -EINVAL;
}
val = ((u32)range->pci_addr) & V3_PCI_BASE_M_ADR_BASE;
val = ((u32)pci_addr) & V3_PCI_BASE_M_ADR_BASE;
*pci_base = val;
if (range->cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) {
if (cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) {
dev_err(dev, "illegal range, only CPU bits 31..20 allowed\n");
return -EINVAL;
}
val = ((u32)range->cpu_addr) & V3_PCI_MAP_M_MAP_ADR;
val = ((u32)cpu_addr) & V3_PCI_MAP_M_MAP_ADR;
switch (range->size) {
switch (resource_size(entry->res)) {
case SZ_1M:
val |= V3_LB_BASE_ADR_SIZE_1MB;
break;
@ -682,8 +669,8 @@ static int v3_get_dma_range_config(struct v3_pci *v3,
dev_dbg(dev,
"DMA MEM CPU: 0x%016llx -> 0x%016llx => "
"PCI: 0x%016llx -> 0x%016llx base %08x map %08x\n",
range->cpu_addr, cpu_end,
range->pci_addr, pci_end,
cpu_addr, cpu_end,
pci_addr, pci_end,
*pci_base, *pci_map);
return 0;
@ -692,24 +679,16 @@ static int v3_get_dma_range_config(struct v3_pci *v3,
static int v3_pci_parse_map_dma_ranges(struct v3_pci *v3,
struct device_node *np)
{
struct of_pci_range range;
struct of_pci_range_parser parser;
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(v3);
struct device *dev = v3->dev;
struct resource_entry *entry;
int i = 0;
if (of_pci_dma_range_parser_init(&parser, np)) {
dev_err(dev, "missing dma-ranges property\n");
return -EINVAL;
}
/*
* Get the dma-ranges from the device tree
*/
for_each_of_pci_range(&parser, &range) {
resource_list_for_each_entry(entry, &bridge->dma_ranges) {
int ret;
u32 pci_base, pci_map;
ret = v3_get_dma_range_config(v3, &range, &pci_base, &pci_map);
ret = v3_get_dma_range_config(v3, entry, &pci_base, &pci_map);
if (ret)
return ret;
@ -732,7 +711,6 @@ static int v3_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
resource_size_t io_base;
struct resource *regs;
struct resource_entry *win;
struct v3_pci *v3;
@ -741,7 +719,6 @@ static int v3_pci_probe(struct platform_device *pdev)
u16 val;
int irq;
int ret;
LIST_HEAD(res);
host = pci_alloc_host_bridge(sizeof(*v3));
if (!host)
@ -793,12 +770,8 @@ static int v3_pci_probe(struct platform_device *pdev)
if (IS_ERR(v3->config_base))
return PTR_ERR(v3->config_base);
ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
&io_base);
if (ret)
return ret;
ret = devm_request_pci_bus_resources(dev, &res);
ret = pci_parse_request_of_pci_ranges(dev, &host->windows,
&host->dma_ranges, NULL);
if (ret)
return ret;
@ -852,8 +825,8 @@ static int v3_pci_probe(struct platform_device *pdev)
writew(val, v3->base + V3_PCI_CMD);
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry(win, &res) {
ret = v3_pci_setup_resource(v3, io_base, host, win);
resource_list_for_each_entry(win, &host->windows) {
ret = v3_pci_setup_resource(v3, host, win);
if (ret) {
dev_err(dev, "error setting up resources\n");
return ret;
@ -931,7 +904,6 @@ static int v3_pci_probe(struct platform_device *pdev)
val |= V3_SYSTEM_M_LOCK;
writew(val, v3->base + V3_SYSTEM);
list_splice_init(&res, &host->windows);
ret = pci_scan_root_bus_bridge(host);
if (ret) {
dev_err(dev, "failed to register host: %d\n", ret);


@ -62,65 +62,16 @@ static struct pci_ops pci_versatile_ops = {
.write = pci_generic_config_write,
};
static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *res)
{
int err, mem = 1, res_valid = 0;
resource_size_t iobase;
struct resource_entry *win, *tmp;
err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase);
if (err)
return err;
err = devm_request_pci_bus_resources(dev, res);
if (err)
goto out_release_res;
resource_list_for_each_entry_safe(win, tmp, res) {
struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
err = devm_pci_remap_iospace(dev, res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
resource_list_destroy_entry(win);
}
break;
case IORESOURCE_MEM:
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
writel(res->start >> 28, PCI_IMAP(mem));
writel(PHYS_OFFSET >> 28, PCI_SMAP(mem));
mem++;
break;
}
}
if (res_valid)
return 0;
dev_err(dev, "non-prefetchable memory resource required\n");
err = -EINVAL;
out_release_res:
pci_free_resource_list(res);
return err;
}
static int versatile_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
int ret, i, myslot = -1;
struct resource_entry *entry;
int ret, i, myslot = -1, mem = 1;
u32 val;
void __iomem *local_pci_cfg_base;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
LIST_HEAD(pci_res);
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
@ -141,10 +92,19 @@ static int versatile_pci_probe(struct platform_device *pdev)
if (IS_ERR(versatile_cfg_base[1]))
return PTR_ERR(versatile_cfg_base[1]);
ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res);
ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
NULL, NULL);
if (ret)
return ret;
resource_list_for_each_entry(entry, &bridge->windows) {
if (resource_type(entry->res) == IORESOURCE_MEM) {
writel(entry->res->start >> 28, PCI_IMAP(mem));
writel(__pa(PAGE_OFFSET) >> 28, PCI_SMAP(mem));
mem++;
}
}
/*
* We need to discover the PCI core first to configure itself
* before the main PCI probing is performed
@ -177,9 +137,9 @@ static int versatile_pci_probe(struct platform_device *pdev)
/*
* Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
*/
writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0);
writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1);
writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_0);
writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_1);
writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_2);
/*
* For many years the kernel and QEMU were symbiotically buggy
@ -197,7 +157,6 @@ static int versatile_pci_probe(struct platform_device *pdev)
pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
pci_add_flags(PCI_REASSIGN_ALL_BUS);
list_splice_init(&pci_res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = NULL;
bridge->busnr = 0;


@ -405,15 +405,13 @@ static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
xgene_pcie_writel(port, CFGCTL, EN_REG);
}
static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
struct list_head *res,
resource_size_t io_base)
static int xgene_pcie_map_ranges(struct xgene_pcie_port *port)
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
struct resource_entry *window;
struct device *dev = port->dev;
int ret;
resource_list_for_each_entry(window, res) {
resource_list_for_each_entry(window, &bridge->windows) {
struct resource *res = window->res;
u64 restype = resource_type(res);
@ -421,11 +419,9 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
switch (restype) {
case IORESOURCE_IO:
xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
xgene_pcie_setup_ob_reg(port, res, OMR3BARL,
pci_pio_to_address(res->start),
res->start - window->offset);
ret = devm_pci_remap_iospace(dev, res, io_base);
if (ret < 0)
return ret;
break;
case IORESOURCE_MEM:
if (res->flags & IORESOURCE_PREFETCH)
@ -485,27 +481,28 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
}
static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
struct of_pci_range *range, u8 *ib_reg_mask)
struct resource_entry *entry,
u8 *ib_reg_mask)
{
void __iomem *cfg_base = port->cfg_base;
struct device *dev = port->dev;
void *bar_addr;
u32 pim_reg;
u64 cpu_addr = range->cpu_addr;
u64 pci_addr = range->pci_addr;
u64 size = range->size;
u64 cpu_addr = entry->res->start;
u64 pci_addr = cpu_addr - entry->offset;
u64 size = resource_size(entry->res);
u64 mask = ~(size - 1) | EN_REG;
u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
u32 bar_low;
int region;
region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
region = xgene_pcie_select_ib_reg(ib_reg_mask, size);
if (region < 0) {
dev_warn(dev, "invalid pcie dma-range config\n");
return;
}
if (range->flags & IORESOURCE_PREFETCH)
if (entry->res->flags & IORESOURCE_PREFETCH)
flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
@ -536,25 +533,13 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
{
struct device_node *np = port->node;
struct of_pci_range range;
struct of_pci_range_parser parser;
struct device *dev = port->dev;
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
struct resource_entry *entry;
u8 ib_reg_mask = 0;
if (of_pci_dma_range_parser_init(&parser, np)) {
dev_err(dev, "missing dma-ranges property\n");
return -EINVAL;
}
resource_list_for_each_entry(entry, &bridge->dma_ranges)
xgene_pcie_setup_ib_reg(port, entry, &ib_reg_mask);
/* Get the dma-ranges from DT */
for_each_of_pci_range(&parser, &range) {
u64 end = range.cpu_addr + range.size - 1;
dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
range.flags, range.cpu_addr, end, range.pci_addr);
xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
}
return 0;
}
@ -567,8 +552,7 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
xgene_pcie_writel(port, i, 0);
}
static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res,
resource_size_t io_base)
static int xgene_pcie_setup(struct xgene_pcie_port *port)
{
struct device *dev = port->dev;
u32 val, lanes = 0, speed = 0;
@ -580,7 +564,7 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res,
val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
xgene_pcie_writel(port, BRIDGE_CFG_0, val);
ret = xgene_pcie_map_ranges(port, res, io_base);
ret = xgene_pcie_map_ranges(port);
if (ret)
return ret;
@ -607,11 +591,9 @@ static int xgene_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
struct xgene_pcie_port *port;
resource_size_t iobase = 0;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
int ret;
LIST_HEAD(res);
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
if (!bridge)
@ -634,20 +616,15 @@ static int xgene_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
&iobase);
ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
&bridge->dma_ranges, NULL);
if (ret)
return ret;
ret = devm_request_pci_bus_resources(dev, &res);
ret = xgene_pcie_setup(port);
if (ret)
goto error;
return ret;
ret = xgene_pcie_setup(port, &res, iobase);
if (ret)
goto error;
list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = port;
bridge->busnr = 0;
@ -657,7 +634,7 @@ static int xgene_pcie_probe(struct platform_device *pdev)
ret = pci_scan_root_bus_bridge(bridge);
if (ret < 0)
goto error;
return ret;
bus = bridge->bus;
@ -666,10 +643,6 @@ static int xgene_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
error:
pci_free_resource_list(&res);
return ret;
}
static const struct of_device_id xgene_pcie_match_table[] = {


@ -92,7 +92,6 @@ struct altera_pcie {
u8 root_bus_nr;
struct irq_domain *irq_domain;
struct resource bus_range;
struct list_head resources;
const struct altera_pcie_data *pcie_data;
};
@ -670,39 +669,6 @@ static void altera_pcie_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
{
int err, res_valid = 0;
struct device *dev = &pcie->pdev->dev;
struct resource_entry *win;
err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
&pcie->resources, NULL);
if (err)
return err;
err = devm_request_pci_bus_resources(dev, &pcie->resources);
if (err)
goto out_release_res;
resource_list_for_each_entry(win, &pcie->resources) {
struct resource *res = win->res;
if (resource_type(res) == IORESOURCE_MEM)
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
}
if (res_valid)
return 0;
dev_err(dev, "non-prefetchable memory resource required\n");
err = -EINVAL;
out_release_res:
pci_free_resource_list(&pcie->resources);
return err;
}
static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
@ -833,9 +799,8 @@ static int altera_pcie_probe(struct platform_device *pdev)
return ret;
}
INIT_LIST_HEAD(&pcie->resources);
ret = altera_pcie_parse_request_of_pci_ranges(pcie);
ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
&bridge->dma_ranges, NULL);
if (ret) {
dev_err(dev, "Failed add resources\n");
return ret;
@ -853,7 +818,6 @@ static int altera_pcie_probe(struct platform_device *pdev)
cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
altera_pcie_host_init(pcie);
list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
@ -884,7 +848,6 @@ static int altera_pcie_remove(struct platform_device *pdev)
pci_stop_root_bus(bridge->bus);
pci_remove_root_bus(bridge->bus);
pci_free_resource_list(&pcie->resources);
altera_pcie_irq_teardown(pcie);
return 0;


@ -43,8 +43,6 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
struct iproc_pcie *pcie;
struct device_node *np = dev->of_node;
struct resource reg;
resource_size_t iobase = 0;
LIST_HEAD(resources);
struct pci_host_bridge *bridge;
int ret;
@ -97,8 +95,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
if (IS_ERR(pcie->phy))
return PTR_ERR(pcie->phy);
ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources,
&iobase);
ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
&bridge->dma_ranges, NULL);
if (ret) {
dev_err(dev, "unable to get PCI host bridge resources\n");
return ret;
@ -113,10 +111,9 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
pcie->map_irq = of_irq_parse_and_map_pci;
}
ret = iproc_pcie_setup(pcie, &resources);
ret = iproc_pcie_setup(pcie, &bridge->windows);
if (ret) {
dev_err(dev, "PCIe controller setup failed\n");
pci_free_resource_list(&resources);
return ret;
}


@ -1122,15 +1122,16 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
}
static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
struct of_pci_range *range,
struct resource_entry *entry,
enum iproc_pcie_ib_map_type type)
{
struct device *dev = pcie->dev;
struct iproc_pcie_ib *ib = &pcie->ib;
int ret;
unsigned int region_idx, size_idx;
u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr;
resource_size_t size = range->size;
u64 axi_addr = entry->res->start;
u64 pci_addr = entry->res->start - entry->offset;
resource_size_t size = resource_size(entry->res);
/* iterate through all IARR mapping regions */
for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
@ -1182,66 +1183,19 @@ err_ib:
return ret;
}
static int iproc_pcie_add_dma_range(struct device *dev,
struct list_head *resources,
struct of_pci_range *range)
{
struct resource *res;
struct resource_entry *entry, *tmp;
struct list_head *head = resources;
res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
if (!res)
return -ENOMEM;
resource_list_for_each_entry(tmp, resources) {
if (tmp->res->start < range->cpu_addr)
head = &tmp->node;
}
res->start = range->cpu_addr;
res->end = res->start + range->size - 1;
entry = resource_list_create_entry(res, 0);
if (!entry)
return -ENOMEM;
entry->offset = res->start - range->cpu_addr;
resource_list_add(entry, head);
return 0;
}
static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
{
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
struct of_pci_range range;
struct of_pci_range_parser parser;
int ret;
LIST_HEAD(resources);
struct resource_entry *entry;
int ret = 0;
/* Get the dma-ranges from DT */
ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node);
if (ret)
return ret;
for_each_of_pci_range(&parser, &range) {
ret = iproc_pcie_add_dma_range(pcie->dev,
&resources,
&range);
if (ret)
goto out;
resource_list_for_each_entry(entry, &host->dma_ranges) {
/* Each range entry corresponds to an inbound mapping region */
ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
ret = iproc_pcie_setup_ib(pcie, entry, IPROC_PCIE_IB_MAP_MEM);
if (ret)
goto out;
break;
}
list_splice_init(&resources, &host->dma_ranges);
return 0;
out:
pci_free_resource_list(&resources);
return ret;
}
@ -1302,13 +1256,16 @@ static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
{
int ret;
struct of_pci_range range;
struct resource_entry entry;
memset(&range, 0, sizeof(range));
range.size = SZ_32K;
range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1);
memset(&entry, 0, sizeof(entry));
entry.res = &entry.__res;
ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO);
msi_addr &= ~(SZ_32K - 1);
entry.res->start = msi_addr;
entry.res->end = msi_addr + SZ_32K - 1;
ret = iproc_pcie_setup_ib(pcie, &entry, IPROC_PCIE_IB_MAP_IO);
return ret;
}
@ -1524,10 +1481,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
return ret;
}
ret = devm_request_pci_bus_resources(dev, res);
if (ret)
return ret;
ret = phy_init(pcie->phy);
if (ret) {
dev_err(dev, "unable to initialize PCIe PHY\n");
@ -1571,7 +1524,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (iproc_pcie_msi_enable(pcie))
dev_info(dev, "not using iProc MSI\n");
list_splice_init(res, &host->windows);
host->busnr = 0;
host->dev.parent = dev;
host->ops = &iproc_pcie_ops;


@ -216,7 +216,6 @@ struct mtk_pcie {
void __iomem *base;
struct clk *free_ck;
struct resource mem;
struct list_head ports;
const struct mtk_pcie_soc *soc;
unsigned int busnr;
@ -661,11 +660,19 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
struct resource *mem = &pcie->mem;
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
struct resource *mem = NULL;
struct resource_entry *entry;
const struct mtk_pcie_soc *soc = port->pcie->soc;
u32 val;
int err;
entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
if (entry)
mem = entry->res;
if (!mem)
return -EINVAL;
/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
if (pcie->base) {
val = readl(pcie->base + PCIE_SYS_CFG_V2);
@ -1023,39 +1030,15 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
struct mtk_pcie_port *port, *tmp;
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
struct list_head *windows = &host->windows;
struct resource_entry *win, *tmp_win;
resource_size_t io_base;
struct resource *bus;
int err;
err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
windows, &io_base);
err = pci_parse_request_of_pci_ranges(dev, windows,
&host->dma_ranges, &bus);
if (err)
return err;
err = devm_request_pci_bus_resources(dev, windows);
if (err < 0)
return err;
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry_safe(win, tmp_win, windows) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
err = devm_pci_remap_iospace(dev, win->res, io_base);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, win->res);
resource_list_destroy_entry(win);
}
break;
case IORESOURCE_MEM:
memcpy(&pcie->mem, win->res, sizeof(*win->res));
pcie->mem.name = "non-prefetchable";
break;
case IORESOURCE_BUS:
pcie->busnr = win->res->start;
break;
}
}
pcie->busnr = bus->start;
for_each_available_child_of_node(node, child) {
int slot;


@ -140,7 +140,6 @@ struct mobiveil_msi { /* MSI information */
struct mobiveil_pcie {
struct platform_device *pdev;
struct list_head resources;
void __iomem *config_axi_slave_base; /* endpoint config base */
void __iomem *csr_axi_slave_base; /* root port config base */
void __iomem *apb_csr_base; /* MSI register base */
@ -580,6 +579,7 @@ static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
static int mobiveil_host_init(struct mobiveil_pcie *pcie)
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
u32 value, pab_ctrl, type;
struct resource_entry *win;
@ -636,7 +636,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry(win, &pcie->resources) {
resource_list_for_each_entry(win, &bridge->windows) {
if (resource_type(win->res) == IORESOURCE_MEM)
type = MEM_WINDOW_TYPE;
else if (resource_type(win->res) == IORESOURCE_IO)
@ -862,7 +862,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
struct pci_bus *child;
struct pci_host_bridge *bridge;
struct device *dev = &pdev->dev;
resource_size_t iobase;
int ret;
/* allocate the PCIe port */
@ -880,11 +879,9 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
return ret;
}
INIT_LIST_HEAD(&pcie->resources);
/* parse the host bridge base addresses from the device tree file */
ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
&pcie->resources, &iobase);
ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
&bridge->dma_ranges, NULL);
if (ret) {
dev_err(dev, "Getting bridge resources failed\n");
return ret;
@ -897,24 +894,19 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
ret = mobiveil_host_init(pcie);
if (ret) {
dev_err(dev, "Failed to initialize host\n");
goto error;
return ret;
}
/* initialize the IRQ domains */
ret = mobiveil_pcie_init_irq_domain(pcie);
if (ret) {
dev_err(dev, "Failed creating IRQ Domain\n");
goto error;
return ret;
}
irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
ret = devm_request_pci_bus_resources(dev, &pcie->resources);
if (ret)
goto error;
/* Initialize bridge */
list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
@ -925,13 +917,13 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
ret = mobiveil_bringup_link(pcie);
if (ret) {
dev_info(dev, "link bring-up failed\n");
goto error;
return ret;
}
/* setup the kernel resources for the newly added PCIe root bus */
ret = pci_scan_root_bus_bridge(bridge);
if (ret)
goto error;
return ret;
bus = bridge->bus;
@ -941,9 +933,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
pci_bus_add_devices(bus);
return 0;
error:
pci_free_resource_list(&pcie->resources);
return ret;
}
static const struct of_device_id mobiveil_pcie_of_match[] = {


@ -1017,16 +1017,16 @@ err_irq1:
}
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
struct of_pci_range *range,
struct resource_entry *entry,
int *index)
{
u64 restype = range->flags;
u64 cpu_addr = range->cpu_addr;
u64 cpu_end = range->cpu_addr + range->size;
u64 pci_addr = range->pci_addr;
u64 restype = entry->res->flags;
u64 cpu_addr = entry->res->start;
u64 cpu_end = entry->res->end;
u64 pci_addr = entry->res->start - entry->offset;
u32 flags = LAM_64BIT | LAR_ENABLE;
u64 mask;
u64 size;
u64 size = resource_size(entry->res);
int idx = *index;
if (restype & IORESOURCE_PREFETCH)
@ -1046,9 +1046,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
unsigned long nr_zeros = __ffs64(cpu_addr);
u64 alignment = 1ULL << nr_zeros;
size = min(range->size, alignment);
} else {
size = range->size;
size = min(size, alignment);
}
/* Hardware supports max 4GiB inbound region */
size = min(size, 1ULL << 32);
@ -1081,30 +1079,19 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
return 0;
}
static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
struct device_node *np)
static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie)
{
struct of_pci_range range;
struct of_pci_range_parser parser;
int index = 0;
int err;
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
struct resource_entry *entry;
int index = 0, err = 0;
if (of_pci_dma_range_parser_init(&parser, np))
return -EINVAL;
/* Get the dma-ranges from DT */
for_each_of_pci_range(&parser, &range) {
u64 end = range.cpu_addr + range.size - 1;
dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
range.flags, range.cpu_addr, end, range.pci_addr);
err = rcar_pcie_inbound_ranges(pcie, &range, &index);
resource_list_for_each_entry(entry, &bridge->dma_ranges) {
err = rcar_pcie_inbound_ranges(pcie, entry, &index);
if (err)
return err;
break;
}
return 0;
return err;
}
static const struct of_device_id rcar_pcie_of_match[] = {
@ -1141,7 +1128,8 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
platform_set_drvdata(pdev, pcie);
err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
err = pci_parse_request_of_pci_ranges(dev, &pcie->resources,
&bridge->dma_ranges, NULL);
if (err)
goto err_free_bridge;
@ -1164,7 +1152,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
goto err_unmap_msi_irqs;
}
err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
err = rcar_pcie_parse_map_dma_ranges(pcie);
if (err)
goto err_clk_disable;


@ -795,19 +795,28 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip);
struct resource_entry *entry;
u64 pci_addr, size;
int offset;
int err;
int reg_no;
rockchip_pcie_cfg_configuration_accesses(rockchip,
AXI_WRAPPER_TYPE0_CFG);
entry = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
if (!entry)
return -ENODEV;
for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
size = resource_size(entry->res);
pci_addr = entry->res->start - entry->offset;
rockchip->msg_bus_addr = pci_addr;
for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
AXI_WRAPPER_MEM_WRITE,
20 - 1,
rockchip->mem_bus_addr +
(reg_no << 20),
pci_addr + (reg_no << 20),
0);
if (err) {
dev_err(dev, "program RC mem outbound ATU failed\n");
@ -821,14 +830,20 @@ static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
return err;
}
offset = rockchip->mem_size >> 20;
for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
entry = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
if (!entry)
return -ENODEV;
size = resource_size(entry->res);
pci_addr = entry->res->start - entry->offset;
offset = size >> 20;
for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
err = rockchip_pcie_prog_ob_atu(rockchip,
reg_no + 1 + offset,
AXI_WRAPPER_IO_WRITE,
20 - 1,
rockchip->io_bus_addr +
(reg_no << 20),
pci_addr + (reg_no << 20),
0);
if (err) {
dev_err(dev, "program RC io outbound ATU failed\n");
@ -841,8 +856,7 @@ static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
AXI_WRAPPER_NOR_MSG,
20 - 1, 0, 0);
rockchip->msg_bus_addr = rockchip->mem_bus_addr +
((reg_no + offset) << 20);
rockchip->msg_bus_addr += ((reg_no + offset) << 20);
return err;
}
@ -935,14 +949,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
struct resource_entry *win;
resource_size_t io_base;
struct resource *mem;
struct resource *io;
struct resource *bus_res;
int err;
LIST_HEAD(res);
if (!dev->of_node)
return -ENODEV;
@ -980,56 +989,23 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (err < 0)
goto err_deinit_port;
err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
&res, &io_base);
err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
&bridge->dma_ranges, &bus_res);
if (err)
goto err_remove_irq_domain;
err = devm_request_pci_bus_resources(dev, &res);
if (err)
goto err_free_res;
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry(win, &res) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
io = win->res;
io->name = "I/O";
rockchip->io_size = resource_size(io);
rockchip->io_bus_addr = io->start - win->offset;
err = pci_remap_iospace(io, io_base);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, io);
continue;
}
rockchip->io = io;
break;
case IORESOURCE_MEM:
mem = win->res;
mem->name = "MEM";
rockchip->mem_size = resource_size(mem);
rockchip->mem_bus_addr = mem->start - win->offset;
break;
case IORESOURCE_BUS:
rockchip->root_bus_nr = win->res->start;
break;
default:
continue;
}
}
rockchip->root_bus_nr = bus_res->start;
err = rockchip_pcie_cfg_atu(rockchip);
if (err)
goto err_unmap_iospace;
goto err_remove_irq_domain;
rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
if (!rockchip->msg_region) {
err = -ENOMEM;
goto err_unmap_iospace;
goto err_remove_irq_domain;
}
list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = rockchip;
bridge->busnr = 0;
@ -1039,7 +1015,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
err = pci_scan_root_bus_bridge(bridge);
if (err < 0)
goto err_unmap_iospace;
goto err_remove_irq_domain;
bus = bridge->bus;
@ -1053,10 +1029,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
pci_bus_add_devices(bus);
return 0;
err_unmap_iospace:
pci_unmap_iospace(rockchip->io);
err_free_res:
pci_free_resource_list(&res);
err_remove_irq_domain:
irq_domain_remove(rockchip->irq_domain);
err_deinit_port:
@ -1080,7 +1052,6 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
pci_stop_root_bus(rockchip->root_bus);
pci_remove_root_bus(rockchip->root_bus);
pci_unmap_iospace(rockchip->io);
irq_domain_remove(rockchip->irq_domain);
rockchip_pcie_deinit_phys(rockchip);


@ -304,13 +304,8 @@ struct rockchip_pcie {
struct irq_domain *irq_domain;
int offset;
struct pci_bus *root_bus;
struct resource *io;
phys_addr_t io_bus_addr;
u32 io_size;
void __iomem *msg_region;
u32 mem_size;
phys_addr_t msg_bus_addr;
phys_addr_t mem_bus_addr;
bool is_rc;
struct resource *mem_res;
};


@ -821,8 +821,6 @@ static int nwl_pcie_probe(struct platform_device *pdev)
struct pci_bus *child;
struct pci_host_bridge *bridge;
int err;
resource_size_t iobase = 0;
LIST_HEAD(res);
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!bridge)
@ -845,24 +843,19 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
&iobase);
err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
&bridge->dma_ranges, NULL);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
}
err = devm_request_pci_bus_resources(dev, &res);
if (err)
goto error;
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
dev_err(dev, "Failed creating IRQ Domain\n");
goto error;
return err;
}
list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = pcie->root_busno;
@ -874,13 +867,13 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = nwl_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(dev, "failed to enable MSI support: %d\n", err);
goto error;
return err;
}
}
err = pci_scan_root_bus_bridge(bridge);
if (err)
goto error;
return err;
bus = bridge->bus;
@ -889,10 +882,6 @@ static int nwl_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
error:
pci_free_resource_list(&res);
return err;
}
static struct platform_driver nwl_pcie_driver = {


@ -619,8 +619,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
int err;
resource_size_t iobase = 0;
LIST_HEAD(res);
if (!dev->of_node)
return -ENODEV;
@ -647,19 +645,13 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
return err;
}
err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
&iobase);
err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
&bridge->dma_ranges, NULL);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
}
err = devm_request_pci_bus_resources(dev, &res);
if (err)
goto error;
list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = port;
bridge->busnr = 0;
@ -673,7 +665,7 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
#endif
err = pci_scan_root_bus_bridge(bridge);
if (err < 0)
goto error;
return err;
bus = bridge->bus;
@ -682,10 +674,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
error:
pci_free_resource_list(&res);
return err;
}
static const struct of_device_id xilinx_pcie_of_match[] = {


@ -236,7 +236,6 @@ void of_pci_check_probe_only(void)
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
#if defined(CONFIG_OF_ADDRESS)
/**
* devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
* host bridge resources from DT
@ -255,16 +254,18 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* It returns zero if the range parsing has been successful or a standard error
* value if it failed.
*/
int devm_of_pci_get_host_bridge_resources(struct device *dev,
static int devm_of_pci_get_host_bridge_resources(struct device *dev,
unsigned char busno, unsigned char bus_max,
struct list_head *resources, resource_size_t *io_base)
struct list_head *resources,
struct list_head *ib_resources,
resource_size_t *io_base)
{
struct device_node *dev_node = dev->of_node;
struct resource *res, tmp_res;
struct resource *bus_range;
struct of_pci_range range;
struct of_pci_range_parser parser;
char range_type[4];
const char *range_type;
int err;
if (io_base)
@ -298,12 +299,12 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
for_each_of_pci_range(&parser, &range) {
/* Read next ranges element */
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
snprintf(range_type, 4, " IO");
range_type = "IO";
else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
snprintf(range_type, 4, "MEM");
range_type = "MEM";
else
snprintf(range_type, 4, "err");
dev_info(dev, " %s %#010llx..%#010llx -> %#010llx\n",
range_type = "err";
dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n",
range_type, range.cpu_addr,
range.cpu_addr + range.size - 1, range.pci_addr);
@ -340,14 +341,54 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
pci_add_resource_offset(resources, res, res->start - range.pci_addr);
}
/* Check for dma-ranges property */
if (!ib_resources)
return 0;
err = of_pci_dma_range_parser_init(&parser, dev_node);
if (err)
return 0;
dev_dbg(dev, "Parsing dma-ranges property...\n");
for_each_of_pci_range(&parser, &range) {
struct resource_entry *entry;
/*
* If we failed translation or got a zero-sized region
* then skip this range
*/
if (((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM) ||
range.cpu_addr == OF_BAD_ADDR || range.size == 0)
continue;
dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n",
"IB MEM", range.cpu_addr,
range.cpu_addr + range.size - 1, range.pci_addr);
err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
if (err)
continue;
res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
if (!res) {
err = -ENOMEM;
goto failed;
}
/* Keep the resource list sorted */
resource_list_for_each_entry(entry, ib_resources)
if (entry->res->start > res->start)
break;
pci_add_resource_offset(&entry->node, res,
res->start - range.pci_addr);
}
return 0;
failed:
pci_free_resource_list(resources);
return err;
}
EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
#endif /* CONFIG_OF_ADDRESS */
#if IS_ENABLED(CONFIG_OF_IRQ)
/**
@ -482,6 +523,7 @@ EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
int pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources,
struct list_head *ib_resources,
struct resource **bus_range)
{
int err, res_valid = 0;
@ -489,8 +531,10 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
struct resource_entry *win, *tmp;
INIT_LIST_HEAD(resources);
if (ib_resources)
INIT_LIST_HEAD(ib_resources);
err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources,
&iobase);
ib_resources, &iobase);
if (err)
return err;
@ -530,6 +574,7 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
pci_free_resource_list(resources);
return err;
}
EXPORT_SYMBOL_GPL(pci_parse_request_of_pci_ranges);
#endif /* CONFIG_PCI */
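
The helper above fills bridge->dma_ranges with entries whose struct resource holds CPU addresses and whose ->offset is the CPU-to-PCI offset (res->start - range.pci_addr), so a driver recovers the PCI bus address as res->start - offset and the window size as resource_size(res); this is the same convention the faraday, v3-semi, xgene, iproc and rcar conversions above rely on. A hedged sketch of walking the list, with foo_pcie_walk_dma_ranges() as a hypothetical name and a dev_dbg() standing in where a real driver would program an inbound window:

static void foo_pcie_walk_dma_ranges(struct pci_host_bridge *bridge)
{
	struct resource_entry *entry;

	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		u64 cpu_addr = entry->res->start;		/* CPU (AXI) address */
		u64 pci_addr = cpu_addr - entry->offset;	/* PCI bus address */
		u64 size = resource_size(entry->res);

		dev_dbg(&bridge->dev, "inbound %pR -> PCI %#010llx, size %#llx\n",
			entry->res, (unsigned long long)pci_addr,
			(unsigned long long)size);
		/* a real driver programs one inbound window per entry here */
	}
}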


@ -641,19 +641,6 @@ static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
#endif /* CONFIG_OF */
#if defined(CONFIG_OF_ADDRESS)
int devm_of_pci_get_host_bridge_resources(struct device *dev,
unsigned char busno, unsigned char bus_max,
struct list_head *resources, resource_size_t *io_base);
#else
static inline int devm_of_pci_get_host_bridge_resources(struct device *dev,
unsigned char busno, unsigned char bus_max,
struct list_head *resources, resource_size_t *io_base)
{
return -EINVAL;
}
#endif
#ifdef CONFIG_PCIEAER
void pci_no_aer(void);
void pci_aer_init(struct pci_dev *dev);


@ -2260,6 +2260,7 @@ struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
int pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources,
struct list_head *ib_resources,
struct resource **bus_range);
/* Arch may override this (weak) */
@ -2268,9 +2269,11 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
#else /* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
static inline int pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources,
struct resource **bus_range)
static inline int
pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources,
struct list_head *ib_resources,
struct resource **bus_range)
{
return -EINVAL;
}


@ -66,4 +66,16 @@ resource_list_destroy_entry(struct resource_entry *entry)
#define resource_list_for_each_entry_safe(entry, tmp, list) \
list_for_each_entry_safe((entry), (tmp), (list), node)
static inline struct resource_entry *
resource_list_first_type(struct list_head *list, unsigned long type)
{
struct resource_entry *entry;
resource_list_for_each_entry(entry, list) {
if (resource_type(entry->res) == type)
return entry;
}
return NULL;
}
#endif /* _LINUX_RESOURCE_EXT_H */
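
This helper is what lets the mediatek and rockchip conversions above drop their cached copies of the MEM and I/O windows and look them up from bridge->windows on demand. A short usage sketch, assuming a struct pci_host_bridge *bridge is in scope:

	struct resource_entry *entry;
	struct resource *mem;

	entry = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
	if (!entry)
		return -EINVAL;		/* no MEM window in the DT "ranges" */
	mem = entry->res;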