Merge branch 'remotes/lorenzo/pci/dwc'
- Support multiple ATU memory regions (Rob Herring)
- Warn if non-prefetchable memory aperture is > 32-bit (Vidya Sagar)
- Allow programming ATU for >4GB memory (Vidya Sagar)
- Move ATU offset out of driver match data (Rob Herring)
- Move "dbi", "dbi2", and "addr_space" resource setup to common code (Rob Herring)
- Remove unneeded function wrappers (Rob Herring)
- Ensure all outbound ATU windows are reset to reduce dependencies on bootloader (Rob Herring)
- Use the default MSI irq_chip for dra7xx (Rob Herring)
- Drop the .set_num_vectors() host op (Rob Herring)
- Move MSI interrupt setup into DWC common code (Rob Herring)
- Rework and simplify DWC MSI initialization (Rob Herring)
- Move link handling to DWC common code (Rob Herring)
- Move dw_pcie_msi_init() calls to DWC common code (Rob Herring)
- Move dw_pcie_setup_rc() calls to DWC common code (Rob Herring)
- Remove unnecessary wrappers around dw_pcie_host_init() (Rob Herring)
- Revert "keystone: Drop duplicated 'num-viewport'" to prepare for detecting number of iATU regions without help from DT (Rob Herring)
- Move inbound and outbound windows to common struct (Rob Herring)
- Detect number of DWC iATU windows from device registers (Rob Herring)
- Drop samsung,exynos5440-pcie binding (Marek Szyprowski)
- Add samsung,exynos-pcie and samsung,exynos-pcie-phy bindings for Exynos5433 variant (Marek Szyprowski)
- Rework phy-exynos-pcie driver to support Exynos5433 PCIe PHY (Jaehoon Chung)
- Rework pci-exynos.c to support Exynos5433 PCIe host (Jaehoon Chung)
- Move tegra "dbi" accesses to post common DWC initialization (Vidya Sagar)
- Read tegra "dbi" base address in application logic (Vidya Sagar)
- Fix tegra ASPM-L1SS advertisement disable code (Vidya Sagar)
- Set Tegra194 DesignWare IP version to 0x490A (Vidya Sagar)
- Continue tegra unconfig sequence even if parts fail (Vidya Sagar)
- Check return value of tegra_pcie_init_controller() (Vidya Sagar)
- Disable tegra LTSSM during L2 entry (Vidya Sagar)
- Add SM8250 SoC PCIe DT bindings and support (Manivannan Sadhasivam)
- Add SM8250 BDF to SID mapping (Manivannan Sadhasivam)
- Set 32-bit DMA mask for DWC MSI target address allocation (Vidya Sagar)

* remotes/lorenzo/pci/dwc:
  PCI: dwc: Set 32-bit DMA mask for MSI target address allocation
  PCI: qcom: Add support for configuring BDF to SID mapping for SM8250
  PCI: qcom: Add SM8250 SoC support
  dt-bindings: pci: qcom: Document PCIe bindings for SM8250 SoC
  PCI: tegra: Disable LTSSM during L2 entry
  PCI: tegra: Check return value of tegra_pcie_init_controller()
  PCI: tegra: Continue unconfig sequence even if parts fail
  PCI: tegra: Set DesignWare IP version
  PCI: tegra: Fix ASPM-L1SS advertisement disable code
  PCI: tegra: Read "dbi" base address to program in application logic
  PCI: tegra: Move "dbi" accesses to post common DWC initialization
  PCI: dwc: exynos: Rework the driver to support Exynos5433 variant
  phy: samsung: phy-exynos-pcie: rework driver to support Exynos5433 PCIe PHY
  dt-bindings: phy: exynos: add the samsung,exynos-pcie-phy binding
  dt-bindings: PCI: exynos: add the samsung,exynos-pcie binding
  dt-bindings: PCI: exynos: drop samsung,exynos5440-pcie binding
  PCI: dwc: Detect number of iATU windows
  PCI: dwc: Move inbound and outbound windows to common struct
  Revert "PCI: dwc/keystone: Drop duplicated 'num-viewport'"
  PCI: dwc: Remove unnecessary wrappers around dw_pcie_host_init()
  PCI: dwc: Move dw_pcie_setup_rc() to DWC common code
  PCI: dwc: Move dw_pcie_msi_init() into core
  PCI: dwc: Move link handling into common code
  PCI: dwc: Rework MSI initialization
  PCI: dwc: Move MSI interrupt setup into DWC common code
  PCI: dwc: Drop the .set_num_vectors() host op
  PCI: dwc/dra7xx: Use the common MSI irq_chip
  PCI: dwc: Ensure all outbound ATU windows are reset
  PCI: dwc/intel-gw: Remove some unneeded function wrappers
  PCI: dwc: Move "dbi", "dbi2", and "addr_space" resource setup into common code
  PCI: dwc/intel-gw: Move ATU offset out of driver match data
  PCI: dwc: Add support to program ATU for >4GB memory
  PCI: of: Warn if non-prefetchable memory aperture size is > 32-bit
  PCI: dwc: Support multiple ATU memory regions
Commit: ff9f1683b6
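One item above deserves a brief illustration: the "Set 32-bit DMA mask for MSI target address allocation" change exists because the DWC MSI controller compares only the low 32 bits of the inbound write address, so the allocation backing the MSI target must come from memory below 4 GB. The following is a minimal, hypothetical sketch of that idea; the helper name and surrounding context are invented for illustration and this is not the mainline patch itself:

#include <linux/dma-mapping.h>
#include <linux/types.h>

/*
 * Illustrative sketch only: obtain a DMA address below 4 GB to use as
 * the MSI target. The memory is never read or written by the CPU; only
 * its bus address matters, because the DWC core latches a 32-bit MSI
 * address.
 */
static int dw_msi_target_alloc_sketch(struct device *dev, dma_addr_t *msi_addr)
{
	void *vaddr;
	int ret;

	/* Constrain both streaming and coherent DMA to 32-bit addresses. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	vaddr = dma_alloc_coherent(dev, sizeof(u64), msi_addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	return 0;
}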
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.txt b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
@@ -13,6 +13,7 @@
 		- "qcom,pcie-ipq8074" for ipq8074
 		- "qcom,pcie-qcs404" for qcs404
 		- "qcom,pcie-sdm845" for sdm845
+		- "qcom,pcie-sm8250" for sm8250
 
 - reg:
 	Usage: required
@@ -27,6 +28,7 @@
 		- "dbi"    DesignWare PCIe registers
 		- "elbi"   External local bus interface registers
 		- "config" PCIe configuration space
+		- "atu"    ATU address space (optional)
 
 - device_type:
 	Usage: required
@@ -131,7 +133,7 @@
 		- "slave_bus"	AXI Slave clock
 
 - clock-names:
-	Usage: required for sdm845
+	Usage: required for sdm845 and sm8250
 	Value type: <stringlist>
 	Definition: Should contain the following entries
 		- "aux"		Auxiliary clock
@@ -206,7 +208,7 @@
 		- "ahb"		AHB reset
 
 - reset-names:
-	Usage: required for sdm845
+	Usage: required for sdm845 and sm8250
 	Value type: <stringlist>
 	Definition: Should contain the following entries
 		- "pci"		PCIe core reset
diff --git a/Documentation/devicetree/bindings/pci/samsung,exynos-pcie.yaml b/Documentation/devicetree/bindings/pci/samsung,exynos-pcie.yaml (new file)
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/samsung,exynos-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC series PCIe Host Controller Device Tree Bindings
+
+maintainers:
+  - Marek Szyprowski <m.szyprowski@samsung.com>
+  - Jaehoon Chung <jh80.chung@samsung.com>
+
+description: |+
+  Exynos5433 SoC PCIe host controller is based on the Synopsys DesignWare
+  PCIe IP and thus inherits all the common properties defined in
+  designware-pcie.txt.
+
+allOf:
+  - $ref: /schemas/pci/pci-bus.yaml#
+
+properties:
+  compatible:
+    const: samsung,exynos5433-pcie
+
+  reg:
+    items:
+      - description: Data Bus Interface (DBI) registers.
+      - description: External Local Bus interface (ELBI) registers.
+      - description: PCIe configuration space region.
+
+  reg-names:
+    items:
+      - const: dbi
+      - const: elbi
+      - const: config
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: PCIe bridge clock
+      - description: PCIe bus clock
+
+  clock-names:
+    items:
+      - const: pcie
+      - const: pcie_bus
+
+  phys:
+    maxItems: 1
+
+  vdd10-supply:
+    description:
+      Phandle to a regulator that provides 1.0V power to the PCIe block.
+
+  vdd18-supply:
+    description:
+      Phandle to a regulator that provides 1.8V power to the PCIe block.
+
+  num-lanes:
+    const: 1
+
+  num-viewport:
+    const: 3
+
+required:
+  - reg
+  - reg-names
+  - interrupts
+  - "#address-cells"
+  - "#size-cells"
+  - "#interrupt-cells"
+  - interrupt-map
+  - interrupt-map-mask
+  - ranges
+  - bus-range
+  - device_type
+  - num-lanes
+  - num-viewport
+  - clocks
+  - clock-names
+  - phys
+  - vdd10-supply
+  - vdd18-supply
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/exynos5433.h>
+
+    pcie: pcie@15700000 {
+        compatible = "samsung,exynos5433-pcie";
+        reg = <0x15700000 0x1000>, <0x156b0000 0x1000>, <0x0c000000 0x1000>;
+        reg-names = "dbi", "elbi", "config";
+        #address-cells = <3>;
+        #size-cells = <2>;
+        #interrupt-cells = <1>;
+        device_type = "pci";
+        interrupts = <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&cmu_fsys CLK_PCIE>, <&cmu_fsys CLK_PCLK_PCIE_PHY>;
+        clock-names = "pcie", "pcie_bus";
+        phys = <&pcie_phy>;
+        pinctrl-0 = <&pcie_bus &pcie_wlanen>;
+        pinctrl-names = "default";
+        num-lanes = <1>;
+        num-viewport = <3>;
+        bus-range = <0x00 0xff>;
+        ranges = <0x81000000 0 0          0x0c001000 0 0x00010000>,
+                 <0x82000000 0 0x0c011000 0x0c011000 0 0x03feefff>;
+        vdd10-supply = <&ldo6_reg>;
+        vdd18-supply = <&ldo7_reg>;
+        interrupt-map-mask = <0 0 0 0>;
+        interrupt-map = <0 0 0 0 &gic GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt b/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt (deleted)
@@ -1,58 +0,0 @@
-* Samsung Exynos 5440 PCIe interface
-
-This PCIe host controller is based on the Synopsys DesignWare PCIe IP
-and thus inherits all the common properties defined in designware-pcie.txt.
-
-Required properties:
-- compatible: "samsung,exynos5440-pcie"
-- reg: base addresses and lengths of the PCIe controller,
-- reg-names : First name should be set to "elbi".
-	And use the "config" instead of getting the configuration address space
-	from "ranges".
-	NOTE: When using the "config" property, reg-names must be set.
-- interrupts: A list of interrupt outputs for level interrupt,
-	pulse interrupt, special interrupt.
-- phys: From PHY binding. Phandle for the generic PHY.
-	Refer to Documentation/devicetree/bindings/phy/samsung-phy.txt
-
-For other common properties, refer to
-	Documentation/devicetree/bindings/pci/designware-pcie.txt
-
-Example:
-
-SoC-specific DT Entry (with using PHY framework):
-
-	pcie_phy0: pcie-phy@270000 {
-		...
-		reg = <0x270000 0x1000>, <0x271000 0x40>;
-		reg-names = "phy", "block";
-		...
-	};
-
-	pcie@290000 {
-		compatible = "samsung,exynos5440-pcie", "snps,dw-pcie";
-		reg = <0x290000 0x1000>, <0x40000000 0x1000>;
-		reg-names = "elbi", "config";
-		clocks = <&clock 28>, <&clock 27>;
-		clock-names = "pcie", "pcie_bus";
-		#address-cells = <3>;
-		#size-cells = <2>;
-		device_type = "pci";
-		phys = <&pcie_phy0>;
-		ranges = <0x81000000 0 0          0x60001000 0 0x00010000
-			  0x82000000 0 0x60011000 0x60011000 0 0x1ffef000>;
-		#interrupt-cells = <1>;
-		interrupt-map-mask = <0 0 0 0>;
-		interrupt-map = <0 0 0 0 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
-		num-lanes = <4>;
-	};
-
-Board-specific DT Entry:
-
-	pcie@290000 {
-		reset-gpio = <&pin_ctrl 5 0>;
-	};
-
-	pcie@2a0000 {
-		reset-gpio = <&pin_ctrl 22 0>;
-	};
diff --git a/Documentation/devicetree/bindings/phy/samsung,exynos-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,exynos-pcie-phy.yaml (new file)
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/samsung,exynos-pcie-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC series PCIe PHY Device Tree Bindings
+
+maintainers:
+  - Marek Szyprowski <m.szyprowski@samsung.com>
+  - Jaehoon Chung <jh80.chung@samsung.com>
+
+properties:
+  "#phy-cells":
+    const: 0
+
+  compatible:
+    const: samsung,exynos5433-pcie-phy
+
+  reg:
+    maxItems: 1
+
+  samsung,pmu-syscon:
+    $ref: '/schemas/types.yaml#/definitions/phandle'
+    description: phandle for PMU system controller interface, used to
+                 control PMU registers bits for PCIe PHY
+
+  samsung,fsys-sysreg:
+    $ref: '/schemas/types.yaml#/definitions/phandle'
+    description: phandle for FSYS sysreg interface, used to control
+                 sysreg registers bits for PCIe PHY
+
+required:
+  - "#phy-cells"
+  - compatible
+  - reg
+  - samsung,pmu-syscon
+  - samsung,fsys-sysreg
+
+additionalProperties: false
+
+examples:
+  - |
+    pcie_phy: pcie-phy@15680000 {
+        compatible = "samsung,exynos5433-pcie-phy";
+        reg = <0x15680000 0x1000>;
+        samsung,pmu-syscon = <&pmu_system_controller>;
+        samsung,fsys-sysreg = <&syscon_fsys>;
+        #phy-cells = <0>;
+    };
+...
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
@@ -83,10 +83,15 @@ config PCIE_DW_PLAT_EP
 	  selected.
 
 config PCI_EXYNOS
-	bool "Samsung Exynos PCIe controller"
-	depends on SOC_EXYNOS5440 || COMPILE_TEST
+	tristate "Samsung Exynos PCIe controller"
+	depends on ARCH_EXYNOS || COMPILE_TEST
 	depends on PCI_MSI_IRQ_DOMAIN
 	select PCIE_DW_HOST
+	help
+	  Enables support for the PCIe controller in the Samsung Exynos SoCs
+	  to work in host mode. The PCI controller is based on the DesignWare
+	  hardware and therefore the driver re-uses the DesignWare core
+	  functions to implement the driver.
 
 config PCI_IMX6
 	bool "Freescale i.MX6/7/8 PCIe controller"
@@ -169,6 +174,7 @@ config PCIE_QCOM
 	depends on OF && (ARCH_QCOM || COMPILE_TEST)
 	depends on PCI_MSI_IRQ_DOMAIN
 	select PCIE_DW_HOST
+	select CRC8
 	help
 	  Say Y here to enable PCIe controller support on Qualcomm SoCs. The
 	  PCIe controller uses the DesignWare core plus Qualcomm-specific
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -181,11 +181,6 @@ static int dra7xx_pcie_host_init(struct pcie_port *pp)
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 
-	dw_pcie_setup_rc(pp);
-
-	dra7xx_pcie_establish_link(pci);
-	dw_pcie_wait_for_link(pci);
-	dw_pcie_msi_init(pp);
 	dra7xx_pcie_enable_interrupts(dra7xx);
 
 	return 0;
@@ -377,133 +372,8 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
 	return 0;
 }
 
-static void dra7xx_pcie_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
-{
-	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
-	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-	u64 msi_target;
-
-	msi_target = (u64)pp->msi_data;
-
-	msg->address_lo = lower_32_bits(msi_target);
-	msg->address_hi = upper_32_bits(msi_target);
-
-	msg->data = d->hwirq;
-
-	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
-		(int)d->hwirq, msg->address_hi, msg->address_lo);
-}
-
-static int dra7xx_pcie_msi_set_affinity(struct irq_data *d,
-					const struct cpumask *mask,
-					bool force)
-{
-	return -EINVAL;
-}
-
-static void dra7xx_pcie_bottom_mask(struct irq_data *d)
-{
-	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
-	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-	unsigned int res, bit, ctrl;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&pp->lock, flags);
-
-	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
-	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
-	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
-
-	pp->irq_mask[ctrl] |= BIT(bit);
-	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
-			   pp->irq_mask[ctrl]);
-
-	raw_spin_unlock_irqrestore(&pp->lock, flags);
-}
-
-static void dra7xx_pcie_bottom_unmask(struct irq_data *d)
-{
-	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
-	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-	unsigned int res, bit, ctrl;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&pp->lock, flags);
-
-	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
-	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
-	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
-
-	pp->irq_mask[ctrl] &= ~BIT(bit);
-	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
-			   pp->irq_mask[ctrl]);
-
-	raw_spin_unlock_irqrestore(&pp->lock, flags);
-}
-
-static void dra7xx_pcie_bottom_ack(struct irq_data *d)
-{
-	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
-	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-	unsigned int res, bit, ctrl;
-
-	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
-	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
-	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
-
-	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
-}
-
-static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = {
-	.name = "DRA7XX-PCI-MSI",
-	.irq_ack = dra7xx_pcie_bottom_ack,
-	.irq_compose_msi_msg = dra7xx_pcie_setup_msi_msg,
-	.irq_set_affinity = dra7xx_pcie_msi_set_affinity,
-	.irq_mask = dra7xx_pcie_bottom_mask,
-	.irq_unmask = dra7xx_pcie_bottom_unmask,
-};
-
-static int dra7xx_pcie_msi_host_init(struct pcie_port *pp)
-{
-	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-	struct device *dev = pci->dev;
-	u32 ctrl, num_ctrls;
-	int ret;
-
-	pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip;
-
-	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
-	/* Initialize IRQ Status array */
-	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
-		pp->irq_mask[ctrl] = ~0;
-		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
-				   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
-				   pp->irq_mask[ctrl]);
-		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
-				   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
-				   ~0);
-	}
-
-	ret = dw_pcie_allocate_domains(pp);
-	if (ret)
-		return ret;
-
-	pp->msi_data = dma_map_single_attrs(dev, &pp->msi_msg,
-					    sizeof(pp->msi_msg),
-					    DMA_FROM_DEVICE,
-					    DMA_ATTR_SKIP_CPU_SYNC);
-	ret = dma_mapping_error(dev, pp->msi_data);
-	if (ret) {
-		dev_err(dev, "Failed to map MSI data\n");
-		pp->msi_data = 0;
-		dw_pcie_free_msi(pp);
-	}
-	return ret;
-}
-
 static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
 	.host_init = dra7xx_pcie_host_init,
-	.msi_host_init = dra7xx_pcie_msi_host_init,
 };
 
 static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
@@ -578,7 +448,6 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
 {
 	int ret;
 	struct dw_pcie_ep *ep;
-	struct resource *res;
 	struct device *dev = &pdev->dev;
 	struct dw_pcie *pci = dra7xx->pci;
 
@@ -594,13 +463,6 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
 	if (IS_ERR(pci->dbi_base2))
 		return PTR_ERR(pci->dbi_base2);
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
-	if (!res)
-		return -EINVAL;
-
-	ep->phys_base = res->start;
-	ep->addr_size = resource_size(res);
-
 	ret = dw_pcie_ep_init(ep);
 	if (ret) {
 		dev_err(dev, "failed to initialize endpoint\n");
@@ -622,6 +484,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
 	if (pp->irq < 0)
 		return pp->irq;
 
+	/* MSI IRQ is muxed */
+	pp->msi_irq = -ENODEV;
+
 	ret = dra7xx_pcie_init_irq_domain(pp);
 	if (ret < 0)
 		return ret;
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
@@ -2,26 +2,23 @@
 /*
  * PCIe host controller driver for Samsung Exynos SoCs
  *
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2013-2020 Samsung Electronics Co., Ltd.
+ *		https://www.samsung.com
  *
  * Author: Jingoo Han <jg1.han@samsung.com>
+ *	   Jaehoon Chung <jh80.chung@samsung.com>
  */
 
 #include <linux/clk.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/of_device.h>
-#include <linux/of_gpio.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
 #include <linux/resource.h>
 #include <linux/signal.h>
 #include <linux/types.h>
+#include <linux/regulator/consumer.h>
 
 #include "pcie-designware.h"
 
@@ -37,102 +34,43 @@
 #define PCIE_IRQ_SPECIAL		0x008
 #define PCIE_IRQ_EN_PULSE		0x00c
 #define PCIE_IRQ_EN_LEVEL		0x010
-#define IRQ_MSI_ENABLE			BIT(2)
 #define PCIE_IRQ_EN_SPECIAL		0x014
-#define PCIE_PWR_RESET			0x018
+#define PCIE_SW_WAKE			0x018
+#define PCIE_BUS_EN			BIT(1)
 #define PCIE_CORE_RESET			0x01c
 #define PCIE_CORE_RESET_ENABLE		BIT(0)
 #define PCIE_STICKY_RESET		0x020
 #define PCIE_NONSTICKY_RESET		0x024
 #define PCIE_APP_INIT_RESET		0x028
 #define PCIE_APP_LTSSM_ENABLE		0x02c
-#define PCIE_ELBI_RDLH_LINKUP		0x064
+#define PCIE_ELBI_RDLH_LINKUP		0x074
+#define PCIE_ELBI_XMLH_LINKUP		BIT(4)
 #define PCIE_ELBI_LTSSM_ENABLE		0x1
 #define PCIE_ELBI_SLV_AWMISC		0x11c
 #define PCIE_ELBI_SLV_ARMISC		0x120
 #define PCIE_ELBI_SLV_DBI_ENABLE	BIT(21)
 
-struct exynos_pcie_mem_res {
-	void __iomem *elbi_base;	/* DT 0th resource: PCIe CTRL */
-};
-
-struct exynos_pcie_clk_res {
-	struct clk *clk;
-	struct clk *bus_clk;
-};
-
 struct exynos_pcie {
-	struct dw_pcie			*pci;
-	struct exynos_pcie_mem_res	*mem_res;
-	struct exynos_pcie_clk_res	*clk_res;
-	const struct exynos_pcie_ops	*ops;
-	int				reset_gpio;
-
+	struct dw_pcie			pci;
+	void __iomem			*elbi_base;
+	struct clk			*clk;
+	struct clk			*bus_clk;
 	struct phy			*phy;
+	struct regulator_bulk_data	supplies[2];
 };
 
-struct exynos_pcie_ops {
-	int (*get_mem_resources)(struct platform_device *pdev,
-				 struct exynos_pcie *ep);
-	int (*get_clk_resources)(struct exynos_pcie *ep);
-	int (*init_clk_resources)(struct exynos_pcie *ep);
-	void (*deinit_clk_resources)(struct exynos_pcie *ep);
-};
-
-static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
-					     struct exynos_pcie *ep)
-{
-	struct dw_pcie *pci = ep->pci;
-	struct device *dev = pci->dev;
-
-	ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
-	if (!ep->mem_res)
-		return -ENOMEM;
-
-	ep->mem_res->elbi_base = devm_platform_ioremap_resource(pdev, 0);
-	if (IS_ERR(ep->mem_res->elbi_base))
-		return PTR_ERR(ep->mem_res->elbi_base);
-
-	return 0;
-}
-
-static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep)
-{
-	struct dw_pcie *pci = ep->pci;
-	struct device *dev = pci->dev;
-
-	ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL);
-	if (!ep->clk_res)
-		return -ENOMEM;
-
-	ep->clk_res->clk = devm_clk_get(dev, "pcie");
-	if (IS_ERR(ep->clk_res->clk)) {
-		dev_err(dev, "Failed to get pcie rc clock\n");
-		return PTR_ERR(ep->clk_res->clk);
-	}
-
-	ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus");
-	if (IS_ERR(ep->clk_res->bus_clk)) {
-		dev_err(dev, "Failed to get pcie bus clock\n");
-		return PTR_ERR(ep->clk_res->bus_clk);
-	}
-
-	return 0;
-}
-
-static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep)
+static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep)
 {
-	struct dw_pcie *pci = ep->pci;
-	struct device *dev = pci->dev;
+	struct device *dev = ep->pci.dev;
 	int ret;
 
-	ret = clk_prepare_enable(ep->clk_res->clk);
+	ret = clk_prepare_enable(ep->clk);
 	if (ret) {
 		dev_err(dev, "cannot enable pcie rc clock");
 		return ret;
 	}
 
-	ret = clk_prepare_enable(ep->clk_res->bus_clk);
+	ret = clk_prepare_enable(ep->bus_clk);
 	if (ret) {
 		dev_err(dev, "cannot enable pcie bus clock");
 		goto err_bus_clk;
@@ -141,24 +79,17 @@ static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep)
 	return 0;
 
 err_bus_clk:
-	clk_disable_unprepare(ep->clk_res->clk);
+	clk_disable_unprepare(ep->clk);
 
 	return ret;
 }
 
-static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep)
+static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep)
 {
-	clk_disable_unprepare(ep->clk_res->bus_clk);
-	clk_disable_unprepare(ep->clk_res->clk);
+	clk_disable_unprepare(ep->bus_clk);
+	clk_disable_unprepare(ep->clk);
 }
 
-static const struct exynos_pcie_ops exynos5440_pcie_ops = {
-	.get_mem_resources	= exynos5440_pcie_get_mem_resources,
-	.get_clk_resources	= exynos5440_pcie_get_clk_resources,
-	.init_clk_resources	= exynos5440_pcie_init_clk_resources,
-	.deinit_clk_resources	= exynos5440_pcie_deinit_clk_resources,
-};
-
 static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
 {
 	writel(val, base + reg);
@@ -173,115 +104,71 @@ static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
 {
 	u32 val;
 
-	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC);
+	val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_AWMISC);
 	if (on)
 		val |= PCIE_ELBI_SLV_DBI_ENABLE;
 	else
 		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
-	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
+	exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
 }
 
 static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
 {
 	u32 val;
 
-	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC);
+	val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_ARMISC);
 	if (on)
 		val |= PCIE_ELBI_SLV_DBI_ENABLE;
 	else
 		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
-	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
+	exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
 }
 
 static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
 {
 	u32 val;
 
-	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+	val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
 	val &= ~PCIE_CORE_RESET_ENABLE;
-	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
-	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET);
-	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET);
-	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET);
+	exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
+	exynos_pcie_writel(ep->elbi_base, 0, PCIE_STICKY_RESET);
+	exynos_pcie_writel(ep->elbi_base, 0, PCIE_NONSTICKY_RESET);
 }
 
 static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
 {
 	u32 val;
 
-	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+	val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
 	val |= PCIE_CORE_RESET_ENABLE;
 
-	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
-	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET);
-	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET);
-	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET);
-	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET);
+	exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
+	exynos_pcie_writel(ep->elbi_base, 1, PCIE_STICKY_RESET);
+	exynos_pcie_writel(ep->elbi_base, 1, PCIE_NONSTICKY_RESET);
+	exynos_pcie_writel(ep->elbi_base, 1, PCIE_APP_INIT_RESET);
+	exynos_pcie_writel(ep->elbi_base, 0, PCIE_APP_INIT_RESET);
 }
 
-static void exynos_pcie_assert_reset(struct exynos_pcie *ep)
+static int exynos_pcie_start_link(struct dw_pcie *pci)
 {
-	struct dw_pcie *pci = ep->pci;
-	struct device *dev = pci->dev;
+	struct exynos_pcie *ep = to_exynos_pcie(pci);
 	u32 val;
 
-	if (ep->reset_gpio >= 0)
-		devm_gpio_request_one(dev, ep->reset_gpio,
-				      GPIOF_OUT_INIT_HIGH, "RESET");
-}
-
-static int exynos_pcie_establish_link(struct exynos_pcie *ep)
-{
-	struct dw_pcie *pci = ep->pci;
-	struct pcie_port *pp = &pci->pp;
-	struct device *dev = pci->dev;
-
-	if (dw_pcie_link_up(pci)) {
-		dev_err(dev, "Link already up\n");
-		return 0;
-	}
-
-	exynos_pcie_assert_core_reset(ep);
-
-	phy_reset(ep->phy);
-
-	exynos_pcie_writel(ep->mem_res->elbi_base, 1,
-			PCIE_PWR_RESET);
-
-	phy_power_on(ep->phy);
-	phy_init(ep->phy);
-
-	exynos_pcie_deassert_core_reset(ep);
-	dw_pcie_setup_rc(pp);
-	exynos_pcie_assert_reset(ep);
+	val = exynos_pcie_readl(ep->elbi_base, PCIE_SW_WAKE);
+	val &= ~PCIE_BUS_EN;
+	exynos_pcie_writel(ep->elbi_base, val, PCIE_SW_WAKE);
 
 	/* assert LTSSM enable */
-	exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
+	exynos_pcie_writel(ep->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
 			   PCIE_APP_LTSSM_ENABLE);
-
-	/* check if the link is up or not */
-	if (!dw_pcie_wait_for_link(pci))
-		return 0;
-
-	phy_power_off(ep->phy);
-	return -ETIMEDOUT;
+	return 0;
 }
 
 static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
 {
-	u32 val;
+	u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_IRQ_PULSE);
 
-	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE);
-	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE);
-}
-
-static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
-{
-	u32 val;
-
-	/* enable INTX interrupt */
-	val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
-		IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
-	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE);
+	exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_PULSE);
 }
 
 static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
@@ -292,26 +179,14 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-static void exynos_pcie_msi_init(struct exynos_pcie *ep)
+static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
 {
-	struct dw_pcie *pci = ep->pci;
-	struct pcie_port *pp = &pci->pp;
-	u32 val;
+	u32 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+		  IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
 
-	dw_pcie_msi_init(pp);
-
-	/* enable MSI interrupt */
-	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL);
-	val |= IRQ_MSI_ENABLE;
-	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL);
-}
-
-static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
-{
-	exynos_pcie_enable_irq_pulse(ep);
-
-	if (IS_ENABLED(CONFIG_PCI_MSI))
-		exynos_pcie_msi_init(ep);
+	exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_EN_PULSE);
+	exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
+	exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
 }
 
 static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
@@ -370,13 +245,9 @@ static struct pci_ops exynos_pci_ops = {
 static int exynos_pcie_link_up(struct dw_pcie *pci)
 {
 	struct exynos_pcie *ep = to_exynos_pcie(pci);
-	u32 val;
+	u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP);
 
-	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP);
-	if (val == PCIE_ELBI_LTSSM_ENABLE)
-		return 1;
-
-	return 0;
+	return (val & PCIE_ELBI_XMLH_LINKUP);
 }
 
 static int exynos_pcie_host_init(struct pcie_port *pp)
@@ -386,8 +257,14 @@ static int exynos_pcie_host_init(struct pcie_port *pp)
 
 	pp->bridge->ops = &exynos_pci_ops;
 
-	exynos_pcie_establish_link(ep);
-	exynos_pcie_enable_interrupts(ep);
+	exynos_pcie_assert_core_reset(ep);
+
+	phy_reset(ep->phy);
+	phy_power_on(ep->phy);
+	phy_init(ep->phy);
+
+	exynos_pcie_deassert_core_reset(ep);
+	exynos_pcie_enable_irq_pulse(ep);
 
 	return 0;
 }
@@ -396,32 +273,27 @@ static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
 	.host_init = exynos_pcie_host_init,
 };
 
-static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
-				       struct platform_device *pdev)
+static int exynos_add_pcie_port(struct exynos_pcie *ep,
+				struct platform_device *pdev)
 {
-	struct dw_pcie *pci = ep->pci;
+	struct dw_pcie *pci = &ep->pci;
 	struct pcie_port *pp = &pci->pp;
 	struct device *dev = &pdev->dev;
 	int ret;
 
-	pp->irq = platform_get_irq(pdev, 1);
+	pp->irq = platform_get_irq(pdev, 0);
 	if (pp->irq < 0)
 		return pp->irq;
 
 	ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
-				IRQF_SHARED, "exynos-pcie", ep);
+			       IRQF_SHARED, "exynos-pcie", ep);
 	if (ret) {
 		dev_err(dev, "failed to request irq\n");
 		return ret;
 	}
 
-	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		pp->msi_irq = platform_get_irq(pdev, 0);
-		if (pp->msi_irq < 0)
-			return pp->msi_irq;
-	}
-
 	pp->ops = &exynos_pcie_host_ops;
+	pp->msi_irq = -ENODEV;
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
@@ -436,12 +308,12 @@ static const struct dw_pcie_ops dw_pcie_ops = {
 	.read_dbi = exynos_pcie_read_dbi,
 	.write_dbi = exynos_pcie_write_dbi,
 	.link_up = exynos_pcie_link_up,
+	.start_link = exynos_pcie_start_link,
 };
 
-static int __init exynos_pcie_probe(struct platform_device *pdev)
+static int exynos_pcie_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct dw_pcie *pci;
 	struct exynos_pcie *ep;
 	struct device_node *np = dev->of_node;
 	int ret;
@@ -450,42 +322,44 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
 	if (!ep)
 		return -ENOMEM;
 
-	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
-	if (!pci)
-		return -ENOMEM;
-
-	pci->dev = dev;
-	pci->ops = &dw_pcie_ops;
-
-	ep->pci = pci;
-	ep->ops = (const struct exynos_pcie_ops *)
-		of_device_get_match_data(dev);
-
-	ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+	ep->pci.dev = dev;
+	ep->pci.ops = &dw_pcie_ops;
 
 	ep->phy = devm_of_phy_get(dev, np, NULL);
-	if (IS_ERR(ep->phy)) {
-		if (PTR_ERR(ep->phy) != -ENODEV)
-			return PTR_ERR(ep->phy);
+	if (IS_ERR(ep->phy))
+		return PTR_ERR(ep->phy);
 
-		ep->phy = NULL;
-	}
+	/* External Local Bus interface (ELBI) registers */
+	ep->elbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
+	if (IS_ERR(ep->elbi_base))
+		return PTR_ERR(ep->elbi_base);
 
+	ep->clk = devm_clk_get(dev, "pcie");
+	if (IS_ERR(ep->clk)) {
+		dev_err(dev, "Failed to get pcie rc clock\n");
+		return PTR_ERR(ep->clk);
+	}
 
-	if (ep->ops && ep->ops->get_mem_resources) {
-		ret = ep->ops->get_mem_resources(pdev, ep);
-		if (ret)
-			return ret;
-	}
+	ep->bus_clk = devm_clk_get(dev, "pcie_bus");
+	if (IS_ERR(ep->bus_clk)) {
+		dev_err(dev, "Failed to get pcie bus clock\n");
+		return PTR_ERR(ep->bus_clk);
+	}
 
-	if (ep->ops && ep->ops->get_clk_resources &&
-	    ep->ops->init_clk_resources) {
-		ret = ep->ops->get_clk_resources(ep);
-		if (ret)
-			return ret;
-		ret = ep->ops->init_clk_resources(ep);
-		if (ret)
-			return ret;
-	}
+	ep->supplies[0].supply = "vdd18";
+	ep->supplies[1].supply = "vdd10";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ep->supplies),
+				      ep->supplies);
+	if (ret)
+		return ret;
+
+	ret = exynos_pcie_init_clk_resources(ep);
+	if (ret)
+		return ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+	if (ret)
+		return ret;
 
 	platform_set_drvdata(pdev, ep);
 
@@ -497,9 +371,9 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
 
 fail_probe:
 	phy_exit(ep->phy);
+	exynos_pcie_deinit_clk_resources(ep);
+	regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
 
-	if (ep->ops && ep->ops->deinit_clk_resources)
-		ep->ops->deinit_clk_resources(ep);
 	return ret;
 }
 
@@ -507,32 +381,65 @@ static int __exit exynos_pcie_remove(struct platform_device *pdev)
 {
 	struct exynos_pcie *ep = platform_get_drvdata(pdev);
 
-	if (ep->ops && ep->ops->deinit_clk_resources)
-		ep->ops->deinit_clk_resources(ep);
+	dw_pcie_host_deinit(&ep->pci.pp);
+	exynos_pcie_assert_core_reset(ep);
+	phy_power_off(ep->phy);
+	phy_exit(ep->phy);
+	exynos_pcie_deinit_clk_resources(ep);
+	regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
 
 	return 0;
 }
 
+static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev)
+{
+	struct exynos_pcie *ep = dev_get_drvdata(dev);
+
+	exynos_pcie_assert_core_reset(ep);
+	phy_power_off(ep->phy);
+	phy_exit(ep->phy);
+	regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
+
+	return 0;
+}
+
+static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)
+{
+	struct exynos_pcie *ep = dev_get_drvdata(dev);
+	struct dw_pcie *pci = &ep->pci;
+	struct pcie_port *pp = &pci->pp;
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+	if (ret)
+		return ret;
+
+	/* exynos_pcie_host_init controls ep->phy */
+	exynos_pcie_host_init(pp);
+	dw_pcie_setup_rc(pp);
+	exynos_pcie_start_link(pci);
+	return dw_pcie_wait_for_link(pci);
+}
+
+static const struct dev_pm_ops exynos_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
+				      exynos_pcie_resume_noirq)
+};
+
 static const struct of_device_id exynos_pcie_of_match[] = {
-	{
-		.compatible = "samsung,exynos5440-pcie",
-		.data = &exynos5440_pcie_ops
-	},
-	{},
+	{ .compatible = "samsung,exynos5433-pcie", },
+	{ },
 };
 
 static struct platform_driver exynos_pcie_driver = {
+	.probe		= exynos_pcie_probe,
 	.remove		= __exit_p(exynos_pcie_remove),
 	.driver = {
 		.name	= "exynos-pcie",
 		.of_match_table = exynos_pcie_of_match,
+		.pm	= &exynos_pcie_pm_ops,
 	},
 };
 
-/* Exynos PCIe driver does not allow module unload */
-
-static int __init exynos_pcie_init(void)
-{
-	return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
-}
-subsys_initcall(exynos_pcie_init);
+module_platform_driver(exynos_pcie_driver);
 MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
@@ -745,9 +745,9 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
 	}
 }
 
-static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
+static int imx6_pcie_start_link(struct dw_pcie *pci)
 {
-	struct dw_pcie *pci = imx6_pcie->pci;
+	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
 	struct device *dev = pci->dev;
 	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
 	u32 tmp;
@@ -834,9 +834,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
 	imx6_pcie_init_phy(imx6_pcie);
 	imx6_pcie_deassert_core_reset(imx6_pcie);
 	imx6_setup_phy_mpll(imx6_pcie);
-	dw_pcie_setup_rc(pp);
-	imx6_pcie_establish_link(imx6_pcie);
-	dw_pcie_msi_init(pp);
 
 	return 0;
 }
@@ -845,33 +842,8 @@ static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
 	.host_init = imx6_pcie_host_init,
 };
 
-static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
-			      struct platform_device *pdev)
-{
-	struct dw_pcie *pci = imx6_pcie->pci;
-	struct pcie_port *pp = &pci->pp;
-	struct device *dev = &pdev->dev;
-	int ret;
-
-	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
-		if (pp->msi_irq < 0)
-			return pp->msi_irq;
-	}
-
-	pp->ops = &imx6_pcie_host_ops;
-
-	ret = dw_pcie_host_init(pp);
-	if (ret) {
-		dev_err(dev, "failed to initialize host\n");
-		return ret;
-	}
-
-	return 0;
-}
-
 static const struct dw_pcie_ops dw_pcie_ops = {
-	/* No special ops needed, but pcie-designware still expects this struct */
+	.start_link = imx6_pcie_start_link,
 };
 
 #ifdef CONFIG_PM_SLEEP
@@ -980,7 +952,7 @@ static int imx6_pcie_resume_noirq(struct device *dev)
 	imx6_pcie_deassert_core_reset(imx6_pcie);
 	dw_pcie_setup_rc(pp);
 
-	ret = imx6_pcie_establish_link(imx6_pcie);
+	ret = imx6_pcie_start_link(imx6_pcie->pci);
 	if (ret < 0)
 		dev_info(dev, "pcie link is down after resume.\n");
 
@@ -1014,6 +986,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
 
 	pci->dev = dev;
 	pci->ops = &dw_pcie_ops;
+	pci->pp.ops = &imx6_pcie_host_ops;
 
 	imx6_pcie->pci = pci;
 	imx6_pcie->drvdata = of_device_get_match_data(dev);
@@ -1163,7 +1136,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	ret = imx6_add_pcie_port(imx6_pcie, pdev);
+	ret = dw_pcie_host_init(&pci->pp);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
@@ -121,6 +121,7 @@ struct keystone_pcie {
 
 	int			msi_host_irq;
 	int			num_lanes;
+	u32			num_viewport;
 	struct phy		**phy;
 	struct device_link	**link;
 	struct device_node	*msi_intc_np;
@@ -272,14 +273,6 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
 	ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
 }
 
-/*
- * Dummy function so that DW core doesn't configure MSI
- */
-static int ks_pcie_am654_msi_host_init(struct pcie_port *pp)
-{
-	return 0;
-}
-
 static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
 {
 	ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
@@ -394,9 +387,9 @@ static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
 static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
 {
 	u32 val;
+	u32 num_viewport = ks_pcie->num_viewport;
 	struct dw_pcie *pci = ks_pcie->pci;
 	struct pcie_port *pp = &pci->pp;
-	u32 num_viewport = pci->num_viewport;
 	u64 start, end;
 	struct resource *mem;
 	int i;
@@ -519,14 +512,8 @@ static void ks_pcie_stop_link(struct dw_pcie *pci)
 static int ks_pcie_start_link(struct dw_pcie *pci)
 {
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-	struct device *dev = pci->dev;
 	u32 val;
 
-	if (dw_pcie_link_up(pci)) {
-		dev_dbg(dev, "link is already up\n");
-		return 0;
-	}
-
 	/* Initiate Link Training */
 	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
@@ -821,8 +808,6 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
 	if (ret)
 		return ret;
 
-	dw_pcie_setup_rc(pp);
-
 	ks_pcie_stop_link(pci);
 	ks_pcie_setup_rc_app_regs(ks_pcie);
 	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
@@ -841,9 +826,6 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
 		"Asynchronous external abort");
 #endif
 
-	ks_pcie_start_link(pci);
-	dw_pcie_wait_for_link(pci);
-
 	return 0;
 }
 
@@ -854,7 +836,6 @@ static const struct dw_pcie_host_ops ks_pcie_host_ops = {
 
 static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
 	.host_init = ks_pcie_host_init,
-	.msi_host_init = ks_pcie_am654_msi_host_init,
 };
 
 static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
@@ -864,23 +845,6 @@ static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
 	return ks_pcie_handle_error_irq(ks_pcie);
 }
 
-static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
-					struct platform_device *pdev)
-{
-	struct dw_pcie *pci = ks_pcie->pci;
-	struct pcie_port *pp = &pci->pp;
-	struct device *dev = &pdev->dev;
-	int ret;
-
-	ret = dw_pcie_host_init(pp);
-	if (ret) {
-		dev_err(dev, "failed to initialize host\n");
-		return ret;
-	}
-
-	return 0;
-}
-
 static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
 				     u32 reg, size_t size, u32 val)
 {
@@ -977,33 +941,6 @@ static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
 	.get_features = &ks_pcie_am654_get_features,
 };
 
-static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
-				      struct platform_device *pdev)
-{
-	int ret;
-	struct dw_pcie_ep *ep;
-	struct resource *res;
-	struct device *dev = &pdev->dev;
-	struct dw_pcie *pci = ks_pcie->pci;
-
-	ep = &pci->ep;
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
-	if (!res)
-		return -EINVAL;
-
-	ep->phys_base = res->start;
-	ep->addr_size = resource_size(res);
-
-	ret = dw_pcie_ep_init(ep);
-	if (ret) {
-		dev_err(dev, "failed to initialize endpoint\n");
-		return ret;
-	}
-
-	return 0;
-}
-
 static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
 {
 	int num_lanes = ks_pcie->num_lanes;
|
@ -1157,6 +1094,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
|
|||
struct resource *res;
|
||||
unsigned int version;
|
||||
void __iomem *base;
|
||||
u32 num_viewport;
|
||||
struct phy **phy;
|
||||
u32 num_lanes;
|
||||
char name[10];
|
||||
|
@@ -1288,6 +1226,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
 		goto err_get_sync;
 	}
 
+	ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+	if (ret < 0) {
+		dev_err(dev, "unable to read *num-viewport* property\n");
+		goto err_get_sync;
+	}
+
 	/*
 	 * "Power Sequencing and Reset Signal Timings" table in
 	 * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
@@ -1301,8 +1245,9 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
 			gpiod_set_value_cansleep(gpiod, 1);
 		}
 
+		ks_pcie->num_viewport = num_viewport;
 		pci->pp.ops = host_ops;
-		ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
+		ret = dw_pcie_host_init(&pci->pp);
 		if (ret < 0)
 			goto err_get_sync;
 		break;
@@ -1313,7 +1258,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
 		}
 
 		pci->ep.ops = ep_ops;
-		ret = ks_pcie_add_pcie_ep(ks_pcie, pdev);
+		ret = dw_pcie_ep_init(&pci->ep);
 		if (ret < 0)
 			goto err_get_sync;
 		break;
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -18,8 +18,6 @@
 
 #include "pcie-designware.h"
 
-#define PCIE_DBI2_OFFSET		0x1000	/* DBI2 base address */
-
 #define to_ls_pcie_ep(x)	dev_get_drvdata((x)->dev)
 
 struct ls_pcie_ep_drvdata {
@@ -124,34 +122,6 @@ static const struct of_device_id ls_pcie_ep_of_match[] = {
 	{ },
 };
 
-static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,
-				 struct platform_device *pdev)
-{
-	struct dw_pcie *pci = pcie->pci;
-	struct device *dev = pci->dev;
-	struct dw_pcie_ep *ep;
-	struct resource *res;
-	int ret;
-
-	ep = &pci->ep;
-	ep->ops = pcie->drvdata->ops;
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
-	if (!res)
-		return -EINVAL;
-
-	ep->phys_base = res->start;
-	ep->addr_size = resource_size(res);
-
-	ret = dw_pcie_ep_init(ep);
-	if (ret) {
-		dev_err(dev, "failed to initialize endpoint\n");
-		return ret;
-	}
-
-	return 0;
-}
-
 static int __init ls_pcie_ep_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -159,7 +129,6 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
 	struct ls_pcie_ep *pcie;
 	struct pci_epc_features *ls_epc;
 	struct resource *dbi_base;
-	int ret;
 
 	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
@@ -188,13 +157,11 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
 	if (IS_ERR(pci->dbi_base))
 		return PTR_ERR(pci->dbi_base);
 
-	pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET;
+	pci->ep.ops = &ls_pcie_ep_ops;
 
 	platform_set_drvdata(pdev, pcie);
 
-	ret = ls_add_pcie_ep(pcie, pdev);
-
-	return ret;
+	return dw_pcie_ep_init(&pci->ep);
 }
 
 static struct platform_driver ls_pcie_ep_driver = {
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -83,14 +83,6 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
 	iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
 }
 
-static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
-{
-	int i;
-
-	for (i = 0; i < PCIE_IATU_NUM; i++)
-		dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
-}
-
 static int ls1021_pcie_link_up(struct dw_pcie *pci)
 {
 	u32 state;
@@ -136,12 +128,6 @@ static int ls_pcie_host_init(struct pcie_port *pp)
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct ls_pcie *pcie = to_ls_pcie(pci);
 
-	/*
-	 * Disable outbound windows configured by the bootloader to avoid
-	 * one transaction hitting multiple outbound windows.
-	 * dw_pcie_setup_rc() will reconfigure the outbound windows.
-	 */
-	ls_pcie_disable_outbound_atus(pcie);
 	ls_pcie_fix_error_response(pcie);
 
 	dw_pcie_dbi_ro_wr_en(pci);
@@ -150,8 +136,6 @@ static int ls_pcie_host_init(struct pcie_port *pp)
 
 	ls_pcie_drop_msg_tlp(pcie);
 
-	dw_pcie_setup_rc(pp);
-
 	return 0;
 }
 
@@ -182,37 +166,12 @@ static int ls1021_pcie_host_init(struct pcie_port *pp)
 	return ls_pcie_host_init(pp);
 }
 
-static int ls_pcie_msi_host_init(struct pcie_port *pp)
-{
-	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-	struct device *dev = pci->dev;
-	struct device_node *np = dev->of_node;
-	struct device_node *msi_node;
-
-	/*
-	 * The MSI domain is set by the generic of_msi_configure(). This
-	 * .msi_host_init() function keeps us from doing the default MSI
-	 * domain setup in dw_pcie_host_init() and also enforces the
-	 * requirement that "msi-parent" exists.
-	 */
-	msi_node = of_parse_phandle(np, "msi-parent", 0);
-	if (!msi_node) {
-		dev_err(dev, "failed to find msi-parent\n");
-		return -EINVAL;
-	}
-
-	of_node_put(msi_node);
-	return 0;
-}
-
 static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
 	.host_init = ls1021_pcie_host_init,
-	.msi_host_init = ls_pcie_msi_host_init,
 };
 
 static const struct dw_pcie_host_ops ls_pcie_host_ops = {
 	.host_init = ls_pcie_host_init,
-	.msi_host_init = ls_pcie_msi_host_init,
 };
 
 static const struct dw_pcie_ops dw_ls1021_pcie_ops = {
@@ -273,31 +232,12 @@ static const struct of_device_id ls_pcie_of_match[] = {
 	{ },
 };
 
-static int __init ls_add_pcie_port(struct ls_pcie *pcie)
-{
-	struct dw_pcie *pci = pcie->pci;
-	struct pcie_port *pp = &pci->pp;
-	struct device *dev = pci->dev;
-	int ret;
-
-	pp->ops = pcie->drvdata->ops;
-
-	ret = dw_pcie_host_init(pp);
-	if (ret) {
-		dev_err(dev, "failed to initialize host\n");
-		return ret;
-	}
-
-	return 0;
-}
-
 static int __init ls_pcie_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct dw_pcie *pci;
 	struct ls_pcie *pcie;
 	struct resource *dbi_base;
-	int ret;
 
 	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
@@ -311,6 +251,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
 
 	pci->dev = dev;
 	pci->ops = pcie->drvdata->dw_pcie_ops;
+	pci->pp.ops = pcie->drvdata->ops;
 
 	pcie->pci = pci;
 
@@ -326,11 +267,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, pcie);
 
-	ret = ls_add_pcie_port(pcie);
-	if (ret < 0)
-		return ret;
-
-	return 0;
+	return dw_pcie_host_init(&pci->pp);
 }
 
 static struct platform_driver ls_pcie_driver = {
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
@@ -231,7 +231,7 @@ static void meson_pcie_assert_reset(struct meson_pcie *mp)
 	gpiod_set_value_cansleep(mp->reset_gpio, 0);
 }
 
-static void meson_pcie_init_dw(struct meson_pcie *mp)
+static void meson_pcie_ltssm_enable(struct meson_pcie *mp)
 {
 	u32 val;
 
@@ -289,20 +289,14 @@ static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
 	dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
 }
 
-static int meson_pcie_establish_link(struct meson_pcie *mp)
+static int meson_pcie_start_link(struct dw_pcie *pci)
 {
-	struct dw_pcie *pci = &mp->pci;
-	struct pcie_port *pp = &pci->pp;
-
-	meson_pcie_init_dw(mp);
-	meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
-	meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
-
-	dw_pcie_setup_rc(pp);
+	struct meson_pcie *mp = to_meson_pcie(pci);
 
 	meson_pcie_ltssm_enable(mp);
 	meson_pcie_assert_reset(mp);
 
-	return dw_pcie_wait_for_link(pci);
+	return 0;
 }
 
 static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn,
@@ -380,15 +374,11 @@ static int meson_pcie_host_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct meson_pcie *mp = to_meson_pcie(pci);
-	int ret;
 
 	pp->bridge->ops = &meson_pci_ops;
 
-	ret = meson_pcie_establish_link(mp);
-	if (ret)
-		return ret;
-
-	dw_pcie_msi_init(pp);
+	meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
+	meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
 
 	return 0;
 }
@@ -397,33 +387,9 @@ static const struct dw_pcie_host_ops meson_pcie_host_ops = {
 	.host_init = meson_pcie_host_init,
 };
 
-static int meson_add_pcie_port(struct meson_pcie *mp,
-			       struct platform_device *pdev)
-{
-	struct dw_pcie *pci = &mp->pci;
-	struct pcie_port *pp = &pci->pp;
-	struct device *dev = &pdev->dev;
-	int ret;
-
-	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		pp->msi_irq = platform_get_irq(pdev, 0);
-		if (pp->msi_irq < 0)
-			return pp->msi_irq;
-	}
-
-	pp->ops = &meson_pcie_host_ops;
-
-	ret = dw_pcie_host_init(pp);
-	if (ret) {
-		dev_err(dev, "failed to initialize host\n");
-		return ret;
-	}
-
-	return 0;
-}
-
 static const struct dw_pcie_ops dw_pcie_ops = {
 	.link_up = meson_pcie_link_up,
+	.start_link = meson_pcie_start_link,
 };
 
 static int meson_pcie_probe(struct platform_device *pdev)
@@ -440,6 +406,7 @@ static int meson_pcie_probe(struct platform_device *pdev)
 	pci = &mp->pci;
 	pci->dev = dev;
 	pci->ops = &dw_pcie_ops;
+	pci->pp.ops = &meson_pcie_host_ops;
 	pci->num_lanes = 1;
 
 	mp->phy = devm_phy_get(dev, "pcie");
@ -486,7 +453,7 @@ static int meson_pcie_probe(struct platform_device *pdev)
|
|||
|
||||
platform_set_drvdata(pdev, mp);
|
||||
|
||||
ret = meson_add_pcie_port(mp, pdev);
|
||||
ret = dw_pcie_host_init(&pci->pp);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Add PCIe port failed, %d\n", ret);
|
||||
goto err_phy;
|
||||
|
|
|
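
The recurring pattern above (meson here, and most drivers below) is the move of link handling into common code: a driver now supplies only a non-blocking .start_link() callback, and dw_pcie_host_init() starts the link and waits for it on the driver's behalf. A minimal standalone C sketch of that contract follows; every name in it is an illustrative stand-in, not the kernel API:

#include <stdio.h>

struct dw_pcie_sketch;

struct dw_pcie_ops_sketch {
	int (*start_link)(struct dw_pcie_sketch *pci); /* kick LTSSM, don't wait */
	int (*link_up)(struct dw_pcie_sketch *pci);
};

struct dw_pcie_sketch {
	const struct dw_pcie_ops_sketch *ops;
	int fake_link; /* pretend hardware state */
};

static int demo_start_link(struct dw_pcie_sketch *pci)
{
	pci->fake_link = 1;	/* real drivers set an LTSSM-enable bit here */
	return 0;		/* no waiting: the core polls afterwards */
}

static int demo_link_up(struct dw_pcie_sketch *pci)
{
	return pci->fake_link;
}

/* Roughly what dw_pcie_host_init() now does on every driver's behalf. */
static int core_host_init(struct dw_pcie_sketch *pci)
{
	if (!pci->ops->link_up(pci) && pci->ops->start_link) {
		int ret = pci->ops->start_link(pci);

		if (ret)
			return ret;
	}
	/* the real core ignores a timeout here: the link may come up later */
	if (!pci->ops->link_up(pci))
		printf("link did not come up (yet)\n");
	return 0;
}

int main(void)
{
	const struct dw_pcie_ops_sketch ops = {
		.start_link = demo_start_link,
		.link_up = demo_link_up,
	};
	struct dw_pcie_sketch pci = { .ops = &ops };

	return core_host_init(&pci);
}

The design point is that start_link() only kicks LTSSM; the waiting, and the policy of tolerating a timeout, live in exactly one place.
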
@@ -314,23 +314,6 @@ static const struct dw_pcie_host_ops al_pcie_host_ops = {
	.host_init = al_pcie_host_init,
};

static int al_add_pcie_port(struct pcie_port *pp,
			    struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	pp->ops = &al_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
};

@@ -339,7 +322,6 @@ static int al_pcie_probe(struct platform_device *pdev)
	struct device *dev = &pdev->dev;
	struct resource *controller_res;
	struct resource *ecam_res;
	struct resource *dbi_res;
	struct al_pcie *al_pcie;
	struct dw_pcie *pci;

@@ -353,15 +335,11 @@ static int al_pcie_probe(struct platform_device *pdev)

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pci->pp.ops = &al_pcie_host_ops;

	al_pcie->pci = pci;
	al_pcie->dev = dev;

	dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (!ecam_res) {
		dev_err(dev, "couldn't find 'config' reg in DT\n");

@@ -378,12 +356,11 @@ static int al_pcie_probe(struct platform_device *pdev)
		return PTR_ERR(al_pcie->controller_base);
	}

	dev_dbg(dev, "From DT: dbi_base: %pR, controller_base: %pR\n",
		dbi_res, controller_res);
	dev_dbg(dev, "From DT: controller_base: %pR\n", controller_res);

	platform_set_drvdata(pdev, al_pcie);

	return al_add_pcie_port(&pci->pp, pdev);
	return dw_pcie_host_init(&pci->pp);
}

static const struct of_device_id al_pcie_of_match[] = {
@@ -154,11 +154,23 @@ static int armada8k_pcie_link_up(struct dw_pcie *pci)
	return 0;
}

static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
static int armada8k_pcie_start_link(struct dw_pcie *pci)
{
	struct dw_pcie *pci = pcie->pci;
	u32 reg;

	/* Start LTSSM */
	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
	reg |= PCIE_APP_LTSSM_EN;
	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);

	return 0;
}

static int armada8k_pcie_host_init(struct pcie_port *pp)
{
	u32 reg;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (!dw_pcie_link_up(pci)) {
		/* Disable LTSSM state machine to enable configuration */
		reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);

@@ -193,26 +205,6 @@ static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
	       PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg);

	if (!dw_pcie_link_up(pci)) {
		/* Configuration done. Start LTSSM */
		reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
		reg |= PCIE_APP_LTSSM_EN;
		dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
	}

	/* Wait until the link becomes active again */
	if (dw_pcie_wait_for_link(pci))
		dev_err(pci->dev, "Link not up after reconfiguration\n");
}

static int armada8k_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct armada8k_pcie *pcie = to_armada8k_pcie(pci);

	dw_pcie_setup_rc(pp);
	armada8k_pcie_establish_link(pcie);

	return 0;
}

@@ -269,6 +261,7 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = armada8k_pcie_link_up,
	.start_link = armada8k_pcie_start_link,
};

static int armada8k_pcie_probe(struct platform_device *pdev)
@@ -328,10 +328,6 @@ static int artpec6_pcie_host_init(struct pcie_port *pp)
	artpec6_pcie_init_phy(artpec6_pcie);
	artpec6_pcie_deassert_core_reset(artpec6_pcie);
	artpec6_pcie_wait_for_phy(artpec6_pcie);
	dw_pcie_setup_rc(pp);
	artpec6_pcie_establish_link(pci);
	dw_pcie_wait_for_link(pci);
	dw_pcie_msi_init(pp);

	return 0;
}

@@ -340,31 +336,6 @@ static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
	.host_init = artpec6_pcie_host_init,
};

static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
				 struct platform_device *pdev)
{
	struct dw_pcie *pci = artpec6_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0)
			return pp->msi_irq;
	}

	pp->ops = &artpec6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

@@ -403,38 +374,6 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.raise_irq = artpec6_pcie_raise_irq,
};

static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
			       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = artpec6_pcie->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2");
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}

static int artpec6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

@@ -469,10 +408,6 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
	artpec6_pcie->variant = variant;
	artpec6_pcie->mode = mode;

	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi");
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	artpec6_pcie->phy_base =
		devm_platform_ioremap_resource_byname(pdev, "phy");
	if (IS_ERR(artpec6_pcie->phy_base))

@@ -491,7 +426,9 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
		if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST))
			return -ENODEV;

		ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
		pci->pp.ops = &artpec6_pcie_host_ops;

		ret = dw_pcie_host_init(&pci->pp);
		if (ret < 0)
			return ret;
		break;

@@ -504,9 +441,10 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
		val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
		val &= ~PCIECFG_DEVICE_TYPE_MASK;
		artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
		ret = artpec6_add_pcie_ep(artpec6_pcie, pdev);
		if (ret < 0)
			return ret;

		pci->ep.ops = &pcie_ep_ops;

		return dw_pcie_ep_init(&pci->ep);
		break;
	}
	default:
@@ -7,6 +7,7 @@
 */

#include <linux/of.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"
#include <linux/pci-epc.h>

@@ -160,8 +161,8 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,
	u32 free_win;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
	if (free_win >= ep->num_ib_windows) {
	free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
	if (free_win >= pci->num_ib_windows) {
		dev_err(pci->dev, "No free inbound window\n");
		return -EINVAL;
	}

@@ -186,8 +187,8 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
	u32 free_win;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
	if (free_win >= ep->num_ob_windows) {
	free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
	if (free_win >= pci->num_ob_windows) {
		dev_err(pci->dev, "No free outbound window\n");
		return -EINVAL;
	}
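
With the window counts now living in the common struct (pci->num_ib_windows / pci->num_ob_windows, detected from the hardware) instead of DT properties, the allocation above stays a plain first-fit bitmap search. A standalone sketch of that search, assuming a single-word bitmap (the kernel's find_first_zero_bit() handles arbitrary lengths):

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for find_first_zero_bit() on one 64-bit word. */
static unsigned int first_zero_bit(uint64_t map, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++)
		if (!(map & (1ULL << i)))
			return i;
	return nbits; /* "not found", like the kernel helper */
}

int main(void)
{
	uint64_t ob_window_map = 0;
	unsigned int num_ob_windows = 4; /* stands in for pci->num_ob_windows */
	unsigned int i;

	for (i = 0; i < 6; i++) {
		unsigned int win = first_zero_bit(ob_window_map, num_ob_windows);

		if (win >= num_ob_windows) {
			printf("alloc %u: no free outbound window\n", i);
			continue;
		}
		ob_window_map |= 1ULL << win; /* set_bit() in the driver */
		printf("alloc %u: got window %u\n", i, win);
	}
	return 0;
}
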
@@ -263,8 +264,9 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
			      u32 *atu_index)
{
	u32 index;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	for (index = 0; index < ep->num_ob_windows; index++) {
	for (index = 0; index < pci->num_ob_windows; index++) {
		if (ep->outbound_addr[index] != addr)
			continue;
		*atu_index = index;

@@ -676,55 +678,57 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
	int ret;
	void *addr;
	u8 func_no;
	struct resource *res;
	struct pci_epc *epc;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	const struct pci_epc_features *epc_features;
	struct dw_pcie_ep_func *ep_func;

	INIT_LIST_HEAD(&ep->func_list);

	if (!pci->dbi_base || !pci->dbi_base2) {
		dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
		return -EINVAL;
	if (!pci->dbi_base) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
		pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
		if (IS_ERR(pci->dbi_base))
			return PTR_ERR(pci->dbi_base);
	}

	ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
	if (ret < 0) {
		dev_err(dev, "Unable to read *num-ib-windows* property\n");
		return ret;
	}
	if (ep->num_ib_windows > MAX_IATU_IN) {
		dev_err(dev, "Invalid *num-ib-windows*\n");
		return -EINVAL;
	if (!pci->dbi_base2) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
		if (!res)
			pci->dbi_base2 = pci->dbi_base + SZ_4K;
		else {
			pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res);
			if (IS_ERR(pci->dbi_base2))
				return PTR_ERR(pci->dbi_base2);
		}
	}

	ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
	if (ret < 0) {
		dev_err(dev, "Unable to read *num-ob-windows* property\n");
		return ret;
	}
	if (ep->num_ob_windows > MAX_IATU_OUT) {
		dev_err(dev, "Invalid *num-ob-windows*\n");
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;
	}

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ep->ib_window_map = devm_kcalloc(dev,
					 BITS_TO_LONGS(ep->num_ib_windows),
					 BITS_TO_LONGS(pci->num_ib_windows),
					 sizeof(long),
					 GFP_KERNEL);
	if (!ep->ib_window_map)
		return -ENOMEM;

	ep->ob_window_map = devm_kcalloc(dev,
					 BITS_TO_LONGS(ep->num_ob_windows),
					 BITS_TO_LONGS(pci->num_ob_windows),
					 sizeof(long),
					 GFP_KERNEL);
	if (!ep->ob_window_map)
		return -ENOMEM;

	addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t),
	addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
			    GFP_KERNEL);
	if (!addr)
		return -ENOMEM;
@@ -256,7 +256,7 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
	return 0;
}

void dw_pcie_free_msi(struct pcie_port *pp)
static void dw_pcie_free_msi(struct pcie_port *pp)
{
	if (pp->msi_irq) {
		irq_set_chained_handler(pp->msi_irq, NULL);

@@ -275,19 +275,18 @@ void dw_pcie_free_msi(struct pcie_port *pp)
	}
}

void dw_pcie_msi_init(struct pcie_port *pp)
static void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_init);

int dw_pcie_host_init(struct pcie_port *pp)
{
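
dw_pcie_msi_init() simply splits the DMA address of pp->msi_msg across the two MSI target-address registers. A hedged standalone illustration (the address below is hypothetical; the macros mirror the kernel helpers):

#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)(((uint64_t)(n)) >> 32))

int main(void)
{
	/* hypothetical DMA address of pp->msi_msg after mapping */
	uint64_t msi_target = 0x00000000fee00000ULL;

	printf("PCIE_MSI_ADDR_LO <- 0x%08x\n", lower_32_bits(msi_target));
	printf("PCIE_MSI_ADDR_HI <- 0x%08x\n", upper_32_bits(msi_target));
	return 0;
}

With the 32-bit DMA mask introduced later in this series, the target normally sits below 4GB, so the HI write is zero and endpoints with 32-bit-only MSI support can still reach it.
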
@@ -310,6 +309,13 @@ int dw_pcie_host_init(struct pcie_port *pp)
		dev_err(dev, "Missing *config* reg space\n");
	}

	if (!pci->dbi_base) {
		struct resource *dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");

		pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
		if (IS_ERR(pci->dbi_base))
			return PTR_ERR(pci->dbi_base);
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;
@@ -350,44 +356,54 @@ int dw_pcie_host_init(struct pcie_port *pp)
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (pci->link_gen < 1)
		pci->link_gen = of_pci_get_max_link_speed(np);

	if (pci_msi_enabled()) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);
		pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
				     of_property_read_bool(np, "msi-parent") ||
				     of_property_read_bool(np, "msi-map"));

		if (pp->num_vectors > MAX_MSI_IRQS ||
		    pp->num_vectors == 0) {
			dev_err(dev,
				"Invalid number of vectors\n");
			return -EINVAL;
		}
		if (!pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			return -EINVAL;
		}

		if (!pp->ops->msi_host_init) {
		if (pp->ops->msi_host_init) {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				return ret;
		} else if (pp->has_msi_ctrl) {
			if (!pp->msi_irq) {
				pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
				if (pp->msi_irq < 0) {
					pp->msi_irq = platform_get_irq(pdev, 0);
					if (pp->msi_irq < 0)
						return pp->msi_irq;
				}
			}

			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				return ret;

			if (pp->msi_irq)
			if (pp->msi_irq > 0)
				irq_set_chained_handler_and_data(pp->msi_irq,
								 dw_chained_msi_isr,
								 pp);

			ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32));
			if (ret)
				dev_warn(pci->dev,
					 "Failed to set DMA mask to 32-bit. "
					 "Devices with only 32-bit MSI support"
					 " may not work properly\n");

			pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
							    sizeof(pp->msi_msg),
							    DMA_FROM_DEVICE,

@@ -397,10 +413,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
				pp->msi_data = 0;
				goto err_free_msi;
			}
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				return ret;
		}
	}
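
The new has_msi_ctrl flag condenses three conditions into one decision: the DWC-internal MSI controller is used only when the driver supplies no msi_host_init() and the DT routes MSIs to no external controller. A small truth-table sketch, mirroring the expression above (illustrative only):

#include <stdbool.h>
#include <stdio.h>

static bool has_msi_ctrl(bool driver_msi_host_init, bool dt_msi_parent,
			 bool dt_msi_map)
{
	return !(driver_msi_host_init || dt_msi_parent || dt_msi_map);
}

int main(void)
{
	printf("plain DT, no driver hook: %d\n",
	       has_msi_ctrl(false, false, false));	/* 1: use DWC MSI ctrl */
	printf("msi-map present (e.g. to an ITS): %d\n",
	       has_msi_ctrl(false, false, true));	/* 0: external ctrl */
	return 0;
}
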
@@ -414,6 +426,18 @@ int dw_pcie_host_init(struct pcie_port *pp)
		goto err_free_msi;
	}

	dw_pcie_setup_rc(pp);
	dw_pcie_msi_init(pp);

	if (!dw_pcie_link_up(pci) && pci->ops->start_link) {
		ret = pci->ops->start_link(pci);
		if (ret)
			goto err_free_msi;
	}

	/* Ignore errors, the link may come up later */
	dw_pcie_wait_for_link(pci);

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);

@@ -421,7 +445,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
	return 0;

err_free_msi:
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);
	return ret;
}

@@ -431,7 +455,7 @@ void dw_pcie_host_deinit(struct pcie_port *pp)
{
	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

@@ -464,9 +488,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
		type = PCIE_ATU_TYPE_CFG1;

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, pp->cfg0_base,
				  busdev, pp->cfg0_size);
	dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size);

	return pp->va_cfg0_base + where;
}

@@ -480,9 +502,8 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,

	ret = pci_generic_config_read(bus, devfn, where, size, val);

	if (!ret && pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
	if (!ret && pci->io_cfg_atu_shared)
		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;

@@ -497,9 +518,8 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,

	ret = pci_generic_config_write(bus, devfn, where, size, val);

	if (!ret && pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
	if (!ret && pci->io_cfg_atu_shared)
		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
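
The io_cfg_atu_shared case exists because a controller with too few outbound windows must let config accesses borrow the window that otherwise maps IO space; after every config read or write, the IO mapping is restored. A rough standalone sketch of that toggling, under the assumption of a single shared window 0:

#include <stdio.h>

/* Illustrative only: window 0 alternates between CFG and IO mappings. */
enum atu_type { ATU_TYPE_MEM, ATU_TYPE_IO, ATU_TYPE_CFG0 };

static enum atu_type window0;

static void prog_window0(enum atu_type type)
{
	window0 = type;
}

static int config_read(int io_cfg_atu_shared)
{
	prog_window0(ATU_TYPE_CFG0);	/* map_bus() borrows window 0 */
	/* ... generic config read through the CFG mapping ... */
	if (io_cfg_atu_shared)
		prog_window0(ATU_TYPE_IO); /* hand window 0 back to IO */
	return 0;
}

int main(void)
{
	config_read(1);
	printf("window 0 now maps %s\n",
	       window0 == ATU_TYPE_IO ? "IO" : "CFG");
	return 0;
}
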
@@ -531,6 +551,7 @@ static struct pci_ops dw_pcie_ops = {

void dw_pcie_setup_rc(struct pcie_port *pp)
{
	int i;
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

@@ -542,7 +563,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)

	dw_pcie_setup(pci);

	if (pci_msi_enabled() && !pp->ops->msi_host_init) {
	if (pp->has_msi_ctrl) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */

@@ -580,27 +601,45 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/* Ensure all outbound windows are disabled so there are no multiple matches */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, i, DW_PCIE_REGION_OUTBOUND);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		struct resource_entry *tmp, *entry = NULL;
		int atu_idx = 0;
		struct resource_entry *entry;

		/* Get last memory resource entry */
		resource_list_for_each_entry(tmp, &pp->bridge->windows)
			if (resource_type(tmp->res) == IORESOURCE_MEM)
				entry = tmp;
		resource_list_for_each_entry(entry, &pp->bridge->windows) {
			if (resource_type(entry->res) != IORESOURCE_MEM)
				continue;

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, entry->res->start,
					  entry->res->start - entry->offset,
					  resource_size(entry->res));
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
			if (pci->num_ob_windows <= ++atu_idx)
				break;

			dw_pcie_prog_outbound_atu(pci, atu_idx,
						  PCIE_ATU_TYPE_MEM, entry->res->start,
						  entry->res->start - entry->offset,
						  resource_size(entry->res));
		}

		if (pp->io_size) {
			if (pci->num_ob_windows > ++atu_idx)
				dw_pcie_prog_outbound_atu(pci, atu_idx,
							  PCIE_ATU_TYPE_IO, pp->io_base,
							  pp->io_bus_addr, pp->io_size);
			else
				pci->io_cfg_atu_shared = true;
		}

		if (pci->num_ob_windows <= atu_idx)
			dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
				 pci->num_ob_windows);
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
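
The window accounting above can be followed with a standalone sketch: window 0 stays reserved for config cycles, each bridge MEM range takes the next index, and IO either gets its own window or falls back to sharing window 0 (the counts below are made up):

#include <stdio.h>

int main(void)
{
	int num_ob_windows = 2;	/* as detected from the hardware */
	int num_mem_ranges = 2;	/* hypothetical bridge MEM windows */
	int have_io = 1;
	int atu_idx = 0, i;	/* index 0 is reserved for CFG accesses */

	for (i = 0; i < num_mem_ranges; i++) {
		if (num_ob_windows <= ++atu_idx)
			break;
		printf("MEM range %d -> ATU window %d\n", i, atu_idx);
	}

	if (have_io) {
		if (num_ob_windows > ++atu_idx)
			printf("IO range -> ATU window %d\n", atu_idx);
		else
			printf("IO shares window 0 with CFG\n");
	}

	if (num_ob_windows <= atu_idx)
		printf("resources exceed %d ATU windows\n", num_ob_windows);
	return 0;
}
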
@@ -33,25 +33,7 @@ struct dw_plat_pcie_of_data {

static const struct of_device_id dw_plat_pcie_of_match[];

static int dw_plat_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup_rc(pp);
	dw_pcie_wait_for_link(pci);
	dw_pcie_msi_init(pp);

	return 0;
}

static void dw_plat_set_num_vectors(struct pcie_port *pp)
{
	pp->num_vectors = MAX_MSI_IRQS;
}

static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
	.host_init = dw_plat_pcie_host_init,
	.set_num_vectors = dw_plat_set_num_vectors,
};

static int dw_plat_pcie_establish_link(struct dw_pcie *pci)

@@ -122,12 +104,7 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
	if (pp->irq < 0)
		return pp->irq;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq(pdev, 0);
		if (pp->msi_irq < 0)
			return pp->msi_irq;
	}

	pp->num_vectors = MAX_MSI_IRQS;
	pp->ops = &dw_plat_pcie_host_ops;

	ret = dw_pcie_host_init(pp);

@@ -139,43 +116,11 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
	return 0;
}

static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
			       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dw_plat_pcie->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2");
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize endpoint\n");
		return ret;
	}
	return 0;
}

static int dw_plat_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_plat_pcie *dw_plat_pcie;
	struct dw_pcie *pci;
	struct resource *res;  /* Resource from DT */
	int ret;
	const struct of_device_id *match;
	const struct dw_plat_pcie_of_data *data;

@@ -202,14 +147,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
	dw_plat_pcie->pci = pci;
	dw_plat_pcie->mode = mode;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	if (!res)
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	platform_set_drvdata(pdev, dw_plat_pcie);

	switch (dw_plat_pcie->mode) {

@@ -225,9 +162,8 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
		if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
			return -ENODEV;

		ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
		if (ret < 0)
			return ret;
		pci->ep.ops = &pcie_ep_ops;

		return dw_pcie_ep_init(&pci->ep);
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
@@ -228,7 +228,7 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
					     int index, int type,
					     u64 cpu_addr, u64 pci_addr,
					     u32 size)
					     u64 size)
{
	u32 retries, val;
	u64 limit_addr = cpu_addr + size - 1;

@@ -245,8 +245,10 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
				 lower_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type | PCIE_ATU_FUNC_NUM(func_no));
	val = type | PCIE_ATU_FUNC_NUM(func_no);
	val = upper_32_bits(size - 1) ?
		val | PCIE_ATU_INCREASE_REGION_SIZE : val;
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val);
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);
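
PCIE_ATU_INCREASE_REGION_SIZE is needed exactly when the region's limit address no longer fits in 32 bits, i.e. when upper_32_bits(size - 1) is non-zero; a region of exactly 4 GiB still fits, since its limit is 0xFFFFFFFF. A quick standalone check of that boundary:

#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)(((uint64_t)(n)) >> 32))

int main(void)
{
	uint64_t sizes[] = { 1ULL << 20, 1ULL << 32, 6ULL << 30 }; /* 1M, 4G, 6G */
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("size 0x%llx: INCREASE_REGION_SIZE %s\n",
		       (unsigned long long)sizes[i],
		       upper_32_bits(sizes[i] - 1) ? "set" : "clear");
	return 0;
}
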
@@ -267,7 +269,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,

static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
					int index, int type, u64 cpu_addr,
					u64 pci_addr, u32 size)
					u64 pci_addr, u64 size)
{
	u32 retries, val;

@@ -311,7 +313,7 @@ static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
}

void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			       u64 cpu_addr, u64 pci_addr, u32 size)
			       u64 cpu_addr, u64 pci_addr, u64 size)
{
	__dw_pcie_prog_outbound_atu(pci, 0, index, type,
				    cpu_addr, pci_addr, size);

@@ -544,6 +546,70 @@ static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
	return 0;
}

static void dw_pcie_iatu_detect_regions_unroll(struct dw_pcie *pci)
{
	int max_region, i, ob = 0, ib = 0;
	u32 val;

	max_region = min((int)pci->atu_size / 512, 256);

	for (i = 0; i < max_region; i++) {
		dw_pcie_writel_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
					 0x11110000);

		val = dw_pcie_readl_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
		if (val == 0x11110000)
			ob++;
		else
			break;
	}

	for (i = 0; i < max_region; i++) {
		dw_pcie_writel_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
					 0x11110000);

		val = dw_pcie_readl_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
		if (val == 0x11110000)
			ib++;
		else
			break;
	}

	pci->num_ib_windows = ib;
	pci->num_ob_windows = ob;
}

static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
{
	int max_region, i, ob = 0, ib = 0;
	u32 val;

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
	max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;

	for (i = 0; i < max_region; i++) {
		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | i);
		dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
		if (val == 0x11110000)
			ob++;
		else
			break;
	}

	for (i = 0; i < max_region; i++) {
		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | i);
		dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
		if (val == 0x11110000)
			ib++;
		else
			break;
	}

	pci->num_ib_windows = ib;
	pci->num_ob_windows = ob;
}
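
The detection relies on a write/readback probe: implemented iATU regions latch the 0x11110000 pattern in their LOWER_TARGET register, unimplemented ones do not, and counting stops at the first mismatch. In unroll mode each region index occupies 512 bytes of register space, which is why the SZ_4K fallback below is described as covering 8 windows in each direction. A toy model of the probe loop, against a simulated register file:

#include <stdint.h>
#include <stdio.h>

#define PROBE_PATTERN 0x11110000u

int main(void)
{
	/* pretend register file: only the first 4 windows are implemented */
	uint32_t lower_target[8] = { 0 };
	int implemented = 4, i, ob = 0;

	for (i = 0; i < 8; i++) {
		if (i < implemented)
			lower_target[i] = PROBE_PATTERN; /* the write sticks */
		/* unimplemented registers read back as something else (0 here) */
		if (lower_target[i] == PROBE_PATTERN)
			ob++;
		else
			break;
	}
	printf("detected %d outbound windows\n", ob);
	return 0;
}
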
void dw_pcie_setup(struct dw_pcie *pci)
{
	u32 val;

@@ -554,15 +620,30 @@ void dw_pcie_setup(struct dw_pcie *pci)
	if (pci->version >= 0x480A || (!pci->version &&
				       dw_pcie_iatu_unroll_enabled(pci))) {
		pci->iatu_unroll_enabled = true;
		if (!pci->atu_base)
			pci->atu_base =
				devm_platform_ioremap_resource_byname(pdev, "atu");
		if (IS_ERR(pci->atu_base))
			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
	}
	dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
		if (!pci->atu_base) {
			struct resource *res =
				platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");

			if (res)
				pci->atu_size = resource_size(res);
			pci->atu_base = devm_ioremap_resource(dev, res);
			if (IS_ERR(pci->atu_base))
				pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
		}

		if (!pci->atu_size)
			/* Pick a minimal default, enough for 8 in and 8 out windows */
			pci->atu_size = SZ_4K;

		dw_pcie_iatu_detect_regions_unroll(pci);
	} else
		dw_pcie_iatu_detect_regions(pci);

	dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
		"enabled" : "disabled");

	dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
		 pci->num_ob_windows, pci->num_ib_windows);

	if (pci->link_gen > 0)
		dw_pcie_link_set_max_speed(pci, pci->link_gen);
@@ -80,10 +80,8 @@
#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		BIT(31)
#define PCIE_ATU_REGION_OUTBOUND	0
#define PCIE_ATU_REGION_INDEX2		0x2
#define PCIE_ATU_REGION_INDEX1		0x1
#define PCIE_ATU_REGION_INDEX0		0x0
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_INCREASE_REGION_SIZE	BIT(13)
#define PCIE_ATU_TYPE_MEM		0x0
#define PCIE_ATU_TYPE_IO		0x2
#define PCIE_ATU_TYPE_CFG0		0x4

@@ -174,11 +172,11 @@ enum dw_pcie_device_mode {

struct dw_pcie_host_ops {
	int (*host_init)(struct pcie_port *pp);
	void (*set_num_vectors)(struct pcie_port *pp);
	int (*msi_host_init)(struct pcie_port *pp);
};

struct pcie_port {
	bool			has_msi_ctrl:1;
	u64			cfg0_base;
	void __iomem		*va_cfg0_base;
	u32			cfg0_size;

@@ -239,8 +237,6 @@ struct dw_pcie_ep {
	phys_addr_t		*outbound_addr;
	unsigned long		*ib_window_map;
	unsigned long		*ob_window_map;
	u32			num_ib_windows;
	u32			num_ob_windows;
	void __iomem		*msi_mem;
	phys_addr_t		msi_mem_phys;
	struct pci_epf_bar	*epf_bar[PCI_STD_NUM_BARS];

@@ -265,8 +261,9 @@ struct dw_pcie {
	void __iomem		*dbi_base2;
	/* Used when iatu_unroll_enabled is true */
	void __iomem		*atu_base;
	u32			num_viewport;
	u8			iatu_unroll_enabled;
	size_t			atu_size;
	u32			num_ib_windows;
	u32			num_ob_windows;
	struct pcie_port	pp;
	struct dw_pcie_ep	ep;
	const struct dw_pcie_ops *ops;

@@ -274,6 +271,8 @@ struct dw_pcie {
	int			num_lanes;
	int			link_gen;
	u8			n_fts[2];
	bool			iatu_unroll_enabled:1;
	bool			io_cfg_atu_shared:1;
};

#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)

@@ -295,7 +294,7 @@ void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
			       int type, u64 cpu_addr, u64 pci_addr,
			       u32 size);
			       u64 size);
void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
				  int type, u64 cpu_addr, u64 pci_addr,
				  u32 size);

@@ -365,8 +364,6 @@ static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)

#ifdef CONFIG_PCIE_DW_HOST
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
void dw_pcie_msi_init(struct pcie_port *pp);
void dw_pcie_free_msi(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);
void dw_pcie_host_deinit(struct pcie_port *pp);

@@ -379,14 +376,6 @@ static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
	return IRQ_NONE;
}

static inline void dw_pcie_msi_init(struct pcie_port *pp)
{
}

static inline void dw_pcie_free_msi(struct pcie_port *pp)
{
}

static inline void dw_pcie_setup_rc(struct pcie_port *pp)
{
}
@@ -169,40 +169,32 @@ static int histb_pcie_link_up(struct dw_pcie *pci)
	return 0;
}

static int histb_pcie_establish_link(struct pcie_port *pp)
static int histb_pcie_start_link(struct dw_pcie *pci)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct histb_pcie *hipcie = to_histb_pcie(pci);
	u32 regval;

	if (dw_pcie_link_up(pci)) {
		dev_info(pci->dev, "Link already up\n");
		return 0;
	}

	/* PCIe RC work mode */
	regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
	regval &= ~PCIE_DEVICE_TYPE_MASK;
	regval |= PCIE_WM_RC;
	histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);

	/* setup root complex */
	dw_pcie_setup_rc(pp);

	/* assert LTSSM enable */
	regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
	regval |= PCIE_APP_LTSSM_ENABLE;
	histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);

	return dw_pcie_wait_for_link(pci);
	return 0;
}

static int histb_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct histb_pcie *hipcie = to_histb_pcie(pci);
	u32 regval;

	pp->bridge->ops = &histb_pci_ops;

	histb_pcie_establish_link(pp);
	dw_pcie_msi_init(pp);
	/* PCIe RC work mode */
	regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
	regval &= ~PCIE_DEVICE_TYPE_MASK;
	regval |= PCIE_WM_RC;
	histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);

	return 0;
}

@@ -300,6 +292,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
	.read_dbi = histb_pcie_read_dbi,
	.write_dbi = histb_pcie_write_dbi,
	.link_up = histb_pcie_link_up,
	.start_link = histb_pcie_start_link,
};

static int histb_pcie_probe(struct platform_device *pdev)

@@ -400,12 +393,6 @@ static int histb_pcie_probe(struct platform_device *pdev)
		return PTR_ERR(hipcie->bus_reset);
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0)
			return pp->msi_irq;
	}

	hipcie->phy = devm_phy_get(dev, "phy");
	if (IS_ERR(hipcie->phy)) {
		dev_info(dev, "no pcie-phy found\n");
@@ -58,8 +58,6 @@

struct intel_pcie_soc {
	unsigned int	pcie_ver;
	unsigned int	pcie_atu_offset;
	u32		num_viewport;
};

struct intel_pcie_port {

@@ -153,15 +151,6 @@ static void intel_pcie_init_n_fts(struct dw_pcie *pci)
		pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT;
}

static void intel_pcie_rc_setup(struct intel_pcie_port *lpp)
{
	intel_pcie_ltssm_disable(lpp);
	intel_pcie_link_setup(lpp);
	intel_pcie_init_n_fts(&lpp->pci);
	dw_pcie_setup_rc(&lpp->pci.pp);
	dw_pcie_upconfig_setup(&lpp->pci);
}

static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
{
	struct device *dev = lpp->pci.dev;

@@ -213,14 +202,6 @@ static void intel_pcie_device_rst_deassert(struct intel_pcie_port *lpp)
	gpiod_set_value_cansleep(lpp->reset_gpio, 0);
}

static int intel_pcie_app_logic_setup(struct intel_pcie_port *lpp)
{
	intel_pcie_device_rst_deassert(lpp);
	intel_pcie_ltssm_enable(lpp);

	return dw_pcie_wait_for_link(&lpp->pci);
}

static void intel_pcie_core_irq_disable(struct intel_pcie_port *lpp)
{
	pcie_app_wr(lpp, PCIE_APP_IRNEN, 0);

@@ -234,10 +215,6 @@ static int intel_pcie_get_resources(struct platform_device *pdev)
	struct device *dev = pci->dev;
	int ret;

	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi");
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	lpp->core_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(lpp->core_clk)) {
		ret = PTR_ERR(lpp->core_clk);

@@ -274,11 +251,6 @@ static int intel_pcie_get_resources(struct platform_device *pdev)
	return 0;
}

static void intel_pcie_deinit_phy(struct intel_pcie_port *lpp)
{
	phy_exit(lpp->phy);
}

static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)
{
	u32 value;

@@ -315,6 +287,7 @@ static void intel_pcie_turn_off(struct intel_pcie_port *lpp)
static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
{
	int ret;
	struct dw_pcie *pci = &lpp->pci;

	intel_pcie_core_rst_assert(lpp);
	intel_pcie_device_rst_assert(lpp);

@@ -331,8 +304,18 @@ static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
		goto clk_err;
	}

	intel_pcie_rc_setup(lpp);
	ret = intel_pcie_app_logic_setup(lpp);
	pci->atu_base = pci->dbi_base + 0xC0000;

	intel_pcie_ltssm_disable(lpp);
	intel_pcie_link_setup(lpp);
	intel_pcie_init_n_fts(pci);
	dw_pcie_setup_rc(&pci->pp);
	dw_pcie_upconfig_setup(pci);

	intel_pcie_device_rst_deassert(lpp);
	intel_pcie_ltssm_enable(lpp);

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		goto app_init_err;

@@ -346,7 +329,7 @@ app_init_err:
	clk_disable_unprepare(lpp->core_clk);
clk_err:
	intel_pcie_core_rst_assert(lpp);
	intel_pcie_deinit_phy(lpp);
	phy_exit(lpp->phy);

	return ret;
}

@@ -357,7 +340,7 @@ static void __intel_pcie_remove(struct intel_pcie_port *lpp)
	intel_pcie_turn_off(lpp);
	clk_disable_unprepare(lpp->core_clk);
	intel_pcie_core_rst_assert(lpp);
	intel_pcie_deinit_phy(lpp);
	phy_exit(lpp->phy);
}

static int intel_pcie_remove(struct platform_device *pdev)

@@ -381,7 +364,7 @@ static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
	if (ret)
		return ret;

	intel_pcie_deinit_phy(lpp);
	phy_exit(lpp->phy);
	clk_disable_unprepare(lpp->core_clk);
	return ret;
}

@@ -401,14 +384,6 @@ static int intel_pcie_rc_init(struct pcie_port *pp)
	return intel_pcie_host_setup(lpp);
}

/*
 * Dummy function so that DW core doesn't configure MSI
 */
static int intel_pcie_msi_init(struct pcie_port *pp)
{
	return 0;
}

static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
{
	return cpu_addr + BUS_IATU_OFFSET;

@@ -420,13 +395,10 @@ static const struct dw_pcie_ops intel_pcie_ops = {

static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
	.host_init = intel_pcie_rc_init,
	.msi_host_init = intel_pcie_msi_init,
};

static const struct intel_pcie_soc pcie_data = {
	.pcie_ver = 0x520A,
	.pcie_atu_offset = 0xC0000,
	.num_viewport = 3,
};

static int intel_pcie_probe(struct platform_device *pdev)

@@ -461,7 +433,6 @@ static int intel_pcie_probe(struct platform_device *pdev)

	pci->ops = &intel_pcie_ops;
	pci->version = data->pcie_ver;
	pci->atu_base = pci->dbi_base + data->pcie_atu_offset;
	pp->ops = &intel_pcie_dw_ops;

	ret = dw_pcie_host_init(pp);

@@ -470,12 +441,6 @@ static int intel_pcie_probe(struct platform_device *pdev)
		return ret;
	}

	/*
	 * Intel PCIe doesn't configure IO region, so set viewport
	 * to not perform IO region access.
	 */
	pci->num_viewport = data->num_viewport;

	return 0;
}
@@ -157,11 +157,6 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
	if (IS_ERR(kirin_pcie->phy_base))
		return PTR_ERR(kirin_pcie->phy_base);

	kirin_pcie->pci->dbi_base =
		devm_platform_ioremap_resource_byname(pdev, "dbi");
	if (IS_ERR(kirin_pcie->pci->dbi_base))
		return PTR_ERR(kirin_pcie->pci->dbi_base);

	kirin_pcie->crgctrl =
		syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
	if (IS_ERR(kirin_pcie->crgctrl))

@@ -395,32 +390,14 @@ static int kirin_pcie_link_up(struct dw_pcie *pci)
	return 0;
}

static int kirin_pcie_establish_link(struct pcie_port *pp)
static int kirin_pcie_start_link(struct dw_pcie *pci)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
	struct device *dev = kirin_pcie->pci->dev;
	int count = 0;

	if (kirin_pcie_link_up(pci))
		return 0;

	dw_pcie_setup_rc(pp);

	/* assert LTSSM enable */
	kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
			      PCIE_APP_LTSSM_ENABLE);

	/* check if the link is up or not */
	while (!kirin_pcie_link_up(pci)) {
		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
		count++;
		if (count == 1000) {
			dev_err(dev, "Link Fail\n");
			return -EINVAL;
		}
	}

	return 0;
}

@@ -428,9 +405,6 @@ static int kirin_pcie_host_init(struct pcie_port *pp)
{
	pp->bridge->ops = &kirin_pci_ops;

	kirin_pcie_establish_link(pp);
	dw_pcie_msi_init(pp);

	return 0;
}

@@ -438,42 +412,13 @@ static const struct dw_pcie_ops kirin_dw_pcie_ops = {
	.read_dbi = kirin_pcie_read_dbi,
	.write_dbi = kirin_pcie_write_dbi,
	.link_up = kirin_pcie_link_up,
	.start_link = kirin_pcie_start_link,
};

static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
	.host_init = kirin_pcie_host_init,
};

static int kirin_pcie_add_msi(struct dw_pcie *pci,
			      struct platform_device *pdev)
{
	int irq;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		pci->pp.msi_irq = irq;
	}

	return 0;
}

static int kirin_add_pcie_port(struct dw_pcie *pci,
			       struct platform_device *pdev)
{
	int ret;

	ret = kirin_pcie_add_msi(pci, pdev);
	if (ret)
		return ret;

	pci->pp.ops = &kirin_pcie_host_ops;

	return dw_pcie_host_init(&pci->pp);
}

static int kirin_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

@@ -496,6 +441,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)

	pci->dev = dev;
	pci->ops = &kirin_dw_pcie_ops;
	pci->pp.ops = &kirin_pcie_host_ops;
	kirin_pcie->pci = pci;

	ret = kirin_pcie_get_clk(kirin_pcie, pdev);

@@ -521,7 +467,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)

	platform_set_drvdata(pdev, kirin_pcie);

	return kirin_add_pcie_port(pci, pdev);
	return dw_pcie_host_init(&pci->pp);
}

static const struct of_device_id kirin_pcie_match[] = {
@@ -9,6 +9,7 @@
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

@@ -57,6 +58,7 @@
#define PCIE20_PARF_SID_OFFSET		0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG	0x24C
#define PCIE20_PARF_DEVICE_TYPE		0x1000
#define PCIE20_PARF_BDF_TO_SID_TABLE_N	0x2000

#define PCIE20_ELBI_SYS_CTRL		0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE	BIT(0)

@@ -97,6 +99,9 @@

#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5

#define QCOM_PCIE_CRC8_POLYNOMIAL	(BIT(2) | BIT(1) | BIT(0))

struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;

@@ -179,6 +184,7 @@ struct qcom_pcie_ops {
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};

struct qcom_pcie {

@@ -207,18 +213,15 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
	return 0;
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)

@@ -1261,6 +1264,77 @@ static int qcom_pcie_link_up(struct dw_pcie *pci)
	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node,
		"iommu-map", (u32 *)map, size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* Registers need to be zeroed out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		u16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
			0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If NEXT field is NULL then update it with next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}
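
The BDF-to-SID table above is a CRC8-hashed open-addressing table whose low byte chains colliding entries through a NEXT field. A standalone sketch of the same hashing and chaining follows; the BDF values are hypothetical, and crc8_msb() open-codes what the kernel's crc8() computes with a crc8_populate_msb() table for the polynomial above:

#include <stdint.h>
#include <stdio.h>

/* CRC-8, MSB-first, polynomial x^8 + x^2 + x + 1 (0x07). */
static uint8_t crc8_msb(const uint8_t *data, int len)
{
	uint8_t crc = 0;
	int i, b;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (b = 0; b < 8; b++)
			crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x07)
					   : (uint8_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	/* table entry layout: BDF [31:16] | SID [15:8] | NEXT [7:0] */
	uint32_t table[256] = { 0 };
	uint16_t bdfs[] = { 0x0100, 0x0101 }; /* hypothetical functions */
	unsigned int i;

	for (i = 0; i < 2; i++) {
		uint8_t be[2] = { bdfs[i] >> 8, bdfs[i] & 0xff }; /* cpu_to_be16 */
		uint8_t hash = crc8_msb(be, 2);

		/* collision: chase/extend the NEXT chain, as the driver does */
		while (table[hash]) {
			uint8_t cur = hash++;

			if (!(table[cur] & 0xff))
				table[cur] |= hash;
		}
		table[hash] = (uint32_t)bdfs[i] << 16 | (uint32_t)i << 8;
		printf("BDF %#06x -> slot %u\n", bdfs[i], (unsigned int)hash);
	}
	return 0;
}
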

static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

@@ -1283,16 +1357,16 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
		goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);
	dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;
	if (pcie->ops->config_sid) {
		ret = pcie->ops->config_sid(pcie);
		if (ret)
			goto err;
	}

	return 0;

err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)

@@ -1361,14 +1435,25 @@ static const struct qcom_pcie_ops ops_2_7_0 = {
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.post_init = qcom_pcie_post_init_2_7_0,
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
	.config_sid = qcom_pcie_config_sid_sm8250,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};

static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;

@@ -1407,13 +1492,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base)) {
		ret = PTR_ERR(pci->dbi_base);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);

@@ -1432,14 +1510,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)

	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0) {
			ret = pp->msi_irq;
			goto err_pm_runtime_put;
		}
	}

	ret = phy_init(pcie->phy);
	if (ret) {
		pm_runtime_disable(&pdev->dev);

@@ -1474,6 +1544,7 @@ static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &ops_1_9_0 },
	{ }
};
@@ -66,32 +66,10 @@ struct pcie_app_reg {

#define to_spear13xx_pcie(x)	dev_get_drvdata((x)->dev)

static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
static int spear13xx_pcie_start_link(struct dw_pcie *pci)
{
	struct dw_pcie *pci = spear13xx_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
	u32 val;
	u32 exp_cap_off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	if (dw_pcie_link_up(pci)) {
		dev_err(pci->dev, "link already up\n");
		return 0;
	}

	dw_pcie_setup_rc(pp);

	/*
	 * this controller supports only a 128-byte read size; however, its
	 * default value in the capability register is 512 bytes. So force
	 * it to 128 here.
	 */
	val = dw_pcie_readw_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL);
	val &= ~PCI_EXP_DEVCTL_READRQ;
	dw_pcie_writew_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL, val);

	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, 0x104A);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0xCD80);

	/* enable ltssm */
	writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)

@@ -99,7 +77,7 @@ static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
	       | ((u32)1 << REG_TRANSLATION_ENABLE),
	       &app_reg->app_ctrl_0);

	return dw_pcie_wait_for_link(pci);
	return 0;
}

static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)

@@ -124,16 +102,12 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)

static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
{
	struct dw_pcie *pci = spear13xx_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;

	/* Enable MSI interrupt */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		dw_pcie_msi_init(pp);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		writel(readl(&app_reg->int_mask) |
		       MSI_CTRL_INT, &app_reg->int_mask);
	}
}

static int spear13xx_pcie_link_up(struct dw_pcie *pci)

@@ -151,8 +125,23 @@ static int spear13xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
	u32 exp_cap_off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	spear13xx_pcie->app_base = pci->dbi_base + 0x2000;

	/*
	 * this controller supports only a 128-byte read size; however, its
	 * default value in the capability register is 512 bytes. So force
	 * it to 128 here.
	 */
	val = dw_pcie_readw_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL);
	val &= ~PCI_EXP_DEVCTL_READRQ;
	dw_pcie_writew_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL, val);

	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, 0x104A);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0xCD80);

	spear13xx_pcie_establish_link(spear13xx_pcie);
	spear13xx_pcie_enable_interrupts(spear13xx_pcie);

	return 0;
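
Forcing the 128-byte read request size amounts to clearing the MRRS field (bits 14:12) of the Device Control register, since encoding 0 means 128 bytes. A standalone illustration; the starting DEVCTL value is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_DEVCTL_READRQ 0x7000 /* max read request size field, bits 14:12 */

int main(void)
{
	uint16_t devctl = 0x2810;	/* hypothetical: field value 2 = 512 bytes */

	devctl &= ~PCI_EXP_DEVCTL_READRQ; /* field value 0 = 128 bytes */
	printf("DEVCTL now 0x%04x, MRRS = %d bytes\n",
	       devctl, 128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12));
	return 0;
}
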
@ -183,6 +172,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
|
|||
}
|
||||
|
||||
pp->ops = &spear13xx_pcie_host_ops;
|
||||
pp->msi_irq = -ENODEV;
|
||||
|
||||
ret = dw_pcie_host_init(pp);
|
||||
if (ret) {
|
||||
|
@ -195,6 +185,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
|
|||
|
||||
static const struct dw_pcie_ops dw_pcie_ops = {
|
||||
.link_up = spear13xx_pcie_link_up,
|
||||
.start_link = spear13xx_pcie_start_link,
|
||||
};
|
||||
|
||||
static int spear13xx_pcie_probe(struct platform_device *pdev)
|
||||
|
@ -203,7 +194,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
|
|||
struct dw_pcie *pci;
|
||||
struct spear13xx_pcie *spear13xx_pcie;
|
||||
struct device_node *np = dev->of_node;
|
||||
struct resource *dbi_base;
|
||||
int ret;
|
||||
|
||||
spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
|
||||
|
@ -242,14 +232,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
|
||||
pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
|
||||
if (IS_ERR(pci->dbi_base)) {
|
||||
ret = PTR_ERR(pci->dbi_base);
|
||||
goto fail_clk;
|
||||
}
|
||||
spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
|
||||
|
||||
if (of_property_read_bool(np, "st,pcie-is-gen1"))
|
||||
pci->link_gen = 1;
|
||||
|
||||
|
|
|
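Note: the spear13xx hunks above track the DWC rework that moved link handling into common code: a driver's .start_link() now merely kicks off training, and the core performs the wait. A minimal sketch of the resulting contract (illustrative only; example_start_ltssm() is a hypothetical platform helper, not part of the patch):

static int example_pcie_start_link(struct dw_pcie *pci)
{
    /* Kick off link training only; dw_pcie_host_init() in common code
     * performs the dw_pcie_wait_for_link() that drivers used to do. */
    example_start_ltssm(pci);
    return 0;
}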
@@ -765,8 +765,6 @@ static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
    struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
    u32 val;

    dw_pcie_msi_init(pp);

    /* Enable MSI interrupt generation */
    val = appl_readl(pcie, APPL_INTR_EN_L0_0);
    val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;

@@ -861,6 +859,10 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
    struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
    u32 val;

    if (!pcie->pcie_cap_base)
        pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
                                                      PCI_CAP_ID_EXP);

    val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
    val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
    dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);

@@ -889,6 +891,12 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)

    init_host_aspm(pcie);

    /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
    if (!pcie->supports_clkreq) {
        disable_aspm_l11(pcie);
        disable_aspm_l12(pcie);
    }

    val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
    val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
    dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

@@ -990,11 +998,6 @@ static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
    return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
{
    pp->num_vectors = MAX_MSI_IRQS;
}

static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
    struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

@@ -1019,7 +1022,6 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = {

static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
    .host_init = tegra_pcie_dw_host_init,
    .set_num_vectors = tegra_pcie_set_msi_vec_num,
};

static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
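Note: with the .set_num_vectors() host op dropped, the MSI vector count is plain data. A driver that wants more than the default simply writes pp->num_vectors before registering the host, as the tegra probe later in this series does; this sketch (example_pcie_add_host() is illustrative) only restates that pattern:

static int example_pcie_add_host(struct dw_pcie *pci)
{
    struct pcie_port *pp = &pci->pp;

    pp->num_vectors = MAX_MSI_IRQS; /* replaces the old host op */
    return dw_pcie_host_init(pp);
}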
@@ -1061,9 +1063,16 @@ phy_exit:

static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
    struct platform_device *pdev = to_platform_device(pcie->dev);
    struct device_node *np = pcie->dev->of_node;
    int ret;

    pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
    if (!pcie->dbi_res) {
        dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
        return -ENODEV;
    }

    ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
    if (ret < 0) {
        dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);

@@ -1390,15 +1399,6 @@ static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,

    reset_control_deassert(pcie->core_rst);

    pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
                                                  PCI_CAP_ID_EXP);

    /* Disable ASPM-L1SS advertisement as there is no CLKREQ routing */
    if (!pcie->supports_clkreq) {
        disable_aspm_l11(pcie);
        disable_aspm_l12(pcie);
    }

    return ret;

fail_phy:

@@ -1415,43 +1415,32 @@ fail_slot_reg_en:
    return ret;
}

static int __deinit_controller(struct tegra_pcie_dw *pcie)
static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
    int ret;

    ret = reset_control_assert(pcie->core_rst);
    if (ret) {
        dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n",
                ret);
        return ret;
    }
    if (ret)
        dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);

    tegra_pcie_disable_phy(pcie);

    ret = reset_control_assert(pcie->core_apb_rst);
    if (ret) {
    if (ret)
        dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
        return ret;
    }

    clk_disable_unprepare(pcie->core_clk);

    ret = regulator_disable(pcie->pex_ctl_supply);
    if (ret) {
    if (ret)
        dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
        return ret;
    }

    tegra_pcie_disable_slot_regulators(pcie);

    ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
    if (ret) {
    if (ret)
        dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
                pcie->cid, ret);
        return ret;
    }

    return ret;
}
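Note: the conversion above is the "continue unconfig sequence even if parts fail" change: each teardown step logs its own failure and the function presses on, so one stuck reset line no longer leaks clocks and regulators. The shape of the pattern, reduced to two steps (example_unconfig() is illustrative, not from the patch):

static void example_unconfig(struct tegra_pcie_dw *pcie)
{
    int ret;

    ret = reset_control_assert(pcie->core_rst);
    if (ret)
        dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);

    /* Executed regardless of the failure above. */
    clk_disable_unprepare(pcie->core_clk);
}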
static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)

@@ -1475,7 +1464,8 @@ static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
    return 0;

fail_host_init:
    return __deinit_controller(pcie);
    tegra_pcie_unconfig_controller(pcie);
    return ret;
}

static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)

@@ -1516,6 +1506,14 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
    data &= ~APPL_PINMUX_PEX_RST;
    appl_writel(pcie, data, APPL_PINMUX);

    /*
     * Some cards do not go to detect state even after de-asserting
     * PERST#. So, de-assert LTSSM to bring link to detect state.
     */
    data = readl(pcie->appl_base + APPL_CTRL);
    data &= ~APPL_CTRL_LTSSM_EN;
    writel(data, pcie->appl_base + APPL_CTRL);

    err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
                                    data,
                                    ((data &

@@ -1523,14 +1521,8 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
                                    APPL_DEBUG_LTSSM_STATE_SHIFT) ==
                                    LTSSM_STATE_PRE_DETECT,
                                    1, LTSSM_TIMEOUT);
    if (err) {
    if (err)
        dev_info(pcie->dev, "Link didn't go to detect state\n");
    } else {
        /* Disable LTSSM after link is in detect state */
        data = appl_readl(pcie, APPL_CTRL);
        data &= ~APPL_CTRL_LTSSM_EN;
        appl_writel(pcie, data, APPL_CTRL);
    }
}
    /*
     * DBI registers may not be accessible after this as PLL-E would be

@@ -1544,30 +1536,20 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
    appl_writel(pcie, data, APPL_PINMUX);
}
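Note: the PME-turnoff hunks implement "disable LTSSM during L2 entry": the link is first brought back to the detect state and only then is the LTSSM-enable bit dropped. A condensed sketch of that wait-then-disable step (APPL_DEBUG_LTSSM_STATE_MASK is assumed to be the mask paired with the shift shown above):

    err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, data,
                                    ((data & APPL_DEBUG_LTSSM_STATE_MASK) >>
                                     APPL_DEBUG_LTSSM_STATE_SHIFT) ==
                                    LTSSM_STATE_PRE_DETECT,
                                    1, LTSSM_TIMEOUT);
    if (!err) {
        /* Link is in detect; now it is safe to disable the LTSSM. */
        data = appl_readl(pcie, APPL_CTRL);
        data &= ~APPL_CTRL_LTSSM_EN;
        appl_writel(pcie, data, APPL_CTRL);
    }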
static int tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
    tegra_pcie_downstream_dev_to_D0(pcie);
    dw_pcie_host_deinit(&pcie->pci.pp);
    tegra_pcie_dw_pme_turnoff(pcie);

    return __deinit_controller(pcie);
    tegra_pcie_unconfig_controller(pcie);
}

static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
    struct pcie_port *pp = &pcie->pci.pp;
    struct device *dev = pcie->dev;
    char *name;
    int ret;

    if (IS_ENABLED(CONFIG_PCI_MSI)) {
        pp->msi_irq = of_irq_get_byname(dev->of_node, "msi");
        if (!pp->msi_irq) {
            dev_err(dev, "Failed to get MSI interrupt\n");
            return -ENODEV;
        }
    }

    pm_runtime_enable(dev);

    ret = pm_runtime_get_sync(dev);

@@ -1583,7 +1565,11 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
        goto fail_pm_get_sync;
    }

    tegra_pcie_init_controller(pcie);
    ret = tegra_pcie_init_controller(pcie);
    if (ret < 0) {
        dev_err(dev, "Failed to initialize controller: %d\n", ret);
        goto fail_pm_get_sync;
    }

    pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
    if (!pcie->link_state) {

@@ -1907,19 +1893,12 @@ static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
    struct dw_pcie *pci = &pcie->pci;
    struct device *dev = pcie->dev;
    struct dw_pcie_ep *ep;
    struct resource *res;
    char *name;
    int ret;

    ep = &pci->ep;
    ep->ops = &pcie_ep_ops;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
    if (!res)
        return -EINVAL;

    ep->phys_base = res->start;
    ep->addr_size = resource_size(res);
    ep->page_size = SZ_64K;

    ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);

@@ -1982,7 +1961,6 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
    struct device *dev = &pdev->dev;
    struct resource *atu_dma_res;
    struct tegra_pcie_dw *pcie;
    struct resource *dbi_res;
    struct pcie_port *pp;
    struct dw_pcie *pci;
    struct phy **phys;

@@ -2001,8 +1979,10 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
    pci->ops = &tegra_dw_pcie_ops;
    pci->n_fts[0] = N_FTS_VAL;
    pci->n_fts[1] = FTS_VAL;
    pci->version = 0x490A;

    pp = &pci->pp;
    pp->num_vectors = MAX_MSI_IRQS;
    pcie->dev = &pdev->dev;
    pcie->mode = (enum dw_pcie_device_mode)data->mode;

@@ -2091,20 +2071,6 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)

    pcie->phys = phys;

    dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
    if (!dbi_res) {
        dev_err(dev, "Failed to find \"dbi\" region\n");
        return -ENODEV;
    }
    pcie->dbi_res = dbi_res;

    pci->dbi_base = devm_ioremap_resource(dev, dbi_res);
    if (IS_ERR(pci->dbi_base))
        return PTR_ERR(pci->dbi_base);

    /* Tegra HW locates DBI2 at a fixed offset from DBI */
    pci->dbi_base2 = pci->dbi_base + 0x1000;

    atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                               "atu_dma");
    if (!atu_dma_res) {

@@ -2113,6 +2079,7 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
    }
    pcie->atu_dma_res = atu_dma_res;

    pci->atu_size = resource_size(atu_dma_res);
    pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
    if (IS_ERR(pci->atu_base))
        return PTR_ERR(pci->atu_base);

@@ -2225,8 +2192,9 @@ static int tegra_pcie_dw_suspend_noirq(struct device *dev)
                          PORT_LOGIC_MSI_CTRL_INT_0_EN);
    tegra_pcie_downstream_dev_to_D0(pcie);
    tegra_pcie_dw_pme_turnoff(pcie);
    tegra_pcie_unconfig_controller(pcie);

    return __deinit_controller(pcie);
    return 0;
}

static int tegra_pcie_dw_resume_noirq(struct device *dev)

@@ -2254,7 +2222,8 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
    return 0;

fail_host_init:
    return __deinit_controller(pcie);
    tegra_pcie_unconfig_controller(pcie);
    return ret;
}

static int tegra_pcie_dw_resume_early(struct device *dev)

@@ -2292,7 +2261,7 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
    disable_irq(pcie->pci.pp.msi_irq);

    tegra_pcie_dw_pme_turnoff(pcie);
    __deinit_controller(pcie);
    tegra_pcie_unconfig_controller(pcie);
}

static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
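Note: three probe-time assignments above carry most of the common-code reworks for tegra: the DesignWare IP version (0x490A, presumably encoding 4.90a, per the Tegra194 change), the iATU region size consulted when the core detects the number of iATU windows from device registers, and the MSI vector count. Grouped for emphasis (example_tegra_presets() is illustrative, not from the patch):

static void example_tegra_presets(struct dw_pcie *pci,
                                  struct resource *atu_dma_res)
{
    pci->version = 0x490A;                      /* DesignWare IP version */
    pci->atu_size = resource_size(atu_dma_res); /* bounds iATU detection */
    pci->pp.num_vectors = MAX_MSI_IRQS;         /* MSI vectors, no host op */
}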
@@ -218,35 +218,6 @@ static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
    .get_features = uniphier_pcie_get_features,
};

static int uniphier_add_pcie_ep(struct uniphier_pcie_ep_priv *priv,
                                struct platform_device *pdev)
{
    struct dw_pcie *pci = &priv->pci;
    struct dw_pcie_ep *ep = &pci->ep;
    struct device *dev = &pdev->dev;
    struct resource *res;
    int ret;

    ep->ops = &uniphier_pcie_ep_ops;

    pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2");
    if (IS_ERR(pci->dbi_base2))
        return PTR_ERR(pci->dbi_base2);

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
    if (!res)
        return -EINVAL;

    ep->phys_base = res->start;
    ep->addr_size = resource_size(res);

    ret = dw_pcie_ep_init(ep);
    if (ret)
        dev_err(dev, "Failed to initialize endpoint (%d)\n", ret);

    return ret;
}

static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
{
    int ret;

@@ -300,7 +271,6 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct uniphier_pcie_ep_priv *priv;
    struct resource *res;
    int ret;

    priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);

@@ -314,11 +284,6 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
    priv->pci.dev = dev;
    priv->pci.ops = &dw_pcie_ops;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
    priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
    if (IS_ERR(priv->pci.dbi_base))
        return PTR_ERR(priv->pci.dbi_base);

    priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
    if (IS_ERR(priv->base))
        return PTR_ERR(priv->base);

@@ -352,7 +317,8 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
    if (ret)
        return ret;

    return uniphier_add_pcie_ep(priv, pdev);
    priv->pci.ep.ops = &uniphier_pcie_ep_ops;
    return dw_pcie_ep_init(&priv->pci.ep);
}

static const struct pci_epc_features uniphier_pro5_data = {
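Note: the endpoint-side deletions above fall out of moving "dbi", "dbi2" and "addr_space" resource setup into DWC common code; the driver-specific wrapper disappears and the probe tail shrinks to the two lines the hunk adds. Restated as a sketch (example_ep_probe_tail() is illustrative):

static int example_ep_probe_tail(struct uniphier_pcie_ep_priv *priv)
{
    /* Resource lookup and mapping now happen inside dw_pcie_ep_init(). */
    priv->pci.ep.ops = &uniphier_pcie_ep_ops;
    return dw_pcie_ep_init(&priv->pci.ep);
}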
@@ -146,16 +146,13 @@ static int uniphier_pcie_link_up(struct dw_pcie *pci)
    return (val & mask) == mask;
}

static int uniphier_pcie_establish_link(struct dw_pcie *pci)
static int uniphier_pcie_start_link(struct dw_pcie *pci)
{
    struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);

    if (dw_pcie_link_up(pci))
        return 0;

    uniphier_pcie_ltssm_enable(priv, true);

    return dw_pcie_wait_for_link(pci);
    return 0;
}

static void uniphier_pcie_stop_link(struct dw_pcie *pci)

@@ -317,13 +314,6 @@ static int uniphier_pcie_host_init(struct pcie_port *pp)

    uniphier_pcie_irq_enable(priv);

    dw_pcie_setup_rc(pp);
    ret = uniphier_pcie_establish_link(pci);
    if (ret)
        return ret;

    dw_pcie_msi_init(pp);

    return 0;
}

@@ -331,31 +321,6 @@ static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
    .host_init = uniphier_pcie_host_init,
};

static int uniphier_add_pcie_port(struct uniphier_pcie_priv *priv,
                                  struct platform_device *pdev)
{
    struct dw_pcie *pci = &priv->pci;
    struct pcie_port *pp = &pci->pp;
    struct device *dev = &pdev->dev;
    int ret;

    pp->ops = &uniphier_pcie_host_ops;

    if (IS_ENABLED(CONFIG_PCI_MSI)) {
        pp->msi_irq = platform_get_irq_byname(pdev, "msi");
        if (pp->msi_irq < 0)
            return pp->msi_irq;
    }

    ret = dw_pcie_host_init(pp);
    if (ret) {
        dev_err(dev, "Failed to initialize host (%d)\n", ret);
        return ret;
    }

    return 0;
}

static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv)
{
    int ret;

@@ -391,7 +356,7 @@ out_clk_disable:
}

static const struct dw_pcie_ops dw_pcie_ops = {
    .start_link = uniphier_pcie_establish_link,
    .start_link = uniphier_pcie_start_link,
    .stop_link = uniphier_pcie_stop_link,
    .link_up = uniphier_pcie_link_up,
};

@@ -400,7 +365,6 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct uniphier_pcie_priv *priv;
    struct resource *res;
    int ret;

    priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);

@@ -410,11 +374,6 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
    priv->pci.dev = dev;
    priv->pci.ops = &dw_pcie_ops;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
    priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
    if (IS_ERR(priv->pci.dbi_base))
        return PTR_ERR(priv->pci.dbi_base);

    priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
    if (IS_ERR(priv->base))
        return PTR_ERR(priv->base);

@@ -437,7 +396,9 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
    if (ret)
        return ret;

    return uniphier_add_pcie_port(priv, pdev);
    priv->pci.pp.ops = &uniphier_pcie_host_ops;

    return dw_pcie_host_init(&priv->pci.pp);
}

static const struct of_device_id uniphier_pcie_match[] = {
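Note: this is the host-side counterpart of the same cleanup: dw_pcie_setup_rc(), dw_pcie_msi_init(), the link wait and the "msi" IRQ lookup all live in dw_pcie_host_init() now, so uniphier_add_pcie_port() collapses into the probe. Sketch of the remaining tail (example_host_probe_tail() is illustrative):

static int example_host_probe_tail(struct uniphier_pcie_priv *priv)
{
    priv->pci.pp.ops = &uniphier_pcie_host_ops;
    return dw_pcie_host_init(&priv->pci.pp);
}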
@@ -556,6 +556,11 @@ static int pci_parse_request_of_pci_ranges(struct device *dev,
            break;
        case IORESOURCE_MEM:
            res_valid |= !(res->flags & IORESOURCE_PREFETCH);

            if (!(res->flags & IORESOURCE_PREFETCH))
                if (upper_32_bits(resource_size(res)))
                    dev_warn(dev, "Memory resource size exceeds max for 32 bits\n");

            break;
        }
    }
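Note: this hunk adds the "non-prefetchable aperture > 32-bit" warning. The reasoning: a bridge's non-prefetchable memory window is a 32-bit quantity, so endpoint BARs must land below 4 GB within it; a host-bridge aperture whose size has bits set above bit 31 can never be fully consumed. The check in isolation, folded into one condition for readability:

    if (!(res->flags & IORESOURCE_PREFETCH) &&
        upper_32_bits(resource_size(res)))
        dev_warn(dev, "Memory resource size exceeds max for 32 bits\n");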
@@ -2522,6 +2522,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disab
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SAMSUNG, 0xa5e3, quirk_disable_all_msi);

/* Disable MSI on chipsets that are known to not support it */
static void quirk_disable_msi(struct pci_dev *dev)
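Note: DECLARE_PCI_FIXUP_FINAL() registers quirk_disable_all_msi() to run for the matching vendor/device pair late in enumeration; the new Samsung entry (device ID 0xa5e3) simply reuses the existing quirk. A declaration for some other device would follow the same one-line shape:

/* Hypothetical example only; 0x1234/0x5678 are placeholder IDs. */
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_disable_all_msi);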
@@ -4,70 +4,41 @@
 *
 * Phy provider for PCIe controller on Exynos SoC series
 *
 * Copyright (C) 2017 Samsung Electronics Co., Ltd.
 * Copyright (C) 2017-2020 Samsung Electronics Co., Ltd.
 * Jaehoon Chung <jh80.chung@samsung.com>
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>

/* PCIe Purple registers */
#define PCIE_PHY_GLOBAL_RESET 0x000
#define PCIE_PHY_COMMON_RESET 0x004
#define PCIE_PHY_CMN_REG 0x008
#define PCIE_PHY_MAC_RESET 0x00c
#define PCIE_PHY_PLL_LOCKED 0x010
#define PCIE_PHY_TRSVREG_RESET 0x020
#define PCIE_PHY_TRSV_RESET 0x024
#define PCIE_PHY_OFFSET(x) ((x) * 0x4)

/* PCIe PHY registers */
#define PCIE_PHY_IMPEDANCE 0x004
#define PCIE_PHY_PLL_DIV_0 0x008
#define PCIE_PHY_PLL_BIAS 0x00c
#define PCIE_PHY_DCC_FEEDBACK 0x014
#define PCIE_PHY_PLL_DIV_1 0x05c
#define PCIE_PHY_COMMON_POWER 0x064
#define PCIE_PHY_COMMON_PD_CMN BIT(3)
#define PCIE_PHY_TRSV0_EMP_LVL 0x084
#define PCIE_PHY_TRSV0_DRV_LVL 0x088
#define PCIE_PHY_TRSV0_RXCDR 0x0ac
#define PCIE_PHY_TRSV0_POWER 0x0c4
#define PCIE_PHY_TRSV0_PD_TSV BIT(7)
#define PCIE_PHY_TRSV0_LVCC 0x0dc
#define PCIE_PHY_TRSV1_EMP_LVL 0x144
#define PCIE_PHY_TRSV1_RXCDR 0x16c
#define PCIE_PHY_TRSV1_POWER 0x184
#define PCIE_PHY_TRSV1_PD_TSV BIT(7)
#define PCIE_PHY_TRSV1_LVCC 0x19c
#define PCIE_PHY_TRSV2_EMP_LVL 0x204
#define PCIE_PHY_TRSV2_RXCDR 0x22c
#define PCIE_PHY_TRSV2_POWER 0x244
#define PCIE_PHY_TRSV2_PD_TSV BIT(7)
#define PCIE_PHY_TRSV2_LVCC 0x25c
#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
#define PCIE_PHY_TRSV3_RXCDR 0x2ec
#define PCIE_PHY_TRSV3_POWER 0x304
#define PCIE_PHY_TRSV3_PD_TSV BIT(7)
#define PCIE_PHY_TRSV3_LVCC 0x31c
/* Sysreg FSYS register offsets and bits for Exynos5433 */
#define PCIE_EXYNOS5433_PHY_MAC_RESET 0x0208
#define PCIE_MAC_RESET_MASK 0xFF
#define PCIE_MAC_RESET BIT(4)
#define PCIE_EXYNOS5433_PHY_L1SUB_CM_CON 0x1010
#define PCIE_REFCLK_GATING_EN BIT(0)
#define PCIE_EXYNOS5433_PHY_COMMON_RESET 0x1020
#define PCIE_PHY_RESET BIT(0)
#define PCIE_EXYNOS5433_PHY_GLOBAL_RESET 0x1040
#define PCIE_GLOBAL_RESET BIT(0)
#define PCIE_REFCLK BIT(1)
#define PCIE_REFCLK_MASK 0x16
#define PCIE_APP_REQ_EXIT_L1_MODE BIT(5)

struct exynos_pcie_phy_data {
    const struct phy_ops *ops;
};
/* PMU PCIE PHY isolation control */
#define EXYNOS5433_PMU_PCIE_PHY_OFFSET 0x730

/* For Exynos pcie phy */
struct exynos_pcie_phy {
    const struct exynos_pcie_phy_data *drv_data;
    void __iomem *phy_base;
    void __iomem *blk_base; /* For exynos5440 */
    void __iomem *base;
    struct regmap *pmureg;
    struct regmap *fsysreg;
};
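Note: the reworked struct reflects the Exynos5433 control layout: the PHY tuning registers stay behind a single MMIO window (base), while reset, refclk gating and power isolation bits sit in the FSYS sysreg and the PMU, reached through regmaps. A representative bit flip, mirroring calls made later in the file (example_phy_assert_reset() is illustrative):

static void example_phy_assert_reset(struct exynos_pcie_phy *ep)
{
    /* Set the PHY reset bit in the FSYS sysreg via its regmap. */
    regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_COMMON_RESET,
                       PCIE_PHY_RESET, 1);
}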
static void exynos_pcie_phy_writel(void __iomem *base, u32 val, u32 offset)

@@ -75,153 +46,103 @@ static void exynos_pcie_phy_writel(void __iomem *base, u32 val, u32 offset)
    writel(val, base + offset);
}

static u32 exynos_pcie_phy_readl(void __iomem *base, u32 offset)
{
    return readl(base + offset);
}

/* For Exynos5440 specific functions */
static int exynos5440_pcie_phy_init(struct phy *phy)
/* Exynos5433 specific functions */
static int exynos5433_pcie_phy_init(struct phy *phy)
{
    struct exynos_pcie_phy *ep = phy_get_drvdata(phy);

    /* DCC feedback control off */
    exynos_pcie_phy_writel(ep->phy_base, 0x29, PCIE_PHY_DCC_FEEDBACK);
    regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_COMMON_RESET,
                       PCIE_PHY_RESET, 1);
    regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_MAC_RESET,
                       PCIE_MAC_RESET, 0);

    /* set TX/RX impedance */
    exynos_pcie_phy_writel(ep->phy_base, 0xd5, PCIE_PHY_IMPEDANCE);
    /* PHY refclk 24MHz */
    regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
                       PCIE_REFCLK_MASK, PCIE_REFCLK);
    regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
                       PCIE_GLOBAL_RESET, 0);

    /* set 50Mhz PHY clock */
    exynos_pcie_phy_writel(ep->phy_base, 0x14, PCIE_PHY_PLL_DIV_0);
    exynos_pcie_phy_writel(ep->phy_base, 0x12, PCIE_PHY_PLL_DIV_1);

    /* set TX Differential output for lane 0 */
    exynos_pcie_phy_writel(ep->phy_base, 0x7f, PCIE_PHY_TRSV0_DRV_LVL);
    exynos_pcie_phy_writel(ep->base, 0x11, PCIE_PHY_OFFSET(0x3));

    /* set TX Pre-emphasis Level Control for lane 0 to minimum */
    exynos_pcie_phy_writel(ep->phy_base, 0x0, PCIE_PHY_TRSV0_EMP_LVL);
    /* band gap reference on */
    exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x20));
    exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x4b));

    /* set RX clock and data recovery bandwidth */
    exynos_pcie_phy_writel(ep->phy_base, 0xe7, PCIE_PHY_PLL_BIAS);
    exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV0_RXCDR);
    exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV1_RXCDR);
    exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV2_RXCDR);
    exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV3_RXCDR);
    /* jitter tuning */
    exynos_pcie_phy_writel(ep->base, 0x34, PCIE_PHY_OFFSET(0x4));
    exynos_pcie_phy_writel(ep->base, 0x02, PCIE_PHY_OFFSET(0x7));
    exynos_pcie_phy_writel(ep->base, 0x41, PCIE_PHY_OFFSET(0x21));
    exynos_pcie_phy_writel(ep->base, 0x7F, PCIE_PHY_OFFSET(0x14));
    exynos_pcie_phy_writel(ep->base, 0xC0, PCIE_PHY_OFFSET(0x15));
    exynos_pcie_phy_writel(ep->base, 0x61, PCIE_PHY_OFFSET(0x36));

    /* change TX Pre-emphasis Level Control for lanes */
    exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV0_EMP_LVL);
    exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV1_EMP_LVL);
    exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV2_EMP_LVL);
    exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV3_EMP_LVL);
    /* D0 uninit.. */
    exynos_pcie_phy_writel(ep->base, 0x44, PCIE_PHY_OFFSET(0x3D));

    /* set LVCC */
    exynos_pcie_phy_writel(ep->phy_base, 0x20, PCIE_PHY_TRSV0_LVCC);
    exynos_pcie_phy_writel(ep->phy_base, 0xa0, PCIE_PHY_TRSV1_LVCC);
    exynos_pcie_phy_writel(ep->phy_base, 0xa0, PCIE_PHY_TRSV2_LVCC);
    exynos_pcie_phy_writel(ep->phy_base, 0xa0, PCIE_PHY_TRSV3_LVCC);
    /* 24MHz */
    exynos_pcie_phy_writel(ep->base, 0x94, PCIE_PHY_OFFSET(0x8));
    exynos_pcie_phy_writel(ep->base, 0xA7, PCIE_PHY_OFFSET(0x9));
    exynos_pcie_phy_writel(ep->base, 0x93, PCIE_PHY_OFFSET(0xA));
    exynos_pcie_phy_writel(ep->base, 0x6B, PCIE_PHY_OFFSET(0xC));
    exynos_pcie_phy_writel(ep->base, 0xA5, PCIE_PHY_OFFSET(0xF));
    exynos_pcie_phy_writel(ep->base, 0x34, PCIE_PHY_OFFSET(0x16));
    exynos_pcie_phy_writel(ep->base, 0xA3, PCIE_PHY_OFFSET(0x17));
    exynos_pcie_phy_writel(ep->base, 0xA7, PCIE_PHY_OFFSET(0x1A));
    exynos_pcie_phy_writel(ep->base, 0x71, PCIE_PHY_OFFSET(0x23));
    exynos_pcie_phy_writel(ep->base, 0x4C, PCIE_PHY_OFFSET(0x24));

    /* pulse for common reset */
    exynos_pcie_phy_writel(ep->blk_base, 1, PCIE_PHY_COMMON_RESET);
    udelay(500);
    exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_COMMON_RESET);
    exynos_pcie_phy_writel(ep->base, 0x0E, PCIE_PHY_OFFSET(0x26));
    exynos_pcie_phy_writel(ep->base, 0x14, PCIE_PHY_OFFSET(0x7));
    exynos_pcie_phy_writel(ep->base, 0x48, PCIE_PHY_OFFSET(0x43));
    exynos_pcie_phy_writel(ep->base, 0x44, PCIE_PHY_OFFSET(0x44));
    exynos_pcie_phy_writel(ep->base, 0x03, PCIE_PHY_OFFSET(0x45));
    exynos_pcie_phy_writel(ep->base, 0xA7, PCIE_PHY_OFFSET(0x48));
    exynos_pcie_phy_writel(ep->base, 0x13, PCIE_PHY_OFFSET(0x54));
    exynos_pcie_phy_writel(ep->base, 0x04, PCIE_PHY_OFFSET(0x31));
    exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x32));

    regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_COMMON_RESET,
                       PCIE_PHY_RESET, 0);
    regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_MAC_RESET,
                       PCIE_MAC_RESET_MASK, PCIE_MAC_RESET);
    return 0;
}

static int exynos5440_pcie_phy_power_on(struct phy *phy)
{
    struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
    u32 val;

    exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_COMMON_RESET);
    exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_CMN_REG);
    exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_TRSVREG_RESET);
    exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_TRSV_RESET);

    val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_COMMON_POWER);
    val &= ~PCIE_PHY_COMMON_PD_CMN;
    exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_COMMON_POWER);

    val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV0_POWER);
    val &= ~PCIE_PHY_TRSV0_PD_TSV;
    exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV0_POWER);

    val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV1_POWER);
    val &= ~PCIE_PHY_TRSV1_PD_TSV;
    exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV1_POWER);

    val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV2_POWER);
    val &= ~PCIE_PHY_TRSV2_PD_TSV;
    exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV2_POWER);

    val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV3_POWER);
    val &= ~PCIE_PHY_TRSV3_PD_TSV;
    exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV3_POWER);

    return 0;
}

static int exynos5440_pcie_phy_power_off(struct phy *phy)
{
    struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
    u32 val;

    if (readl_poll_timeout(ep->phy_base + PCIE_PHY_PLL_LOCKED, val,
                           (val != 0), 1, 500)) {
        dev_err(&phy->dev, "PLL Locked: 0x%x\n", val);
        return -ETIMEDOUT;
    }

    val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_COMMON_POWER);
    val |= PCIE_PHY_COMMON_PD_CMN;
    exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_COMMON_POWER);

    val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV0_POWER);
    val |= PCIE_PHY_TRSV0_PD_TSV;
    exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV0_POWER);

    val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV1_POWER);
    val |= PCIE_PHY_TRSV1_PD_TSV;
    exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV1_POWER);

    val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV2_POWER);
    val |= PCIE_PHY_TRSV2_PD_TSV;
    exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV2_POWER);

    val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV3_POWER);
    val |= PCIE_PHY_TRSV3_PD_TSV;
    exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV3_POWER);

    return 0;
}

static int exynos5440_pcie_phy_reset(struct phy *phy)
static int exynos5433_pcie_phy_power_on(struct phy *phy)
{
    struct exynos_pcie_phy *ep = phy_get_drvdata(phy);

    exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_MAC_RESET);
    exynos_pcie_phy_writel(ep->blk_base, 1, PCIE_PHY_GLOBAL_RESET);
    exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_GLOBAL_RESET);

    regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
                       BIT(0), 1);
    regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
                       PCIE_APP_REQ_EXIT_L1_MODE, 0);
    regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
                       PCIE_REFCLK_GATING_EN, 0);
    return 0;
}

static const struct phy_ops exynos5440_phy_ops = {
    .init = exynos5440_pcie_phy_init,
    .power_on = exynos5440_pcie_phy_power_on,
    .power_off = exynos5440_pcie_phy_power_off,
    .reset = exynos5440_pcie_phy_reset,
static int exynos5433_pcie_phy_power_off(struct phy *phy)
{
    struct exynos_pcie_phy *ep = phy_get_drvdata(phy);

    regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
                       PCIE_REFCLK_GATING_EN, PCIE_REFCLK_GATING_EN);
    regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
                       BIT(0), 0);
    return 0;
}

static const struct phy_ops exynos5433_phy_ops = {
    .init = exynos5433_pcie_phy_init,
    .power_on = exynos5433_pcie_phy_power_on,
    .power_off = exynos5433_pcie_phy_power_off,
    .owner = THIS_MODULE,
};

static const struct exynos_pcie_phy_data exynos5440_pcie_phy_data = {
    .ops = &exynos5440_phy_ops,
};

static const struct of_device_id exynos_pcie_phy_match[] = {
    {
        .compatible = "samsung,exynos5440-pcie-phy",
        .data = &exynos5440_pcie_phy_data,
        .compatible = "samsung,exynos5433-pcie-phy",
    },
    {},
};
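Note: the new phy_ops table is consumed through the generic PHY API; the Exynos PCIe controller driver is expected to call phy_init() and then phy_power_on(), which dispatch to the exynos5433_* callbacks above, with phy_power_off()/phy_exit() on teardown. Generic-API sketch, not code from this series:

    ret = phy_init(phy);       /* -> exynos5433_pcie_phy_init() */
    if (ret)
        return ret;

    ret = phy_power_on(phy);   /* -> exynos5433_pcie_phy_power_on() */
    if (ret)
        phy_exit(phy);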
@@ -232,30 +153,30 @@ static int exynos_pcie_phy_probe(struct platform_device *pdev)
    struct exynos_pcie_phy *exynos_phy;
    struct phy *generic_phy;
    struct phy_provider *phy_provider;
    struct resource *res;
    const struct exynos_pcie_phy_data *drv_data;

    drv_data = of_device_get_match_data(dev);
    if (!drv_data)
        return -ENODEV;

    exynos_phy = devm_kzalloc(dev, sizeof(*exynos_phy), GFP_KERNEL);
    if (!exynos_phy)
        return -ENOMEM;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    exynos_phy->phy_base = devm_ioremap_resource(dev, res);
    if (IS_ERR(exynos_phy->phy_base))
        return PTR_ERR(exynos_phy->phy_base);
    exynos_phy->base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(exynos_phy->base))
        return PTR_ERR(exynos_phy->base);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
    exynos_phy->blk_base = devm_ioremap_resource(dev, res);
    if (IS_ERR(exynos_phy->blk_base))
        return PTR_ERR(exynos_phy->blk_base);
    exynos_phy->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
                                                         "samsung,pmu-syscon");
    if (IS_ERR(exynos_phy->pmureg)) {
        dev_err(&pdev->dev, "PMU regmap lookup failed.\n");
        return PTR_ERR(exynos_phy->pmureg);
    }

    exynos_phy->drv_data = drv_data;
    exynos_phy->fsysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
                                                          "samsung,fsys-sysreg");
    if (IS_ERR(exynos_phy->fsysreg)) {
        dev_err(&pdev->dev, "FSYS sysreg regmap lookup failed.\n");
        return PTR_ERR(exynos_phy->fsysreg);
    }

    generic_phy = devm_phy_create(dev, dev->of_node, drv_data->ops);
    generic_phy = devm_phy_create(dev, dev->of_node, &exynos5433_phy_ops);
    if (IS_ERR(generic_phy)) {
        dev_err(dev, "failed to create PHY\n");
        return PTR_ERR(generic_phy);

@@ -275,5 +196,4 @@ static struct platform_driver exynos_pcie_phy_driver = {
        .suppress_bind_attrs = true,
    }
};

builtin_platform_driver(exynos_pcie_phy_driver);
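Note: after devm_phy_create() the probe is expected to finish with the stock single-PHY provider registration; the truncated hunk above ends just before it. A sketch of that conventional tail (assumed from the generic PHY framework, not shown in the diff; dev, generic_phy and exynos_phy are the probe's locals):

    phy_set_drvdata(generic_phy, exynos_phy);
    phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);

    return PTR_ERR_OR_ZERO(phy_provider);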