Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

David S. Miller 2016-10-14 10:00:27 -04:00
Parents 687d911466 29fbff8698
Commit 8eed1cd4cd
436 changed files: 22859 additions and 7587 deletions

View file

@ -483,7 +483,7 @@ printk(KERN_INFO "my ip: %pI4\n", &ipaddress);
<function>get_user()</function>
/
<function>put_user()</function>
<filename class="headerfile">include/asm/uaccess.h</filename>
<filename class="headerfile">include/linux/uaccess.h</filename>
</title>
<para>
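As a hedged usage sketch (bump_user_int() below is a hypothetical helper,
not part of the kernel): both macros return 0 on success and -EFAULT when
the user address faults.

	static int bump_user_int(int __user *uptr)
	{
		int val;

		/* Read one int from user space, then write back val + 1. */
		if (get_user(val, uptr))
			return -EFAULT;
		return put_user(val + 1, uptr);
	}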

View file

@ -0,0 +1,23 @@
Amlogic Meson PWM Controller
============================
Required properties:
- compatible: Shall contain "amlogic,meson8b-pwm" or "amlogic,meson-gxbb-pwm".
- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
the cells format.
Optional properties:
- clocks: May contain one or two phandles, one parent clock for each of the
two PWM channels.
- clock-names: May contain the "clkin0" and/or "clkin1" clock names.
Example:
pwm_ab: pwm@8550 {
compatible = "amlogic,meson-gxbb-pwm";
reg = <0x0 0x08550 0x0 0x10>;
#pwm-cells = <3>;
status = "disabled";
clocks = <&xtal>, <&xtal>;
clock-names = "clkin0", "clkin1";
};
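For illustration only, a hedged sketch of the consumer side in C (the
client fragment below is hypothetical; with #pwm-cells = 3 the pwms
specifier selects the channel index, period and flags, per pwm.txt):

	/* Hypothetical client driver fragment using the generic PWM API. */
	struct pwm_device *pwm = devm_pwm_get(dev, NULL);

	if (IS_ERR(pwm))
		return PTR_ERR(pwm);
	pwm_config(pwm, period_ns / 2, period_ns);	/* 50% duty cycle */
	pwm_enable(pwm);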

View file

@ -2,8 +2,9 @@ MediaTek display PWM controller
Required properties:
- compatible: should be "mediatek,<name>-disp-pwm":
- "mediatek,mt8173-disp-pwm": found on mt8173 SoC.
- "mediatek,mt2701-disp-pwm": found on mt2701 SoC.
- "mediatek,mt6595-disp-pwm": found on mt6595 SoC.
- "mediatek,mt8173-disp-pwm": found on mt8173 SoC.
- reg: physical base address and length of the controller's registers.
- #pwm-cells: must be 2. See pwm.txt in this directory for a description of
the cell format.

View file

@ -13,13 +13,14 @@ Required parameters:
- pinctrl-0: List of phandles pointing to pin configuration nodes
for PWM module.
For Pinctrl properties, please refer to [1].
- clock-names: Set to "pwm".
- clock-names: Valid entries are "pwm" and/or "capture".
- clocks: phandle of the clock used by the PWM module.
For Clk properties, please refer to [2].
- interrupts: IRQ for the Capture device
Optional properties:
- st,pwm-num-chan: Number of available channels. If not passed, the driver
will consider single channel by default.
- st,pwm-num-chan: Number of available PWM channels. Default is 0.
- st,capture-num-chan: Number of available Capture channels. Default is 0.
[1] Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
[2] Documentation/devicetree/bindings/clock/clock-bindings.txt
@ -38,4 +39,5 @@ pwm1: pwm@fe510000 {
clocks = <&clk_sysin>;
clock-names = "pwm";
st,pwm-num-chan = <4>;
st,capture-num-chan = <2>;
};

View file

@ -6,6 +6,7 @@ Required properties:
- "allwinner,sun5i-a10s-pwm"
- "allwinner,sun5i-a13-pwm"
- "allwinner,sun7i-a20-pwm"
- "allwinner,sun8i-h3-pwm"
- reg: physical base address and length of the controller's registers
- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
the cells format.

View file

@ -0,0 +1,70 @@
Thermal driver for MAX77620 Power management IC from Maxim Semiconductor.
Maxim Semiconductor MAX77620 supports alarm interrupts when its
die temperature crosses 120C or 140C. These threshold temperatures
are not configurable. The device does not report the actual die
temperature; it only indicates whether the temperature is above or
below a threshold level.
Required properties:
-------------------
#thermal-sensor-cells: Please refer to <devicetree/bindings/thermal/thermal.txt>
for more details.
The value must be 0.
For more details, please refer to the generic thermal DT binding document
<devicetree/bindings/thermal/thermal.txt>.
Please refer to <devicetree/bindings/mfd/max77620.txt>, the MFD DT binding
document, for the MAX77620.
Example:
--------
#include <dt-bindings/mfd/max77620.h>
#include <dt-bindings/thermal/thermal.h>
...
i2c@7000d000 {
spmic: max77620@3c {
compatible = "maxim,max77620";
:::::
#thermal-sensor-cells = <0>;
:::
};
};
cool_dev: cool-dev {
compatible = "cooling-dev";
#cooling-cells = <2>;
};
thermal-zones {
PMIC-Die {
polling-delay = <0>;
polling-delay-passive = <0>;
thermal-sensors = <&spmic>;
trips {
pmic_die_warn_temp_thresh: hot-die {
temperature = <120000>;
type = "hot";
hysteresis = <0>;
};
pmic_die_crit_temp_thresh: critical-die {
temperature = <140000>;
type = "critical";
hysteresis = <0>;
};
};
cooling-maps {
map0 {
trip = <&pmic_die_warn_temp_thresh>;
cooling-device = <&cool_dev THERMAL_NO_LIMIT
THERMAL_NO_LIMIT>;
contribution = <100>;
};
};
};
};

View file

@ -8,7 +8,9 @@ apmixedsys register space via AHB bus accesses, so a phandle to the APMIXEDSYS
is also needed.
Required properties:
- compatible: "mediatek,mt8173-thermal"
- compatible:
- "mediatek,mt8173-thermal" : For MT8173 family of SoCs
- "mediatek,mt2701-thermal" : For MT2701 family of SoCs
- reg: Address range of the thermal controller
- interrupts: IRQ for the thermal controller
- clocks, clock-names: Clocks needed for the thermal controller. required

View file

@ -10,8 +10,14 @@ Required properties :
- compatible : For Tegra124, must contain "nvidia,tegra124-soctherm".
For Tegra132, must contain "nvidia,tegra132-soctherm".
For Tegra210, must contain "nvidia,tegra210-soctherm".
- reg : Should contain 1 entry:
- reg : Should contain at least 2 entries for each entry in reg-names:
- SOCTHERM register set
- Tegra CAR register set: Required for Tegra124 and Tegra210.
- CCROC register set: Required for Tegra132.
- reg-names : Should contain at least 2 entries:
- soctherm-reg
- car-reg
- ccroc-reg
- interrupts : Defines the interrupt used by SOCTHERM
- clocks : Must contain an entry for each entry in clock-names.
See ../clocks/clock-bindings.txt for details.
@ -25,17 +31,45 @@ Required properties :
- #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description
of this property. See <dt-bindings/thermal/tegra124-soctherm.h> for a
list of valid values when referring to thermal sensors.
- throttle-cfgs: A sub-node containing the configuration for each
hardware throttle event. These events can be set as cooling devices.
* throttle events: Sub-nodes must be named "light" or "heavy".
Properties:
- nvidia,priority: Each throttle has its own settings, so software
needs to set a priority for each throttle; the hardware arbiter then
selects the final throttle settings.
A bigger value indicates a higher priority. In general, higher priority
translates to a lower target frequency. Software needs to ensure that
critical thermal alarms are given higher priority, and that there is
no race if the priority of two vectors is set to the same value.
The valid range of this value is 1~100.
- nvidia,cpu-throt-percent: This property applies to Tegra124 and Tegra210.
It is the throttling depth of the pulse skippers, expressed as a
throttling percentage.
- nvidia,cpu-throt-level: This property applies only to Tegra132. It is the
level of the pulse skippers, which is used to throttle clock frequencies.
It indicates the CPU clock throttling depth, and the depth can be
programmed. It must be set to one of the following values:
TEGRA_SOCTHERM_THROT_LEVEL_LOW, TEGRA_SOCTHERM_THROT_LEVEL_MED,
TEGRA_SOCTHERM_THROT_LEVEL_HIGH, TEGRA_SOCTHERM_THROT_LEVEL_NONE
- #cooling-cells: Should be 1. This cooling device only supports the on/off state.
See ./thermal.txt for a description of this property.
Note:
- the "critical" type trip points will be programmed into the SOC_THERM
hardware as the shutdown temperature. Once the temperature of this thermal
zone rises above it, the system will be shut down or reset by hardware.
- the "hot" type trip points will be programmed into the SOC_THERM hardware
as the throttle temperature. Once the temperature of this thermal zone
rises above it, the HW throttle event will be triggered.
Example:
soctherm@700e2000 {
compatible = "nvidia,tegra124-soctherm";
reg = <0x0 0x700e2000 0x0 0x1000>;
reg = <0x0 0x700e2000 0x0 0x600 /* SOC_THERM reg_base */
0x0 0x60006000 0x0 0x400>; /* CAR reg_base */
reg-names = "soctherm-reg", "car-reg";
interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&tegra_car TEGRA124_CLK_TSENSOR>,
<&tegra_car TEGRA124_CLK_SOC_THERM>;
@ -44,6 +78,76 @@ Example :
reset-names = "soctherm";
#thermal-sensor-cells = <1>;
throttle-cfgs {
/*
* When the "heavy" cooling device triggered,
* the HW will skip cpu clock's pulse in 85% depth
*/
throttle_heavy: heavy {
nvidia,priority = <100>;
nvidia,cpu-throt-percent = <85>;
#cooling-cells = <1>;
};
/*
* When the "light" cooling device triggered,
* the HW will skip cpu clock's pulse in 50% depth
*/
throttle_light: light {
nvidia,priority = <80>;
nvidia,cpu-throt-percent = <50>;
#cooling-cells = <1>;
};
/*
* If these two devices are triggered in same time, the HW throttle
* arbiter will select the highest priority as the final throttle
* settings to skip cpu pulse.
*/
};
};
Example: referring to Tegra132's "reg", "reg-names" and "throttle-cfgs":
soctherm@700e2000 {
compatible = "nvidia,tegra132-soctherm";
reg = <0x0 0x700e2000 0x0 0x600 /* SOC_THERM reg_base */
0x0 0x70040000 0x0 0x200>; /* CCROC reg_base */;
reg-names = "soctherm-reg", "ccroc-reg";
throttle-cfgs {
/*
* When the "heavy" cooling device triggered,
* the HW will skip cpu clock's pulse in HIGH level
*/
throttle_heavy: heavy {
nvidia,priority = <100>;
nvidia,cpu-throt-level = <TEGRA_SOCTHERM_THROT_LEVEL_HIGH>;
#cooling-cells = <1>;
};
/*
* When the "light" cooling device triggered,
* the HW will skip cpu clock's pulse in MED level
*/
throttle_light: light {
nvidia,priority = <80>;
nvidia,cpu-throt-level = <TEGRA_SOCTHERM_THROT_LEVEL_MED>;
#cooling-cells = <1>;
};
/*
* If these two devices are triggered in same time, the HW throttle
* arbiter will select the highest priority as the final throttle
* settings to skip cpu pulse.
*/
};
};
Example: referring to thermal sensors:
@ -62,6 +166,19 @@ Example: referring to thermal sensors :
hysteresis = <1000>;
type = "critical";
};
cpu_throttle_trip: throttle-trip {
temperature = <100000>;
hysteresis = <1000>;
type = "hot";
};
};
cooling-maps {
map0 {
trip = <&cpu_throttle_trip>;
cooling-device = <&throttle_heavy 1 1>;
};
};
};
};

View file

@ -0,0 +1,21 @@
* QCOM SoC Temperature Sensor (TSENS)
Required properties:
- compatible :
- "qcom,msm8916-tsens" : For 8916 Family of SoCs
- "qcom,msm8974-tsens" : For 8974 Family of SoCs
- "qcom,msm8996-tsens" : For 8996 Family of SoCs
- reg: Address range of the thermal registers
- #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description.
- Refer to Documentation/devicetree/bindings/nvmem/nvmem.txt for details on how
to specify nvmem cells
Example:
tsens: thermal-sensor@900000 {
compatible = "qcom,msm8916-tsens";
reg = <0x4a8000 0x2000>;
nvmem-cells = <&tsens_caldata>, <&tsens_calsel>;
nvmem-cell-names = "caldata", "calsel";
#thermal-sensor-cells = <1>;
};

View file

@ -7,6 +7,8 @@ Required properties:
- reg : Physical base address and size
Optional properties:
- clocks : Input clock specifier. Refer to common clock
bindings.
- clock-frequency : Frequency of clock in Hz
- xlnx,wdt-enable-once : 0 - Watchdog can be restarted
1 - Watchdog can be enabled just once
@ -17,6 +19,7 @@ Example:
axi-timebase-wdt@40100000 {
clock-frequency = <50000000>;
compatible = "xlnx,xps-timebase-wdt-1.00.a";
clocks = <&clkc 15>;
reg = <0x40100000 0x10000>;
xlnx,wdt-enable-once = <0x0>;
xlnx,wdt-interval = <0x1b>;

View file

@ -9,8 +9,7 @@ functionality.
Required properties
- compatible : Must be one of: "st,stih407-lpc" "st,stih416-lpc"
"st,stih415-lpc" "st,stid127-lpc"
- compatible : Should be: "st,stih407-lpc"
- reg : LPC registers base address + size
- interrupts : LPC interrupt line number and associated flags
- clocks : Clock used by LPC device (See: ../clock/clock-bindings.txt)

View file

@ -2470,6 +2470,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nfsrootdebug [NFS] enable nfsroot debugging messages.
See Documentation/filesystems/nfs/nfsroot.txt.
nfs.callback_nr_threads=
[NFSv4] set the total number of threads that the
NFS client will assign to service NFSv4 callback
requests.
nfs.callback_tcpport=
[NFS] set the TCP port on which the NFSv4 callback
channel should listen.
@ -2493,6 +2498,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
of returning the full 64-bit number.
The default is to return 64-bit inode numbers.
nfs.max_session_cb_slots=
[NFSv4.1] Sets the maximum number of session
slots the client will assign to the callback
channel. This determines the maximum number of
callbacks the client will process in parallel for
a particular server.
nfs.max_session_slots=
[NFSv4.1] Sets the maximum number of session slots
the client will attempt to negotiate with the server.
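For illustration, several of these parameters can be combined on one boot
command line; the values below are hypothetical, not defaults:

	nfs.callback_nr_threads=4 nfs.max_session_cb_slots=16 nfs.max_session_slots=64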

View file

@ -49,6 +49,9 @@ temperature) and throttle appropriate devices.
.bind: bind the thermal zone device with a thermal cooling device.
.unbind: unbind the thermal zone device with a thermal cooling device.
.get_temp: get the current temperature of the thermal zone.
.set_trips: set the trip points window. Whenever the current temperature
is updated, the trip points immediately below and above the
current temperature are found.
.get_mode: get the current mode (enabled/disabled) of the thermal zone.
- "enabled" means the kernel thermal management is enabled.
- "disabled" will prevent kernel thermal driver action upon trip points
@ -95,6 +98,10 @@ temperature) and throttle appropriate devices.
get_temp: a pointer to a function that reads the
sensor temperature. This is mandatory
callback provided by sensor driver.
set_trips: a pointer to a function that sets a
temperature window. When the temperature leaves
this window, the driver must inform the thermal
core via thermal_zone_device_update (see the
sketch after section 1.1.8 below).
get_trend: a pointer to a function that reads the
sensor temperature trend.
set_emul_temp: a pointer to a function that sets
@ -140,6 +147,18 @@ temperature) and throttle appropriate devices.
Normally this function will not need to be called and the resource
management code will ensure that the resource is freed.
1.1.7 int thermal_zone_get_slope(struct thermal_zone_device *tz)
This interface is used to read the slope attribute value
for the thermal zone device, which might be useful for platform
drivers for temperature calculations.
1.1.8 int thermal_zone_get_offset(struct thermal_zone_device *tz)
This interface is used to read the offset attribute value
for the thermal zone device, which might be useful for platform
drivers for temperature calculations.
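As a hedged illustration of the set_trips callback described above (a
minimal sketch; struct my_sensor, its helpers, and the alarm interrupt are
hypothetical, while thermal_zone_device_update() and the callback signature
follow this document):

	static int my_sensor_set_trips(struct thermal_zone_device *tz,
				       int low, int high)
	{
		struct my_sensor *s = tz->devdata;

		/* Program the hardware alarm window (millicelsius). */
		my_sensor_write_alarm(s, MY_ALARM_LOW, low);
		my_sensor_write_alarm(s, MY_ALARM_HIGH, high);
		return 0;
	}

	static irqreturn_t my_sensor_alarm_irq(int irq, void *data)
	{
		struct my_sensor *s = data;

		/* Let the core re-read the temperature, run the governors
		 * and install a new trip window through set_trips. */
		thermal_zone_device_update(s->tzd, THERMAL_EVENT_UNSPECIFIED);
		return IRQ_HANDLED;
	}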
1.2 thermal cooling device interface
1.2.1 struct thermal_cooling_device *thermal_cooling_device_register(char *name,
void *devdata, struct thermal_cooling_device_ops *)

View file

@ -48,8 +48,10 @@ struct watchdog_device {
const struct attribute_group **groups;
const struct watchdog_info *info;
const struct watchdog_ops *ops;
const struct watchdog_governor *gov;
unsigned int bootstatus;
unsigned int timeout;
unsigned int pretimeout;
unsigned int min_timeout;
unsigned int max_timeout;
unsigned int min_hw_heartbeat_ms;
@ -74,9 +76,11 @@ It contains following fields:
* info: a pointer to a watchdog_info structure. This structure gives some
additional information about the watchdog timer itself (like its unique name).
* ops: a pointer to the list of watchdog operations that the watchdog supports.
* gov: a pointer to the assigned watchdog device pretimeout governor or NULL.
* timeout: the watchdog timer's timeout value (in seconds).
This is the time after which the system will reboot if user space does
not send a heartbeat request while WDOG_ACTIVE is set.
* pretimeout: the watchdog timer's pretimeout value (in seconds).
* min_timeout: the watchdog timer's minimum timeout value (in seconds).
If set, the minimum configurable value for 'timeout'.
* max_timeout: the watchdog timer's maximum timeout value (in seconds),
@ -121,6 +125,7 @@ struct watchdog_ops {
int (*ping)(struct watchdog_device *);
unsigned int (*status)(struct watchdog_device *);
int (*set_timeout)(struct watchdog_device *, unsigned int);
int (*set_pretimeout)(struct watchdog_device *, unsigned int);
unsigned int (*get_timeleft)(struct watchdog_device *);
int (*restart)(struct watchdog_device *);
void (*ref)(struct watchdog_device *) __deprecated;
@ -188,6 +193,23 @@ they are supported. These optional routines/operations are:
If set_timeout is not provided but WDIOF_SETTIMEOUT is set, the watchdog
infrastructure updates the timeout value of the watchdog_device internally
to the requested value.
If the pretimeout feature is used (WDIOF_PRETIMEOUT), then set_timeout must
also take care of checking if pretimeout is still valid and set up the timer
accordingly. This can't be done in the core without races, so it is the
duty of the driver.
* set_pretimeout: this routine checks and changes the pretimeout value of
the watchdog. It is optional because not all watchdogs support pretimeout
notification. The timeout value is not an absolute time, but the number of
seconds before the actual timeout would happen. It returns 0 on success,
-EINVAL for "parameter out of range" and -EIO for "could not write value to
the watchdog". A value of 0 disables pretimeout notification.
(Note: WDIOF_PRETIMEOUT needs to be set in the options field of the
watchdog's info structure.)
If the watchdog driver does not have to perform any action other than setting
watchdog_device.pretimeout, this callback can be omitted. That means if
set_pretimeout is not provided but WDIOF_PRETIMEOUT is set, the watchdog
infrastructure updates the pretimeout value of the watchdog_device internally
to the requested value.
* get_timeleft: this routine returns the time that's left before a reset.
* restart: this routine restarts the machine. It returns 0 on success or a
negative errno code for failure.
@ -268,3 +290,14 @@ User should follow the following guidelines for setting the priority:
* 128: default restart handler, use if no other handler is expected to be
available, and/or if restart is sufficient to restart the entire system
* 255: highest priority, will preempt all other restart handlers
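As a hedged sketch of a driver-side restart handler and priority
registration (the my_wdt names and registers are hypothetical; the restart
signature follows struct watchdog_ops above):

	static int my_wdt_restart(struct watchdog_device *wdd)
	{
		/* Hypothetical register write that force-resets the SoC. */
		writel(MY_WDT_FORCE_RESET, my_wdt_base + MY_WDT_CTRL);
		return 0;
	}

	/* Keep the default priority unless this watchdog is the only
	 * usable restart method on the platform. */
	watchdog_set_restart_priority(&my_wdt_dev, 128);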
To raise a pretimeout notification, the following function should be used:
void watchdog_notify_pretimeout(struct watchdog_device *wdd)
The function can be called from interrupt context. If the watchdog pretimeout
governor framework (kbuild CONFIG_WATCHDOG_PRETIMEOUT_GOV symbol) is enabled,
an action is taken by a preconfigured pretimeout governor preassigned to
the watchdog device. If the watchdog pretimeout governor framework is not
enabled, watchdog_notify_pretimeout() prints a notification message to
the kernel log buffer.
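As a hedged sketch, a driver would typically raise the notification from
its pretimeout interrupt handler (the handler below is hypothetical):

	static irqreturn_t my_wdt_pretimeout_irq(int irq, void *data)
	{
		struct watchdog_device *wdd = data;

		/* Hand the event to the assigned pretimeout governor, or
		 * log it if the governor framework is not enabled. */
		watchdog_notify_pretimeout(wdd);
		return IRQ_HANDLED;
	}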

View file

@ -4775,15 +4775,6 @@ L: iommu@lists.linux-foundation.org
S: Maintained
F: drivers/iommu/exynos-iommu.c
EXYNOS MIPI DISPLAY DRIVERS
M: Inki Dae <inki.dae@samsung.com>
M: Donghwa Lee <dh09.lee@samsung.com>
M: Kyungmin Park <kyungmin.park@samsung.com>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/exynos/exynos_mipi*
F: include/video/exynos_mipi*
EZchip NPS platform support
M: Noam Camus <noamc@ezchip.com>
S: Supported
@ -4962,12 +4953,9 @@ F: drivers/net/wan/dlci.c
F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
M: Tomi Valkeinen <tomi.valkeinen@ti.com>
L: linux-fbdev@vger.kernel.org
W: http://linux-fbdev.sourceforge.net/
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/plagnioj/linux-fbdev.git
S: Maintained
F: Documentation/fb/
F: drivers/video/
@ -9201,6 +9189,14 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/versatile.txt
F: drivers/pci/host/pci-versatile.c
PCI DRIVER FOR ARMADA 8K
M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: Documentation/devicetree/bindings/pci/pci-armada8k.txt
F: drivers/pci/host/pcie-armada8k.c
PCI DRIVER FOR APPLIEDMICRO XGENE
M: Tanmay Inamdar <tinamdar@apm.com>
L: linux-pci@vger.kernel.org
@ -9247,6 +9243,7 @@ M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/pci/aardvark-pci.txt
F: drivers/pci/host/pci-aardvark.c
PCI DRIVER FOR NVIDIA TEGRA

View file

@ -621,6 +621,7 @@ include arch/$(SRCARCH)/Makefile
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os

View file

@ -2045,44 +2045,32 @@
thermal-zones {
cpu {
trips {
trip {
cpu-shutdown-trip {
temperature = <101000>;
hysteresis = <0>;
type = "critical";
};
};
cooling-maps {
/* There are currently no cooling maps because there are no cooling devices */
};
};
mem {
trips {
trip {
mem-shutdown-trip {
temperature = <101000>;
hysteresis = <0>;
type = "critical";
};
};
cooling-maps {
/* There are currently no cooling maps because there are no cooling devices */
};
};
gpu {
trips {
trip {
gpu-shutdown-trip {
temperature = <101000>;
hysteresis = <0>;
type = "critical";
};
};
cooling-maps {
/* There are currently no cooling maps because there are no cooling devices */
};
};
};
};

View file

@ -851,7 +851,9 @@
soctherm: thermal-sensor@700e2000 {
compatible = "nvidia,tegra124-soctherm";
reg = <0x0 0x700e2000 0x0 0x1000>;
reg = <0x0 0x700e2000 0x0 0x600 /* SOC_THERM reg_base */
0x0 0x60006000 0x0 0x400>; /* CAR reg_base */
reg-names = "soctherm-reg", "car-reg";
interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&tegra_car TEGRA124_CLK_TSENSOR>,
<&tegra_car TEGRA124_CLK_SOC_THERM>;
@ -859,6 +861,15 @@
resets = <&tegra_car 78>;
reset-names = "soctherm";
#thermal-sensor-cells = <1>;
throttle-cfgs {
throttle_heavy: heavy {
nvidia,priority = <100>;
nvidia,cpu-throt-percent = <85>;
#cooling-cells = <2>;
};
};
};
dfll: clock@70110000 {
@ -1154,6 +1165,26 @@
thermal-sensors =
<&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
trips {
cpu-shutdown-trip {
temperature = <103000>;
hysteresis = <0>;
type = "critical";
};
cpu_throttle_trip: throttle-trip {
temperature = <100000>;
hysteresis = <1000>;
type = "hot";
};
};
cooling-maps {
map0 {
trip = <&cpu_throttle_trip>;
cooling-device = <&throttle_heavy 1 1>;
};
};
};
mem {
@ -1162,6 +1193,21 @@
thermal-sensors =
<&soctherm TEGRA124_SOCTHERM_SENSOR_MEM>;
trips {
mem-shutdown-trip {
temperature = <103000>;
hysteresis = <0>;
type = "critical";
};
};
cooling-maps {
/*
* There are currently no cooling maps,
* because there are no cooling devices.
*/
};
};
gpu {
@ -1170,6 +1216,26 @@
thermal-sensors =
<&soctherm TEGRA124_SOCTHERM_SENSOR_GPU>;
trips {
gpu-shutdown-trip {
temperature = <101000>;
hysteresis = <0>;
type = "critical";
};
gpu_throttle_trip: throttle-trip {
temperature = <99000>;
hysteresis = <1000>;
type = "hot";
};
};
cooling-maps {
map0 {
trip = <&gpu_throttle_trip>;
cooling-device = <&throttle_heavy 1 1>;
};
};
};
pllx {
@ -1178,6 +1244,21 @@
thermal-sensors =
<&soctherm TEGRA124_SOCTHERM_SENSOR_PLLX>;
trips {
pllx-shutdown-trip {
temperature = <103000>;
hysteresis = <0>;
type = "critical";
};
};
cooling-maps {
/*
* There are currently no cooling maps,
* because there are no cooling devices.
*/
};
};
};

View file

@ -168,8 +168,6 @@ CONFIG_DRM_PANEL_SAMSUNG_LD9040=y
CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=y
CONFIG_DRM_NXP_PTN3460=y
CONFIG_DRM_PARADE_PS8622=y
CONFIG_EXYNOS_VIDEO=y
CONFIG_EXYNOS_MIPI_DSI=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=y
CONFIG_BACKLIGHT_PWM=y

View file

@ -23,7 +23,6 @@ static inline int fsr_fs(unsigned int fsr)
#endif
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
unsigned long search_exception_table(unsigned long addr);
void early_abt_enable(void);
#endif /* __ARCH_ARM_FAULT_H */

View file

@ -4,6 +4,7 @@
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/tegra124-soctherm.h>
/ {
compatible = "nvidia,tegra132", "nvidia,tegra124";
@ -727,8 +728,10 @@
};
soctherm: thermal-sensor@700e2000 {
compatible = "nvidia,tegra124-soctherm";
reg = <0x0 0x700e2000 0x0 0x1000>;
compatible = "nvidia,tegra132-soctherm";
reg = <0x0 0x700e2000 0x0 0x600 /* 0: SOC_THERM reg_base */
0x0 0x70040000 0x0 0x200>; /* 2: CCROC reg_base */
reg-names = "soctherm-reg", "ccroc-reg";
interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&tegra_car TEGRA124_CLK_TSENSOR>,
<&tegra_car TEGRA124_CLK_SOC_THERM>;
@ -736,6 +739,118 @@
resets = <&tegra_car 78>;
reset-names = "soctherm";
#thermal-sensor-cells = <1>;
throttle-cfgs {
throttle_heavy: heavy {
nvidia,priority = <100>;
nvidia,cpu-throt-level = <TEGRA_SOCTHERM_THROT_LEVEL_HIGH>;
#cooling-cells = <2>;
};
};
};
thermal-zones {
cpu {
polling-delay-passive = <1000>;
polling-delay = <0>;
thermal-sensors =
<&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
trips {
cpu_shutdown_trip {
temperature = <105000>;
hysteresis = <1000>;
type = "critical";
};
cpu_throttle_trip: throttle-trip {
temperature = <102000>;
hysteresis = <1000>;
type = "hot";
};
};
cooling-maps {
map0 {
trip = <&cpu_throttle_trip>;
cooling-device = <&throttle_heavy 1 1>;
};
};
};
mem {
polling-delay-passive = <0>;
polling-delay = <0>;
thermal-sensors =
<&soctherm TEGRA124_SOCTHERM_SENSOR_MEM>;
trips {
mem_shutdown_trip {
temperature = <101000>;
hysteresis = <1000>;
type = "critical";
};
};
cooling-maps {
/*
* There are currently no cooling maps,
* because there are no cooling devices.
*/
};
};
gpu {
polling-delay-passive = <1000>;
polling-delay = <0>;
thermal-sensors =
<&soctherm TEGRA124_SOCTHERM_SENSOR_GPU>;
trips {
gpu_shutdown_trip {
temperature = <101000>;
hysteresis = <1000>;
type = "critical";
};
gpu_throttle_trip: throttle-trip {
temperature = <99000>;
hysteresis = <1000>;
type = "hot";
};
};
cooling-maps {
map0 {
trip = <&gpu_throttle_trip>;
cooling-device = <&throttle_heavy 1 1>;
};
};
};
pllx {
polling-delay-passive = <0>;
polling-delay = <0>;
thermal-sensors =
<&soctherm TEGRA124_SOCTHERM_SENSOR_PLLX>;
trips {
pllx_shutdown_trip {
temperature = <105000>;
hysteresis = <1000>;
type = "critical";
};
};
cooling-maps {
/*
* There are currently no cooling maps,
* because there are no cooling devices.
*/
};
};
};
ahub@70300000 {

View file

@ -3,6 +3,7 @@
#include <dt-bindings/memory/tegra210-mc.h>
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/tegra124-soctherm.h>
/ {
compatible = "nvidia,tegra210";
@ -1159,4 +1160,130 @@
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
interrupt-parent = <&gic>;
};
soctherm: thermal-sensor@700e2000 {
compatible = "nvidia,tegra210-soctherm";
reg = <0x0 0x700e2000 0x0 0x600 /* SOC_THERM reg_base */
0x0 0x60006000 0x0 0x400>; /* CAR reg_base */
reg-names = "soctherm-reg", "car-reg";
interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&tegra_car TEGRA210_CLK_TSENSOR>,
<&tegra_car TEGRA210_CLK_SOC_THERM>;
clock-names = "tsensor", "soctherm";
resets = <&tegra_car 78>;
reset-names = "soctherm";
#thermal-sensor-cells = <1>;
throttle-cfgs {
throttle_heavy: heavy {
nvidia,priority = <100>;
nvidia,cpu-throt-percent = <85>;
#cooling-cells = <2>;
};
};
};
thermal-zones {
cpu {
polling-delay-passive = <1000>;
polling-delay = <0>;
thermal-sensors =
<&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
trips {
cpu-shutdown-trip {
temperature = <102500>;
hysteresis = <0>;
type = "critical";
};
cpu_throttle_trip: throttle-trip {
temperature = <98500>;
hysteresis = <1000>;
type = "hot";
};
};
cooling-maps {
map0 {
trip = <&cpu_throttle_trip>;
cooling-device = <&throttle_heavy 1 1>;
};
};
};
mem {
polling-delay-passive = <0>;
polling-delay = <0>;
thermal-sensors =
<&soctherm TEGRA124_SOCTHERM_SENSOR_MEM>;
trips {
mem-shutdown-trip {
temperature = <103000>;
hysteresis = <0>;
type = "critical";
};
};
cooling-maps {
/*
* There are currently no cooling maps,
* because there are no cooling devices.
*/
};
};
gpu {
polling-delay-passive = <1000>;
polling-delay = <0>;
thermal-sensors =
<&soctherm TEGRA124_SOCTHERM_SENSOR_GPU>;
trips {
gpu-shutdown-trip {
temperature = <103000>;
hysteresis = <0>;
type = "critical";
};
gpu_throttle_trip: throttle-trip {
temperature = <100000>;
hysteresis = <1000>;
type = "hot";
};
};
cooling-maps {
map0 {
trip = <&gpu_throttle_trip>;
cooling-device = <&throttle_heavy 1 1>;
};
};
};
pllx {
polling-delay-passive = <0>;
polling-delay = <0>;
thermal-sensors =
<&soctherm TEGRA124_SOCTHERM_SENSOR_PLLX>;
trips {
pllx-shutdown-trip {
temperature = <103000>;
hysteresis = <0>;
type = "critical";
};
};
cooling-maps {
/*
* There are currently no cooling maps,
* because there are no cooling devices.
*/
};
};
};
};

View file

@ -522,5 +522,6 @@ extern void __init pgtable_cache_init(void);
#ifndef __ASSEMBLY__
extern void __init paging_init(void);
#endif /* !__ASSEMBLY__ */
#define HAVE_ARCH_UNMAPPED_AREA
#endif /* _ASM_PGTABLE_H */

View file

@ -32,7 +32,6 @@ typedef struct {
#define get_ds() (KERNEL_DS)
#define get_fs() (__current_thread_info->addr_limit)
#define segment_eq(a, b) ((a).seg == (b).seg)
#define __kernel_ds_p() segment_eq(get_fs(), KERNEL_DS)
#define get_addr_limit() (get_fs().seg)
#define set_fs(_x) \

View file

@ -20,8 +20,6 @@
#include <asm/segment.h>
#include <asm/sections.h>
#define HAVE_ARCH_UNMAPPED_AREA /* we decide where to put mmaps */
#define __ptr(x) ((unsigned long __force *)(x))
#define VERIFY_READ 0

View file

@ -44,9 +44,6 @@ struct exception_table_entry
unsigned long insn, fixup;
};
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
/*
* These are the main single-value transfer routines. They automatically

View file

@ -71,9 +71,6 @@ struct exception_table_entry {
unsigned long insn, fixup;
};
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
#ifndef CONFIG_MMU
/* Check against bounds of physical memory */

View file

@ -0,0 +1,13 @@
#ifndef _ASM_EXTABLE_H
#define _ASM_EXTABLE_H
struct exception_table_entry
{
unsigned long insn;
unsigned long nextinsn;
};
struct pt_regs;
extern int fixup_exception(struct pt_regs *regs);
#endif

View file

@ -3,7 +3,7 @@
#include <linux/list.h>
#include <linux/elf.h>
#include <asm/uaccess.h>
#include <asm/extable.h>
struct mod_arch_specific {
/* Data Bus Error exception tables */

View file

@ -16,6 +16,7 @@
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>
/*
* The fs value determines whether argument validity checking should be
@ -1485,12 +1486,4 @@ static inline long strnlen_user(const char __user *s, long n)
return res;
}
struct exception_table_entry
{
unsigned long insn;
unsigned long nextinsn;
};
extern int fixup_exception(struct pt_regs *regs);
#endif /* _ASM_UACCESS_H */

View file

@ -16,6 +16,7 @@
#include <linux/timer.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include "picvue.h"

View file

@ -18,7 +18,6 @@
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cpu-regs.h>
#include <asm/uaccess.h>
#include <asm/current.h>
/* Forward declaration, a strange C thing */

View file

@ -38,7 +38,6 @@
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
#define __kernel_ds_p() (current_thread_info()->addr_limit.seg == 0x9FFFFFFF)
#define segment_eq(a, b) ((a).seg == (b).seg)
@ -72,12 +71,6 @@ static inline int ___range_ok(unsigned long addr, unsigned int size)
#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size) (__range_ok((addr), (size)) == 0)
static inline int verify_area(int type, const void *addr, unsigned long size)
{
return access_ok(type, addr, size) ? 0 : -EFAULT;
}
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is

View file

@ -75,7 +75,7 @@ static int restore_sigcontext(struct pt_regs *regs,
struct fpucontext *buf;
err |= __get_user(buf, &sc->fpucontext);
if (buf) {
if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
goto badframe;
err |= fpu_restore_sigcontext(buf);
}
@ -98,7 +98,7 @@ asmlinkage long sys_sigreturn(void)
long d0;
frame = (struct sigframe __user *) current_frame()->sp;
if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.oldmask))
goto badframe;
@ -130,7 +130,7 @@ asmlinkage long sys_rt_sigreturn(void)
long d0;
frame = (struct rt_sigframe __user *) current_frame()->sp;
if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;

View file

@ -82,10 +82,6 @@ struct exception_table_entry {
unsigned long insn, fixup;
};
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
extern void sort_exception_table(void);
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.

View file

@ -83,10 +83,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
/* This is the size of the initially mapped kernel memory */
#if defined(CONFIG_64BIT) || defined(CONFIG_SMP)
#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
#if defined(CONFIG_64BIT)
#define KERNEL_INITIAL_ORDER 26 /* 1<<26 = 64MB */
#else
#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
#endif
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)

View file

@ -11,6 +11,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs,
void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* mm/fault.c */
const char *trap_name(unsigned long code);
void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long address);
#endif

View file

@ -458,8 +458,8 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
}
printk("\n");
printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
msg, code, regs, offset);
pr_crit("%s: Code=%d (%s) regs=%p (Addr=" RFMT ")\n",
msg, code, trap_name(code), regs, offset);
show_regs(regs);
spin_unlock(&terminate_lock);

View file

@ -90,8 +90,9 @@ SECTIONS
/* Start of data section */
_sdata = .;
RO_DATA_SECTION(8)
/* Architecturally we need to keep __gp below 0x1000000 and thus
* in front of RO_DATA_SECTION() which stores lots of tracepoint
* and ftrace symbols. */
#ifdef CONFIG_64BIT
. = ALIGN(16);
/* Linkage tables */
@ -106,6 +107,12 @@ SECTIONS
}
#endif
RO_DATA_SECTION(8)
/* RO because of BUILDTIME_EXTABLE_SORT */
EXCEPTION_TABLE(8)
NOTES
/* unwind info */
.PARISC.unwind : {
__start___unwind = .;
@ -121,9 +128,6 @@ SECTIONS
. = ALIGN(HUGEPAGE_SIZE);
data_start = .;
EXCEPTION_TABLE(8)
NOTES
/* Data */
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE)

View file

@ -14,7 +14,7 @@
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <asm/traps.h>
@ -204,6 +204,16 @@ static const char * const trap_description[] = {
[28] "Unaligned data reference trap",
};
const char *trap_name(unsigned long code)
{
const char *t = NULL;
if (code < ARRAY_SIZE(trap_description))
t = trap_description[code];
return t ? t : "Unknown trap";
}
/*
* Print out info about fatal segfaults, if the show_unhandled_signals
* sysctl is set:
@ -213,8 +223,6 @@ show_signal_msg(struct pt_regs *regs, unsigned long code,
unsigned long address, struct task_struct *tsk,
struct vm_area_struct *vma)
{
const char *trap_name = NULL;
if (!unhandled_signal(tsk, SIGSEGV))
return;
@ -226,10 +234,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long code,
tsk->comm, code, address);
print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
if (code < ARRAY_SIZE(trap_description))
trap_name = trap_description[code];
pr_warn(KERN_CONT " trap #%lu: %s%c", code,
trap_name ? trap_name : "unknown",
pr_cont(" trap #%lu: %s%c", code, trap_name(code),
vma ? ',':'\n');
if (vma)

View file

@ -105,6 +105,8 @@ static void * __init get_memblock(unsigned long size)
else
panic("get_memblock() failed.\n");
memset(__va(phys), 0, size);
return __va(phys);
}

View file

@ -0,0 +1,11 @@
#ifndef _ASM_SCORE_EXTABLE_H
#define _ASM_SCORE_EXTABLE_H
struct exception_table_entry {
unsigned long insn;
unsigned long fixup;
};
struct pt_regs;
extern int fixup_exception(struct pt_regs *regs);
#endif

View file

@ -2,7 +2,7 @@
#define _ASM_SCORE_MODULE_H
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/extable.h>
#include <asm-generic/module.h>
struct mod_arch_specific {

View file

@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm/extable.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@ -420,12 +421,5 @@ static inline long strnlen_user(const char __user *str, long len)
return __strnlen_user(str, len);
}
struct exception_table_entry {
unsigned long insn;
unsigned long fixup;
};
extern int fixup_exception(struct pt_regs *regs);
#endif /* __SCORE_UACCESS_H */

View file

@ -192,8 +192,6 @@ struct exception_table_entry {
#endif
int fixup_exception(struct pt_regs *regs);
/* Returns 0 if exception not found and fixup.unit otherwise. */
unsigned long search_exception_table(unsigned long addr);
const struct exception_table_entry *search_exception_tables(unsigned long addr);
extern void *set_exception_table_vec(unsigned int vec, void *handler);

View file

@ -7,7 +7,7 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/extable_64.h>
#include <asm/spitfire.h>
/*

View file

@ -0,0 +1,20 @@
#ifndef __ASM_EXTABLE64_H
#define __ASM_EXTABLE64_H
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
unsigned int insn, fixup;
};
#endif

View file

@ -13,6 +13,7 @@
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/extable_64.h>
#endif
#ifndef __ASSEMBLY__
@ -81,23 +82,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
return 1;
}
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
unsigned int insn, fixup;
};
void __ret_efault(void);
void __retl_efault(void);

View file

@ -4,7 +4,6 @@
/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
#include <asm/uaccess.h>
/*
* The set_memory_* API can be used to change various attributes of a virtual

View file

@ -0,0 +1,35 @@
#ifndef _ASM_X86_EXTABLE_H
#define _ASM_X86_EXTABLE_H
/*
* The exception table consists of triples of addresses relative to the
* exception table entry itself. The first address is of an instruction
* that is allowed to fault, the second is the target at which the program
* should continue. The third is a handler function to deal with the fault
* caused by the instruction in the first field.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
int insn, fixup, handler;
};
struct pt_regs;
#define ARCH_HAS_RELATIVE_EXTABLE
#define swap_ex_entry_fixup(a, b, tmp, delta) \
do { \
(a)->fixup = (b)->fixup + (delta); \
(b)->fixup = (tmp).fixup - (delta); \
(a)->handler = (b)->handler + (delta); \
(b)->handler = (tmp).handler - (delta); \
} while (0)
extern int fixup_exception(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
#endif

View file

@ -2,7 +2,7 @@
#define _ASM_X86_SECTIONS_H
#include <asm-generic/sections.h>
#include <asm/uaccess.h>
#include <asm/extable.h>
extern char __brk_base[], __brk_limit[];
extern struct exception_table_entry __stop___ex_table[];

View file

@ -11,6 +11,7 @@
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@ -90,37 +91,6 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
#define access_ok(type, addr, size) \
likely(!__range_not_ok(addr, size, user_addr_max()))
/*
* The exception table consists of triples of addresses relative to the
* exception table entry itself. The first address is of an instruction
* that is allowed to fault, the second is the target at which the program
* should continue. The third is a handler function to deal with the fault
* caused by the instruction in the first field.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
int insn, fixup, handler;
};
#define ARCH_HAS_RELATIVE_EXTABLE
#define swap_ex_entry_fixup(a, b, tmp, delta) \
do { \
(a)->fixup = (b)->fixup + (delta); \
(b)->fixup = (tmp).fixup - (delta); \
(a)->handler = (b)->handler + (delta); \
(b)->handler = (tmp).handler - (delta); \
} while (0)
extern int fixup_exception(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.

View file

@ -5,7 +5,7 @@
*/
#include <linux/sched.h> /* test_thread_flag(), ... */
#include <linux/kdebug.h> /* oops_begin/end, ... */
#include <linux/extable.h> /* search_exception_table */
#include <linux/extable.h> /* search_exception_tables */
#include <linux/bootmem.h> /* max_low_pfn */
#include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */

View file

@ -0,0 +1,160 @@
/*
* include/asm-xtensa/uaccess.h
*
* User space memory access functions
*
* These routines provide basic accessing functions to the user memory
* space for the kernel.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_ASM_UACCESS_H
#define _XTENSA_ASM_UACCESS_H
#include <linux/errno.h>
#include <asm/types.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
/*
* These assembly macros mirror the C macros in asm/uaccess.h. They
* should always have identical functionality. See
* arch/xtensa/kernel/sys.S for usage.
*/
#define KERNEL_DS 0
#define USER_DS 1
#define get_ds (KERNEL_DS)
/*
* get_fs reads current->thread.current_ds into a register.
* On Entry:
* <ad> anything
* <sp> stack
* On Exit:
* <ad> contains current->thread.current_ds
*/
.macro get_fs ad, sp
GET_CURRENT(\ad,\sp)
#if THREAD_CURRENT_DS > 1020
addi \ad, \ad, TASK_THREAD
l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
#else
l32i \ad, \ad, THREAD_CURRENT_DS
#endif
.endm
/*
* set_fs sets current->thread.current_ds to some value.
* On Entry:
* <at> anything (temp register)
* <av> value to write
* <sp> stack
* On Exit:
* <at> destroyed (actually, current)
* <av> preserved, value to write
*/
.macro set_fs at, av, sp
GET_CURRENT(\at,\sp)
s32i \av, \at, THREAD_CURRENT_DS
.endm
/*
* kernel_ok determines whether we should bypass addr/size checking.
* See the equivalent C-macro version below for clarity.
* On success, kernel_ok branches to a label indicated by parameter
* <success>. This implies that the macro falls through to the next
* instruction on an error.
*
* Note that while this macro can be used independently, we designed
* it for optimal use in the access_ok macro below (i.e., we fall
* through on error).
*
* On Entry:
* <at> anything (temp register)
* <success> label to branch to on success; implies
* fall-through macro on error
* <sp> stack pointer
* On Exit:
* <at> destroyed (actually, current->thread.current_ds)
*/
#if ((KERNEL_DS != 0) || (USER_DS == 0))
# error Assembly macro kernel_ok fails
#endif
.macro kernel_ok at, sp, success
get_fs \at, \sp
beqz \at, \success
.endm
/*
* user_ok determines whether the access to user-space memory is allowed.
* See the equivalent C-macro version below for clarity.
*
* On error, user_ok branches to a label indicated by parameter
* <error>. This implies that the macro falls through to the next
* instruction on success.
*
* Note that while this macro can be used independently, we designed
* it for optimal use in the access_ok macro below (i.e., we fall
* through on success).
*
* On Entry:
* <aa> register containing memory address
* <as> register containing memory size
* <at> temp register
* <error> label to branch to on error; implies fall-through
* macro on success
* On Exit:
* <aa> preserved
* <as> preserved
* <at> destroyed (actually, (TASK_SIZE + 1 - size))
*/
.macro user_ok aa, as, at, error
movi \at, __XTENSA_UL_CONST(TASK_SIZE)
bgeu \as, \at, \error
sub \at, \at, \as
bgeu \aa, \at, \error
.endm
/*
* access_ok determines whether a memory access is allowed. See the
* equivalent C-macro version below for clarity.
*
* On error, access_ok branches to a label indicated by parameter
* <error>. This implies that the macro falls through to the next
* instruction on success.
*
* Note that we assume success is the common case, and we optimize the
* branch fall-through case on success.
*
* On Entry:
* <aa> register containing memory address
* <as> register containing memory size
* <at> temp register
* <sp>
* <error> label to branch to on error; implies fall-through
* macro on success
* On Exit:
* <aa> preserved
* <as> preserved
* <at> destroyed
*/
.macro access_ok aa, as, at, sp, error
kernel_ok \at, \sp, .Laccess_ok_\@
user_ok \aa, \as, \at, \error
.Laccess_ok_\@:
.endm
#endif /* _XTENSA_ASM_UACCESS_H */

View file

@ -17,153 +17,12 @@
#define _XTENSA_UACCESS_H
#include <linux/errno.h>
#ifndef __ASSEMBLY__
#include <linux/prefetch.h>
#endif
#include <asm/types.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#ifdef __ASSEMBLY__
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
/*
* These assembly macros mirror the C macros that follow below. They
* should always have identical functionality. See
* arch/xtensa/kernel/sys.S for usage.
*/
#define KERNEL_DS 0
#define USER_DS 1
#define get_ds (KERNEL_DS)
/*
* get_fs reads current->thread.current_ds into a register.
* On Entry:
* <ad> anything
* <sp> stack
* On Exit:
* <ad> contains current->thread.current_ds
*/
.macro get_fs ad, sp
GET_CURRENT(\ad,\sp)
#if THREAD_CURRENT_DS > 1020
addi \ad, \ad, TASK_THREAD
l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
#else
l32i \ad, \ad, THREAD_CURRENT_DS
#endif
.endm
/*
* set_fs sets current->thread.current_ds to some value.
* On Entry:
* <at> anything (temp register)
* <av> value to write
* <sp> stack
* On Exit:
* <at> destroyed (actually, current)
* <av> preserved, value to write
*/
.macro set_fs at, av, sp
GET_CURRENT(\at,\sp)
s32i \av, \at, THREAD_CURRENT_DS
.endm
/*
* kernel_ok determines whether we should bypass addr/size checking.
* See the equivalent C-macro version below for clarity.
* On success, kernel_ok branches to a label indicated by parameter
* <success>. This implies that the macro falls through to the next
* instruction on an error.
*
* Note that while this macro can be used independently, we designed
* it for optimal use in the access_ok macro below (i.e., we fall
* through on error).
*
* On Entry:
* <at> anything (temp register)
* <success> label to branch to on success; implies
* fall-through macro on error
* <sp> stack pointer
* On Exit:
* <at> destroyed (actually, current->thread.current_ds)
*/
#if ((KERNEL_DS != 0) || (USER_DS == 0))
# error Assembly macro kernel_ok fails
#endif
.macro kernel_ok at, sp, success
get_fs \at, \sp
beqz \at, \success
.endm
/*
* user_ok determines whether the access to user-space memory is allowed.
* See the equivalent C-macro version below for clarity.
*
* On error, user_ok branches to a label indicated by parameter
* <error>. This implies that the macro falls through to the next
* instruction on success.
*
* Note that while this macro can be used independently, we designed
* it for optimal use in the access_ok macro below (i.e., we fall
* through on success).
*
* On Entry:
* <aa> register containing memory address
* <as> register containing memory size
* <at> temp register
* <error> label to branch to on error; implies fall-through
* macro on success
* On Exit:
* <aa> preserved
* <as> preserved
* <at> destroyed (actually, (TASK_SIZE + 1 - size))
*/
.macro user_ok aa, as, at, error
movi \at, __XTENSA_UL_CONST(TASK_SIZE)
bgeu \as, \at, \error
sub \at, \at, \as
bgeu \aa, \at, \error
.endm
/*
* access_ok determines whether a memory access is allowed. See the
* equivalent C-macro version below for clarity.
*
* On error, access_ok branches to a label indicated by parameter
* <error>. This implies that the macro falls through to the next
* instruction on success.
*
* Note that we assume success is the common case, and we optimize the
* branch fall-through case on success.
*
* On Entry:
* <aa> register containing memory address
* <as> register containing memory size
* <at> temp register
* <sp>
* <error> label to branch to on error; implies fall-through
* macro on success
* On Exit:
* <aa> preserved
* <as> preserved
* <at> destroyed
*/
.macro access_ok aa, as, at, sp, error
kernel_ok \at, \sp, .Laccess_ok_\@
user_ok \aa, \as, \at, \error
.Laccess_ok_\@:
.endm
#else /* __ASSEMBLY__ not defined */
#include <linux/sched.h>
/*
@ -495,16 +354,4 @@ struct exception_table_entry
unsigned long insn, fixup;
};
/* Returns 0 if exception not found and fixup.unit otherwise. */
extern unsigned long search_exception_table(unsigned long addr);
extern void sort_exception_table(void);
/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc) \
({ \
fixup_unit; \
})
#endif /* __ASSEMBLY__ */
#endif /* _XTENSA_UACCESS_H */

View file

@ -17,7 +17,7 @@
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>

View file

@ -17,7 +17,7 @@
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>

View file

@ -520,7 +520,8 @@ static void acpi_thermal_check(void *data)
if (!tz->tz_enabled)
return;
thermal_zone_device_update(tz->thermal_zone);
thermal_zone_device_update(tz->thermal_zone,
THERMAL_EVENT_UNSPECIFIED);
}
/* sys I/F for generic thermal sysfs support */

View file

@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/reboot.h>

View file

@ -471,9 +471,9 @@ static int bond_check_dev_link(struct bonding *bond,
/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
mii = if_mii(&ifr);
if (IOCTL(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
mii->reg_num = MII_BMSR;
if (IOCTL(slave_dev, &ifr, SIOCGMIIREG) == 0)
if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
return mii->val_out & BMSR_LSTATUS;
}
}

View file

@ -693,7 +693,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
!(reg_val &
CN23XX_PKT_INPUT_CTL_QUIET) &&
loop--) {
--loop) {
reg_val = octeon_read_csr64(
oct,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

View file

@ -287,7 +287,7 @@ retry:
goto retry;
}
MLX5_SET64(manage_pages_in, in, pas[i], addr);
MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
}
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
@ -344,7 +344,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
if (fwp->func_id != func_id)
continue;
MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);
MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
i++;
}

View file

@ -1517,7 +1517,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
struct qed_ll2_info ll2_info;
struct qed_ll2_buffer *buffer;
struct qed_ll2_buffer *buffer, *tmp_buffer;
enum qed_ll2_conn_type conn_type;
struct qed_ptt *p_ptt;
int rc, i;
@ -1587,7 +1587,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
/* Post all Rx buffers to FW */
spin_lock_bh(&cdev->ll2->lock);
list_for_each_entry(buffer, &cdev->ll2->list, list) {
list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
cdev->ll2->handle,
buffer->phys_addr, 0, buffer, 1);

View file

@ -2947,7 +2947,7 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
.roce_ll2_stats = &qed_roce_ll2_stats,
};
const struct qed_rdma_ops *qed_get_rdma_ops()
const struct qed_rdma_ops *qed_get_rdma_ops(void)
{
return &qed_rdma_ops_pass;
}

View file

@ -652,20 +652,27 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
if (IS_ERR(priv->clk_ptp_ref)) {
priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
priv->clk_ptp_ref = NULL;
netdev_dbg(priv->dev, "PTP uses main clock\n");
} else {
clk_prepare_enable(priv->clk_ptp_ref);
priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
netdev_dbg(priv->dev, "PTP rate %d\n", priv->clk_ptp_rate);
}
priv->adv_ts = 0;
if (priv->dma_cap.atime_stamp && priv->extend_desc)
/* Check if adv_ts can be enabled for dwmac 4.x core */
if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
priv->adv_ts = 1;
/* Dwmac 3.x core with extend_desc can support adv_ts */
else if (priv->extend_desc && priv->dma_cap.atime_stamp)
priv->adv_ts = 1;
if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
pr_debug("IEEE 1588-2002 Time Stamp supported\n");
if (priv->dma_cap.time_stamp)
netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
if (netif_msg_hw(priv) && priv->adv_ts)
pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
if (priv->adv_ts)
netdev_info(priv->dev,
"IEEE 1588-2008 Advanced Timestamp supported\n");
priv->hw->ptp = &stmmac_ptp;
priv->hwts_tx_en = 0;
@ -1702,8 +1709,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (init_ptp) {
ret = stmmac_init_ptp(priv);
if (ret && ret != -EOPNOTSUPP)
pr_warn("%s: failed PTP initialisation\n", __func__);
if (ret)
netdev_warn(priv->dev, "PTP support cannot init.\n");
}
#ifdef CONFIG_DEBUG_FS

View file

@ -186,10 +186,12 @@ int stmmac_ptp_register(struct stmmac_priv *priv)
priv->device);
if (IS_ERR(priv->ptp_clock)) {
priv->ptp_clock = NULL;
pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
} else if (priv->ptp_clock)
pr_debug("Added PTP HW clock successfully on %s\n",
priv->dev->name);
return PTR_ERR(priv->ptp_clock);
}
spin_lock_init(&priv->ptp_lock);
netdev_dbg(priv->dev, "Added PTP HW clock successfully\n");
return 0;
}
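The rework turns a logged-and-ignored ptp_clock_register() failure into a propagated error, using the kernel's error-pointer idiom: an errno is encoded in the pointer itself, tested with IS_ERR() and extracted with PTR_ERR(). One subtlety: PTR_ERR() must read the pointer while it still holds the error value, so clearing it to NULL first, as the hunk above does, makes the extracted value 0. The idiom in isolation, with a hypothetical registration call:

#include <linux/err.h>

/* demo_register() is hypothetical; like ptp_clock_register() it returns
 * either a valid pointer or ERR_PTR(-errno).
 */
struct demo *demo_register(void);

static int demo_setup(struct demo **out)
{
        struct demo *d = demo_register();

        if (IS_ERR(d))
                return PTR_ERR(d);      /* propagate -ENOMEM, -EINVAL, ... */
        *out = d;
        return 0;
}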


@ -610,8 +610,8 @@ err_out_regions:
#ifdef CONFIG_PCI
if (pdev)
pci_release_regions(pdev);
#endif
err_out:
#endif
if (pdev)
pci_disable_device(pdev);
return rc;
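Relocating err_out: under the same #ifdef CONFIG_PCI guard presumably reflects that the only goto targeting it is itself PCI-only, so a !CONFIG_PCI build no longer sees an unused label. A reduced, compilable shape of the same layout (names hypothetical):

#define CONFIG_PCI      /* remove to mimic a !CONFIG_PCI build */

static int demo_probe(int claim_failed)
{
        int rc = 0;

#ifdef CONFIG_PCI
        if (claim_failed) {
                rc = -1;
                goto err_out;   /* the label's only user is PCI-only too */
        }
err_out:
#endif
        return rc;      /* common teardown would run here on both flavours */
}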


@ -431,8 +431,7 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
static void __axienet_device_reset(struct axienet_local *lp,
struct device *dev, off_t offset)
static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
{
u32 timeout;
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
@ -468,8 +467,8 @@ static void axienet_device_reset(struct net_device *ndev)
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
__axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
__axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
lp->options |= XAE_OPTION_VLAN;
@ -1338,8 +1337,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
~XAE_MDIO_MC_MDIOEN_MASK));
__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
__axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
__axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
axienet_mdio_wait_until_ready(lp);


@ -442,8 +442,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
}
net_trans_info = get_net_transport_info(skb, &hdr_offset);
if (net_trans_info == TRANSPORT_INFO_NOT_IP)
goto do_send;
/*
* Setup the sendside checksum offload only if this is not a
@ -478,56 +476,29 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
}
lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
goto do_send;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (net_trans_info & INFO_TCP) {
rndis_msg_size += NDIS_CSUM_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
TCPIP_CHKSUM_PKTINFO);
csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
ppi->ppi_offset);
if (net_trans_info & (INFO_IPV4 << 16))
csum_info->transmit.is_ipv4 = 1;
else
csum_info->transmit.is_ipv6 = 1;
csum_info->transmit.tcp_checksum = 1;
csum_info->transmit.tcp_header_offset = hdr_offset;
} else {
/* UDP checksum (and other) offload is not supported. */
if (skb_checksum_help(skb))
goto drop;
}
}
if ((skb->ip_summed == CHECKSUM_NONE) ||
(skb->ip_summed == CHECKSUM_UNNECESSARY))
goto do_send;
rndis_msg_size += NDIS_CSUM_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
TCPIP_CHKSUM_PKTINFO);
csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
ppi->ppi_offset);
if (net_trans_info & (INFO_IPV4 << 16))
csum_info->transmit.is_ipv4 = 1;
else
csum_info->transmit.is_ipv6 = 1;
if (net_trans_info & INFO_TCP) {
csum_info->transmit.tcp_checksum = 1;
csum_info->transmit.tcp_header_offset = hdr_offset;
} else if (net_trans_info & INFO_UDP) {
/* UDP checksum offload is not supported on ws2008r2.
* Furthermore, on ws2012 and ws2012r2, there are some
* issues with udp checksum offload from Linux guests.
* (these are host issues).
* For now compute the checksum here.
*/
struct udphdr *uh;
u16 udp_len;
ret = skb_cow_head(skb, 0);
if (ret)
goto no_memory;
uh = udp_hdr(skb);
udp_len = ntohs(uh->len);
uh->check = 0;
uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
udp_len, IPPROTO_UDP,
csum_partial(uh, udp_len, 0));
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
csum_info->transmit.udp_checksum = 0;
}
do_send:
/* Start filling in the page buffers with the rndis hdr */
rndis_msg->msg_len += rndis_msg_size;
packet->total_data_buflen = rndis_msg->msg_len;
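Background for the CSUM_MANGLED_0 substitution above: the Internet checksum is one's-complement arithmetic, which has two encodings of zero (0x0000 and 0xFFFF), and RFC 768 reserves 0x0000 on the wire to mean "no checksum computed" for UDP over IPv4. A transmitter that legitimately computes zero must therefore send the equivalent 0xFFFF. As a hypothetical helper mirroring the check:

#include <linux/types.h>

static __sum16 udp_wire_csum(__sum16 computed)
{
        if (computed == 0)
                return (__force __sum16)0xFFFF; /* == CSUM_MANGLED_0 */
        return computed;
}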


@ -607,6 +607,21 @@ void phy_start_machine(struct phy_device *phydev)
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}
/**
* phy_trigger_machine - trigger the state machine to run
*
* @phydev: the phy_device struct
*
* Description: There has been a change in state which requires that the
* state machine runs.
*/
static void phy_trigger_machine(struct phy_device *phydev)
{
cancel_delayed_work_sync(&phydev->state_queue);
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
}
/**
* phy_stop_machine - stop the PHY state machine tracking
* @phydev: target phy_device struct
@ -639,6 +654,8 @@ static void phy_error(struct phy_device *phydev)
mutex_lock(&phydev->lock);
phydev->state = PHY_HALTED;
mutex_unlock(&phydev->lock);
phy_trigger_machine(phydev);
}
/**
@ -800,8 +817,7 @@ void phy_change(struct work_struct *work)
}
/* reschedule state queue work to run as soon as possible */
cancel_delayed_work_sync(&phydev->state_queue);
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
phy_trigger_machine(phydev);
return;
ignore:
@ -890,6 +906,8 @@ void phy_start(struct phy_device *phydev)
/* if phy was suspended, bring the physical link up again */
if (do_resume)
phy_resume(phydev);
phy_trigger_machine(phydev);
}
EXPORT_SYMBOL(phy_start);
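The new helper deduplicates the "cancel any pending run, then queue an immediate one" pair that phy_change() open-coded and that phy_error() and phy_start() now also need; cancel_delayed_work_sync() runs first so a newly queued run cannot race one already in flight. The pattern in isolation, as a hypothetical wrapper:

#include <linux/workqueue.h>

static void kick_now(struct delayed_work *dw)
{
        cancel_delayed_work_sync(dw);   /* flush/cancel a queued run first */
        queue_delayed_work(system_power_efficient_wq, dw, 0);  /* run ASAP */
}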


@ -59,6 +59,10 @@ enum qmi_wwan_flags {
QMI_WWAN_FLAG_RAWIP = 1 << 0,
};
enum qmi_wwan_quirks {
QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
};
static void qmi_wwan_netdev_setup(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
@ -411,9 +415,14 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
* clearing out state the clients might need.
*
* MDM9x30 is the first QMI chipset with USB3 support. Abuse
* this fact to enable the quirk.
* this fact to enable the quirk for all USB3 devices.
*
* There are also chipsets with the same "set DTR" requirement
* but without USB3 support. Devices based on these chips
* need a quirk flag in the device ID table.
*/
if (le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201) {
if (dev->driver_info->data & QMI_WWAN_QUIRK_DTR ||
le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201) {
qmi_wwan_manage_power(dev, 1);
qmi_wwan_change_dtr(dev, true);
}
@ -526,6 +535,16 @@ static const struct driver_info qmi_wwan_info = {
.rx_fixup = qmi_wwan_rx_fixup,
};
static const struct driver_info qmi_wwan_info_quirk_dtr = {
.description = "WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind,
.unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
.rx_fixup = qmi_wwan_rx_fixup,
.data = QMI_WWAN_QUIRK_DTR,
};
#define HUAWEI_VENDOR_ID 0x12D1
/* map QMI/wwan function by a fixed interface number */
@ -533,6 +552,11 @@ static const struct driver_info qmi_wwan_info = {
USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
.driver_info = (unsigned long)&qmi_wwan_info
/* devices requiring "set DTR" quirk */
#define QMI_QUIRK_SET_DTR(vend, prod, num) \
USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
.driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
#define QMI_GOBI1K_DEVICE(vend, prod) \
QMI_FIXED_INTF(vend, prod, 3)
@ -895,6 +919,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
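The mechanics above: each ID-table entry carries a driver_info pointer, and the quirk variant differs only in its .data bit, so bind-time code can OR the table-driven quirk with the bcdUSB heuristic. A hypothetical predicate restating that check:

#include <linux/usb/usbnet.h>

static bool needs_set_dtr(const struct usbnet *dev)
{
        return (dev->driver_info->data & QMI_WWAN_QUIRK_DTR) ||
               le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201;
}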


@ -407,4 +407,8 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);
#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
#endif
#endif /* __XEN_NETBACK__COMMON_H__ */


@ -360,6 +360,74 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
{
unsigned int i;
switch (vif->hash.alg) {
case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
break;
case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
seq_puts(m, "Hash Algorithm: NONE\n");
/* FALLTHRU */
default:
return;
}
if (vif->hash.flags) {
seq_puts(m, "\nHash Flags:\n");
if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
seq_puts(m, "- IPv4\n");
if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
seq_puts(m, "- IPv4 + TCP\n");
if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
seq_puts(m, "- IPv6\n");
if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
seq_puts(m, "- IPv6 + TCP\n");
}
seq_puts(m, "\nHash Key:\n");
for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
unsigned int j, n;
n = 8;
if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;
seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);
for (j = 0; j < n; j++, i++)
seq_printf(m, "%02x ", vif->hash.key[i]);
seq_puts(m, "\n");
}
if (vif->hash.size != 0) {
seq_puts(m, "\nHash Mapping:\n");
for (i = 0; i < vif->hash.size; ) {
unsigned int j, n;
n = 8;
if (i + n >= vif->hash.size)
n = vif->hash.size - i;
seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
for (j = 0; j < n; j++, i++)
seq_printf(m, "%4u ", vif->hash.mapping[i]);
seq_puts(m, "\n");
}
}
}
#endif /* CONFIG_DEBUG_FS */
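Both dump loops above share one shape: walk an array in rows of eight entries, clamping the final row. The same chunking as a standalone helper (array and length hypothetical):

#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/types.h>

static void dump_rows(struct seq_file *m, const u8 *key, unsigned int len)
{
        unsigned int i;

        for (i = 0; i < len; ) {
                unsigned int j, n = min(8U, len - i);   /* clamp the last row */

                seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);
                for (j = 0; j < n; j++, i++)
                        seq_printf(m, "%02x ", key[i]);
                seq_puts(m, "\n");
        }
}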
void xenvif_init_hash(struct xenvif *vif)
{
if (xenvif_hash_cache_size == 0)


@ -337,9 +337,9 @@ static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
frag_data += pkt->frag_offset;
frag_len -= pkt->frag_offset;
chunk_len = min(frag_len, XEN_PAGE_SIZE - offset);
chunk_len = min(chunk_len,
XEN_PAGE_SIZE - xen_offset_in_page(frag_data));
chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
xen_offset_in_page(frag_data));
pkt->frag_offset += chunk_len;
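Why min_t() here: the kernel's min() type-checks its operands and warns when they differ, which happens for frag_len (size_t) versus the unsigned-long page arithmetic on configurations where size_t is not unsigned long (32-bit builds, where this Xen driver also runs); min_t(size_t, ...) casts both sides explicitly. In miniature:

#include <linux/kernel.h>

static size_t clamp_chunk(size_t frag_len, unsigned long space)
{
        /* min(frag_len, space) warns when size_t != unsigned long */
        return min_t(size_t, frag_len, space); /* both sides cast to size_t */
}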
@ -425,6 +425,8 @@ void xenvif_rx_skb(struct xenvif_queue *queue)
xenvif_rx_next_skb(queue, &pkt);
queue->last_rx_time = jiffies;
do {
struct xen_netif_rx_request *req;
struct xen_netif_rx_response *rsp;


@ -165,7 +165,7 @@ xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
return count;
}
static int xenvif_dump_open(struct inode *inode, struct file *filp)
static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
{
int ret;
void *queue = NULL;
@ -179,13 +179,35 @@ static int xenvif_dump_open(struct inode *inode, struct file *filp)
static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
.owner = THIS_MODULE,
.open = xenvif_dump_open,
.open = xenvif_io_ring_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = xenvif_write_io_ring,
};
static int xenvif_read_ctrl(struct seq_file *m, void *v)
{
struct xenvif *vif = m->private;
xenvif_dump_hash_info(vif, m);
return 0;
}
static int xenvif_ctrl_open(struct inode *inode, struct file *filp)
{
return single_open(filp, xenvif_read_ctrl, inode->i_private);
}
static const struct file_operations xenvif_dbg_ctrl_ops_fops = {
.owner = THIS_MODULE,
.open = xenvif_ctrl_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
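The new ctrl node is the stock seq_file single-show recipe: single_open() records the show callback and a private pointer (surfaced as m->private), while seq_read/seq_lseek/single_release handle the rest. Minimal shape with a hypothetical show routine:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
        seq_puts(m, "state: ok\n");     /* m->private carries inode->i_private */
        return 0;
}

static int demo_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};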
static void xenvif_debugfs_addif(struct xenvif *vif)
{
struct dentry *pfile;
@ -210,6 +232,17 @@ static void xenvif_debugfs_addif(struct xenvif *vif)
pr_warn("Creation of io_ring file returned %ld!\n",
PTR_ERR(pfile));
}
if (vif->ctrl_irq) {
pfile = debugfs_create_file("ctrl",
S_IRUSR,
vif->xenvif_dbg_root,
vif,
&xenvif_dbg_ctrl_ops_fops);
if (IS_ERR_OR_NULL(pfile))
pr_warn("Creation of ctrl file returned %ld!\n",
PTR_ERR(pfile));
}
} else
netdev_warn(vif->dev,
"Creation of vif debugfs dir returned %ld!\n",


@ -230,20 +230,20 @@ static int advk_pcie_link_up(struct advk_pcie *pcie)
static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
int retries;
/* check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
if (advk_pcie_link_up(pcie)) {
dev_info(&pcie->pdev->dev, "link up\n");
dev_info(dev, "link up\n");
return 0;
}
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
dev_err(&pcie->pdev->dev, "link never came up\n");
dev_err(dev, "link never came up\n");
return -ETIMEDOUT;
}
@ -376,6 +376,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
unsigned int status;
char *strcomp_status, *str_posted;
@ -407,12 +408,13 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
else
str_posted = "Posted";
dev_err(&pcie->pdev->dev, "%s PIO Response Status: %s, %#x @ %#x\n",
dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
}
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
@ -426,7 +428,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
return 0;
}
dev_err(&pcie->pdev->dev, "config read/write timed out\n");
dev_err(dev, "config read/write timed out\n");
return -ETIMEDOUT;
}
@ -560,10 +562,11 @@ static int advk_pcie_alloc_msi(struct advk_pcie *pcie)
static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
{
struct device *dev = &pcie->pdev->dev;
mutex_lock(&pcie->msi_used_lock);
if (!test_bit(hwirq, pcie->msi_irq_in_use))
dev_err(&pcie->pdev->dev, "trying to free unused MSI#%d\n",
hwirq);
dev_err(dev, "trying to free unused MSI#%d\n", hwirq);
else
clear_bit(hwirq, pcie->msi_irq_in_use);
mutex_unlock(&pcie->msi_used_lock);
@ -910,6 +913,7 @@ out_release_res:
static int advk_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
struct resource *res;
struct pci_bus *bus, *child;
@ -917,31 +921,29 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct device_node *msi_node;
int ret, irq;
pcie = devm_kzalloc(&pdev->dev, sizeof(struct advk_pcie),
GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->pdev = pdev;
platform_set_drvdata(pdev, pcie);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pcie->base = devm_ioremap_resource(&pdev->dev, res);
pcie->base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler,
ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
pcie);
if (ret) {
dev_err(&pdev->dev, "Failed to register interrupt\n");
dev_err(dev, "Failed to register interrupt\n");
return ret;
}
ret = advk_pcie_parse_request_of_pci_ranges(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed to parse resources\n");
dev_err(dev, "Failed to parse resources\n");
return ret;
}
@ -949,24 +951,24 @@ static int advk_pcie_probe(struct platform_device *pdev)
ret = advk_pcie_init_irq_domain(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize irq\n");
dev_err(dev, "Failed to initialize irq\n");
return ret;
}
ret = advk_pcie_init_msi_irq_domain(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize irq\n");
dev_err(dev, "Failed to initialize irq\n");
advk_pcie_remove_irq_domain(pcie);
return ret;
}
msi_node = of_parse_phandle(pdev->dev.of_node, "msi-parent", 0);
msi_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
if (msi_node)
msi = of_pci_find_msi_chip_by_node(msi_node);
else
msi = NULL;
bus = pci_scan_root_bus_msi(&pdev->dev, 0, &advk_pcie_ops,
bus = pci_scan_root_bus_msi(dev, 0, &advk_pcie_ops,
pcie, &pcie->resources, &pcie->msi);
if (!bus) {
advk_pcie_remove_msi_irq_domain(pcie);
@ -980,7 +982,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
}


@ -64,11 +64,10 @@
#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
struct dra7xx_pcie {
void __iomem *base;
struct phy **phy;
int phy_count;
struct device *dev;
struct pcie_port pp;
void __iomem *base; /* DT ti_conf */
int phy_count; /* DT phy-names count */
struct phy **phy;
};
#define to_dra7xx_pcie(x) container_of((x), struct dra7xx_pcie, pp)
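The signature changes through this file lean on the macro above: because the generic pcie_port is embedded in struct dra7xx_pcie, container_of() recovers the wrapper from a pcie_port pointer, so conversions happen once at the boundary instead of in every helper. The generic shape:

#include <linux/kernel.h>

struct inner { int x; };

struct outer {
        int cookie;
        struct inner member;
};

/* Recover the enclosing object from a pointer to its embedded member */
static struct outer *to_outer(struct inner *p)
{
        return container_of(p, struct outer, member);
}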
@ -84,17 +83,6 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
static inline u32 dra7xx_pcie_readl_rc(struct pcie_port *pp, u32 offset)
{
return readl(pp->dbi_base + offset);
}
static inline void dra7xx_pcie_writel_rc(struct pcie_port *pp, u32 offset,
u32 value)
{
writel(value, pp->dbi_base + offset);
}
static int dra7xx_pcie_link_up(struct pcie_port *pp)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
@ -103,13 +91,14 @@ static int dra7xx_pcie_link_up(struct pcie_port *pp)
return !!(reg & LINK_UP);
}
static int dra7xx_pcie_establish_link(struct pcie_port *pp)
static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
struct pcie_port *pp = &dra7xx->pp;
struct device *dev = pp->dev;
u32 reg;
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "link is already up\n");
dev_err(dev, "link is already up\n");
return 0;
}
@ -120,10 +109,8 @@ static int dra7xx_pcie_establish_link(struct pcie_port *pp)
return dw_pcie_wait_for_link(pp);
}
static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
~INTERRUPTS);
dra7xx_pcie_writel(dra7xx,
@ -142,6 +129,8 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
@ -149,10 +138,10 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
dra7xx_pcie_establish_link(pp);
dra7xx_pcie_establish_link(dra7xx);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
dra7xx_pcie_enable_interrupts(pp);
dra7xx_pcie_enable_interrupts(dra7xx);
}
static struct pcie_host_ops dra7xx_pcie_host_ops = {
@ -196,8 +185,8 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
struct dra7xx_pcie *dra7xx = arg;
struct pcie_port *pp = &dra7xx->pp;
u32 reg;
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
@ -223,51 +212,51 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
struct dra7xx_pcie *dra7xx = arg;
struct device *dev = dra7xx->pp.dev;
u32 reg;
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
if (reg & ERR_SYS)
dev_dbg(dra7xx->dev, "System Error\n");
dev_dbg(dev, "System Error\n");
if (reg & ERR_FATAL)
dev_dbg(dra7xx->dev, "Fatal Error\n");
dev_dbg(dev, "Fatal Error\n");
if (reg & ERR_NONFATAL)
dev_dbg(dra7xx->dev, "Non Fatal Error\n");
dev_dbg(dev, "Non Fatal Error\n");
if (reg & ERR_COR)
dev_dbg(dra7xx->dev, "Correctable Error\n");
dev_dbg(dev, "Correctable Error\n");
if (reg & ERR_AXI)
dev_dbg(dra7xx->dev, "AXI tag lookup fatal Error\n");
dev_dbg(dev, "AXI tag lookup fatal Error\n");
if (reg & ERR_ECRC)
dev_dbg(dra7xx->dev, "ECRC Error\n");
dev_dbg(dev, "ECRC Error\n");
if (reg & PME_TURN_OFF)
dev_dbg(dra7xx->dev,
dev_dbg(dev,
"Power Management Event Turn-Off message received\n");
if (reg & PME_TO_ACK)
dev_dbg(dra7xx->dev,
dev_dbg(dev,
"Power Management Turn-Off Ack message received\n");
if (reg & PM_PME)
dev_dbg(dra7xx->dev,
"PM Power Management Event message received\n");
dev_dbg(dev, "PM Power Management Event message received\n");
if (reg & LINK_REQ_RST)
dev_dbg(dra7xx->dev, "Link Request Reset\n");
dev_dbg(dev, "Link Request Reset\n");
if (reg & LINK_UP_EVT)
dev_dbg(dra7xx->dev, "Link-up state change\n");
dev_dbg(dev, "Link-up state change\n");
if (reg & CFG_BME_EVT)
dev_dbg(dra7xx->dev, "CFG 'Bus Master Enable' change\n");
dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
if (reg & CFG_MSE_EVT)
dev_dbg(dra7xx->dev, "CFG 'Memory Space Enable' change\n");
dev_dbg(dev, "CFG 'Memory Space Enable' change\n");
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
@ -278,13 +267,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
struct platform_device *pdev)
{
int ret;
struct pcie_port *pp;
struct pcie_port *pp = &dra7xx->pp;
struct device *dev = pp->dev;
struct resource *res;
struct device *dev = &pdev->dev;
pp = &dra7xx->pp;
pp->dev = dev;
pp->ops = &dra7xx_pcie_host_ops;
pp->irq = platform_get_irq(pdev, 1);
if (pp->irq < 0) {
@ -292,12 +277,11 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
return -EINVAL;
}
ret = devm_request_irq(&pdev->dev, pp->irq,
dra7xx_pcie_msi_irq_handler,
ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"dra7-pcie-msi", pp);
"dra7-pcie-msi", dra7xx);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
dev_err(dev, "failed to request irq\n");
return ret;
}
@ -314,7 +298,7 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dra7xx->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -332,6 +316,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
void __iomem *base;
struct resource *res;
struct dra7xx_pcie *dra7xx;
struct pcie_port *pp;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
char name[10];
@ -343,6 +328,10 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
if (!dra7xx)
return -ENOMEM;
pp = &dra7xx->pp;
pp->dev = dev;
pp->ops = &dra7xx_pcie_host_ops;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "missing IRQ resource\n");
@ -390,7 +379,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx->base = base;
dra7xx->phy = phy;
dra7xx->dev = dev;
dra7xx->phy_count = phy_count;
pm_runtime_enable(dev);
@ -407,7 +395,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
ret = devm_gpio_request_one(dev, gpio_sel, gpio_flags,
"pcie_reset");
if (ret) {
dev_err(&pdev->dev, "gpio%d request failed, ret %d\n",
dev_err(dev, "gpio%d request failed, ret %d\n",
gpio_sel, ret);
goto err_gpio;
}
@ -420,12 +408,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
reg &= ~LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
platform_set_drvdata(pdev, dra7xx);
ret = dra7xx_add_pcie_port(dra7xx, pdev);
if (ret < 0)
goto err_gpio;
platform_set_drvdata(pdev, dra7xx);
return 0;
err_gpio:
@ -451,9 +438,9 @@ static int dra7xx_pcie_suspend(struct device *dev)
u32 val;
/* clear MSE */
val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val &= ~PCI_COMMAND_MEMORY;
dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
dw_pcie_writel_rc(pp, PCI_COMMAND, val);
return 0;
}
@ -465,9 +452,9 @@ static int dra7xx_pcie_resume(struct device *dev)
u32 val;
/* set MSE */
val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val |= PCI_COMMAND_MEMORY;
dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
dw_pcie_writel_rc(pp, PCI_COMMAND, val);
return 0;
}


@ -29,13 +29,13 @@
#define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp)
struct exynos_pcie {
void __iomem *elbi_base;
void __iomem *phy_base;
void __iomem *block_base;
struct pcie_port pp;
void __iomem *elbi_base; /* DT 0th resource */
void __iomem *phy_base; /* DT 1st resource */
void __iomem *block_base; /* DT 2nd resource */
int reset_gpio;
struct clk *clk;
struct clk *bus_clk;
struct pcie_port pp;
};
/* PCIe ELBI registers */
@ -102,40 +102,40 @@ struct exynos_pcie {
#define PCIE_PHY_TRSV3_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV3_LVCC 0x31c
static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
static void exynos_elb_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
{
writel(val, pcie->elbi_base + reg);
writel(val, exynos_pcie->elbi_base + reg);
}
static inline u32 exynos_elb_readl(struct exynos_pcie *pcie, u32 reg)
static u32 exynos_elb_readl(struct exynos_pcie *exynos_pcie, u32 reg)
{
return readl(pcie->elbi_base + reg);
return readl(exynos_pcie->elbi_base + reg);
}
static inline void exynos_phy_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
static void exynos_phy_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
{
writel(val, pcie->phy_base + reg);
writel(val, exynos_pcie->phy_base + reg);
}
static inline u32 exynos_phy_readl(struct exynos_pcie *pcie, u32 reg)
static u32 exynos_phy_readl(struct exynos_pcie *exynos_pcie, u32 reg)
{
return readl(pcie->phy_base + reg);
return readl(exynos_pcie->phy_base + reg);
}
static inline void exynos_blk_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
static void exynos_blk_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
{
writel(val, pcie->block_base + reg);
writel(val, exynos_pcie->block_base + reg);
}
static inline u32 exynos_blk_readl(struct exynos_pcie *pcie, u32 reg)
static u32 exynos_blk_readl(struct exynos_pcie *exynos_pcie, u32 reg)
{
return readl(pcie->block_base + reg);
return readl(exynos_pcie->block_base + reg);
}
static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *exynos_pcie,
bool on)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
if (on) {
val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
@ -148,10 +148,10 @@ static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
}
}
static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *exynos_pcie,
bool on)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
if (on) {
val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
@ -164,10 +164,9 @@ static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
}
}
static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
static void exynos_pcie_assert_core_reset(struct exynos_pcie *exynos_pcie)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
val &= ~PCIE_CORE_RESET_ENABLE;
@ -177,10 +176,9 @@ static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
exynos_elb_writel(exynos_pcie, 0, PCIE_NONSTICKY_RESET);
}
static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
static void exynos_pcie_deassert_core_reset(struct exynos_pcie *exynos_pcie)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
val |= PCIE_CORE_RESET_ENABLE;
@ -193,18 +191,14 @@ static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_MAC_RESET);
}
static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
static void exynos_pcie_assert_phy_reset(struct exynos_pcie *exynos_pcie)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_MAC_RESET);
exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_GLOBAL_RESET);
}
static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
static void exynos_pcie_deassert_phy_reset(struct exynos_pcie *exynos_pcie)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_GLOBAL_RESET);
exynos_elb_writel(exynos_pcie, 1, PCIE_PWR_RESET);
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
@ -213,10 +207,9 @@ static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
}
static void exynos_pcie_power_on_phy(struct pcie_port *pp)
static void exynos_pcie_power_on_phy(struct exynos_pcie *exynos_pcie)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
val &= ~PCIE_PHY_COMMON_PD_CMN;
@ -239,10 +232,9 @@ static void exynos_pcie_power_on_phy(struct pcie_port *pp)
exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
}
static void exynos_pcie_power_off_phy(struct pcie_port *pp)
static void exynos_pcie_power_off_phy(struct exynos_pcie *exynos_pcie)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
val |= PCIE_PHY_COMMON_PD_CMN;
@ -265,10 +257,8 @@ static void exynos_pcie_power_off_phy(struct pcie_port *pp)
exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
}
static void exynos_pcie_init_phy(struct pcie_port *pp)
static void exynos_pcie_init_phy(struct exynos_pcie *exynos_pcie)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
/* DCC feedback control off */
exynos_phy_writel(exynos_pcie, 0x29, PCIE_PHY_DCC_FEEDBACK);
@ -305,51 +295,41 @@ static void exynos_pcie_init_phy(struct pcie_port *pp)
exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV3_LVCC);
}
static void exynos_pcie_assert_reset(struct pcie_port *pp)
static void exynos_pcie_assert_reset(struct exynos_pcie *exynos_pcie)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
struct pcie_port *pp = &exynos_pcie->pp;
struct device *dev = pp->dev;
if (exynos_pcie->reset_gpio >= 0)
devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio,
devm_gpio_request_one(dev, exynos_pcie->reset_gpio,
GPIOF_OUT_INIT_HIGH, "RESET");
}
static int exynos_pcie_establish_link(struct pcie_port *pp)
static int exynos_pcie_establish_link(struct exynos_pcie *exynos_pcie)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
struct pcie_port *pp = &exynos_pcie->pp;
struct device *dev = pp->dev;
u32 val;
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "Link already up\n");
dev_err(dev, "Link already up\n");
return 0;
}
/* assert reset signals */
exynos_pcie_assert_core_reset(pp);
exynos_pcie_assert_phy_reset(pp);
/* de-assert phy reset */
exynos_pcie_deassert_phy_reset(pp);
/* power on phy */
exynos_pcie_power_on_phy(pp);
/* initialize phy */
exynos_pcie_init_phy(pp);
exynos_pcie_assert_core_reset(exynos_pcie);
exynos_pcie_assert_phy_reset(exynos_pcie);
exynos_pcie_deassert_phy_reset(exynos_pcie);
exynos_pcie_power_on_phy(exynos_pcie);
exynos_pcie_init_phy(exynos_pcie);
/* pulse for common reset */
exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_COMMON_RESET);
udelay(500);
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
/* de-assert core reset */
exynos_pcie_deassert_core_reset(pp);
/* setup root complex */
exynos_pcie_deassert_core_reset(exynos_pcie);
dw_pcie_setup_rc(pp);
/* assert reset signal */
exynos_pcie_assert_reset(pp);
exynos_pcie_assert_reset(exynos_pcie);
/* assert LTSSM enable */
exynos_elb_writel(exynos_pcie, PCIE_ELBI_LTSSM_ENABLE,
@ -361,27 +341,23 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) {
val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED);
dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
dev_info(dev, "PLL Locked: 0x%x\n", val);
}
/* power off phy */
exynos_pcie_power_off_phy(pp);
exynos_pcie_power_off_phy(exynos_pcie);
return -ETIMEDOUT;
}
static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *exynos_pcie)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE);
exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE);
}
static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *exynos_pcie)
{
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
/* enable INTX interrupt */
val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
@ -391,23 +367,24 @@ static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct exynos_pcie *exynos_pcie = arg;
exynos_pcie_clear_irq_pulse(pp);
exynos_pcie_clear_irq_pulse(exynos_pcie);
return IRQ_HANDLED;
}
static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct exynos_pcie *exynos_pcie = arg;
struct pcie_port *pp = &exynos_pcie->pp;
return dw_handle_msi_irq(pp);
}
static void exynos_pcie_msi_init(struct pcie_port *pp)
static void exynos_pcie_msi_init(struct exynos_pcie *exynos_pcie)
{
struct pcie_port *pp = &exynos_pcie->pp;
u32 val;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
dw_pcie_msi_init(pp);
@ -417,60 +394,64 @@ static void exynos_pcie_msi_init(struct pcie_port *pp)
exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
}
static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
static void exynos_pcie_enable_interrupts(struct exynos_pcie *exynos_pcie)
{
exynos_pcie_enable_irq_pulse(pp);
exynos_pcie_enable_irq_pulse(exynos_pcie);
if (IS_ENABLED(CONFIG_PCI_MSI))
exynos_pcie_msi_init(pp);
exynos_pcie_msi_init(exynos_pcie);
}
static inline u32 exynos_pcie_readl_rc(struct pcie_port *pp,
void __iomem *dbi_base)
static u32 exynos_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
u32 val;
exynos_pcie_sideband_dbi_r_mode(pp, true);
val = readl(dbi_base);
exynos_pcie_sideband_dbi_r_mode(pp, false);
exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true);
val = readl(pp->dbi_base + reg);
exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false);
return val;
}
static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
u32 val, void __iomem *dbi_base)
static void exynos_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
exynos_pcie_sideband_dbi_w_mode(pp, true);
writel(val, dbi_base);
exynos_pcie_sideband_dbi_w_mode(pp, false);
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true);
writel(val, pp->dbi_base + reg);
exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false);
}
static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
u32 *val)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
int ret;
exynos_pcie_sideband_dbi_r_mode(pp, true);
exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true);
ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);
exynos_pcie_sideband_dbi_r_mode(pp, false);
exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false);
return ret;
}
static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
u32 val)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
int ret;
exynos_pcie_sideband_dbi_w_mode(pp, true);
exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true);
ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);
exynos_pcie_sideband_dbi_w_mode(pp, false);
exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false);
return ret;
}
static int exynos_pcie_link_up(struct pcie_port *pp)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
u32 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
u32 val;
val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
if (val == PCIE_ELBI_LTSSM_ENABLE)
return 1;
@ -479,8 +460,10 @@ static int exynos_pcie_link_up(struct pcie_port *pp)
static void exynos_pcie_host_init(struct pcie_port *pp)
{
exynos_pcie_establish_link(pp);
exynos_pcie_enable_interrupts(pp);
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
exynos_pcie_establish_link(exynos_pcie);
exynos_pcie_enable_interrupts(exynos_pcie);
}
static struct pcie_host_ops exynos_pcie_host_ops = {
@ -492,36 +475,38 @@ static struct pcie_host_ops exynos_pcie_host_ops = {
.host_init = exynos_pcie_host_init,
};
static int __init exynos_add_pcie_port(struct pcie_port *pp,
static int __init exynos_add_pcie_port(struct exynos_pcie *exynos_pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &exynos_pcie->pp;
struct device *dev = pp->dev;
int ret;
pp->irq = platform_get_irq(pdev, 1);
if (!pp->irq) {
dev_err(&pdev->dev, "failed to get irq\n");
dev_err(dev, "failed to get irq\n");
return -ENODEV;
}
ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
IRQF_SHARED, "exynos-pcie", pp);
ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
IRQF_SHARED, "exynos-pcie", exynos_pcie);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
dev_err(dev, "failed to request irq\n");
return ret;
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq(pdev, 0);
if (!pp->msi_irq) {
dev_err(&pdev->dev, "failed to get msi irq\n");
dev_err(dev, "failed to get msi irq\n");
return -ENODEV;
}
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
ret = devm_request_irq(dev, pp->msi_irq,
exynos_pcie_msi_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"exynos-pcie", pp);
"exynos-pcie", exynos_pcie);
if (ret) {
dev_err(&pdev->dev, "failed to request msi irq\n");
dev_err(dev, "failed to request msi irq\n");
return ret;
}
}
@ -531,7 +516,7 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -540,37 +525,36 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
static int __init exynos_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_pcie *exynos_pcie;
struct pcie_port *pp;
struct device_node *np = pdev->dev.of_node;
struct device_node *np = dev->of_node;
struct resource *elbi_base;
struct resource *phy_base;
struct resource *block_base;
int ret;
exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie),
GFP_KERNEL);
exynos_pcie = devm_kzalloc(dev, sizeof(*exynos_pcie), GFP_KERNEL);
if (!exynos_pcie)
return -ENOMEM;
pp = &exynos_pcie->pp;
pp->dev = &pdev->dev;
pp->dev = dev;
exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie");
exynos_pcie->clk = devm_clk_get(dev, "pcie");
if (IS_ERR(exynos_pcie->clk)) {
dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
dev_err(dev, "Failed to get pcie rc clock\n");
return PTR_ERR(exynos_pcie->clk);
}
ret = clk_prepare_enable(exynos_pcie->clk);
if (ret)
return ret;
exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
exynos_pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(exynos_pcie->bus_clk)) {
dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
dev_err(dev, "Failed to get pcie bus clock\n");
ret = PTR_ERR(exynos_pcie->bus_clk);
goto fail_clk;
}
@ -579,27 +563,27 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
goto fail_clk;
elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
exynos_pcie->elbi_base = devm_ioremap_resource(dev, elbi_base);
if (IS_ERR(exynos_pcie->elbi_base)) {
ret = PTR_ERR(exynos_pcie->elbi_base);
goto fail_bus_clk;
}
phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
exynos_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
if (IS_ERR(exynos_pcie->phy_base)) {
ret = PTR_ERR(exynos_pcie->phy_base);
goto fail_bus_clk;
}
block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
exynos_pcie->block_base = devm_ioremap_resource(dev, block_base);
if (IS_ERR(exynos_pcie->block_base)) {
ret = PTR_ERR(exynos_pcie->block_base);
goto fail_bus_clk;
}
ret = exynos_add_pcie_port(pp, pdev);
ret = exynos_add_pcie_port(exynos_pcie, pdev);
if (ret < 0)
goto fail_bus_clk;


@ -39,16 +39,15 @@ enum imx6_pcie_variants {
};
struct imx6_pcie {
struct pcie_port pp; /* pp.dbi_base is DT 0th resource */
int reset_gpio;
bool gpio_active_high;
struct clk *pcie_bus;
struct clk *pcie_phy;
struct clk *pcie_inbound_axi;
struct clk *pcie;
struct pcie_port pp;
struct regmap *iomuxc_gpr;
enum imx6_pcie_variants variant;
void __iomem *mem_base;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
@ -96,14 +95,15 @@ struct imx6_pcie {
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
{
struct pcie_port *pp = &imx6_pcie->pp;
u32 val;
u32 max_iterations = 10;
u32 wait_counter = 0;
do {
val = readl(dbi_base + PCIE_PHY_STAT);
val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT);
val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
wait_counter++;
@ -116,123 +116,126 @@ static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
return -ETIMEDOUT;
}
static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
struct pcie_port *pp = &imx6_pcie->pp;
u32 val;
int ret;
val = addr << PCIE_PHY_CTRL_DATA_LOC;
writel(val, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
writel(val, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
ret = pcie_phy_poll_ack(dbi_base, 1);
ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
val = addr << PCIE_PHY_CTRL_DATA_LOC;
writel(val, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
return pcie_phy_poll_ack(dbi_base, 0);
return pcie_phy_poll_ack(imx6_pcie, 0);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
struct pcie_port *pp = &imx6_pcie->pp;
u32 val, phy_ctl;
int ret;
ret = pcie_phy_wait_ack(dbi_base, addr);
ret = pcie_phy_wait_ack(imx6_pcie, addr);
if (ret)
return ret;
/* assert Read signal */
phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, phy_ctl);
ret = pcie_phy_poll_ack(dbi_base, 1);
ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
val = readl(dbi_base + PCIE_PHY_STAT);
val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT);
*data = val & 0xffff;
/* deassert Read signal */
writel(0x00, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x00);
return pcie_phy_poll_ack(dbi_base, 0);
return pcie_phy_poll_ack(imx6_pcie, 0);
}
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
struct pcie_port *pp = &imx6_pcie->pp;
u32 var;
int ret;
/* write addr */
/* cap addr */
ret = pcie_phy_wait_ack(dbi_base, addr);
ret = pcie_phy_wait_ack(imx6_pcie, addr);
if (ret)
return ret;
var = data << PCIE_PHY_CTRL_DATA_LOC;
writel(var, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* capture data */
var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
writel(var, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
ret = pcie_phy_poll_ack(dbi_base, 1);
ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
/* deassert cap data */
var = data << PCIE_PHY_CTRL_DATA_LOC;
writel(var, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
ret = pcie_phy_poll_ack(dbi_base, 0);
ret = pcie_phy_poll_ack(imx6_pcie, 0);
if (ret)
return ret;
/* assert wr signal */
var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
writel(var, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* wait for ack */
ret = pcie_phy_poll_ack(dbi_base, 1);
ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
/* deassert wr signal */
var = data << PCIE_PHY_CTRL_DATA_LOC;
writel(var, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
ret = pcie_phy_poll_ack(dbi_base, 0);
ret = pcie_phy_poll_ack(imx6_pcie, 0);
if (ret)
return ret;
writel(0x0, dbi_base + PCIE_PHY_CTRL);
dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x0);
return 0;
}
static void imx6_pcie_reset_phy(struct pcie_port *pp)
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
u32 tmp;
pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
usleep_range(2000, 3000);
pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
/* Added for PCI abort handling */
@ -242,9 +245,9 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
return 0;
}
static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
struct pcie_port *pp = &imx6_pcie->pp;
u32 val, gpr1, gpr12;
switch (imx6_pcie->variant) {
@ -281,10 +284,10 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
(gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
val = readl(pp->dbi_base + PCIE_PL_PFLR);
val = dw_pcie_readl_rc(pp, PCIE_PL_PFLR);
val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
val |= PCIE_PL_PFLR_FORCE_LINK;
writel(val, pp->dbi_base + PCIE_PL_PFLR);
dw_pcie_writel_rc(pp, PCIE_PL_PFLR, val);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
@ -296,20 +299,19 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
break;
}
return 0;
}
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
struct pcie_port *pp = &imx6_pcie->pp;
struct device *dev = pp->dev;
int ret = 0;
switch (imx6_pcie->variant) {
case IMX6SX:
ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
if (ret) {
dev_err(pp->dev, "unable to enable pcie_axi clock\n");
dev_err(dev, "unable to enable pcie_axi clock\n");
break;
}
@ -336,32 +338,33 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
return ret;
}
static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
struct pcie_port *pp = &imx6_pcie->pp;
struct device *dev = pp->dev;
int ret;
ret = clk_prepare_enable(imx6_pcie->pcie_phy);
if (ret) {
dev_err(pp->dev, "unable to enable pcie_phy clock\n");
goto err_pcie_phy;
dev_err(dev, "unable to enable pcie_phy clock\n");
return;
}
ret = clk_prepare_enable(imx6_pcie->pcie_bus);
if (ret) {
dev_err(pp->dev, "unable to enable pcie_bus clock\n");
dev_err(dev, "unable to enable pcie_bus clock\n");
goto err_pcie_bus;
}
ret = clk_prepare_enable(imx6_pcie->pcie);
if (ret) {
dev_err(pp->dev, "unable to enable pcie clock\n");
dev_err(dev, "unable to enable pcie clock\n");
goto err_pcie;
}
ret = imx6_pcie_enable_ref_clk(imx6_pcie);
if (ret) {
dev_err(pp->dev, "unable to enable pcie ref clock\n");
dev_err(dev, "unable to enable pcie ref clock\n");
goto err_ref_clk;
}
@ -392,7 +395,7 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
break;
}
return 0;
return;
err_ref_clk:
clk_disable_unprepare(imx6_pcie->pcie);
@ -400,14 +403,10 @@ err_pcie:
clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
return ret;
}
static void imx6_pcie_init_phy(struct pcie_port *pp)
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
if (imx6_pcie->variant == IMX6SX)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK,
@ -439,45 +438,52 @@ static void imx6_pcie_init_phy(struct pcie_port *pp)
imx6_pcie->tx_swing_low << 25);
}
static int imx6_pcie_wait_for_link(struct pcie_port *pp)
static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
struct pcie_port *pp = &imx6_pcie->pp;
struct device *dev = pp->dev;
/* check if the link is up or not */
if (!dw_pcie_wait_for_link(pp))
return 0;
dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
return -ETIMEDOUT;
}
static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp)
static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
struct pcie_port *pp = &imx6_pcie->pp;
struct device *dev = pp->dev;
u32 tmp;
unsigned int retries;
for (retries = 0; retries < 200; retries++) {
tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
/* Test if the speed change finished. */
if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
return 0;
usleep_range(100, 1000);
}
dev_err(pp->dev, "Speed change timeout\n");
dev_err(dev, "Speed change timeout\n");
return -EINVAL;
}
static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct imx6_pcie *imx6_pcie = arg;
struct pcie_port *pp = &imx6_pcie->pp;
return dw_handle_msi_irq(pp);
}
static int imx6_pcie_establish_link(struct pcie_port *pp)
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
struct pcie_port *pp = &imx6_pcie->pp;
struct device *dev = pp->dev;
u32 tmp;
int ret;
@ -486,76 +492,73 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
* started in Gen2 mode, there is a possibility the devices on the
* bus will not be detected at all. This happens with PCIe switches.
*/
tmp = readl(pp->dbi_base + PCIE_RC_LCR);
tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR);
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
writel(tmp, pp->dbi_base + PCIE_RC_LCR);
dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp);
/* Start LTSSM. */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
ret = imx6_pcie_wait_for_link(pp);
ret = imx6_pcie_wait_for_link(imx6_pcie);
if (ret) {
dev_info(pp->dev, "Link never came up\n");
dev_info(dev, "Link never came up\n");
goto err_reset_phy;
}
if (imx6_pcie->link_gen == 2) {
/* Allow Gen2 mode after the link is up. */
tmp = readl(pp->dbi_base + PCIE_RC_LCR);
tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR);
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
writel(tmp, pp->dbi_base + PCIE_RC_LCR);
dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp);
} else {
dev_info(pp->dev, "Link: Gen2 disabled\n");
dev_info(dev, "Link: Gen2 disabled\n");
}
/*
* Start Directed Speed Change so the best possible speed both link
* partners support can be negotiated.
*/
tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
tmp |= PORT_LOGIC_SPEED_CHANGE;
writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
ret = imx6_pcie_wait_for_speed_change(pp);
ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
if (ret) {
dev_err(pp->dev, "Failed to bring link up!\n");
dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
}
/* Make sure link training is finished as well! */
ret = imx6_pcie_wait_for_link(pp);
ret = imx6_pcie_wait_for_link(imx6_pcie);
if (ret) {
dev_err(pp->dev, "Failed to bring link up!\n");
dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
}
tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCSR);
dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
return 0;
err_reset_phy:
dev_dbg(pp->dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
imx6_pcie_reset_phy(pp);
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
imx6_pcie_reset_phy(imx6_pcie);
return ret;
}
static void imx6_pcie_host_init(struct pcie_port *pp)
{
imx6_pcie_assert_core_reset(pp);
imx6_pcie_init_phy(pp);
imx6_pcie_deassert_core_reset(pp);
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
imx6_pcie_assert_core_reset(imx6_pcie);
imx6_pcie_init_phy(imx6_pcie);
imx6_pcie_deassert_core_reset(imx6_pcie);
dw_pcie_setup_rc(pp);
imx6_pcie_establish_link(pp);
imx6_pcie_establish_link(imx6_pcie);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
@ -563,7 +566,7 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
static int imx6_pcie_link_up(struct pcie_port *pp)
{
return readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) &
return dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1) &
PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
}
@ -572,24 +575,26 @@ static struct pcie_host_ops imx6_pcie_host_ops = {
.host_init = imx6_pcie_host_init,
};
static int __init imx6_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &imx6_pcie->pp;
struct device *dev = pp->dev;
int ret;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
if (pp->msi_irq <= 0) {
dev_err(&pdev->dev, "failed to get MSI irq\n");
dev_err(dev, "failed to get MSI irq\n");
return -ENODEV;
}
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
ret = devm_request_irq(dev, pp->msi_irq,
imx6_pcie_msi_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"mx6-pcie-msi", pp);
"mx6-pcie-msi", imx6_pcie);
if (ret) {
dev_err(&pdev->dev, "failed to request MSI irq\n");
dev_err(dev, "failed to request MSI irq\n");
return ret;
}
}
@ -599,7 +604,7 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -608,75 +613,72 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
static int __init imx6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct imx6_pcie *imx6_pcie;
struct pcie_port *pp;
struct device_node *np = pdev->dev.of_node;
struct resource *dbi_base;
struct device_node *node = pdev->dev.of_node;
struct device_node *node = dev->of_node;
int ret;
imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
return -ENOMEM;
pp = &imx6_pcie->pp;
pp->dev = &pdev->dev;
pp->dev = dev;
imx6_pcie->variant =
(enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev);
(enum imx6_pcie_variants)of_device_get_match_data(dev);
/* Added for PCI abort handling */
hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
"imprecise external abort");
dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
/* Fetch GPIOs */
imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
imx6_pcie->gpio_active_high = of_property_read_bool(np,
imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
imx6_pcie->gpio_active_high = of_property_read_bool(node,
"reset-gpio-active-high");
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
imx6_pcie->gpio_active_high ?
GPIOF_OUT_INIT_HIGH :
GPIOF_OUT_INIT_LOW,
"PCIe reset");
if (ret) {
dev_err(&pdev->dev, "unable to get reset gpio\n");
dev_err(dev, "unable to get reset gpio\n");
return ret;
}
}
/* Fetch clocks */
imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
if (IS_ERR(imx6_pcie->pcie_phy)) {
dev_err(&pdev->dev,
"pcie_phy clock source missing or invalid\n");
dev_err(dev, "pcie_phy clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_phy);
}
imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(imx6_pcie->pcie_bus)) {
dev_err(&pdev->dev,
"pcie_bus clock source missing or invalid\n");
dev_err(dev, "pcie_bus clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_bus);
}
imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
imx6_pcie->pcie = devm_clk_get(dev, "pcie");
if (IS_ERR(imx6_pcie->pcie)) {
dev_err(&pdev->dev,
"pcie clock source missing or invalid\n");
dev_err(dev, "pcie clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie);
}
if (imx6_pcie->variant == IMX6SX) {
imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev,
imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
"pcie_inbound_axi");
if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
dev_err(&pdev->dev,
dev_err(dev,
"pcie_incbound_axi clock missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_inbound_axi);
}
@ -686,7 +688,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->iomuxc_gpr =
syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
dev_err(&pdev->dev, "unable to find iomuxc registers\n");
dev_err(dev, "unable to find iomuxc registers\n");
return PTR_ERR(imx6_pcie->iomuxc_gpr);
}
@ -712,12 +714,12 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->tx_swing_low = 127;
/* Limit link speed */
ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed",
ret = of_property_read_u32(node, "fsl,max-link-speed",
&imx6_pcie->link_gen);
if (ret)
imx6_pcie->link_gen = 1;
ret = imx6_add_pcie_port(pp, pdev);
ret = imx6_add_pcie_port(imx6_pcie, pdev);
if (ret < 0)
return ret;
@ -730,7 +732,7 @@ static void imx6_pcie_shutdown(struct platform_device *pdev)
struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
/* bring down link, so bootloader gets clean state in case of reboot */
imx6_pcie_assert_core_reset(&imx6_pcie->pp);
imx6_pcie_assert_core_reset(imx6_pcie);
}
static const struct of_device_id imx6_pcie_of_match[] = {

View file

@ -88,13 +88,24 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
return ks_pcie->app.start + MSI_IRQ;
}
static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
return readl(ks_pcie->va_app_base + offset);
}
static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
{
writel(val, ks_pcie->va_app_base + offset);
}
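
The two helpers above funnel every application-register access through the keystone_pcie pointer, so the va_app_base arithmetic lives in one place. The hunks below rebuild their read-modify-write sequences on top of them; the idiom looks like this (minimal sketch, helper name assumed, not part of the driver):

	/* sketch: set bits in an application register via the accessors above */
	static void ks_dw_app_set_bits(struct keystone_pcie *ks_pcie,
				       u32 offset, u32 bits)
	{
		u32 val = ks_dw_app_readl(ks_pcie, offset);	/* current value */

		ks_dw_app_writel(ks_pcie, offset, val | bits);	/* merged write */
	}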
void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
struct pcie_port *pp = &ks_pcie->pp;
struct device *dev = pp->dev;
u32 pending, vector;
int src, virq;
pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));
pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
/*
* MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
@ -104,7 +115,7 @@ void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
if (BIT(src) & pending) {
vector = offset + (src << 3);
virq = irq_linear_revmap(pp->irq_domain, vector);
dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
src, vector, virq);
generic_handle_irq(virq);
}
@ -124,9 +135,9 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
writel(BIT(bit_pos),
ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
BIT(bit_pos));
ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}
void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
@ -135,8 +146,8 @@ void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
writel(BIT(bit_pos),
ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
BIT(bit_pos));
}
void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
@ -145,8 +156,8 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
writel(BIT(bit_pos),
ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
BIT(bit_pos));
}
static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
@ -215,6 +226,7 @@ static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
struct device *dev = pp->dev;
int i;
pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
@ -222,7 +234,7 @@ int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
&ks_dw_pcie_msi_domain_ops,
chip);
if (!pp->irq_domain) {
dev_err(pp->dev, "irq domain init failed\n");
dev_err(dev, "irq domain init failed\n");
return -ENXIO;
}
@ -237,47 +249,47 @@ void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
int i;
for (i = 0; i < MAX_LEGACY_IRQS; i++)
writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
}
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
struct pcie_port *pp = &ks_pcie->pp;
struct device *dev = pp->dev;
u32 pending;
int virq;
pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));
pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
if (BIT(0) & pending) {
virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
virq);
dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
generic_handle_irq(virq);
}
/* EOI the INTx interrupt */
writel(offset, ks_pcie->va_app_base + IRQ_EOI);
ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
}
void ks_dw_pcie_enable_error_irq(void __iomem *reg_base)
void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_SET);
ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}
irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
void __iomem *reg_base)
irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
u32 status;
status = readl(reg_base + ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
if (!status)
return IRQ_NONE;
if (status & ERR_FATAL_IRQ)
dev_err(dev, "fatal error (status %#010x)\n", status);
dev_err(ks_pcie->pp.dev, "fatal error (status %#010x)\n",
status);
/* Ack the IRQ; status bits are RW1C */
writel(status, reg_base + ERR_IRQ_STATUS);
ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
return IRQ_HANDLED;
}
@ -322,15 +334,15 @@ static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
*/
static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
u32 val;
writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
reg_virt + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);
do {
val = readl(reg_virt + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
} while (!(val & DBI_CS2_EN_VAL));
}
@ -340,15 +352,15 @@ static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
*/
static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
u32 val;
writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
reg_virt + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);
do {
val = readl(reg_virt + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
} while (val & DBI_CS2_EN_VAL);
}
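
Both dbi_cs2 helpers share the same write-then-poll shape: update CMD_STATUS, then keep reading it back until the bit reflects the requested state, since the write takes effect in a different clock domain. The shape in isolation (a sketch under that assumption; a production version would bound the loop with a timeout):

	/* sketch: flip one CMD_STATUS bit and wait for the far clock domain */
	static void ks_dw_toggle_and_wait(struct keystone_pcie *ks_pcie,
					  u32 bit, bool set)
	{
		u32 val = ks_dw_app_readl(ks_pcie, CMD_STATUS);

		ks_dw_app_writel(ks_pcie, CMD_STATUS,
				 set ? (val | bit) : (val & ~bit));
		do {
			val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
		} while (set ? !(val & bit) : (val & bit));
	}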
@ -357,28 +369,29 @@ void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
struct pcie_port *pp = &ks_pcie->pp;
u32 start = pp->mem->start, end = pp->mem->end;
int i, tr_size;
u32 val;
/* Disable BARs for inbound access */
ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
ks_dw_pcie_set_dbi_mode(ks_pcie);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0);
ks_dw_pcie_clear_dbi_mode(ks_pcie);
/* Set outbound translation size per window division */
writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);
ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);
tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
/* Using Direct 1:1 mapping of RC <-> PCI memory space */
for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
start += tr_size;
}
/* Enable OB translation */
writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
ks_pcie->va_app_base + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
}
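
The outbound setup above slices the RC-to-PCI window into CFG_PCIM_WIN_CNT equal pieces of tr_size bytes, mapped 1:1. The size arithmetic is easy to verify on its own (sketch; the index value in the comment is an example, not the driver's configured one):

	#include <linux/sizes.h>

	/* sketch: per-window size as computed above; idx = 3 gives 8 MB */
	static u64 ob_window_size(u32 idx)
	{
		return (u64)(1 << (idx & 0x7)) * SZ_1M;
	}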
/**
@ -418,7 +431,7 @@ static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
if (bus != 1)
regval |= BIT(24);
writel(regval, ks_pcie->va_app_base + CFG_SETUP);
ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
return pp->va_cfg0_base;
}
@ -456,19 +469,19 @@ void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
/* Configure and set up BAR0 */
ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
ks_dw_pcie_set_dbi_mode(ks_pcie);
/* Enable BAR0 */
writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 1);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, SZ_4K - 1);
ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
ks_dw_pcie_clear_dbi_mode(ks_pcie);
/*
* For BAR0, just setting bus address for inbound writes (MSI) should
* be sufficient. Use physical address to avoid any conflicts.
*/
writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
}
/**
@ -476,8 +489,9 @@ void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
*/
int ks_dw_pcie_link_up(struct pcie_port *pp)
{
u32 val = readl(pp->dbi_base + DEBUG0);
u32 val;
val = dw_pcie_readl_rc(pp, DEBUG0);
return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
}
@ -486,13 +500,13 @@ void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
u32 val;
/* Disable Link training */
val = readl(ks_pcie->va_app_base + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
val &= ~LTSSM_EN_VAL;
writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
/* Initiate Link Training */
val = readl(ks_pcie->va_app_base + CMD_STATUS);
writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
}
/**
@ -506,12 +520,13 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
struct device_node *msi_intc_np)
{
struct pcie_port *pp = &ks_pcie->pp;
struct platform_device *pdev = to_platform_device(pp->dev);
struct device *dev = pp->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
/* Index 0 is the config reg. space address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pp->dbi_base = devm_ioremap_resource(pp->dev, res);
pp->dbi_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
@ -524,7 +539,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
/* Index 1 is the application reg. space address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
if (IS_ERR(ks_pcie->va_app_base))
return PTR_ERR(ks_pcie->va_app_base);
@ -537,7 +552,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
&ks_dw_pcie_legacy_irq_domain_ops,
NULL);
if (!ks_pcie->legacy_irq_domain) {
dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
dev_err(dev, "Failed to add irq domain for legacy irqs\n");
return -EINVAL;
}

View file

@ -89,12 +89,13 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
struct pcie_port *pp = &ks_pcie->pp;
struct device *dev = pp->dev;
unsigned int retries;
dw_pcie_setup_rc(pp);
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "Link already up\n");
dev_err(dev, "Link already up\n");
return 0;
}
@ -105,7 +106,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
return 0;
}
dev_err(pp->dev, "phy link never came up\n");
dev_err(dev, "phy link never came up\n");
return -ETIMEDOUT;
}
@ -115,9 +116,10 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
u32 offset = irq - ks_pcie->msi_host_irqs[0];
struct pcie_port *pp = &ks_pcie->pp;
struct device *dev = pp->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq);
dev_dbg(dev, "%s, irq %d\n", __func__, irq);
/*
* The chained irq handler installation would have replaced normal
@ -142,10 +144,11 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
struct pcie_port *pp = &ks_pcie->pp;
struct device *dev = pp->dev;
u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);
dev_dbg(dev, ": Handling legacy irq %d\n", irq);
/*
* The chained irq handler installation would have replaced normal
@ -234,7 +237,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
}
if (ks_pcie->error_irq > 0)
ks_dw_pcie_enable_error_irq(ks_pcie->va_app_base);
ks_dw_pcie_enable_error_irq(ks_pcie);
}
/*
@ -302,14 +305,14 @@ static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
{
struct keystone_pcie *ks_pcie = priv;
return ks_dw_pcie_handle_error_irq(ks_pcie->pp.dev,
ks_pcie->va_app_base);
return ks_dw_pcie_handle_error_irq(ks_pcie);
}
static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &ks_pcie->pp;
struct device *dev = pp->dev;
int ret;
ret = ks_pcie_get_irq_controller_info(ks_pcie,
@ -332,12 +335,12 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
*/
ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
if (ks_pcie->error_irq <= 0)
dev_info(&pdev->dev, "no error IRQ defined\n");
dev_info(dev, "no error IRQ defined\n");
else {
ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
IRQF_SHARED, "pcie-error-irq", ks_pcie);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request error IRQ %d\n",
dev_err(dev, "failed to request error IRQ %d\n",
ks_pcie->error_irq);
return ret;
}
@ -347,7 +350,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
pp->ops = &keystone_pcie_host_ops;
ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -381,12 +384,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct phy *phy;
int ret;
ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
GFP_KERNEL);
ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
if (!ks_pcie)
return -ENOMEM;
pp = &ks_pcie->pp;
pp->dev = dev;
/* initialize SerDes Phy if present */
phy = devm_phy_get(dev, "pcie-phy");
@ -408,7 +411,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
devm_iounmap(dev, reg_p);
devm_release_mem_region(dev, res->start, resource_size(res));
pp->dev = dev;
ks_pcie->np = dev->of_node;
platform_set_drvdata(pdev, ks_pcie);
ks_pcie->clk = devm_clk_get(dev, "pcie");

View file

@ -17,8 +17,8 @@
#define MAX_LEGACY_HOST_IRQS 4
struct keystone_pcie {
struct pcie_port pp; /* pp.dbi_base is DT 0th res */
struct clk *clk;
struct pcie_port pp;
/* PCI Device ID */
u32 device_id;
int num_legacy_host_irqs;
@ -34,7 +34,7 @@ struct keystone_pcie {
int error_irq;
/* Application register space */
void __iomem *va_app_base;
void __iomem *va_app_base; /* DT 1st resource */
struct resource app;
};
@ -45,9 +45,8 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
/* Keystone specific PCI controller APIs */
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
void ks_dw_pcie_enable_error_irq(void __iomem *reg_base);
irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
void __iomem *reg_base);
void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie);
irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie);
int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
struct device_node *msi_intc_np);
int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,

View file

@ -45,10 +45,9 @@ struct ls_pcie_drvdata {
};
struct ls_pcie {
void __iomem *dbi;
struct pcie_port pp; /* pp.dbi_base is DT regs */
void __iomem *lut;
struct regmap *scfg;
struct pcie_port pp;
const struct ls_pcie_drvdata *drvdata;
int index;
};
@ -59,7 +58,7 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
{
u32 header_type;
header_type = ioread8(pcie->dbi + PCI_HEADER_TYPE);
header_type = ioread8(pcie->pp.dbi_base + PCI_HEADER_TYPE);
header_type &= 0x7f;
return header_type == PCI_HEADER_TYPE_BRIDGE;
@ -68,13 +67,13 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
/* Clear multi-function bit */
static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
{
iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->pp.dbi_base + PCI_HEADER_TYPE);
}
/* Fix class value */
static void ls_pcie_fix_class(struct ls_pcie *pcie)
{
iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->pp.dbi_base + PCI_CLASS_DEVICE);
}
/* Drop MSG TLP except for Vendor MSG */
@ -82,9 +81,9 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
{
u32 val;
val = ioread32(pcie->dbi + PCIE_STRFMR1);
val = ioread32(pcie->pp.dbi_base + PCIE_STRFMR1);
val &= 0xDFFFFFFF;
iowrite32(val, pcie->dbi + PCIE_STRFMR1);
iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1);
}
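
The 0xDFFFFFFF mask above clears exactly one bit, bit 29 (0x20000000), of PCIE_STRFMR1 and preserves everything else. The same operation with the bit spelled out (sketch; the macro name is assumed, the driver does not define it):

	#define PCIE_STRFMR1_DROP_MSG	BIT(29)	/* assumed name for 0x20000000 */

	/* sketch: identical effect to the val &= 0xDFFFFFFF above */
	static void ls_pcie_drop_msg_tlp_named(struct ls_pcie *pcie)
	{
		u32 val = ioread32(pcie->pp.dbi_base + PCIE_STRFMR1);

		val &= ~PCIE_STRFMR1_DROP_MSG;
		iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1);
	}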
static int ls1021_pcie_link_up(struct pcie_port *pp)
@ -106,18 +105,19 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
static void ls1021_pcie_host_init(struct pcie_port *pp)
{
struct device *dev = pp->dev;
struct ls_pcie *pcie = to_ls_pcie(pp);
u32 index[2];
pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
"fsl,pcie-scfg");
if (IS_ERR(pcie->scfg)) {
dev_err(pp->dev, "No syscfg phandle specified\n");
dev_err(dev, "No syscfg phandle specified\n");
pcie->scfg = NULL;
return;
}
if (of_property_read_u32_array(pp->dev->of_node,
if (of_property_read_u32_array(dev->of_node,
"fsl,pcie-scfg", index, 2)) {
pcie->scfg = NULL;
return;
@ -148,18 +148,19 @@ static void ls_pcie_host_init(struct pcie_port *pp)
{
struct ls_pcie *pcie = to_ls_pcie(pp);
iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
iowrite32(1, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
ls_pcie_fix_class(pcie);
ls_pcie_clear_multifunction(pcie);
ls_pcie_drop_msg_tlp(pcie);
iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
iowrite32(0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
}
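
ls_pcie_host_init brackets its fixups between writes of 1 and 0 to PCIE_DBI_RO_WR_EN, which temporarily unlocks config registers that are read-only from the host side. Expressed as a helper (sketch; the name is assumed):

	/* sketch: unlock (en = true) or re-lock (en = false) DBI RO registers */
	static void ls_pcie_dbi_ro_wr(struct ls_pcie *pcie, bool en)
	{
		iowrite32(en ? 1 : 0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
	}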
static int ls_pcie_msi_host_init(struct pcie_port *pp,
struct msi_controller *chip)
{
struct device *dev = pp->dev;
struct device_node *np = dev->of_node;
struct device_node *msi_node;
struct device_node *np = pp->dev->of_node;
/*
* The MSI domain is set by the generic of_msi_configure(). This
@ -169,7 +170,7 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp,
*/
msi_node = of_parse_phandle(np, "msi-parent", 0);
if (!msi_node) {
dev_err(pp->dev, "failed to find msi-parent\n");
dev_err(dev, "failed to find msi-parent\n");
return -EINVAL;
}
@ -212,19 +213,15 @@ static const struct of_device_id ls_pcie_of_match[] = {
{ },
};
static int __init ls_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
static int __init ls_add_pcie_port(struct ls_pcie *pcie)
{
struct pcie_port *pp = &pcie->pp;
struct device *dev = pp->dev;
int ret;
struct ls_pcie *pcie = to_ls_pcie(pp);
pp->dev = &pdev->dev;
pp->dbi_base = pcie->dbi;
pp->ops = pcie->drvdata->ops;
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(pp->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -233,38 +230,42 @@ static int __init ls_add_pcie_port(struct pcie_port *pp,
static int __init ls_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *match;
struct ls_pcie *pcie;
struct pcie_port *pp;
struct resource *dbi_base;
int ret;
match = of_match_device(ls_pcie_of_match, &pdev->dev);
match = of_match_device(ls_pcie_of_match, dev);
if (!match)
return -ENODEV;
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pp = &pcie->pp;
pp->dev = dev;
pp->ops = pcie->drvdata->ops;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base);
if (IS_ERR(pcie->dbi)) {
dev_err(&pdev->dev, "missing *regs* space\n");
return PTR_ERR(pcie->dbi);
pcie->pp.dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pcie->pp.dbi_base)) {
dev_err(dev, "missing *regs* space\n");
return PTR_ERR(pcie->pp.dbi_base);
}
pcie->drvdata = match->data;
pcie->lut = pcie->dbi + pcie->drvdata->lut_offset;
pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
if (!ls_pcie_is_bridge(pcie))
return -ENODEV;
ret = ls_add_pcie_port(&pcie->pp, pdev);
ret = ls_add_pcie_port(pcie);
if (ret < 0)
return ret;
platform_set_drvdata(pdev, pcie);
return 0;
}
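
With platform_set_drvdata() called on the success path, any later driver callback can recover the ls_pcie instance from the platform device alone. A hypothetical consumer, purely for illustration since this diff adds none:

	/* sketch: fetching the state stored by ls_pcie_probe() */
	static void ls_pcie_example_shutdown(struct platform_device *pdev)
	{
		struct ls_pcie *pcie = platform_get_drvdata(pdev);

		(void)pcie;	/* e.g. quiesce the link here */
	}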

View file

@ -1190,13 +1190,13 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
static int mvebu_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mvebu_pcie *pcie;
struct device_node *np = pdev->dev.of_node;
struct device_node *np = dev->of_node;
struct device_node *child;
int num, i, ret;
pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie),
GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@ -1206,7 +1206,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
/* Get the PCIe memory and I/O aperture */
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
if (resource_size(&pcie->mem) == 0) {
dev_err(&pdev->dev, "invalid memory aperture size\n");
dev_err(dev, "invalid memory aperture size\n");
return -EINVAL;
}
@ -1224,20 +1224,18 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
/* Get the bus range */
ret = of_pci_parse_bus_range(np, &pcie->busn);
if (ret) {
dev_err(&pdev->dev, "failed to parse bus-range property: %d\n",
ret);
dev_err(dev, "failed to parse bus-range property: %d\n", ret);
return ret;
}
num = of_get_available_child_count(pdev->dev.of_node);
num = of_get_available_child_count(np);
pcie->ports = devm_kcalloc(&pdev->dev, num, sizeof(*pcie->ports),
GFP_KERNEL);
pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
if (!pcie->ports)
return -ENOMEM;
i = 0;
for_each_available_child_of_node(pdev->dev.of_node, child) {
for_each_available_child_of_node(np, child) {
struct mvebu_pcie_port *port = &pcie->ports[i];
ret = mvebu_pcie_parse_port(pcie, port, child);
@ -1266,8 +1264,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
port->base = mvebu_pcie_map_registers(pdev, child, port);
if (IS_ERR(port->base)) {
dev_err(&pdev->dev, "%s: cannot map registers\n",
port->name);
dev_err(dev, "%s: cannot map registers\n", port->name);
port->base = NULL;
mvebu_pcie_powerdown(port);
continue;

View file

@ -154,10 +154,11 @@ static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
{
struct rcar_pci_priv *priv = pw;
struct device *dev = priv->dev;
u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
if (status & RCAR_PCI_INT_ALLERRORS) {
dev_err(priv->dev, "error irq: status %08x\n", status);
dev_err(dev, "error irq: status %08x\n", status);
/* clear the error(s) */
iowrite32(status & RCAR_PCI_INT_ALLERRORS,
@ -170,13 +171,14 @@ static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
{
struct device *dev = priv->dev;
int ret;
u32 val;
ret = devm_request_irq(priv->dev, priv->irq, rcar_pci_err_irq,
ret = devm_request_irq(dev, priv->irq, rcar_pci_err_irq,
IRQF_SHARED, "error irq", priv);
if (ret) {
dev_err(priv->dev, "cannot claim IRQ for error handling\n");
dev_err(dev, "cannot claim IRQ for error handling\n");
return;
}
@ -192,15 +194,16 @@ static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { }
static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
{
struct rcar_pci_priv *priv = sys->private_data;
struct device *dev = priv->dev;
void __iomem *reg = priv->reg;
u32 val;
int ret;
pm_runtime_enable(priv->dev);
pm_runtime_get_sync(priv->dev);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val);
dev_info(dev, "PCI: bus%u revision %x\n", sys->busnr, val);
/* Disable Direct Power Down State and assert reset */
val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
@ -275,7 +278,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
/* Add PCI resources */
pci_add_resource(&sys->resources, &priv->mem_res);
ret = devm_request_pci_bus_resources(priv->dev, &sys->resources);
ret = devm_request_pci_bus_resources(dev, &sys->resources);
if (ret < 0)
return ret;
@ -311,6 +314,7 @@ static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
struct device_node *np)
{
struct device *dev = pci->dev;
struct of_pci_range range;
struct of_pci_range_parser parser;
int index = 0;
@ -331,14 +335,14 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
/* Catch HW limitations */
if (!(range.flags & IORESOURCE_PREFETCH)) {
dev_err(pci->dev, "window must be prefetchable\n");
dev_err(dev, "window must be prefetchable\n");
return -EINVAL;
}
if (pci->window_addr) {
u32 lowaddr = 1 << (ffs(pci->window_addr) - 1);
if (lowaddr < pci->window_size) {
dev_err(pci->dev, "invalid window size/addr\n");
dev_err(dev, "invalid window size/addr\n");
return -EINVAL;
}
}
@ -350,6 +354,7 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
static int rcar_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *cfg_res, *mem_res;
struct rcar_pci_priv *priv;
void __iomem *reg;
@ -357,7 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
void *hw_private[1];
cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(&pdev->dev, cfg_res);
reg = devm_ioremap_resource(dev, cfg_res);
if (IS_ERR(reg))
return PTR_ERR(reg);
@ -368,8 +373,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
if (mem_res->start & 0xFFFF)
return -EINVAL;
priv = devm_kzalloc(&pdev->dev,
sizeof(struct rcar_pci_priv), GFP_KERNEL);
priv = devm_kzalloc(dev, sizeof(struct rcar_pci_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@ -378,10 +382,10 @@ static int rcar_pci_probe(struct platform_device *pdev)
priv->irq = platform_get_irq(pdev, 0);
priv->reg = reg;
priv->dev = &pdev->dev;
priv->dev = dev;
if (priv->irq < 0) {
dev_err(&pdev->dev, "no valid irq found\n");
dev_err(dev, "no valid irq found\n");
return priv->irq;
}
@ -390,23 +394,23 @@ static int rcar_pci_probe(struct platform_device *pdev)
priv->window_pci = 0x40000000;
priv->window_size = SZ_1G;
if (pdev->dev.of_node) {
if (dev->of_node) {
struct resource busnr;
int ret;
ret = of_pci_parse_bus_range(pdev->dev.of_node, &busnr);
ret = of_pci_parse_bus_range(dev->of_node, &busnr);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse bus-range\n");
dev_err(dev, "failed to parse bus-range\n");
return ret;
}
priv->busnr = busnr.start;
if (busnr.end != busnr.start)
dev_warn(&pdev->dev, "only one bus number supported\n");
dev_warn(dev, "only one bus number supported\n");
ret = rcar_pci_parse_map_dma_ranges(priv, pdev->dev.of_node);
ret = rcar_pci_parse_map_dma_ranges(priv, dev->of_node);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse dma-range\n");
dev_err(dev, "failed to parse dma-range\n");
return ret;
}
} else {
@ -421,7 +425,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
hw.map_irq = rcar_pci_map_irq;
hw.ops = &rcar_pci_ops;
hw.setup = rcar_pci_setup;
pci_common_init_dev(&pdev->dev, &hw);
pci_common_init_dev(dev, &hw);
return 0;
}

View file

@ -384,6 +384,7 @@ static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
unsigned int busnr)
{
struct device *dev = pcie->dev;
pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
phys_addr_t cs = pcie->cs->start;
@ -413,8 +414,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
if (err < 0) {
dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
err);
dev_err(dev, "ioremap_page_range() failed: %d\n", err);
goto unmap;
}
}
@ -462,6 +462,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
int where)
{
struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
struct device *dev = pcie->dev;
void __iomem *addr = NULL;
if (bus->number == 0) {
@ -482,8 +483,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
addr = (void __iomem *)b->area->addr;
if (!addr) {
dev_err(pcie->dev,
"failed to map cfg. space for bus %u\n",
dev_err(dev, "failed to map cfg. space for bus %u\n",
bus->number);
return NULL;
}
@ -584,12 +584,13 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
struct tegra_pcie *pcie = port->pcie;
struct device *dev = pcie->dev;
devm_iounmap(pcie->dev, port->base);
devm_release_mem_region(pcie->dev, port->regs.start,
devm_iounmap(dev, port->base);
devm_release_mem_region(dev, port->regs.start,
resource_size(&port->regs));
list_del(&port->list);
devm_kfree(pcie->dev, port);
devm_kfree(dev, port);
}
/* Tegra PCIE root complex wrongly reports device class */
@ -612,12 +613,13 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct tegra_pcie *pcie = sys_to_pcie(sys);
struct device *dev = pcie->dev;
int err;
sys->mem_offset = pcie->offset.mem;
sys->io_offset = pcie->offset.io;
err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->io);
err = devm_request_resource(dev, &iomem_resource, &pcie->io);
if (err < 0)
return err;
@ -631,7 +633,7 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
err = devm_request_pci_bus_resources(pcie->dev, &sys->resources);
err = devm_request_pci_bus_resources(dev, &sys->resources);
if (err < 0)
return err;
@ -672,6 +674,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
"Peer2Peer error",
};
struct tegra_pcie *pcie = arg;
struct device *dev = pcie->dev;
u32 code, signature;
code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
@ -689,11 +692,9 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
* happen a lot during enumeration
*/
if (code == AFI_INTR_MASTER_ABORT)
dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
signature);
dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
else
dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
signature);
dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
code == AFI_INTR_FPCI_DECODE_ERROR) {
@ -701,9 +702,9 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
if (code == AFI_INTR_MASTER_ABORT)
dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
dev_dbg(dev, " FPCI address: %10llx\n", address);
else
dev_err(pcie->dev, " FPCI address: %10llx\n", address);
dev_err(dev, " FPCI address: %10llx\n", address);
}
return IRQ_HANDLED;
@ -793,6 +794,7 @@ static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
u32 value;
int err;
@ -829,7 +831,7 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
/* wait for the PLL to lock */
err = tegra_pcie_pll_wait(pcie, 500);
if (err < 0) {
dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
dev_err(dev, "PLL failed to lock: %d\n", err);
return err;
}
@ -859,7 +861,7 @@ static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
/* override IDDQ */
value = pads_readl(pcie, PADS_CTL);
value |= PADS_CTL_IDDQ_1L;
pads_writel(pcie, PADS_CTL, value);
pads_writel(pcie, value, PADS_CTL);
/* reset PLL */
value = pads_readl(pcie, soc->pads_pll_ctl);
@ -880,8 +882,7 @@ static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
for (i = 0; i < port->lanes; i++) {
err = phy_power_on(port->phys[i]);
if (err < 0) {
dev_err(dev, "failed to power on PHY#%u: %d\n", i,
err);
dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
return err;
}
}
@ -909,6 +910,7 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
int err;
@ -920,7 +922,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
err = tegra_pcie_phy_enable(pcie);
if (err < 0)
dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
dev_err(dev, "failed to power on PHY: %d\n", err);
return err;
}
@ -928,7 +930,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
list_for_each_entry(port, &pcie->ports, list) {
err = tegra_pcie_port_phy_power_on(port);
if (err < 0) {
dev_err(pcie->dev,
dev_err(dev,
"failed to power on PCIe port %u PHY: %d\n",
port->index, err);
return err;
@ -946,6 +948,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
struct tegra_pcie_port *port;
int err;
@ -956,8 +959,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
err = tegra_pcie_phy_disable(pcie);
if (err < 0)
dev_err(pcie->dev, "failed to power off PHY: %d\n",
err);
dev_err(dev, "failed to power off PHY: %d\n", err);
return err;
}
@ -965,7 +967,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
list_for_each_entry(port, &pcie->ports, list) {
err = tegra_pcie_port_phy_power_off(port);
if (err < 0) {
dev_err(pcie->dev,
dev_err(dev,
"failed to power off PCIe port %u PHY: %d\n",
port->index, err);
return err;
@ -977,6 +979,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
unsigned long value;
@ -1016,7 +1019,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
err = tegra_pcie_phy_power_on(pcie);
if (err < 0) {
dev_err(pcie->dev, "failed to power on PHY(s): %d\n", err);
dev_err(dev, "failed to power on PHY(s): %d\n", err);
return err;
}
@ -1049,13 +1052,14 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
/* TODO: disable and unprepare clocks? */
err = tegra_pcie_phy_power_off(pcie);
if (err < 0)
dev_err(pcie->dev, "failed to power off PHY(s): %d\n", err);
dev_err(dev, "failed to power off PHY(s): %d\n", err);
reset_control_assert(pcie->pcie_xrst);
reset_control_assert(pcie->afi_rst);
@ -1065,11 +1069,12 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
if (err < 0)
dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
dev_warn(dev, "failed to disable regulators: %d\n", err);
}
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
int err;
@ -1082,13 +1087,13 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
/* enable regulators */
err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
if (err < 0)
dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
dev_err(dev, "failed to enable regulators: %d\n", err);
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
pcie->pex_clk,
pcie->pex_rst);
if (err) {
dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
dev_err(dev, "powerup sequence failed: %d\n", err);
return err;
}
@ -1096,22 +1101,21 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
err = clk_prepare_enable(pcie->afi_clk);
if (err < 0) {
dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
dev_err(dev, "failed to enable AFI clock: %d\n", err);
return err;
}
if (soc->has_cml_clk) {
err = clk_prepare_enable(pcie->cml_clk);
if (err < 0) {
dev_err(pcie->dev, "failed to enable CML clock: %d\n",
err);
dev_err(dev, "failed to enable CML clock: %d\n", err);
return err;
}
}
err = clk_prepare_enable(pcie->pll_e);
if (err < 0) {
dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
dev_err(dev, "failed to enable PLLE clock: %d\n", err);
return err;
}
@ -1120,22 +1124,23 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
pcie->pex_clk = devm_clk_get(dev, "pex");
if (IS_ERR(pcie->pex_clk))
return PTR_ERR(pcie->pex_clk);
pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
pcie->afi_clk = devm_clk_get(dev, "afi");
if (IS_ERR(pcie->afi_clk))
return PTR_ERR(pcie->afi_clk);
pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
pcie->pll_e = devm_clk_get(dev, "pll_e");
if (IS_ERR(pcie->pll_e))
return PTR_ERR(pcie->pll_e);
if (soc->has_cml_clk) {
pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
pcie->cml_clk = devm_clk_get(dev, "cml");
if (IS_ERR(pcie->cml_clk))
return PTR_ERR(pcie->cml_clk);
}
@ -1145,15 +1150,17 @@ static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
struct device *dev = pcie->dev;
pcie->pex_rst = devm_reset_control_get(dev, "pex");
if (IS_ERR(pcie->pex_rst))
return PTR_ERR(pcie->pex_rst);
pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
pcie->afi_rst = devm_reset_control_get(dev, "afi");
if (IS_ERR(pcie->afi_rst))
return PTR_ERR(pcie->afi_rst);
pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
pcie->pcie_xrst = devm_reset_control_get(dev, "pcie_x");
if (IS_ERR(pcie->pcie_xrst))
return PTR_ERR(pcie->pcie_xrst);
@ -1162,18 +1169,19 @@ static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
pcie->phy = devm_phy_optional_get(dev, "pcie");
if (IS_ERR(pcie->phy)) {
err = PTR_ERR(pcie->phy);
dev_err(pcie->dev, "failed to get PHY: %d\n", err);
dev_err(dev, "failed to get PHY: %d\n", err);
return err;
}
err = phy_init(pcie->phy);
if (err < 0) {
dev_err(pcie->dev, "failed to initialize PHY: %d\n", err);
dev_err(dev, "failed to initialize PHY: %d\n", err);
return err;
}
@ -1256,43 +1264,44 @@ static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *pads, *afi, *res;
int err;
err = tegra_pcie_clocks_get(pcie);
if (err) {
dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
dev_err(dev, "failed to get clocks: %d\n", err);
return err;
}
err = tegra_pcie_resets_get(pcie);
if (err) {
dev_err(&pdev->dev, "failed to get resets: %d\n", err);
dev_err(dev, "failed to get resets: %d\n", err);
return err;
}
err = tegra_pcie_phys_get(pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to get PHYs: %d\n", err);
dev_err(dev, "failed to get PHYs: %d\n", err);
return err;
}
err = tegra_pcie_power_on(pcie);
if (err) {
dev_err(&pdev->dev, "failed to power up: %d\n", err);
dev_err(dev, "failed to power up: %d\n", err);
return err;
}
pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
pcie->pads = devm_ioremap_resource(dev, pads);
if (IS_ERR(pcie->pads)) {
err = PTR_ERR(pcie->pads);
goto poweroff;
}
afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
pcie->afi = devm_ioremap_resource(dev, afi);
if (IS_ERR(pcie->afi)) {
err = PTR_ERR(pcie->afi);
goto poweroff;
@ -1305,7 +1314,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
goto poweroff;
}
pcie->cs = devm_request_mem_region(pcie->dev, res->start,
pcie->cs = devm_request_mem_region(dev, res->start,
resource_size(res), res->name);
if (!pcie->cs) {
err = -EADDRNOTAVAIL;
@ -1315,7 +1324,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
/* request interrupt */
err = platform_get_irq_byname(pdev, "intr");
if (err < 0) {
dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
dev_err(dev, "failed to get IRQ: %d\n", err);
goto poweroff;
}
@ -1323,7 +1332,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
if (err) {
dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
dev_err(dev, "failed to register IRQ: %d\n", err);
goto poweroff;
}
@ -1336,6 +1345,7 @@ poweroff:
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
if (pcie->irq > 0)
@ -1345,7 +1355,7 @@ static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
err = phy_exit(pcie->phy);
if (err < 0)
dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
dev_err(dev, "failed to teardown PHY: %d\n", err);
return 0;
}
@ -1384,6 +1394,7 @@ static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
struct tegra_pcie *pcie = data;
struct device *dev = pcie->dev;
struct tegra_msi *msi = &pcie->msi;
unsigned int i, processed = 0;
@ -1403,13 +1414,13 @@ static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
if (test_bit(index, msi->used))
generic_handle_irq(irq);
else
dev_info(pcie->dev, "unhandled MSI\n");
dev_info(dev, "unhandled MSI\n");
} else {
/*
* that's weird who triggered this?
* just clear it
*/
dev_info(pcie->dev, "unexpected MSI\n");
dev_info(dev, "unexpected MSI\n");
}
/* see if there's any more pending in this vector */
@ -1488,7 +1499,8 @@ static const struct irq_domain_ops msi_domain_ops = {
static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_msi *msi = &pcie->msi;
unsigned long base;
@ -1497,20 +1509,20 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
mutex_init(&msi->lock);
msi->chip.dev = pcie->dev;
msi->chip.dev = dev;
msi->chip.setup_irq = tegra_msi_setup_irq;
msi->chip.teardown_irq = tegra_msi_teardown_irq;
msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
&msi_domain_ops, &msi->chip);
if (!msi->domain) {
dev_err(&pdev->dev, "failed to create IRQ domain\n");
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
err = platform_get_irq_byname(pdev, "msi");
if (err < 0) {
dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
dev_err(dev, "failed to get IRQ: %d\n", err);
goto err;
}
@ -1519,7 +1531,7 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
tegra_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
@ -1594,46 +1606,47 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
u32 *xbar)
{
struct device_node *np = pcie->dev->of_node;
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
switch (lanes) {
case 0x0000104:
dev_info(pcie->dev, "4x1, 1x1 configuration\n");
dev_info(dev, "4x1, 1x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
return 0;
case 0x0000102:
dev_info(pcie->dev, "2x1, 1x1 configuration\n");
dev_info(dev, "2x1, 1x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
return 0;
}
} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
switch (lanes) {
case 0x00000204:
dev_info(pcie->dev, "4x1, 2x1 configuration\n");
dev_info(dev, "4x1, 2x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
return 0;
case 0x00020202:
dev_info(pcie->dev, "2x3 configuration\n");
dev_info(dev, "2x3 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
return 0;
case 0x00010104:
dev_info(pcie->dev, "4x1, 1x2 configuration\n");
dev_info(dev, "4x1, 1x2 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
return 0;
}
} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
switch (lanes) {
case 0x00000004:
dev_info(pcie->dev, "single-mode configuration\n");
dev_info(dev, "single-mode configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
return 0;
case 0x00000202:
dev_info(pcie->dev, "dual-mode configuration\n");
dev_info(dev, "dual-mode configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
return 0;
}
@ -1673,7 +1686,8 @@ static bool of_regulator_bulk_available(struct device_node *np,
*/
static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
{
struct device_node *np = pcie->dev->of_node;
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
pcie->num_supplies = 3;
@ -1681,12 +1695,12 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
pcie->num_supplies = 2;
if (pcie->num_supplies == 0) {
dev_err(pcie->dev, "device %s not supported in legacy mode\n",
dev_err(dev, "device %s not supported in legacy mode\n",
np->full_name);
return -ENODEV;
}
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@ -1698,8 +1712,7 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
if (pcie->num_supplies > 2)
pcie->supplies[2].supply = "avdd";
return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
pcie->supplies);
return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
}
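
The legacy path above is the standard regulator-bulk recipe: devm_kcalloc() an array of struct regulator_bulk_data, fill in the supply names, then pass the whole array to devm_regulator_bulk_get(). The recipe in isolation (sketch; the supply names are examples, not Tegra's):

	#include <linux/regulator/consumer.h>

	/* sketch: bulk-get up to three example supplies */
	static int example_get_supplies(struct device *dev,
					struct regulator_bulk_data **out,
					unsigned int n)
	{
		static const char * const names[] = { "pex-clk", "vdd", "avdd" };
		struct regulator_bulk_data *s;
		unsigned int i;
		int err;

		if (n > ARRAY_SIZE(names))
			return -EINVAL;

		s = devm_kcalloc(dev, n, sizeof(*s), GFP_KERNEL);
		if (!s)
			return -ENOMEM;

		for (i = 0; i < n; i++)
			s[i].supply = names[i];

		err = devm_regulator_bulk_get(dev, n, s);
		if (err)
			return err;

		*out = s;
		return 0;
	}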
/*
@ -1713,13 +1726,14 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
*/
static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
{
struct device_node *np = pcie->dev->of_node;
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
unsigned int i = 0;
if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
pcie->num_supplies = 7;
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@ -1746,7 +1760,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
(need_pexb ? 2 : 0);
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@ -1769,7 +1783,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
pcie->num_supplies = 5;
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@ -1782,9 +1796,9 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
pcie->supplies[4].supply = "vddio-pex-clk";
}
if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
pcie->num_supplies))
return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
return devm_regulator_bulk_get(dev, pcie->num_supplies,
pcie->supplies);
/*
@ -1792,9 +1806,9 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
* that the device tree complies with an older version of the device
* tree binding.
*/
dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
dev_info(dev, "using legacy DT binding for power supplies\n");
devm_kfree(pcie->dev, pcie->supplies);
devm_kfree(dev, pcie->supplies);
pcie->num_supplies = 0;
return tegra_pcie_get_legacy_regulators(pcie);
@ -1802,7 +1816,8 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
struct device_node *np = pcie->dev->of_node, *port;
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node, *port;
const struct tegra_pcie_soc *soc = pcie->soc;
struct of_pci_range_parser parser;
struct of_pci_range range;
@ -1812,7 +1827,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
int err;
if (of_pci_range_parser_init(&parser, np)) {
dev_err(pcie->dev, "missing \"ranges\" property\n");
dev_err(dev, "missing \"ranges\" property\n");
return -EINVAL;
}
@ -1867,8 +1882,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_pci_parse_bus_range(np, &pcie->busn);
if (err < 0) {
dev_err(pcie->dev, "failed to parse ranges property: %d\n",
err);
dev_err(dev, "failed to parse ranges property: %d\n", err);
pcie->busn.name = np->name;
pcie->busn.start = 0;
pcie->busn.end = 0xff;
@ -1883,15 +1897,14 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_pci_get_devfn(port);
if (err < 0) {
dev_err(pcie->dev, "failed to parse address: %d\n",
err);
dev_err(dev, "failed to parse address: %d\n", err);
return err;
}
index = PCI_SLOT(err);
if (index < 1 || index > soc->num_ports) {
dev_err(pcie->dev, "invalid port number: %d\n", index);
dev_err(dev, "invalid port number: %d\n", index);
return -EINVAL;
}
@ -1899,13 +1912,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_property_read_u32(port, "nvidia,num-lanes", &value);
if (err < 0) {
dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
dev_err(dev, "failed to parse # of lanes: %d\n",
err);
return err;
}
if (value > 16) {
dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
dev_err(dev, "invalid # of lanes: %u\n", value);
return -EINVAL;
}
@ -1919,14 +1932,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
mask |= ((1 << value) - 1) << lane;
lane += value;
rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
if (!rp)
return -ENOMEM;
err = of_address_to_resource(port, 0, &rp->regs);
if (err < 0) {
dev_err(pcie->dev, "failed to parse address: %d\n",
err);
dev_err(dev, "failed to parse address: %d\n", err);
return err;
}
@ -1936,7 +1948,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->pcie = pcie;
rp->np = port;
rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
rp->base = devm_ioremap_resource(dev, &rp->regs);
if (IS_ERR(rp->base))
return PTR_ERR(rp->base);
@ -1945,7 +1957,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
if (err < 0) {
dev_err(pcie->dev, "invalid lane configuration\n");
dev_err(dev, "invalid lane configuration\n");
return err;
}
@ -1964,6 +1976,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
struct device *dev = port->pcie->dev;
unsigned int retries = 3;
unsigned long value;
@ -1986,8 +1999,7 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
} while (--timeout);
if (!timeout) {
dev_err(port->pcie->dev, "link %u down, retrying\n",
port->index);
dev_err(dev, "link %u down, retrying\n", port->index);
goto retry;
}
@ -2011,11 +2023,12 @@ retry:
static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
struct tegra_pcie_port *port, *tmp;
struct hw_pci hw;
list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
dev_info(pcie->dev, "probing port %u, using %u lanes\n",
dev_info(dev, "probing port %u, using %u lanes\n",
port->index, port->lanes);
tegra_pcie_port_enable(port);
@ -2023,7 +2036,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
if (tegra_pcie_port_check_link(port))
continue;
dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
dev_info(dev, "link %u down, ignoring\n", port->index);
tegra_pcie_port_disable(port);
tegra_pcie_port_free(port);
@ -2041,8 +2054,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
hw.map_irq = tegra_pcie_map_irq;
hw.ops = &tegra_pcie_ops;
pci_common_init_dev(pcie->dev, &hw);
pci_common_init_dev(dev, &hw);
return 0;
}
@ -2204,17 +2216,18 @@ remove:
static int tegra_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra_pcie *pcie;
int err;
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->soc = of_device_get_match_data(&pdev->dev);
pcie->soc = of_device_get_match_data(dev);
INIT_LIST_HEAD(&pcie->buses);
INIT_LIST_HEAD(&pcie->ports);
pcie->dev = &pdev->dev;
pcie->dev = dev;
err = tegra_pcie_parse_dt(pcie);
if (err < 0)
@ -2222,7 +2235,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
err = tegra_pcie_get_resources(pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request resources: %d\n", err);
dev_err(dev, "failed to request resources: %d\n", err);
return err;
}
@ -2236,27 +2249,23 @@ static int tegra_pcie_probe(struct platform_device *pdev)
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = tegra_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(&pdev->dev,
"failed to enable MSI support: %d\n",
err);
dev_err(dev, "failed to enable MSI support: %d\n", err);
goto put_resources;
}
}
err = tegra_pcie_enable(pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
dev_err(dev, "failed to enable PCIe ports: %d\n", err);
goto disable_msi;
}
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_pcie_debugfs_init(pcie);
if (err < 0)
dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
err);
dev_err(dev, "failed to setup debugfs: %d\n", err);
}
platform_set_drvdata(pdev, pcie);
return 0;
disable_msi:

View file

@ -76,6 +76,16 @@ struct xgene_pcie_port {
u32 version;
};
static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg)
{
return readl(port->csr_base + reg);
}
static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val)
{
writel(val, port->csr_base + reg);
}
static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
{
return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
@ -112,9 +122,9 @@ static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
if (!pci_is_root_bus(bus))
rtdid_val = (b << 8) | (d << 3) | f;
writel(rtdid_val, port->csr_base + RTDID);
xgene_pcie_writel(port, RTDID, rtdid_val);
/* read the register back to ensure flush */
readl(port->csr_base + RTDID);
xgene_pcie_readl(port, RTDID);
}
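
The read-back of RTDID right after the write is a posted-write flush: MMIO writes may be buffered, and reading from the same device forces them to complete before the config access that depends on RTDID goes out. The idiom as one helper (sketch; the name is assumed):

	/* sketch: write a CSR, then read it back to flush the posted write */
	static void xgene_pcie_writel_flushed(struct xgene_pcie_port *port,
					      u32 reg, u32 val)
	{
		xgene_pcie_writel(port, reg, val);
		xgene_pcie_readl(port, reg);
	}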
/*
@ -179,28 +189,28 @@ static struct pci_ops xgene_pcie_ops = {
.write = pci_generic_config_write32,
};
static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
u32 flags, u64 size)
{
u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
u32 val32 = 0;
u32 val;
val32 = readl(csr_base + addr);
val32 = xgene_pcie_readl(port, addr);
val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
writel(val, csr_base + addr);
xgene_pcie_writel(port, addr, val);
val32 = readl(csr_base + addr + 0x04);
val32 = xgene_pcie_readl(port, addr + 0x04);
val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
writel(val, csr_base + addr + 0x04);
xgene_pcie_writel(port, addr + 0x04, val);
val32 = readl(csr_base + addr + 0x04);
val32 = xgene_pcie_readl(port, addr + 0x04);
val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
writel(val, csr_base + addr + 0x04);
xgene_pcie_writel(port, addr + 0x04, val);
val32 = readl(csr_base + addr + 0x08);
val32 = xgene_pcie_readl(port, addr + 0x08);
val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
writel(val, csr_base + addr + 0x08);
xgene_pcie_writel(port, addr + 0x08, val);
return mask;
}
@ -208,32 +218,32 @@ static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
static void xgene_pcie_linkup(struct xgene_pcie_port *port,
u32 *lanes, u32 *speed)
{
void __iomem *csr_base = port->csr_base;
u32 val32;
port->link_up = false;
val32 = readl(csr_base + PCIECORE_CTLANDSTATUS);
val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS);
if (val32 & LINK_UP_MASK) {
port->link_up = true;
*speed = PIPE_PHY_RATE_RD(val32);
val32 = readl(csr_base + BRIDGE_STATUS_0);
val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0);
*lanes = val32 >> 26;
}
}
static int xgene_pcie_init_port(struct xgene_pcie_port *port)
{
struct device *dev = port->dev;
int rc;
port->clk = clk_get(port->dev, NULL);
port->clk = clk_get(dev, NULL);
if (IS_ERR(port->clk)) {
dev_err(port->dev, "clock not available\n");
dev_err(dev, "clock not available\n");
return -ENODEV;
}
rc = clk_prepare_enable(port->clk);
if (rc) {
dev_err(port->dev, "clock enable failed\n");
dev_err(dev, "clock enable failed\n");
return rc;
}
@ -243,15 +253,16 @@ static int xgene_pcie_init_port(struct xgene_pcie_port *port)
static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
struct platform_device *pdev)
{
struct device *dev = port->dev;
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
port->csr_base = devm_ioremap_resource(port->dev, res);
port->csr_base = devm_ioremap_resource(dev, res);
if (IS_ERR(port->csr_base))
return PTR_ERR(port->csr_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
port->cfg_base = devm_ioremap_resource(port->dev, res);
port->cfg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(port->cfg_base))
return PTR_ERR(port->cfg_base);
port->cfg_addr = res->start;
@ -263,7 +274,7 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
struct resource *res, u32 offset,
u64 cpu_addr, u64 pci_addr)
{
void __iomem *base = port->csr_base + offset;
struct device *dev = port->dev;
resource_size_t size = resource_size(res);
u64 restype = resource_type(res);
u64 mask = 0;
@ -280,22 +291,24 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
if (size >= min_size)
mask = ~(size - 1) | flag;
else
dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n",
dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n",
(u64)size, min_size);
writel(lower_32_bits(cpu_addr), base);
writel(upper_32_bits(cpu_addr), base + 0x04);
writel(lower_32_bits(mask), base + 0x08);
writel(upper_32_bits(mask), base + 0x0c);
writel(lower_32_bits(pci_addr), base + 0x10);
writel(upper_32_bits(pci_addr), base + 0x14);
xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr));
xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr));
xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask));
xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask));
xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr));
xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr));
}
static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr)
static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
{
writel(lower_32_bits(addr), csr_base + CFGBARL);
writel(upper_32_bits(addr), csr_base + CFGBARH);
writel(EN_REG, csr_base + CFGCTL);
u64 addr = port->cfg_addr;
xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr));
xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr));
xgene_pcie_writel(port, CFGCTL, EN_REG);
}
static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
@ -310,7 +323,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
struct resource *res = window->res;
u64 restype = resource_type(res);
dev_dbg(port->dev, "%pR\n", res);
dev_dbg(dev, "%pR\n", res);
switch (restype) {
case IORESOURCE_IO:
@ -339,17 +352,18 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
return -EINVAL;
}
}
xgene_pcie_setup_cfg_reg(port->csr_base, port->cfg_addr);
xgene_pcie_setup_cfg_reg(port);
return 0;
}
static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size)
static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg,
u64 pim, u64 size)
{
writel(lower_32_bits(pim), addr);
writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04);
writel(lower_32_bits(size), addr + 0x10);
writel(upper_32_bits(size), addr + 0x14);
xgene_pcie_writel(port, pim_reg, lower_32_bits(pim));
xgene_pcie_writel(port, pim_reg + 0x04,
upper_32_bits(pim) | EN_COHERENCY);
xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size));
xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size));
}
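
(The size argument the caller below passes to xgene_pcie_setup_pims() is the mask ~(size - 1); a minimal sketch of that arithmetic, using a hypothetical 1 GiB window:)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = 0x40000000ULL;		/* 1 GiB inbound window */
	uint64_t mask = ~(size - 1);		/* 0xffffffffc0000000 */

	/* the two halves written at pim_reg + 0x10 and pim_reg + 0x14 */
	printf("lo=0x%08x hi=0x%08x\n",
	       (uint32_t)(mask & 0xffffffffULL), (uint32_t)(mask >> 32));
	return 0;
}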
/*
@ -379,10 +393,10 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
struct of_pci_range *range, u8 *ib_reg_mask)
{
void __iomem *csr_base = port->csr_base;
void __iomem *cfg_base = port->cfg_base;
struct device *dev = port->dev;
void *bar_addr;
void *pim_addr;
u32 pim_reg;
u64 cpu_addr = range->cpu_addr;
u64 pci_addr = range->pci_addr;
u64 size = range->size;
@ -393,7 +407,7 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
if (region < 0) {
dev_warn(port->dev, "invalid pcie dma-range config\n");
dev_warn(dev, "invalid pcie dma-range config\n");
return;
}
@ -403,29 +417,27 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
switch (region) {
case 0:
xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size);
xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size);
bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
writel(bar_low, bar_addr);
writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
pim_addr = csr_base + PIM1_1L;
pim_reg = PIM1_1L;
break;
case 1:
bar_addr = csr_base + IBAR2;
writel(bar_low, bar_addr);
writel(lower_32_bits(mask), csr_base + IR2MSK);
pim_addr = csr_base + PIM2_1L;
xgene_pcie_writel(port, IBAR2, bar_low);
xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask));
pim_reg = PIM2_1L;
break;
case 2:
bar_addr = csr_base + IBAR3L;
writel(bar_low, bar_addr);
writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
writel(lower_32_bits(mask), csr_base + IR3MSKL);
writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4);
pim_addr = csr_base + PIM3_1L;
xgene_pcie_writel(port, IBAR3L, bar_low);
xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr));
xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask));
xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask));
pim_reg = PIM3_1L;
break;
}
xgene_pcie_setup_pims(pim_addr, pci_addr, ~(size - 1));
xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
}
static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
@ -463,7 +475,7 @@ static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
for_each_of_pci_range(&parser, &range) {
u64 end = range.cpu_addr + range.size - 1;
dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
range.flags, range.cpu_addr, end, range.pci_addr);
xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
}
@ -476,13 +488,14 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
int i;
for (i = PIM1_1L; i <= CFGCTL; i += 4)
writel(0x0, port->csr_base + i);
xgene_pcie_writel(port, i, 0);
}
static int xgene_pcie_setup(struct xgene_pcie_port *port,
struct list_head *res,
resource_size_t io_base)
{
struct device *dev = port->dev;
u32 val, lanes = 0, speed = 0;
int ret;
@ -490,7 +503,7 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
/* setup the vendor and device IDs correctly */
val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
writel(val, port->csr_base + BRIDGE_CFG_0);
xgene_pcie_writel(port, BRIDGE_CFG_0, val);
ret = xgene_pcie_map_ranges(port, res, io_base);
if (ret)
@ -502,27 +515,28 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
xgene_pcie_linkup(port, &lanes, &speed);
if (!port->link_up)
dev_info(port->dev, "(rc) link down\n");
dev_info(dev, "(rc) link down\n");
else
dev_info(port->dev, "(rc) x%d gen-%d link up\n",
lanes, speed + 1);
dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1);
return 0;
}
static int xgene_pcie_probe_bridge(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
struct xgene_pcie_port *port;
resource_size_t iobase = 0;
struct pci_bus *bus;
int ret;
LIST_HEAD(res);
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
port->node = of_node_get(pdev->dev.of_node);
port->dev = &pdev->dev;
port->node = of_node_get(dn);
port->dev = dev;
port->version = XGENE_PCIE_IP_VER_UNKN;
if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
@ -540,7 +554,7 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (ret)
return ret;
ret = devm_request_pci_bus_resources(&pdev->dev, &res);
ret = devm_request_pci_bus_resources(dev, &res);
if (ret)
goto error;
@ -548,8 +562,7 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (ret)
goto error;
bus = pci_create_root_bus(&pdev->dev, 0,
&xgene_pcie_ops, port, &res);
bus = pci_create_root_bus(dev, 0, &xgene_pcie_ops, port, &res);
if (!bus) {
ret = -ENOMEM;
goto error;
@ -558,8 +571,6 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
pci_bus_add_devices(bus);
platform_set_drvdata(pdev, port);
return 0;
error:


@ -55,15 +55,19 @@
#define TLP_PAYLOAD_SIZE 0x01
#define TLP_READ_TAG 0x1d
#define TLP_WRITE_TAG 0x10
#define TLP_CFG_DW0(fmttype) (((fmttype) << 24) | TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(reqid, tag, be) (((reqid) << 16) | (tag << 8) | (be))
#define RP_DEVFN 0
#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_CFG_DW0(pcie, bus) \
((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \
: TLP_FMTTYPE_CFGRD1) << 24) | \
TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be) \
(((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
(((bus) << 24) | ((devfn) << 16) | (offset))
#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_COMP_STATUS(s) (((s) >> 12) & 7)
#define TLP_HDR_SIZE 3
#define TLP_LOOP 500
#define RP_DEVFN 0
#define LINK_UP_TIMEOUT HZ
#define LINK_RETRAIN_TIMEOUT HZ
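
(As a rough illustration of what the reworked macros produce for a type-0 config read on the root bus; the constants are assumed to match the driver's, and the register offset and byte enables are hypothetical:)

#include <stdint.h>
#include <stdio.h>

#define TLP_PAYLOAD_SIZE	0x01
#define TLP_FMTTYPE_CFGRD0	0x04	/* assumed from the driver */
#define TLP_READ_TAG		0x1d
#define RP_DEVFN		0

int main(void)
{
	uint32_t root_bus = 0, devfn = 0, where = 0x10, byte_en = 0xf;
	uint32_t hdr[3];

	hdr[0] = (TLP_FMTTYPE_CFGRD0 << 24) | TLP_PAYLOAD_SIZE;
	hdr[1] = (((root_bus << 8) | RP_DEVFN) << 16) |
		 (TLP_READ_TAG << 8) | byte_en;
	hdr[2] = (root_bus << 24) | (devfn << 16) | where;

	/* prints 0x04000001 0x00001d0f 0x00000010 */
	printf("0x%08x 0x%08x 0x%08x\n", hdr[0], hdr[1], hdr[2]);
	return 0;
}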
@ -74,7 +78,7 @@
struct altera_pcie {
struct platform_device *pdev;
void __iomem *cra_base;
void __iomem *cra_base; /* DT Cra */
int irq;
u8 root_bus_nr;
struct irq_domain *irq_domain;
@ -131,7 +135,7 @@ static void tlp_write_tx(struct altera_pcie *pcie,
cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
}
static bool altera_pcie_valid_config(struct altera_pcie *pcie,
static bool altera_pcie_valid_device(struct altera_pcie *pcie,
struct pci_bus *bus, int dev)
{
/* If there is no link, then there is no device */
@ -218,13 +222,8 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
{
u32 headers[TLP_HDR_SIZE];
if (bus == pcie->root_bus_nr)
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD0);
else
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
TLP_READ_TAG, byte_en);
headers[0] = TLP_CFG_DW0(pcie, bus);
headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en);
headers[2] = TLP_CFG_DW2(bus, devfn, where);
tlp_write_packet(pcie, headers, 0, false);
@ -238,13 +237,8 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
u32 headers[TLP_HDR_SIZE];
int ret;
if (bus == pcie->root_bus_nr)
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR0);
else
headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);
headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
TLP_WRITE_TAG, byte_en);
headers[0] = TLP_CFG_DW0(pcie, bus);
headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en);
headers[2] = TLP_CFG_DW2(bus, devfn, where);
/* check alignment to Qword */
@ -342,7 +336,7 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
if (altera_pcie_hide_rc_bar(bus, devfn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) {
*value = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@ -359,7 +353,7 @@ static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
if (altera_pcie_hide_rc_bar(bus, devfn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
@ -394,6 +388,7 @@ static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
static void altera_wait_link_retrain(struct altera_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
u16 reg16;
unsigned long start_jiffies;
@ -406,7 +401,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie)
break;
if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
dev_err(&pcie->pdev->dev, "link retrain timeout\n");
dev_err(dev, "link retrain timeout\n");
break;
}
udelay(100);
@ -419,7 +414,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie)
break;
if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
dev_err(&pcie->pdev->dev, "link up timeout\n");
dev_err(dev, "link up timeout\n");
break;
}
udelay(100);
@ -460,7 +455,6 @@ static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
return 0;
}
@ -472,12 +466,14 @@ static void altera_pcie_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct altera_pcie *pcie;
struct device *dev;
unsigned long status;
u32 bit;
u32 virq;
chained_irq_enter(chip, desc);
pcie = irq_desc_get_handler_data(desc);
dev = &pcie->pdev->dev;
while ((status = cra_readl(pcie, P2A_INT_STATUS)
& P2A_INT_STS_ALL) != 0) {
@ -489,8 +485,7 @@ static void altera_pcie_isr(struct irq_desc *desc)
if (virq)
generic_handle_irq(virq);
else
dev_err(&pcie->pdev->dev,
"unexpected IRQ, INT%d\n", bit);
dev_err(dev, "unexpected IRQ, INT%d\n", bit);
}
}
@ -549,30 +544,25 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
static int altera_pcie_parse_dt(struct altera_pcie *pcie)
{
struct resource *cra;
struct device *dev = &pcie->pdev->dev;
struct platform_device *pdev = pcie->pdev;
struct resource *cra;
cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
if (!cra) {
dev_err(&pdev->dev, "no Cra memory resource defined\n");
return -ENODEV;
}
pcie->cra_base = devm_ioremap_resource(&pdev->dev, cra);
pcie->cra_base = devm_ioremap_resource(dev, cra);
if (IS_ERR(pcie->cra_base)) {
dev_err(&pdev->dev, "failed to map cra memory\n");
dev_err(dev, "failed to map cra memory\n");
return PTR_ERR(pcie->cra_base);
}
/* setup IRQ */
pcie->irq = platform_get_irq(pdev, 0);
if (pcie->irq <= 0) {
dev_err(&pdev->dev, "failed to get IRQ: %d\n", pcie->irq);
dev_err(dev, "failed to get IRQ: %d\n", pcie->irq);
return -EINVAL;
}
irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
return 0;
}
@ -583,12 +573,13 @@ static void altera_pcie_host_init(struct altera_pcie *pcie)
static int altera_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct altera_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
int ret;
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@ -596,7 +587,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
ret = altera_pcie_parse_dt(pcie);
if (ret) {
dev_err(&pdev->dev, "Parsing DT failed\n");
dev_err(dev, "Parsing DT failed\n");
return ret;
}
@ -604,13 +595,13 @@ static int altera_pcie_probe(struct platform_device *pdev)
ret = altera_pcie_parse_request_of_pci_ranges(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed add resources\n");
dev_err(dev, "Failed add resources\n");
return ret;
}
ret = altera_pcie_init_irq_domain(pcie);
if (ret) {
dev_err(&pdev->dev, "Failed creating IRQ Domain\n");
dev_err(dev, "Failed creating IRQ Domain\n");
return ret;
}
@ -620,7 +611,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
altera_pcie_host_init(pcie);
bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops,
bus = pci_scan_root_bus(dev, pcie->root_bus_nr, &altera_pcie_ops,
pcie, &pcie->resources);
if (!bus)
return -ENOMEM;
@ -633,8 +624,6 @@ static int altera_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
platform_set_drvdata(pdev, pcie);
return ret;
}


@ -29,34 +29,33 @@
#include "pcie-designware.h"
struct armada8k_pcie {
void __iomem *base;
struct pcie_port pp; /* pp.dbi_base is DT ctrl */
struct clk *clk;
struct pcie_port pp;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
#define PCIE_GLOBAL_CONTROL_REG 0x0
#define PCIE_GLOBAL_CONTROL_REG (PCIE_VENDOR_REGS_OFFSET + 0x0)
#define PCIE_APP_LTSSM_EN BIT(2)
#define PCIE_DEVICE_TYPE_SHIFT 4
#define PCIE_DEVICE_TYPE_MASK 0xF
#define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */
#define PCIE_GLOBAL_STATUS_REG 0x8
#define PCIE_GLOBAL_STATUS_REG (PCIE_VENDOR_REGS_OFFSET + 0x8)
#define PCIE_GLB_STS_RDLH_LINK_UP BIT(1)
#define PCIE_GLB_STS_PHY_LINK_UP BIT(9)
#define PCIE_GLOBAL_INT_CAUSE1_REG 0x1C
#define PCIE_GLOBAL_INT_MASK1_REG 0x20
#define PCIE_GLOBAL_INT_CAUSE1_REG (PCIE_VENDOR_REGS_OFFSET + 0x1C)
#define PCIE_GLOBAL_INT_MASK1_REG (PCIE_VENDOR_REGS_OFFSET + 0x20)
#define PCIE_INT_A_ASSERT_MASK BIT(9)
#define PCIE_INT_B_ASSERT_MASK BIT(10)
#define PCIE_INT_C_ASSERT_MASK BIT(11)
#define PCIE_INT_D_ASSERT_MASK BIT(12)
#define PCIE_ARCACHE_TRC_REG 0x50
#define PCIE_AWCACHE_TRC_REG 0x54
#define PCIE_ARUSER_REG 0x5C
#define PCIE_AWUSER_REG 0x60
#define PCIE_ARCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x50)
#define PCIE_AWCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x54)
#define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C)
#define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60)
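
(With PCIE_VENDOR_REGS_OFFSET folded into the defines, the vendor registers are addressed straight off dbi_base through dw_pcie_readl_rc()/dw_pcie_writel_rc(); a trivial check of the resulting offset, for illustration only:)

#include <stdio.h>

#define PCIE_VENDOR_REGS_OFFSET	0x8000
#define PCIE_GLOBAL_STATUS_REG	(PCIE_VENDOR_REGS_OFFSET + 0x8)

int main(void)
{
	/* old: pcie->base + 0x8 with base = dbi_base + 0x8000;
	 * new: dbi_base + PCIE_GLOBAL_STATUS_REG — same register. */
	printf("0x%04x\n", PCIE_GLOBAL_STATUS_REG);	/* 0x8008 */
	return 0;
}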
/*
* AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
* allocate
@ -72,11 +71,10 @@ struct armada8k_pcie {
static int armada8k_pcie_link_up(struct pcie_port *pp)
{
struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
u32 reg;
u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
reg = readl(pcie->base + PCIE_GLOBAL_STATUS_REG);
reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_STATUS_REG);
if ((reg & mask) == mask)
return 1;
@ -85,51 +83,50 @@ static int armada8k_pcie_link_up(struct pcie_port *pp)
return 0;
}
static void armada8k_pcie_establish_link(struct pcie_port *pp)
static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
{
struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
void __iomem *base = pcie->base;
struct pcie_port *pp = &pcie->pp;
u32 reg;
if (!dw_pcie_link_up(pp)) {
/* Disable LTSSM state machine to enable configuration */
reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
reg &= ~(PCIE_APP_LTSSM_EN);
writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
}
/* Set the device to root complex mode */
reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
/* Set the PCIe master AxCache attributes */
writel(ARCACHE_DEFAULT_VALUE, base + PCIE_ARCACHE_TRC_REG);
writel(AWCACHE_DEFAULT_VALUE, base + PCIE_AWCACHE_TRC_REG);
dw_pcie_writel_rc(pp, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE);
dw_pcie_writel_rc(pp, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE);
/* Set the PCIe master AxDomain attributes */
reg = readl(base + PCIE_ARUSER_REG);
reg = dw_pcie_readl_rc(pp, PCIE_ARUSER_REG);
reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
writel(reg, base + PCIE_ARUSER_REG);
dw_pcie_writel_rc(pp, PCIE_ARUSER_REG, reg);
reg = readl(base + PCIE_AWUSER_REG);
reg = dw_pcie_readl_rc(pp, PCIE_AWUSER_REG);
reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
writel(reg, base + PCIE_AWUSER_REG);
dw_pcie_writel_rc(pp, PCIE_AWUSER_REG, reg);
/* Enable INT A-D interrupts */
reg = readl(base + PCIE_GLOBAL_INT_MASK1_REG);
reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_MASK1_REG);
reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
writel(reg, base + PCIE_GLOBAL_INT_MASK1_REG);
dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_MASK1_REG, reg);
if (!dw_pcie_link_up(pp)) {
/* Configuration done. Start LTSSM */
reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
reg |= PCIE_APP_LTSSM_EN;
writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
}
/* Wait until the link becomes active again */
@ -139,15 +136,16 @@ static void armada8k_pcie_establish_link(struct pcie_port *pp)
static void armada8k_pcie_host_init(struct pcie_port *pp)
{
struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
dw_pcie_setup_rc(pp);
armada8k_pcie_establish_link(pp);
armada8k_pcie_establish_link(pcie);
}
static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
void __iomem *base = pcie->base;
struct armada8k_pcie *pcie = arg;
struct pcie_port *pp = &pcie->pp;
u32 val;
/*
@ -155,8 +153,8 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
* PCI device. However, they are also latched into the PCIe
* controller, so we simply discard them.
*/
val = readl(base + PCIE_GLOBAL_INT_CAUSE1_REG);
writel(val, base + PCIE_GLOBAL_INT_CAUSE1_REG);
val = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG);
dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG, val);
return IRQ_HANDLED;
}
@ -166,9 +164,10 @@ static struct pcie_host_ops armada8k_pcie_host_ops = {
.host_init = armada8k_pcie_host_init,
};
static int armada8k_add_pcie_port(struct pcie_port *pp,
static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &pcie->pp;
struct device *dev = &pdev->dev;
int ret;
@ -182,7 +181,7 @@ static int armada8k_add_pcie_port(struct pcie_port *pp,
}
ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
IRQF_SHARED, "armada8k-pcie", pp);
IRQF_SHARED, "armada8k-pcie", pcie);
if (ret) {
dev_err(dev, "failed to request irq %d\n", pp->irq);
return ret;
@ -217,7 +216,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
pp = &pcie->pp;
pp->dev = dev;
platform_set_drvdata(pdev, pcie);
/* Get the dw-pcie unit configuration/control registers base. */
base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
@ -228,9 +226,7 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
goto fail;
}
pcie->base = pp->dbi_base + PCIE_VENDOR_REGS_OFFSET;
ret = armada8k_add_pcie_port(pp, pdev);
ret = armada8k_add_pcie_port(pcie, pdev);
if (ret)
goto fail;


@ -27,9 +27,9 @@
#define to_artpec6_pcie(x) container_of(x, struct artpec6_pcie, pp)
struct artpec6_pcie {
struct pcie_port pp;
struct regmap *regmap;
void __iomem *phy_base;
struct pcie_port pp; /* pp.dbi_base is DT dbi */
struct regmap *regmap; /* DT axis,syscon-pcie */
void __iomem *phy_base; /* DT phy */
};
/* PCIe Port Logic registers (memory-mapped) */
@ -65,18 +65,31 @@ struct artpec6_pcie {
#define ARTPEC6_CPU_TO_BUS_ADDR 0x0fffffff
static int artpec6_pcie_establish_link(struct pcie_port *pp)
static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp);
u32 val;
regmap_read(artpec6_pcie->regmap, offset, &val);
return val;
}
static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val)
{
regmap_write(artpec6_pcie->regmap, offset, val);
}
static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
{
struct pcie_port *pp = &artpec6_pcie->pp;
u32 val;
unsigned int retries;
/* Hold DW core in reset */
regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_CORE_RESET_REQ;
regmap_write(artpec6_pcie->regmap, PCIECFG, val);
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
PCIECFG_MODE_TX_DRV_EN |
PCIECFG_CISRREN | /* Reference clock term. 100 Ohm */
@ -84,27 +97,27 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp)
val |= PCIECFG_REFCLK_ENABLE;
val &= ~PCIECFG_DBG_OEN;
val &= ~PCIECFG_CLKREQ_B;
regmap_write(artpec6_pcie->regmap, PCIECFG, val);
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(5000, 6000);
regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
val |= NOCCFG_ENABLE_CLK_PCIE;
regmap_write(artpec6_pcie->regmap, NOCCFG, val);
artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
usleep_range(20, 30);
regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE;
regmap_write(artpec6_pcie->regmap, PCIECFG, val);
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(6000, 7000);
regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
regmap_write(artpec6_pcie->regmap, NOCCFG, val);
artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
retries = 50;
do {
usleep_range(1000, 2000);
regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
retries--;
} while (retries &&
(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
@ -117,16 +130,16 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp)
} while (retries && !(val & PHY_COSPLLLOCK));
/* Take DW core out of reset */
regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val &= ~PCIECFG_CORE_RESET_REQ;
regmap_write(artpec6_pcie->regmap, PCIECFG, val);
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(100, 200);
/*
* Enable writing to config regs. This is required as the Synopsys
* driver changes the class code. That register needs DBI write enable.
*/
writel(DBI_RO_WR_EN, pp->dbi_base + MISC_CONTROL_1_OFF);
dw_pcie_writel_rc(pp, MISC_CONTROL_1_OFF, DBI_RO_WR_EN);
pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
@ -137,78 +150,69 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
/* assert LTSSM enable */
regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_LTSSM_ENABLE;
regmap_write(artpec6_pcie->regmap, PCIECFG, val);
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
/* check if the link is up or not */
if (!dw_pcie_wait_for_link(pp))
return 0;
dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
return -ETIMEDOUT;
}
static void artpec6_pcie_enable_interrupts(struct pcie_port *pp)
static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
{
struct pcie_port *pp = &artpec6_pcie->pp;
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
}
static void artpec6_pcie_host_init(struct pcie_port *pp)
{
artpec6_pcie_establish_link(pp);
artpec6_pcie_enable_interrupts(pp);
}
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp);
static int artpec6_pcie_link_up(struct pcie_port *pp)
{
u32 rc;
/*
* Get status from Synopsys IP
* link is debug bit 36, debug register 1 starts at bit 32
*/
rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
if (rc)
return 1;
return 0;
artpec6_pcie_establish_link(artpec6_pcie);
artpec6_pcie_enable_interrupts(artpec6_pcie);
}
static struct pcie_host_ops artpec6_pcie_host_ops = {
.link_up = artpec6_pcie_link_up,
.host_init = artpec6_pcie_host_init,
};
static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct artpec6_pcie *artpec6_pcie = arg;
struct pcie_port *pp = &artpec6_pcie->pp;
return dw_handle_msi_irq(pp);
}
static int artpec6_add_pcie_port(struct pcie_port *pp,
static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &artpec6_pcie->pp;
struct device *dev = pp->dev;
int ret;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
if (pp->msi_irq <= 0) {
dev_err(&pdev->dev, "failed to get MSI irq\n");
dev_err(dev, "failed to get MSI irq\n");
return -ENODEV;
}
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
ret = devm_request_irq(dev, pp->msi_irq,
artpec6_pcie_msi_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"artpec6-pcie-msi", pp);
"artpec6-pcie-msi", artpec6_pcie);
if (ret) {
dev_err(&pdev->dev, "failed to request MSI irq\n");
dev_err(dev, "failed to request MSI irq\n");
return ret;
}
}
@ -218,7 +222,7 @@ static int artpec6_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -227,41 +231,40 @@ static int artpec6_add_pcie_port(struct pcie_port *pp,
static int artpec6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct artpec6_pcie *artpec6_pcie;
struct pcie_port *pp;
struct resource *dbi_base;
struct resource *phy_base;
int ret;
artpec6_pcie = devm_kzalloc(&pdev->dev, sizeof(*artpec6_pcie),
GFP_KERNEL);
artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL);
if (!artpec6_pcie)
return -ENOMEM;
pp = &artpec6_pcie->pp;
pp->dev = &pdev->dev;
pp->dev = dev;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
artpec6_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
if (IS_ERR(artpec6_pcie->phy_base))
return PTR_ERR(artpec6_pcie->phy_base);
artpec6_pcie->regmap =
syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
syscon_regmap_lookup_by_phandle(dev->of_node,
"axis,syscon-pcie");
if (IS_ERR(artpec6_pcie->regmap))
return PTR_ERR(artpec6_pcie->regmap);
ret = artpec6_add_pcie_port(pp, pdev);
ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
if (ret < 0)
return ret;
platform_set_drvdata(pdev, artpec6_pcie);
return 0;
}


@ -25,8 +25,7 @@
#include "pcie-designware.h"
struct dw_plat_pcie {
void __iomem *mem_base;
struct pcie_port pp;
struct pcie_port pp; /* pp.dbi_base is DT 0th resource */
};
static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
@ -52,6 +51,7 @@ static struct pcie_host_ops dw_plat_pcie_host_ops = {
static int dw_plat_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
{
struct device *dev = pp->dev;
int ret;
pp->irq = platform_get_irq(pdev, 1);
@ -63,11 +63,11 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
if (pp->msi_irq < 0)
return pp->msi_irq;
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
ret = devm_request_irq(dev, pp->msi_irq,
dw_plat_pcie_msi_irq_handler,
IRQF_SHARED, "dw-plat-pcie-msi", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request MSI IRQ\n");
dev_err(dev, "failed to request MSI IRQ\n");
return ret;
}
}
@ -77,7 +77,7 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -86,31 +86,28 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_plat_pcie *dw_plat_pcie;
struct pcie_port *pp;
struct resource *res; /* Resource from DT */
int ret;
dw_plat_pcie = devm_kzalloc(&pdev->dev, sizeof(*dw_plat_pcie),
GFP_KERNEL);
dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
if (!dw_plat_pcie)
return -ENOMEM;
pp = &dw_plat_pcie->pp;
pp->dev = &pdev->dev;
pp->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dw_plat_pcie->mem_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dw_plat_pcie->mem_base))
return PTR_ERR(dw_plat_pcie->mem_base);
pp->dbi_base = dw_plat_pcie->mem_base;
pp->dbi_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
ret = dw_plat_add_pcie_port(pp, pdev);
if (ret < 0)
return ret;
platform_set_drvdata(pdev, dw_plat_pcie);
return 0;
}


@ -141,41 +141,35 @@ int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
static inline u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
if (pp->ops->readl_rc)
return pp->ops->readl_rc(pp, pp->dbi_base + reg);
return pp->ops->readl_rc(pp, reg);
return readl(pp->dbi_base + reg);
}
static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
if (pp->ops->writel_rc)
pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
pp->ops->writel_rc(pp, reg, val);
else
writel(val, pp->dbi_base + reg);
}
static inline u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
static u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
if (pp->ops->readl_rc)
return pp->ops->readl_rc(pp, pp->dbi_base + offset + reg);
return readl(pp->dbi_base + offset + reg);
return dw_pcie_readl_rc(pp, offset + reg);
}
static inline void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index,
u32 val, u32 reg)
static void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index, u32 reg,
u32 val)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
if (pp->ops->writel_rc)
pp->ops->writel_rc(pp, val, pp->dbi_base + offset + reg);
else
writel(val, pp->dbi_base + offset + reg);
dw_pcie_writel_rc(pp, offset + reg, val);
}
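
(dw_pcie_readl_rc()/dw_pcie_writel_rc() are now exported with the register offset before the value; a sketch of a platform override against the new prototypes — the my_* names are hypothetical:)

#include "pcie-designware.h"

static u32 my_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
	return readl(pp->dbi_base + reg);
}

static void my_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
	writel(val, pp->dbi_base + reg);
}

static struct pcie_host_ops my_pcie_host_ops = {
	.readl_rc = my_pcie_readl_rc,
	.writel_rc = my_pcie_writel_rc,
};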
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
@ -202,35 +196,35 @@ static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
u32 retries, val;
if (pp->iatu_unroll_enabled) {
dw_pcie_writel_unroll(pp, index,
lower_32_bits(cpu_addr), PCIE_ATU_UNR_LOWER_BASE);
dw_pcie_writel_unroll(pp, index,
upper_32_bits(cpu_addr), PCIE_ATU_UNR_UPPER_BASE);
dw_pcie_writel_unroll(pp, index,
lower_32_bits(cpu_addr + size - 1), PCIE_ATU_UNR_LIMIT);
dw_pcie_writel_unroll(pp, index,
lower_32_bits(pci_addr), PCIE_ATU_UNR_LOWER_TARGET);
dw_pcie_writel_unroll(pp, index,
upper_32_bits(pci_addr), PCIE_ATU_UNR_UPPER_TARGET);
dw_pcie_writel_unroll(pp, index,
type, PCIE_ATU_UNR_REGION_CTRL1);
dw_pcie_writel_unroll(pp, index,
PCIE_ATU_ENABLE, PCIE_ATU_UNR_REGION_CTRL2);
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_BASE,
lower_32_bits(cpu_addr));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_BASE,
upper_32_bits(cpu_addr));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LIMIT,
lower_32_bits(cpu_addr + size - 1));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_TARGET,
lower_32_bits(pci_addr));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_TARGET,
upper_32_bits(pci_addr));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL1,
type);
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL2,
PCIE_ATU_ENABLE);
} else {
dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
PCIE_ATU_VIEWPORT);
dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr),
PCIE_ATU_LOWER_BASE);
dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr),
PCIE_ATU_UPPER_BASE);
dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
PCIE_ATU_LIMIT);
dw_pcie_writel_rc(pp, lower_32_bits(pci_addr),
PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, upper_32_bits(pci_addr),
PCIE_ATU_UPPER_TARGET);
dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT,
PCIE_ATU_REGION_OUTBOUND | index);
dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_BASE,
lower_32_bits(cpu_addr));
dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_BASE,
upper_32_bits(cpu_addr));
dw_pcie_writel_rc(pp, PCIE_ATU_LIMIT,
lower_32_bits(cpu_addr + size - 1));
dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_TARGET,
lower_32_bits(pci_addr));
dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_TARGET,
upper_32_bits(pci_addr));
dw_pcie_writel_rc(pp, PCIE_ATU_CR1, type);
dw_pcie_writel_rc(pp, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
}
/*
@ -760,8 +754,8 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
return ret;
}
static int dw_pcie_valid_config(struct pcie_port *pp,
struct pci_bus *bus, int dev)
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
int dev)
{
/* If there is no link, then there is no device */
if (bus->number != pp->root_bus_nr) {
@ -781,7 +775,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
{
struct pcie_port *pp = bus->sysdata;
if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@ -797,7 +791,7 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
{
struct pcie_port *pp = bus->sysdata;
if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
if (bus->number == pp->root_bus_nr)
@ -835,7 +829,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
return;
}
dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
dw_pcie_writel_rc(pp, PCIE_PORT_LINK_CONTROL, val);
/* set link width speed control register */
val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
@ -854,30 +848,30 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
break;
}
dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
/* setup RC BARs */
dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0x00000004);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0x00000000);
/* setup interrupt pins */
val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
val &= 0xffff00ff;
val |= 0x00000100;
dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);
dw_pcie_writel_rc(pp, PCI_INTERRUPT_LINE, val);
/* setup bus numbers */
val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
val &= 0xff000000;
val |= 0x00010100;
dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
dw_pcie_writel_rc(pp, PCI_PRIMARY_BUS, val);
/* setup command register */
val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_rc(pp, val, PCI_COMMAND);
dw_pcie_writel_rc(pp, PCI_COMMAND, val);
/*
* If the platform provides ->rd_other_conf, it means the platform


@ -54,9 +54,8 @@ struct pcie_port {
};
struct pcie_host_ops {
u32 (*readl_rc)(struct pcie_port *pp, void __iomem *dbi_base);
void (*writel_rc)(struct pcie_port *pp,
u32 val, void __iomem *dbi_base);
u32 (*readl_rc)(struct pcie_port *pp, u32 reg);
void (*writel_rc)(struct pcie_port *pp, u32 reg, u32 val);
int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
@ -73,6 +72,8 @@ struct pcie_host_ops {
int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
};
u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg);
void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val);
int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val);
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);


@ -22,51 +22,38 @@
#include "pcie-designware.h"
#define PCIE_LTSSM_LINKUP_STATE 0x11
#define PCIE_LTSSM_STATE_MASK 0x3F
#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818
#define PCIE_SYS_STATE4 0x31c
#define PCIE_HIP06_CTRL_OFF 0x1000
#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818
#define PCIE_HIP06_CTRL_OFF 0x1000
#define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c)
#define PCIE_LTSSM_LINKUP_STATE 0x11
#define PCIE_LTSSM_STATE_MASK 0x3F
#define to_hisi_pcie(x) container_of(x, struct hisi_pcie, pp)
struct hisi_pcie;
struct pcie_soc_ops {
int (*hisi_pcie_link_up)(struct hisi_pcie *pcie);
int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie);
};
struct hisi_pcie {
struct pcie_port pp; /* pp.dbi_base is DT rc_dbi */
struct regmap *subctrl;
void __iomem *reg_base;
u32 port_id;
struct pcie_port pp;
struct pcie_soc_ops *soc_ops;
};
static inline void hisi_pcie_apb_writel(struct hisi_pcie *pcie,
u32 val, u32 reg)
{
writel(val, pcie->reg_base + reg);
}
static inline u32 hisi_pcie_apb_readl(struct hisi_pcie *pcie, u32 reg)
{
return readl(pcie->reg_base + reg);
}
/* HipXX PCIe host only supports 32-bit config access */
static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
u32 *val)
{
u32 reg;
u32 reg_val;
struct hisi_pcie *pcie = to_hisi_pcie(pp);
void *walker = &reg_val;
walker += (where & 0x3);
reg = where & ~0x3;
reg_val = hisi_pcie_apb_readl(pcie, reg);
reg_val = dw_pcie_readl_rc(pp, reg);
if (size == 1)
*val = *(u8 __force *) walker;
@ -86,21 +73,20 @@ static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size,
{
u32 reg_val;
u32 reg;
struct hisi_pcie *pcie = to_hisi_pcie(pp);
void *walker = &reg_val;
walker += (where & 0x3);
reg = where & ~0x3;
if (size == 4)
hisi_pcie_apb_writel(pcie, val, reg);
dw_pcie_writel_rc(pp, reg, val);
else if (size == 2) {
reg_val = hisi_pcie_apb_readl(pcie, reg);
reg_val = dw_pcie_readl_rc(pp, reg);
*(u16 __force *) walker = val;
hisi_pcie_apb_writel(pcie, reg_val, reg);
dw_pcie_writel_rc(pp, reg, reg_val);
} else if (size == 1) {
reg_val = hisi_pcie_apb_readl(pcie, reg);
reg_val = dw_pcie_readl_rc(pp, reg);
*(u8 __force *) walker = val;
hisi_pcie_apb_writel(pcie, reg_val, reg);
dw_pcie_writel_rc(pp, reg, reg_val);
} else
return PCIBIOS_BAD_REGISTER_NUMBER;
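
(The sub-word merge above can be checked standalone; on a little-endian host, a 16-bit write at a dword-misaligned offset patches only the middle bytes — values hypothetical:)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t reg_val = 0xaabbccdd;	/* current 32-bit register value */
	uint16_t val = 0x1234;		/* 16-bit config write */
	int where = 0x06;		/* offset, (where & 0x3) == 2 */
	uint8_t *walker = (uint8_t *)&reg_val + (where & 0x3);

	memcpy(walker, &val, sizeof(val));	/* merge into the dword */
	printf("0x%08x\n", reg_val);		/* 0x1234ccdd on LE */
	return 0;
}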
@ -119,10 +105,10 @@ static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie)
static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie)
{
struct pcie_port *pp = &hisi_pcie->pp;
u32 val;
val = hisi_pcie_apb_readl(hisi_pcie, PCIE_HIP06_CTRL_OFF +
PCIE_SYS_STATE4);
val = dw_pcie_readl_rc(pp, PCIE_SYS_STATE4);
return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
}
@ -140,19 +126,20 @@ static struct pcie_host_ops hisi_pcie_host_ops = {
.link_up = hisi_pcie_link_up,
};
static int hisi_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &hisi_pcie->pp;
struct device *dev = pp->dev;
int ret;
u32 port_id;
struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp);
if (of_property_read_u32(pdev->dev.of_node, "port-id", &port_id)) {
dev_err(&pdev->dev, "failed to read port-id\n");
if (of_property_read_u32(dev->of_node, "port-id", &port_id)) {
dev_err(dev, "failed to read port-id\n");
return -EINVAL;
}
if (port_id > 3) {
dev_err(&pdev->dev, "Invalid port-id: %d\n", port_id);
dev_err(dev, "Invalid port-id: %d\n", port_id);
return -EINVAL;
}
hisi_pcie->port_id = port_id;
@ -161,7 +148,7 @@ static int hisi_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
dev_err(dev, "failed to initialize host\n");
return ret;
}
@ -170,6 +157,7 @@ static int hisi_add_pcie_port(struct pcie_port *pp,
static int hisi_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hisi_pcie *hisi_pcie;
struct pcie_port *pp;
const struct of_device_id *match;
@ -177,40 +165,36 @@ static int hisi_pcie_probe(struct platform_device *pdev)
struct device_driver *driver;
int ret;
hisi_pcie = devm_kzalloc(&pdev->dev, sizeof(*hisi_pcie), GFP_KERNEL);
hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL);
if (!hisi_pcie)
return -ENOMEM;
pp = &hisi_pcie->pp;
pp->dev = &pdev->dev;
driver = (pdev->dev).driver;
pp->dev = dev;
driver = dev->driver;
match = of_match_device(driver->of_match_table, &pdev->dev);
match = of_match_device(driver->of_match_table, dev);
hisi_pcie->soc_ops = (struct pcie_soc_ops *) match->data;
hisi_pcie->subctrl =
syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
if (IS_ERR(hisi_pcie->subctrl)) {
dev_err(pp->dev, "cannot get subctrl base\n");
dev_err(dev, "cannot get subctrl base\n");
return PTR_ERR(hisi_pcie->subctrl);
}
reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
hisi_pcie->reg_base = devm_ioremap_resource(&pdev->dev, reg);
if (IS_ERR(hisi_pcie->reg_base)) {
dev_err(pp->dev, "cannot get rc_dbi base\n");
return PTR_ERR(hisi_pcie->reg_base);
pp->dbi_base = devm_ioremap_resource(dev, reg);
if (IS_ERR(pp->dbi_base)) {
dev_err(dev, "cannot get rc_dbi base\n");
return PTR_ERR(pp->dbi_base);
}
hisi_pcie->pp.dbi_base = hisi_pcie->reg_base;
ret = hisi_add_pcie_port(pp, pdev);
ret = hisi_add_pcie_port(hisi_pcie, pdev);
if (ret)
return ret;
platform_set_drvdata(pdev, hisi_pcie);
dev_warn(pp->dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
return 0;
}


@ -42,19 +42,24 @@ static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
{
struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
LIST_HEAD(res);
struct resource res_mem;
int ret;
pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->dev = &bdev->dev;
bcma_set_drvdata(bdev, pcie);
pcie->dev = dev;
pcie->base = bdev->io_addr;
if (!pcie->base) {
dev_err(dev, "no controller registers\n");
return -ENOMEM;
}
pcie->base_addr = bdev->addr;
res_mem.start = bdev->addr_s[0];
@ -67,10 +72,11 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
ret = iproc_pcie_setup(pcie, &res);
if (ret)
dev_err(pcie->dev, "PCIe controller setup failed\n");
dev_err(dev, "PCIe controller setup failed\n");
pci_free_resource_list(&res);
bcma_set_drvdata(bdev, pcie);
return ret;
}


@ -40,35 +40,35 @@ MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
struct iproc_pcie *pcie;
struct device_node *np = pdev->dev.of_node;
struct device_node *np = dev->of_node;
struct resource reg;
resource_size_t iobase = 0;
LIST_HEAD(res);
int ret;
of_id = of_match_device(iproc_pcie_of_match_table, &pdev->dev);
of_id = of_match_device(iproc_pcie_of_match_table, dev);
if (!of_id)
return -EINVAL;
pcie = devm_kzalloc(&pdev->dev, sizeof(struct iproc_pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->dev = &pdev->dev;
pcie->dev = dev;
pcie->type = (enum iproc_pcie_type)of_id->data;
platform_set_drvdata(pdev, pcie);
ret = of_address_to_resource(np, 0, &reg);
if (ret < 0) {
dev_err(pcie->dev, "unable to obtain controller resources\n");
dev_err(dev, "unable to obtain controller resources\n");
return ret;
}
pcie->base = devm_ioremap(pcie->dev, reg.start, resource_size(&reg));
pcie->base = devm_ioremap(dev, reg.start, resource_size(&reg));
if (!pcie->base) {
dev_err(pcie->dev, "unable to map controller registers\n");
dev_err(dev, "unable to map controller registers\n");
return -ENOMEM;
}
pcie->base_addr = reg.start;
@ -79,7 +79,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset",
&val);
if (ret) {
dev_err(pcie->dev,
dev_err(dev,
"missing brcm,pcie-ob-axi-offset property\n");
return ret;
}
@ -88,7 +88,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "brcm,pcie-ob-window-size",
&val);
if (ret) {
dev_err(pcie->dev,
dev_err(dev,
"missing brcm,pcie-ob-window-size property\n");
return ret;
}
@ -101,7 +101,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
}
/* PHY use is optional */
pcie->phy = devm_phy_get(&pdev->dev, "pcie-phy");
pcie->phy = devm_phy_get(dev, "pcie-phy");
if (IS_ERR(pcie->phy)) {
if (PTR_ERR(pcie->phy) == -EPROBE_DEFER)
return -EPROBE_DEFER;
@ -110,7 +110,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase);
if (ret) {
dev_err(pcie->dev,
dev_err(dev,
"unable to get PCI host bridge resources\n");
return ret;
}
@ -119,10 +119,11 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = iproc_pcie_setup(pcie, &res);
if (ret)
dev_err(pcie->dev, "PCIe controller setup failed\n");
dev_err(dev, "PCIe controller setup failed\n");
pci_free_resource_list(&res);
platform_set_drvdata(pdev, pcie);
return ret;
}


@ -63,6 +63,8 @@
#define OARR_SIZE_CFG_SHIFT 1
#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
#define PCI_EXP_CAP 0xac
#define MAX_NUM_OB_WINDOWS 2
#define IPROC_PCIE_REG_INVALID 0xffff
@ -258,9 +260,10 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie)
static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
{
struct device *dev = pcie->dev;
u8 hdr_type;
u32 link_ctrl, class, val;
u16 pos, link_status;
u16 pos = PCI_EXP_CAP, link_status;
bool link_is_active = false;
/*
@ -272,14 +275,14 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
dev_err(pcie->dev, "PHY or data link is INACTIVE!\n");
dev_err(dev, "PHY or data link is INACTIVE!\n");
return -ENODEV;
}
/* make sure we are not in EP mode */
pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
dev_err(pcie->dev, "in EP mode, hdr=%#02x\n", hdr_type);
dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
return -EFAULT;
}
@ -293,30 +296,27 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class);
/* check link status to see if link is active */
pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
link_is_active = true;
if (!link_is_active) {
/* try GEN 1 link speed */
#define PCI_LINK_STATUS_CTRL_2_OFFSET 0x0dc
#define PCI_TARGET_LINK_SPEED_MASK 0xf
#define PCI_TARGET_LINK_SPEED_GEN2 0x2
#define PCI_TARGET_LINK_SPEED_GEN1 0x1
pci_bus_read_config_dword(bus, 0,
PCI_LINK_STATUS_CTRL_2_OFFSET,
pos + PCI_EXP_LNKCTL2,
&link_ctrl);
if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
PCI_TARGET_LINK_SPEED_GEN2) {
link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
pci_bus_write_config_dword(bus, 0,
PCI_LINK_STATUS_CTRL_2_OFFSET,
pos + PCI_EXP_LNKCTL2,
link_ctrl);
msleep(100);
pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
&link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
@ -324,7 +324,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
}
}
dev_info(pcie->dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
return link_is_active ? 0 : -ENODEV;
}
@ -349,12 +349,13 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
u64 pci_addr, resource_size_t size)
{
struct iproc_pcie_ob *ob = &pcie->ob;
struct device *dev = pcie->dev;
unsigned i;
u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
u64 remainder;
if (size > max_size) {
dev_err(pcie->dev,
dev_err(dev,
"res size %pap exceeds max supported size 0x%llx\n",
&size, max_size);
return -EINVAL;
@ -362,15 +363,14 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
div64_u64_rem(size, ob->window_size, &remainder);
if (remainder) {
dev_err(pcie->dev,
dev_err(dev,
"res size %pap needs to be multiple of window size %pap\n",
&size, &ob->window_size);
return -EINVAL;
}
if (axi_addr < ob->axi_offset) {
dev_err(pcie->dev,
"axi address %pap less than offset %pap\n",
dev_err(dev, "axi address %pap less than offset %pap\n",
&axi_addr, &ob->axi_offset);
return -EINVAL;
}
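
(A minimal standalone sketch of the outbound-window validation arithmetic above, with hypothetical window sizes:)

#include <stdint.h>
#include <stdio.h>

#define MAX_NUM_OB_WINDOWS	2

int main(void)
{
	uint64_t window_size = 128ULL << 20;	/* 128 MiB per window */
	uint64_t max_size = window_size * MAX_NUM_OB_WINDOWS;
	uint64_t size = 192ULL << 20;		/* requested mapping */

	if (size > max_size)
		printf("res size exceeds max supported size\n");
	else if (size % window_size)
		printf("res size not a multiple of window size\n"); /* hits here */
	else
		printf("ok\n");
	return 0;
}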
@ -406,6 +406,7 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
struct list_head *resources)
{
struct device *dev = pcie->dev;
struct resource_entry *window;
int ret;
@ -425,7 +426,7 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
return ret;
break;
default:
dev_err(pcie->dev, "invalid resource %pR\n", res);
dev_err(dev, "invalid resource %pR\n", res);
return -EINVAL;
}
}
@ -455,26 +456,25 @@ static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
struct device *dev;
int ret;
void *sysdata;
struct pci_bus *bus;
if (!pcie || !pcie->dev || !pcie->base)
return -EINVAL;
ret = devm_request_pci_bus_resources(pcie->dev, res);
dev = pcie->dev;
ret = devm_request_pci_bus_resources(dev, res);
if (ret)
return ret;
ret = phy_init(pcie->phy);
if (ret) {
dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
dev_err(dev, "unable to initialize PCIe PHY\n");
return ret;
}
ret = phy_power_on(pcie->phy);
if (ret) {
dev_err(pcie->dev, "unable to power on PCIe PHY\n");
dev_err(dev, "unable to power on PCIe PHY\n");
goto err_exit_phy;
}
@ -486,7 +486,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
pcie->reg_offsets = iproc_pcie_reg_paxc;
break;
default:
dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
dev_err(dev, "incompatible iProc PCIe interface\n");
ret = -EINVAL;
goto err_power_off_phy;
}
@ -496,7 +496,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (pcie->need_ob_cfg) {
ret = iproc_pcie_map_ranges(pcie, res);
if (ret) {
dev_err(pcie->dev, "map failed\n");
dev_err(dev, "map failed\n");
goto err_power_off_phy;
}
}
@ -508,9 +508,9 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
sysdata = pcie;
#endif
bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, sysdata, res);
bus = pci_create_root_bus(dev, 0, &iproc_pcie_ops, sysdata, res);
if (!bus) {
dev_err(pcie->dev, "unable to create PCI root bus\n");
dev_err(dev, "unable to create PCI root bus\n");
ret = -ENOMEM;
goto err_power_off_phy;
}
@ -518,7 +518,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
ret = iproc_pcie_check_link(pcie, bus);
if (ret) {
dev_err(pcie->dev, "no PCIe EP device detected\n");
dev_err(dev, "no PCIe EP device detected\n");
goto err_rm_root_bus;
}
@ -526,7 +526,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (IS_ENABLED(CONFIG_PCI_MSI))
if (iproc_pcie_msi_enable(pcie))
dev_info(pcie->dev, "not using iProc MSI\n");
dev_info(dev, "not using iProc MSI\n");
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
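The iproc hunks above all apply one refactor: fetch pcie->dev into a local struct device *dev once at the top of each function, then hand that local to every dev_err()/dev_info() call instead of re-dereferencing pcie->dev at each site. A minimal sketch of the pattern follows; the foo_* names are illustrative only and do not appear in the patch.

#include <linux/device.h>
#include <linux/io.h>

struct foo_pcie {
	struct device *dev;
	void __iomem *base;
};

static int foo_pcie_check(struct foo_pcie *pcie)
{
	struct device *dev = pcie->dev;	/* cached once at the top */

	if (!pcie->base) {
		dev_err(dev, "registers not mapped\n");	/* no pcie->dev here */
		return -ENODEV;
	}

	dev_info(dev, "controller ready\n");
	return 0;
}

Beyond trimming line lengths, the local keeps every log call site consistent if the owning structure is later reorganized.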

View file

@ -86,12 +86,10 @@ struct qcom_pcie_ops {
};
struct qcom_pcie {
struct pcie_port pp;
struct device *dev;
struct pcie_port pp; /* pp.dbi_base is DT dbi */
void __iomem *parf; /* DT parf */
void __iomem *elbi; /* DT elbi */
union qcom_pcie_resources res;
void __iomem *parf;
void __iomem *dbi;
void __iomem *elbi;
struct phy *phy;
struct gpio_desc *reset;
struct qcom_pcie_ops *ops;
@ -136,7 +134,7 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
struct device *dev = pcie->dev;
struct device *dev = pcie->pp.dev;
res->vdda = devm_regulator_get(dev, "vdda");
if (IS_ERR(res->vdda))
@ -188,7 +186,7 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
struct device *dev = pcie->dev;
struct device *dev = pcie->pp.dev;
res->vdda = devm_regulator_get(dev, "vdda");
if (IS_ERR(res->vdda))
@ -237,7 +235,7 @@ static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
struct device *dev = pcie->dev;
struct device *dev = pcie->pp.dev;
u32 val;
int ret;
@ -359,7 +357,7 @@ static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
struct device *dev = pcie->dev;
struct device *dev = pcie->pp.dev;
int ret;
ret = reset_control_deassert(res->core);
@ -426,7 +424,7 @@ err_res:
static int qcom_pcie_link_up(struct pcie_port *pp)
{
struct qcom_pcie *pcie = to_qcom_pcie(pp);
u16 val = readw(pcie->dbi + PCIE20_CAP + PCI_EXP_LNKSTA);
u16 val = readw(pcie->pp.dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
@ -509,8 +507,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (!pcie)
return -ENOMEM;
pp = &pcie->pp;
pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
pcie->dev = dev;
pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
if (IS_ERR(pcie->reset))
@ -522,9 +520,9 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->parf);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
pcie->dbi = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->dbi))
return PTR_ERR(pcie->dbi);
pp->dbi_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
pcie->elbi = devm_ioremap_resource(dev, res);
@ -539,9 +537,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
pp = &pcie->pp;
pp->dev = dev;
pp->dbi_base = pcie->dbi;
pp->root_bus_nr = -1;
pp->ops = &qcom_pcie_dw_ops;
@ -569,8 +565,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return ret;
}
platform_set_drvdata(pdev, pcie);
return 0;
}
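The qcom hunks above drop the driver's private dev and dbi copies in favour of the dev and dbi_base members already carried by the embedded struct pcie_port, reached as pcie->pp.dev and pcie->pp.dbi_base. A sketch of the idea, assuming a simplified stand-in for pcie_port (not the real DesignWare definition) and an assumed 0x80 capability offset:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/pci_regs.h>
#include <linux/types.h>

struct pcie_port {		/* simplified stand-in for the DesignWare core struct */
	struct device *dev;
	void __iomem *dbi_base;
};

struct qcom_pcie {
	struct pcie_port pp;	/* shared core state, embedded first */
	void __iomem *parf;	/* only controller-specific mappings remain */
	void __iomem *elbi;
};

static bool qcom_link_up(struct qcom_pcie *pcie)
{
	/* read link status through the shared dbi_base, not a private copy */
	u16 val = readw(pcie->pp.dbi_base + 0x80 + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

With one structure owning the device pointer, common DesignWare code and the glue driver also agree on which dev is used for logging and devm allocations.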

View file

@ -31,8 +31,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#define DRV_NAME "rcar-pcie"
#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
#define CONFIG_SEND_ENABLE (1 << 31)
@ -397,6 +395,7 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
unsigned int timeout = 1000;
u32 macsr;
@ -404,7 +403,7 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
return;
if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
dev_err(pcie->dev, "Speed change already in progress\n");
dev_err(dev, "Speed change already in progress\n");
return;
}
@ -433,7 +432,7 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
rcar_pci_write_reg(pcie, macsr, MACSR);
if (macsr & SPCHGFAIL)
dev_err(pcie->dev, "Speed change failed\n");
dev_err(dev, "Speed change failed\n");
goto done;
}
@ -441,15 +440,16 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
msleep(1);
};
dev_err(pcie->dev, "Speed change timed out\n");
dev_err(dev, "Speed change timed out\n");
done:
dev_info(pcie->dev, "Current link speed is %s GT/s\n",
dev_info(dev, "Current link speed is %s GT/s\n",
(macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
}
static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
struct pci_bus *bus, *child;
LIST_HEAD(res);
@ -461,14 +461,14 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
if (IS_ENABLED(CONFIG_PCI_MSI))
bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
bus = pci_scan_root_bus_msi(dev, pcie->root_bus_nr,
&rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
else
bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr,
bus = pci_scan_root_bus(dev, pcie->root_bus_nr,
&rcar_pcie_ops, pcie, &res);
if (!bus) {
dev_err(pcie->dev, "Scanning rootbus failed");
dev_err(dev, "Scanning rootbus failed");
return -ENODEV;
}
@ -487,6 +487,7 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
unsigned int timeout = 100;
while (timeout--) {
@ -496,7 +497,7 @@ static int phy_wait_for_ack(struct rcar_pcie *pcie)
udelay(100);
}
dev_err(pcie->dev, "Access to PCIe phy timed out\n");
dev_err(dev, "Access to PCIe phy timed out\n");
return -ETIMEDOUT;
}
@ -697,6 +698,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
struct rcar_pcie *pcie = data;
struct rcar_msi *msi = &pcie->msi;
struct device *dev = pcie->dev;
unsigned long reg;
reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
@ -717,10 +719,10 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
if (test_bit(index, msi->used))
generic_handle_irq(irq);
else
dev_info(pcie->dev, "unhandled MSI\n");
dev_info(dev, "unhandled MSI\n");
} else {
/* Unknown MSI, just clear it */
dev_dbg(pcie->dev, "unexpected MSI\n");
dev_dbg(dev, "unexpected MSI\n");
}
/* see if there's any more pending in this vector */
@ -843,22 +845,22 @@ static const struct irq_domain_ops msi_domain_ops = {
static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device *dev = pcie->dev;
struct rcar_msi *msi = &pcie->msi;
unsigned long base;
int err, i;
mutex_init(&msi->lock);
msi->chip.dev = pcie->dev;
msi->chip.dev = dev;
msi->chip.setup_irq = rcar_msi_setup_irq;
msi->chip.setup_irqs = rcar_msi_setup_irqs;
msi->chip.teardown_irq = rcar_msi_teardown_irq;
msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
&msi_domain_ops, &msi->chip);
if (!msi->domain) {
dev_err(&pdev->dev, "failed to create IRQ domain\n");
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
@ -866,19 +868,19 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
irq_create_mapping(msi->domain, i);
/* Two irqs are for MSI, but they are also used for non-MSI irqs */
err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
@ -899,32 +901,32 @@ err:
return err;
}
static int rcar_pcie_get_resources(struct platform_device *pdev,
struct rcar_pcie *pcie)
static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
struct resource res;
int err, i;
err = of_address_to_resource(pdev->dev.of_node, 0, &res);
err = of_address_to_resource(dev->of_node, 0, &res);
if (err)
return err;
pcie->base = devm_ioremap_resource(&pdev->dev, &res);
pcie->base = devm_ioremap_resource(dev, &res);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
pcie->clk = devm_clk_get(&pdev->dev, "pcie");
pcie->clk = devm_clk_get(dev, "pcie");
if (IS_ERR(pcie->clk)) {
dev_err(pcie->dev, "cannot get platform clock\n");
dev_err(dev, "cannot get platform clock\n");
return PTR_ERR(pcie->clk);
}
err = clk_prepare_enable(pcie->clk);
if (err)
return err;
pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(pcie->bus_clk)) {
dev_err(pcie->dev, "cannot get pcie bus clock\n");
dev_err(dev, "cannot get pcie bus clock\n");
err = PTR_ERR(pcie->bus_clk);
goto fail_clk;
}
@ -932,17 +934,17 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
if (err)
goto fail_clk;
i = irq_of_parse_and_map(pdev->dev.of_node, 0);
i = irq_of_parse_and_map(dev->of_node, 0);
if (!i) {
dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
goto err_map_reg;
}
pcie->msi.irq1 = i;
i = irq_of_parse_and_map(pdev->dev.of_node, 1);
i = irq_of_parse_and_map(dev->of_node, 1);
if (!i) {
dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
goto err_map_reg;
}
@ -1119,60 +1121,60 @@ out_release_res:
static int rcar_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rcar_pcie *pcie;
unsigned int data;
const struct of_device_id *of_id;
int err;
int (*hw_init_fn)(struct rcar_pcie *);
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->dev = &pdev->dev;
platform_set_drvdata(pdev, pcie);
pcie->dev = dev;
INIT_LIST_HEAD(&pcie->resources);
rcar_pcie_parse_request_of_pci_ranges(pcie);
err = rcar_pcie_get_resources(pdev, pcie);
err = rcar_pcie_get_resources(pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request resources: %d\n", err);
dev_err(dev, "failed to request resources: %d\n", err);
return err;
}
err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
if (err)
return err;
of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
of_id = of_match_device(rcar_pcie_of_match, dev);
if (!of_id || !of_id->data)
return -EINVAL;
hw_init_fn = of_id->data;
pm_runtime_enable(pcie->dev);
err = pm_runtime_get_sync(pcie->dev);
pm_runtime_enable(dev);
err = pm_runtime_get_sync(dev);
if (err < 0) {
dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
dev_err(dev, "pm_runtime_get_sync failed\n");
goto err_pm_disable;
}
/* Failure to get a link might just be that no cards are inserted */
err = hw_init_fn(pcie);
if (err) {
dev_info(&pdev->dev, "PCIe link down\n");
dev_info(dev, "PCIe link down\n");
err = 0;
goto err_pm_put;
}
data = rcar_pci_read_reg(pcie, MACSR);
dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = rcar_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(&pdev->dev,
dev_err(dev,
"failed to enable MSI support: %d\n",
err);
goto err_pm_put;
@ -1186,16 +1188,16 @@ static int rcar_pcie_probe(struct platform_device *pdev)
return 0;
err_pm_put:
pm_runtime_put(pcie->dev);
pm_runtime_put(dev);
err_pm_disable:
pm_runtime_disable(pcie->dev);
pm_runtime_disable(dev);
return err;
}
static struct platform_driver rcar_pcie_driver = {
.driver = {
.name = DRV_NAME,
.name = "rcar-pcie",
.of_match_table = rcar_pcie_of_match,
.suppress_bind_attrs = true,
},
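Besides the same dev-caching seen in the iproc hunks, the rcar changes drop the platform_device parameter from rcar_pcie_get_resources(): once pcie->dev is stored, of_node lookups, ioremaps, clock and IRQ lookups all hang off the struct device alone. A sketch under the same assumption of made-up bar_* names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_address.h>

struct bar_pcie {
	struct device *dev;
	void __iomem *base;
};

static int bar_pcie_get_resources(struct bar_pcie *pcie)	/* pdev argument gone */
{
	struct device *dev = pcie->dev;
	struct resource res;
	int err;

	/* dev->of_node replaces pdev->dev.of_node */
	err = of_address_to_resource(dev->of_node, 0, &res);
	if (err)
		return err;

	pcie->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	return 0;
}

Callers shrink accordingly, as the probe hunk above shows: err = rcar_pcie_get_resources(pcie);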

View file

@ -972,7 +972,7 @@ static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
return -EINVAL;
if (region_no == 0) {
if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
return -EINVAL;
return -EINVAL;
}
if (region_no != 0) {
if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
@ -1091,8 +1091,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (err)
goto err_vpcie;
platform_set_drvdata(pdev, rockchip);
rockchip_pcie_enable_interrupts(rockchip);
err = rockchip_pcie_init_irq_domain(rockchip);
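The rockchip hunk above (like the matching qcom one) deletes a platform_set_drvdata() call. Such a call only pays off when something later reads the pointer back via platform_get_drvdata(), typically a .remove() or PM callback; dropping it implies no such consumer exists. A hypothetical before/after sketch, not taken from the driver:

#include <linux/platform_device.h>
#include <linux/slab.h>

struct baz_pcie {
	struct device *dev;
};

static int baz_probe(struct platform_device *pdev)
{
	struct baz_pcie *pcie;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = &pdev->dev;

	/*
	 * No platform_set_drvdata(pdev, pcie): with no .remove() or PM
	 * callback calling platform_get_drvdata(), the stored pointer
	 * would never be read.
	 */
	return 0;
}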

Some files were not shown because too many files changed in this diff.