Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Minor conflicts in drivers/net/ethernet/mellanox/mlx5/core/en_rep.c,
we had some overlapping changes:

1) In 'net', MLX5E_PARAMS_LOG_{SQ,RQ}_SIZE -->
   MLX5E_REP_PARAMS_LOG_{SQ,RQ}_SIZE

2) In 'net-next', params->log_rq_size is renamed to be
   params->log_rq_mtu_frames.

3) In 'net-next', params->hard_mtu is added.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit c0b458a946
.mailmap | 1

@@ -62,6 +62,7 @@ Frank Zago <fzago@systemfabricworks.com>
 Greg Kroah-Hartman <greg@echidna.(none)>
 Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
+Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
 Henk Vergonet <Henk.Vergonet@gmail.com>
 Henrik Kretzschmar <henne@nachtwindheim.de>
 Henrik Rydberg <rydberg@bitmath.org>

@@ -3,11 +3,11 @@ Device-Tree bindings for sigma delta modulator

 Required properties:
 - compatible: should be "ads1201", "sd-modulator". "sd-modulator" can be use
 as a generic SD modulator if modulator not specified in compatible list.
-- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers".
+- #io-channel-cells = <0>: See the IIO bindings section "IIO consumers".

 Example node:

 ads1202: adc@0 {
 compatible = "sd-modulator";
-#io-channel-cells = <1>;
+#io-channel-cells = <0>;
 };

@@ -18,7 +18,7 @@ corresponding hardware driver. Kernel CAPI then forwards CAPI messages in both
 directions between the application and the hardware driver.

 Format and semantics of CAPI messages are specified in the CAPI 2.0 standard.
-This standard is freely available from http://www.capi.org.
+This standard is freely available from https://www.capi.org.


 2. Driver and Device Registration

@@ -33,10 +33,10 @@ README for the ISDN-subsystem
 de.alt.comm.isdn4linux

 There is also a well maintained FAQ in English available at
-http://www.mhessler.de/i4lfaq/
+https://www.mhessler.de/i4lfaq/
 It can be viewed online, or downloaded in sgml/text/html format.
 The FAQ can also be viewed online at
-http://www.isdn4linux.de/faq/
+https://www.isdn4linux.de/faq/i4lfaq.html
 or downloaded from
 ftp://ftp.isdn4linux.de/pub/isdn4linux/FAQ/

@@ -8,9 +8,9 @@ You find it in:

 In case you just want to see the FAQ online, or download the newest version,
 you can have a look at my website:
-http://www.mhessler.de/i4lfaq/ (view + download)
+https://www.mhessler.de/i4lfaq/ (view + download)
 or:
-http://www.isdn4linux.de/faq/ (view)
+https://www.isdn4linux.de/faq/4lfaq.html (view)

 As the extension tells, the FAQ is in SGML format, and you can convert it
 into text/html/... format by using the sgml2txt/sgml2html/... tools.

@@ -29,8 +29,9 @@ GigaSet 307x Device Driver
 T-Com Sinus 721 data
 Chicago 390 USB (KPN)

-See also http://www.erbze.info/sinus_gigaset.htm and
-http://gigaset307x.sourceforge.net/
+See also http://www.erbze.info/sinus_gigaset.htm
+(archived at https://web.archive.org/web/20100717020421/http://www.erbze.info:80/sinus_gigaset.htm ) and
+http://gigaset307x.sourceforge.net/

 We had also reports from users of Gigaset M105 who could use the drivers
 with SX 100 and CX 100 ISDN bases (only in unimodem mode, see section 2.5.)
@@ -52,7 +53,7 @@ GigaSet 307x Device Driver
 There are some user space tools available at
-http://sourceforge.net/projects/gigaset307x/
+https://sourceforge.net/projects/gigaset307x/
 which provide access to additional device specific functions like SMS,
 phonebook or call journal.
@@ -202,7 +203,7 @@ GigaSet 307x Device Driver
 configuration files and chat scripts in the gigaset-VERSION/ppp directory
-in the driver packages from http://sourceforge.net/projects/gigaset307x/.
+in the driver packages from https://sourceforge.net/projects/gigaset307x/.
@@ -361,7 +362,7 @@ GigaSet 307x Device Driver
 use one of the forums, bug trackers, or mailing lists on
-http://sourceforge.net/projects/gigaset307x
+https://sourceforge.net/projects/gigaset307x
 or write an electronic mail to the maintainers.
@@ -391,11 +392,12 @@ GigaSet 307x Device Driver
 - Sourceforge project developing this driver and associated tools
-  http://sourceforge.net/projects/gigaset307x
+  https://sourceforge.net/projects/gigaset307x
 - Yahoo! Group on the Siemens Gigaset family of devices
-  http://de.groups.yahoo.com/group/Siemens-Gigaset
+  https://de.groups.yahoo.com/group/Siemens-Gigaset
 - Siemens Gigaset/T-Sinus compatibility table
   http://www.erbze.info/sinus_gigaset.htm
+  (archived at https://web.archive.org/web/20100717020421/http://www.erbze.info:80/sinus_gigaset.htm )

 5. Credits

MAINTAINERS | 39

@@ -1060,41 +1060,42 @@ ARM PORT
 M: Russell King <linux@armlinux.org.uk>
-S: Maintained
+S: Odd Fixes
 F: arch/arm/
+X: arch/arm/boot/dts/

 ARM PRIMECELL AACI PL041 DRIVER
-S: Maintained
+S: Odd Fixes

 ARM PRIMECELL BUS SUPPORT
-S: Maintained
+S: Odd Fixes

 ARM PRIMECELL CLCD PL110 DRIVER
-S: Maintained
+S: Odd Fixes

 ARM PRIMECELL KMI PL050 DRIVER
-S: Maintained
+S: Odd Fixes

 ARM PRIMECELL MMCI PL180/1 DRIVER
-S: Maintained
+S: Odd Fixes

 ARM PRIMECELL UART PL010 AND PL011 DRIVERS
-S: Maintained
+S: Odd Fixes
@@ -1152,7 +1153,7 @@ S: Maintained
 ARM/Allwinner sunXi SoC support
-M: Maxime Ripard <maxime.ripard@free-electrons.com>
+M: Maxime Ripard <maxime.ripard@bootlin.com>
@@ -4626,7 +4627,7 @@ F: include/uapi/drm/drm*
 DRM DRIVERS FOR ALLWINNER A10
-M: Maxime Ripard <maxime.ripard@free-electrons.com>
+M: Maxime Ripard <maxime.ripard@bootlin.com>
@@ -8435,7 +8436,7 @@ S: Orphan
 MARVELL MACCHIATOBIN SUPPORT
-M: Russell King <rmk@armlinux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
@@ -8448,7 +8449,7 @@ F: drivers/net/ethernet/marvell/mv643xx_eth.*
 MARVELL MV88X3310 PHY DRIVER
-M: Russell King <rmk@armlinux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
@@ -12892,6 +12893,19 @@ S: Maintained
 F: Documentation/devicetree/bindings/net/socionext-netsec.txt

+SOLIDRUN CLEARFOG SUPPORT
+M: Russell King <linux@armlinux.org.uk>
+S: Maintained
+F: arch/arm/boot/dts/armada-388-clearfog*
+F: arch/arm/boot/dts/armada-38x-solidrun-*
+
+SOLIDRUN CUBOX-I/HUMMINGBOARD SUPPORT
+M: Russell King <linux@armlinux.org.uk>
+S: Maintained
+F: arch/arm/boot/dts/imx6*-cubox-i*
+F: arch/arm/boot/dts/imx6*-hummingboard*
+F: arch/arm/boot/dts/imx6*-sr-*
+
 SONIC NETWORK DRIVER
@@ -13661,7 +13675,8 @@ S: Supported
 TEGRA IOMMU DRIVERS
-M: Hiroshi Doyu <hdoyu@nvidia.com>
+M: Thierry Reding <thierry.reding@gmail.com>
+L: linux-tegra@vger.kernel.org
 S: Supported
 F: drivers/iommu/tegra*

Makefile | 6

@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
@@ -487,6 +487,8 @@ CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
 KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
 endif
@@ -743,8 +745,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
 # See modpost pattern 2
 KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
 KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
 else

@@ -22,6 +22,7 @@ config ARM_PTDUMP_DEBUGFS
 config DEBUG_WX
 bool "Warn on W+X mappings at boot"
+depends on MMU
 select ARM_PTDUMP_CORE
 ---help---
 Generate a warning if any W+X mappings are found at boot.

@@ -30,7 +30,7 @@ esac
 sym_val() {
 # extract hex value for symbol in $1
-local val=$($NM "$VMLINUX" | sed -n "/ $1$/{s/ .*$//p;q}")
+local val=$($NM "$VMLINUX" 2>/dev/null | sed -n "/ $1\$/{s/ .*$//p;q}")
 [ "$val" ] || { echo "can't find $1 in $VMLINUX" 1>&2; exit 1; }
@@ -48,12 +48,12 @@ data_end=$(($_edata_loc - $base_offset))
 printf "end of xipImage doesn't match with _edata_loc (%#x vs %#x)\n" \
-$(($file_end + $base_offset)) $_edata_loc 2>&1
+$(($file_end + $base_offset)) $_edata_loc 1>&2
 # be ready to clean up
-trap 'rm -f "$XIPIMAGE.tmp"' 0 1 2 3
+trap 'rm -f "$XIPIMAGE.tmp"; exit 1' 1 2 3

@@ -42,6 +42,11 @@
 };
 };

+memory@40000000 {
+device_type = "memory";
+reg = <0x40000000 0>;
+};
+
 ahb {
 compatible = "simple-bus";

@@ -42,6 +42,11 @@
 };
 };

+memory@80000000 {
+device_type = "memory";
+reg = <0x80000000 0>;
+};
+
 ahb {
 compatible = "simple-bus";

@@ -82,7 +82,7 @@
 enable-active-high;
 };

-reg_usb_otg2_vbus: regulator-usb-otg1-vbus {
+reg_usb_otg2_vbus: regulator-usb-otg2-vbus {
 compatible = "regulator-fixed";
 regulator-name = "usb_otg2_vbus";

@@ -927,6 +927,7 @@
 i2s: i2s@ff890000 {
 compatible = "rockchip,rk3288-i2s", "rockchip,rk3066-i2s";
 reg = <0x0 0xff890000 0x0 0x10000>;
+#sound-dai-cells = <0>;
 interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
@@ -1176,6 +1177,7 @@
 compatible = "rockchip,rk3288-dw-hdmi";
 reg = <0x0 0xff980000 0x0 0x20000>;
 reg-io-width = <4>;
+#sound-dai-cells = <0>;
 rockchip,grf = <&grf>;

@@ -42,7 +42,6 @@
 /dts-v1/;
 #include "sun6i-a31s.dtsi"
-#include "sunxi-common-regulators.dtsi"
 #include <dt-bindings/gpio/gpio.h>
@@ -99,6 +98,7 @@
 phy = <&phy1>;
 phy-mode = "rgmii";
+phy-supply = <&reg_dldo1>;
 snps,reset-gpio = <&pio 0 21 GPIO_ACTIVE_HIGH>; /* PA21 */
@@ -118,7 +118,7 @@
 &mmc0 {
-vmmc-supply = <&reg_vcc3v0>;
+vmmc-supply = <&reg_dcdc1>;
@@ -132,7 +132,7 @@
 &mmc2 {
-vmmc-supply = <&reg_vcc3v0>;
+vmmc-supply = <&reg_aldo1>;
@@ -163,6 +163,8 @@
 interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+eldoin-supply = <&reg_dcdc1>;
+x-powers,drive-vbus-en;
 };
@@ -193,7 +195,28 @@
 #include "axp22x.dtsi"

+&reg_aldo1 {
+regulator-min-microvolt = <3300000>;
+regulator-max-microvolt = <3300000>;
+regulator-name = "vcc-wifi";
+};
+
+&reg_aldo2 {
+regulator-always-on;
+regulator-min-microvolt = <2500000>;
+regulator-max-microvolt = <2500000>;
+regulator-name = "vcc-gmac";
+};
+
+&reg_aldo3 {
+regulator-always-on;
+regulator-min-microvolt = <3000000>;
+regulator-max-microvolt = <3000000>;
+regulator-name = "avcc";
+};
+
 &reg_dc5ldo {
 regulator-always-on;
@@ -233,6 +256,40 @@
 regulator-name = "vcc-dram";
 };

+&reg_dldo1 {
+regulator-min-microvolt = <3000000>;
+regulator-max-microvolt = <3000000>;
+regulator-name = "vcc-mac";
+};
+
+&reg_dldo2 {
+regulator-min-microvolt = <2800000>;
+regulator-max-microvolt = <2800000>;
+regulator-name = "avdd-csi";
+};
+
+&reg_dldo3 {
+regulator-always-on;
+regulator-min-microvolt = <3300000>;
+regulator-max-microvolt = <3300000>;
+regulator-name = "vcc-pb";
+};
+
+&reg_eldo1 {
+regulator-min-microvolt = <1800000>;
+regulator-max-microvolt = <1800000>;
+regulator-name = "vdd-csi";
+status = "okay";
+};
+
+&reg_ldo_io1 {
+regulator-always-on;
+regulator-min-microvolt = <1800000>;
+regulator-max-microvolt = <1800000>;
+regulator-name = "vcc-pm-cpus";
+status = "okay";
+};
+
 &uart0 {
 pinctrl-names = "default";
 pinctrl-0 = <&uart0_pins_a>;

@@ -12,8 +12,6 @@ struct mm_struct;
 void arm_install_vdso(struct mm_struct *mm, unsigned long addr);

-extern char vdso_start, vdso_end;
-
 extern unsigned int vdso_total_pages;

 #else /* CONFIG_VDSO */

@@ -39,6 +39,8 @@
 static struct page **vdso_text_pagelist;

+extern char vdso_start[], vdso_end[];
+
 /* Total number of pages needed for the data and text portions of the VDSO. */
 unsigned int vdso_total_pages __ro_after_init;
@@ -197,13 +199,13 @@ static int __init vdso_init(void)
-if (memcmp(&vdso_start, "\177ELF", 4)) {
+if (memcmp(vdso_start, "\177ELF", 4)) {
 pr_err("VDSO is not a valid ELF object!\n");
 return -ENOEXEC;
 }

-text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
+text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
@@ -218,7 +220,7 @@ static int __init vdso_init(void)
-page = virt_to_page(&vdso_start + i * PAGE_SIZE);
+page = virt_to_page(vdso_start + i * PAGE_SIZE);
 vdso_text_pagelist[i] = page;
@@ -229,7 +231,7 @@ static int __init vdso_init(void)
 cntvct_ok = cntvct_functional();

-patch_vdso(&vdso_start);
+patch_vdso(vdso_start);

@@ -127,8 +127,8 @@ static struct gpiod_lookup_table mmc_gpios_table = {
 .dev_id = "da830-mmc.0",
 .table = {
 /* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63*/
-GPIO_LOOKUP("davinci_gpio.1", 28, "cd", GPIO_ACTIVE_LOW),
-GPIO_LOOKUP("davinci_gpio.1", 29, "wp", GPIO_ACTIVE_LOW),
+GPIO_LOOKUP("davinci_gpio.0", 28, "cd", GPIO_ACTIVE_LOW),
+GPIO_LOOKUP("davinci_gpio.0", 29, "wp", GPIO_ACTIVE_LOW),
 },
 };

@@ -133,6 +133,9 @@ static void __init u8500_init_machine(void)
 if (of_machine_is_compatible("st-ericsson,u8540"))
 of_platform_populate(NULL, u8500_local_bus_nodes,
 u8540_auxdata_lookup, NULL);
+else
+of_platform_populate(NULL, u8500_local_bus_nodes,
+NULL, NULL);
 }

 static const char * stericsson_dt_platform_compat[] = {

@@ -888,11 +888,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 timer->irq = irq->start;
 timer->pdev = pdev;

-/* Skip pm_runtime_enable for OMAP1 */
-if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
-pm_runtime_enable(dev);
-pm_runtime_irq_safe(dev);
-}
+pm_runtime_enable(dev);
+pm_runtime_irq_safe(dev);

 if (!timer->reserved) {
 ret = pm_runtime_get_sync(dev);

@@ -5,13 +5,4 @@ void omap_map_sram(unsigned long start, unsigned long size,
 unsigned long skip, int cached);
 void omap_sram_reset(void);

-extern void *omap_sram_push_address(unsigned long size);
-
-/* Macro to push a function to the internal SRAM, using the fncpy API */
-#define omap_sram_push(funcp, size) ({ \
-	typeof(&(funcp)) _res = NULL; \
-	void *_sram_address = omap_sram_push_address(size); \
-	if (_sram_address) \
-		_res = fncpy(_sram_address, &(funcp), size); \
-	_res; \
-})
+extern void *omap_sram_push(void *funcp, unsigned long size);

@@ -23,6 +23,7 @@
 #include <asm/fncpy.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
+#include <asm/set_memory.h>

 #include <asm/mach/map.h>
@@ -42,7 +43,7 @@ static void __iomem *omap_sram_ceil;
 * Note that fncpy requires the returned address to be aligned
 * to an 8-byte boundary.
 */
-void *omap_sram_push_address(unsigned long size)
+static void *omap_sram_push_address(unsigned long size)
@@ -60,6 +61,30 @@ void *omap_sram_push_address(unsigned long size)
 return (void *)omap_sram_ceil;
 }

+void *omap_sram_push(void *funcp, unsigned long size)
+{
+	void *sram;
+	unsigned long base;
+	int pages;
+	void *dst = NULL;
+
+	sram = omap_sram_push_address(size);
+	if (!sram)
+		return NULL;
+
+	base = (unsigned long)sram & PAGE_MASK;
+	pages = PAGE_ALIGN(size) / PAGE_SIZE;
+
+	set_memory_rw(base, pages);
+
+	dst = fncpy(sram, funcp, size);
+
+	set_memory_ro(base, pages);
+	set_memory_x(base, pages);
+
+	return dst;
+}
+
 /*
 * The SRAM context is lost during off-idle and stack
 * needs to be reset.
@@ -75,6 +100,9 @@ void omap_sram_reset(void)
 void __init omap_map_sram(unsigned long start, unsigned long size,
 unsigned long skip, int cached)
 {
+	unsigned long base;
+	int pages;
+
 if (size == 0)
 return;
@@ -95,4 +123,10 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
 memset_io(omap_sram_base + omap_sram_skip, 0,
 omap_sram_size - omap_sram_skip);
+
+	base = (unsigned long)omap_sram_base;
+	pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE;
+
+	set_memory_ro(base, pages);
+	set_memory_x(base, pages);
 }

@@ -648,7 +648,7 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
 static int vfp_dying_cpu(unsigned int cpu)
 {
-vfp_force_reload(cpu, current_thread_info());
+vfp_current_hw_state[cpu] = NULL;
 return 0;
 }

@@ -406,8 +406,9 @@
 wlan_pd_n: wlan-pd-n {
 compatible = "regulator-fixed";
 regulator-name = "wlan_pd_n";
+pinctrl-names = "default";
+pinctrl-0 = <&wlan_module_reset_l>;
 /* Note the wlan_module_reset_l pinctrl */
 enable-active-high;
 gpio = <&gpio1 11 GPIO_ACTIVE_HIGH>;
@@ -983,12 +984,6 @@ ap_i2c_audio: &i2c8 {
 pinctrl-0 = <
 &ap_pwroff /* AP will auto-assert this when in S3 */
 &clk_32k /* This pin is always 32k on gru boards */
-
-/*
- * We want this driven low ASAP; firmware should help us, but
- * we can help ourselves too.
- */
-&wlan_module_reset_l
 >;
@@ -1168,12 +1163,7 @@ ap_i2c_audio: &i2c8 {
 wlan_module_reset_l: wlan-module-reset-l {
-/*
- * We want this driven low ASAP (As {Soon,Strongly} As
- * Possible), to avoid leakage through the powered-down
- * WiFi.
- */
-rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_output_low>;
+rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_pull_none>;
 };

 bt_host_wake_l: bt-host-wake-l {

@@ -411,8 +411,8 @@
 reg = <0x0 0xfe800000 0x0 0x100000>;
 dr_mode = "otg";
-phys = <&u2phy0_otg>, <&tcphy0_usb3>;
-phy-names = "usb2-phy", "usb3-phy";
+phys = <&u2phy0_otg>;
+phy-names = "usb2-phy";
 phy_type = "utmi_wide";
@@ -444,8 +444,8 @@
 reg = <0x0 0xfe900000 0x0 0x100000>;
 dr_mode = "otg";
-phys = <&u2phy1_otg>, <&tcphy1_usb3>;
-phy-names = "usb2-phy", "usb3-phy";
+phys = <&u2phy1_otg>;
+phy-names = "usb2-phy";
 phy_type = "utmi_wide";

@@ -13,6 +13,8 @@ choice
 config SOC_AMAZON_SE
 bool "Amazon SE"
 select SOC_TYPE_XWAY
+select MFD_SYSCON
+select MFD_CORE

 config SOC_XWAY
 bool "XWAY"

@@ -549,9 +549,9 @@ void __init ltq_soc_init(void)
 clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
-clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
+clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
 clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
-clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
+clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
 clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH);
@@ -560,7 +560,7 @@ void __init ltq_soc_init(void)
-clkdev_add_pmu("1f203018.usb2-phy", "ctrl", 1, 0, PMU_USB0);
+clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
 clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);

@@ -170,6 +170,28 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
 u32 n1;
 u32 rev;

+/* Early detection of CMP support */
+mips_cm_probe();
+mips_cpc_probe();
+
+if (mips_cps_numiocu(0)) {
+	/*
+	 * mips_cm_probe() wipes out bootloader
+	 * config for CM regions and we have to configure them
+	 * again. This SoC cannot talk to pamlbus devices
+	 * witout proper iocu region set up.
+	 *
+	 * FIXME: it would be better to do this with values
+	 * from DT, but we need this very early because
+	 * without this we cannot talk to pretty much anything
+	 * including serial.
+	 */
+	write_gcr_reg0_base(MT7621_PALMBUS_BASE);
+	write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE |
+			    CM_GCR_REGn_MASK_CMTGT_IOCU0);
+	__sync();
+}
+
 n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
 n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
@@ -194,26 +216,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
 rt2880_pinmux_data = mt7621_pinmux_data;

-/* Early detection of CMP support */
-mips_cm_probe();
-mips_cpc_probe();
-
-if (mips_cps_numiocu(0)) {
-	/*
-	 * mips_cm_probe() wipes out bootloader
-	 * config for CM regions and we have to configure them
-	 * again. This SoC cannot talk to pamlbus devices
-	 * witout proper iocu region set up.
-	 *
-	 * FIXME: it would be better to do this with values
-	 * from DT, but we need this very early because
-	 * without this we cannot talk to pretty much anything
-	 * including serial.
-	 */
-	write_gcr_reg0_base(MT7621_PALMBUS_BASE);
-	write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE |
-			    CM_GCR_REGn_MASK_CMTGT_IOCU0);
-}
-
 if (!register_cps_smp_ops())
 return;

@@ -96,16 +96,9 @@ static void ralink_restart(char *command)
 unreachable();
 }

-static void ralink_halt(void)
-{
-	local_irq_disable();
-	unreachable();
-}
-
 static int __init mips_reboot_setup(void)
 {
 _machine_restart = ralink_restart;
-_machine_halt = ralink_halt;

 return 0;
 }

@@ -87,6 +87,9 @@ typedef struct {
 /* Number of bits in the mm_cpumask */
 atomic_t active_cpus;

+/* Number of users of the external (Nest) MMU */
+atomic_t copros;
+
 /* NPU NMMU context */
 struct npu_context *npu_context;

@@ -47,9 +47,6 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
 extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
-extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
-				     unsigned long page_size);
-extern void radix__flush_tlb_lpid(unsigned long lpid);
 extern void radix__flush_tlb_all(void);
 extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
 unsigned long address);

@@ -203,6 +203,7 @@ static inline void cpu_feature_keys_init(void) { }
 #define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000)
+#define CPU_FTR_P9_TLBIE_BUG LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000)
 #define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x8000000000000000)
@@ -465,7 +466,7 @@ static inline void cpu_feature_keys_init(void) { }
 CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | \
-CPU_FTR_PKEY)
+CPU_FTR_PKEY | CPU_FTR_P9_TLBIE_BUG)
 #define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
 (~CPU_FTR_SAO))

@@ -92,15 +92,23 @@ static inline void dec_mm_active_cpus(struct mm_struct *mm)
 static inline void mm_context_add_copro(struct mm_struct *mm)
 {
 /*
- * On hash, should only be called once over the lifetime of
- * the context, as we can't decrement the active cpus count
- * and flush properly for the time being.
+ * If any copro is in use, increment the active CPU count
+ * in order to force TLB invalidations to be global as to
+ * propagate to the Nest MMU.
 */
-inc_mm_active_cpus(mm);
+if (atomic_inc_return(&mm->context.copros) == 1)
+	inc_mm_active_cpus(mm);
 }

 static inline void mm_context_remove_copro(struct mm_struct *mm)
 {
+int c;
+
+c = atomic_dec_if_positive(&mm->context.copros);
+
+/* Detect imbalance between add and remove */
+WARN_ON(c < 0);
+
 /*
 * Need to broadcast a global flush of the full mm before
 * decrementing active_cpus count, as the next TLBI may be
@@ -111,7 +119,7 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
 * for the time being. Invalidations will remain global if
 * used on hash.
 */
-if (radix_enabled()) {
+if (c == 0 && radix_enabled()) {
 flush_all_mm(mm);
 dec_mm_active_cpus(mm);
 }

@@ -709,6 +709,9 @@ static __init void cpufeatures_cpu_quirks(void)
 else if ((version & 0xffffefff) == 0x004e0201)
 cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
+
+if ((version & 0xffff0000) == 0x004e0000)
+	cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
 }
@@ -720,6 +723,9 @@ static void __init cpufeatures_setup_finished(void)
 cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
 }

+/* Make sure powerpc_base_platform is non-NULL */
+powerpc_base_platform = cur_cpu_spec->platform;
+
 system_registers.lpcr = mfspr(SPRN_LPCR);
 system_registers.hfscr = mfspr(SPRN_HFSCR);
 system_registers.fscr = mfspr(SPRN_FSCR);

@@ -706,7 +706,7 @@ EXC_COMMON_BEGIN(bad_addr_slb)
 std r3, _DAR(r1)
 beq cr6, 2f
-li r10, 0x480 /* fix trap number for I-SLB miss */
+li r10, 0x481 /* fix trap number for I-SLB miss */
 std r10, _TRAP(r1)
 2: bl save_nvgprs
 addi r3, r1, STACK_FRAME_OVERHEAD

@@ -476,6 +476,14 @@ void force_external_irq_replay(void)
 WARN_ON(!arch_irqs_disabled());

+/*
+ * Interrupts must always be hard disabled before irq_happened is
+ * modified (to prevent lost update in case of interrupt between
+ * load and store).
+ */
+__hard_irq_disable();
+local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
 /* Indicate in the PACA that we have an interrupt to replay */
 local_paca->irq_happened |= PACA_IRQ_EE;
 }

@@ -157,6 +157,9 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
 asm volatile("ptesync": : :"memory");
 asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
 : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
+if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG))
+	asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
+		     : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
 asm volatile("ptesync": : :"memory");
 }

@@ -473,6 +473,17 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
 trace_tlbie(kvm->arch.lpid, 0, rbvalues[i],
 kvm->arch.lpid, 0, 0, 0);
 }
+
+if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+	/*
+	 * Need the extra ptesync to make sure we don't
+	 * re-order the tlbie
+	 */
+	asm volatile("ptesync": : :"memory");
+	asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
+		     "r" (rbvalues[0]), "r" (kvm->arch.lpid));
+}
+
 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 kvm->arch.tlbie_lock = 0;
 } else {

@@ -1557,6 +1557,24 @@ mc_cont:
 ptesync
 3: stw r5,VCPU_SLB_MAX(r9)

+	/* load host SLB entries */
+BEGIN_MMU_FTR_SECTION
+	b	0f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
+	ld	r8,PACA_SLBSHADOWPTR(r13)
+
+	.rept	SLB_NUM_BOLTED
+	li	r3, SLBSHADOW_SAVEAREA
+	LDX_BE	r5, r8, r3
+	addi	r3, r3, 8
+	LDX_BE	r6, r8, r3
+	andis.	r7,r5,SLB_ESID_V@h
+	beq	1f
+	slbmte	r6,r5
+1:	addi	r8,r8,16
+	.endr
+0:
+
 guest_bypass:
 stw r12, STACK_SLOT_TRAP(r1)
 mr r3, r12
@@ -2018,23 +2036,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 mtspr SPRN_LPCR,r8
 isync
 48:
-	/* load host SLB entries */
-BEGIN_MMU_FTR_SECTION
-	b	0f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
-	ld	r8,PACA_SLBSHADOWPTR(r13)
-
-	.rept	SLB_NUM_BOLTED
-	li	r3, SLBSHADOW_SAVEAREA
-	LDX_BE	r5, r8, r3
-	addi	r3, r3, 8
-	LDX_BE	r6, r8, r3
-	andis.	r7,r5,SLB_ESID_V@h
-	beq	1f
-	slbmte	r6,r5
-1:	addi	r8,r8,16
-	.endr
-0:
 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
 /* Finish timing, if we have a vcpu */
 ld r4, HSTATE_KVM_VCPU(r13)

@@ -201,6 +201,15 @@ static inline unsigned long ___tlbie(unsigned long vpn, int psize,
 return va;
 }

+static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+{
+	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+		/* Need the extra ptesync to ensure we don't reorder tlbie*/
+		asm volatile("ptesync": : :"memory");
+		___tlbie(vpn, psize, apsize, ssize);
+	}
+}
+
 static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 {
 unsigned long rb;
@@ -278,6 +287,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
 } else {
 __tlbie(vpn, psize, apsize, ssize);
+fixup_tlbie(vpn, psize, apsize, ssize);
 asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 if (lock_tlbie && !use_local)
@@ -771,7 +781,7 @@ static void native_hpte_clear(void)
 static void native_flush_hash_range(unsigned long number, int local)
 {
-unsigned long vpn;
+unsigned long vpn = 0;
 unsigned long hash, index, hidx, shift, slot;
@@ -843,6 +853,10 @@ static void native_flush_hash_range(unsigned long number, int local)
 __tlbie(vpn, psize, psize, ssize);
 } pte_iterate_hashed_end();
 }
+/*
+ * Just do one more with the last used values.
+ */
+fixup_tlbie(vpn, psize, psize, ssize);
 asm volatile("eieio; tlbsync; ptesync":::"memory");

 if (lock_tlbie)

@@ -173,6 +173,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 mm_iommu_init(mm);
 #endif
 atomic_set(&mm->context.active_cpus, 0);
+atomic_set(&mm->context.copros, 0);

 return 0;
 }

@@ -481,6 +481,7 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
 trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
 }
+/* do we need fixup here ?*/
 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 }
 EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

@@ -119,6 +119,49 @@ static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
 trace_tlbie(0, 0, rb, rs, ric, prs, r);
 }

+static inline void __tlbiel_va(unsigned long va, unsigned long pid,
+			       unsigned long ap, unsigned long ric)
+{
+	unsigned long rb,rs,prs,r;
+
+	rb = va & ~(PPC_BITMASK(52, 63));
+	rb |= ap << PPC_BITLSHIFT(58);
+	rs = pid << PPC_BITLSHIFT(31);
+	prs = 1; /* process scoped */
+	r = 1;   /* raidx format */
+
+	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+	trace_tlbie(0, 1, rb, rs, ric, prs, r);
+}
+
+static inline void __tlbie_va(unsigned long va, unsigned long pid,
+			      unsigned long ap, unsigned long ric)
+{
+	unsigned long rb,rs,prs,r;
+
+	rb = va & ~(PPC_BITMASK(52, 63));
+	rb |= ap << PPC_BITLSHIFT(58);
+	rs = pid << PPC_BITLSHIFT(31);
+	prs = 1; /* process scoped */
+	r = 1;   /* raidx format */
+
+	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+	trace_tlbie(0, 0, rb, rs, ric, prs, r);
+}
+
+static inline void fixup_tlbie(void)
+{
+	unsigned long pid = 0;
+	unsigned long va = ((1UL << 52) - 1);
+
+	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+		asm volatile("ptesync": : :"memory");
+		__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
+	}
+}
+
 /*
 * We use 128 set in radix mode and 256 set in hpt mode.
 */
@@ -151,26 +194,27 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
 {
 asm volatile("ptesync": : :"memory");
-__tlbie_pid(pid, ric);
+
+/*
+ * Workaround the fact that the "ric" argument to __tlbie_pid
+ * must be a compile-time contraint to match the "i" constraint
+ * in the asm statement.
+ */
+switch (ric) {
+case RIC_FLUSH_TLB:
+	__tlbie_pid(pid, RIC_FLUSH_TLB);
+	break;
+case RIC_FLUSH_PWC:
+	__tlbie_pid(pid, RIC_FLUSH_PWC);
+	break;
+case RIC_FLUSH_ALL:
+default:
+	__tlbie_pid(pid, RIC_FLUSH_ALL);
+}
+fixup_tlbie();
 asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }

-static inline void __tlbiel_va(unsigned long va, unsigned long pid,
-			       unsigned long ap, unsigned long ric)
-{
-	unsigned long rb,rs,prs,r;
-
-	rb = va & ~(PPC_BITMASK(52, 63));
-	rb |= ap << PPC_BITLSHIFT(58);
-	rs = pid << PPC_BITLSHIFT(31);
-	prs = 1; /* process scoped */
-	r = 1;   /* raidx format */
-
-	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
-		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
-	trace_tlbie(0, 1, rb, rs, ric, prs, r);
-}
-
 static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
 unsigned long pid, unsigned long page_size,
 unsigned long psize)
@@ -203,22 +247,6 @@ static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
 asm volatile("ptesync": : :"memory");
 }

-static inline void __tlbie_va(unsigned long va, unsigned long pid,
-			      unsigned long ap, unsigned long ric)
-{
-	unsigned long rb,rs,prs,r;
-
-	rb = va & ~(PPC_BITMASK(52, 63));
-	rb |= ap << PPC_BITLSHIFT(58);
-	rs = pid << PPC_BITLSHIFT(31);
-	prs = 1; /* process scoped */
-	r = 1;   /* raidx format */
-
-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
-	trace_tlbie(0, 0, rb, rs, ric, prs, r);
-}
-
 static inline void __tlbie_va_range(unsigned long start, unsigned long end,
 unsigned long pid, unsigned long page_size,
 unsigned long psize)
@@ -237,6 +265,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
 asm volatile("ptesync": : :"memory");
 __tlbie_va(va, pid, ap, ric);
+fixup_tlbie();
 asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
@@ -248,6 +277,7 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
 if (also_pwc)
 __tlbie_pid(pid, RIC_FLUSH_PWC);
 __tlbie_va_range(start, end, pid, page_size, psize);
+fixup_tlbie();
 asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
@@ -311,6 +341,16 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd
 EXPORT_SYMBOL(radix__local_flush_tlb_page);

+static bool mm_needs_flush_escalation(struct mm_struct *mm)
+{
+	/*
+	 * P9 nest MMU has issues with the page walk cache
+	 * caching PTEs and not flushing them properly when
+	 * RIC = 0 for a PID/LPID invalidate
+	 */
+	return atomic_read(&mm->context.copros) != 0;
+}
+
 #ifdef CONFIG_SMP
 void radix__flush_tlb_mm(struct mm_struct *mm)
 {
@@ -321,9 +361,12 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
 preempt_disable();
-if (!mm_is_thread_local(mm))
-	_tlbie_pid(pid, RIC_FLUSH_TLB);
-else
+if (!mm_is_thread_local(mm)) {
+	if (mm_needs_flush_escalation(mm))
+		_tlbie_pid(pid, RIC_FLUSH_ALL);
+	else
+		_tlbie_pid(pid, RIC_FLUSH_TLB);
+} else
 	_tlbiel_pid(pid, RIC_FLUSH_TLB);
 preempt_enable();
 }
@@ -435,10 +478,14 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 if (full) {
-	if (local)
+	if (local) {
 		_tlbiel_pid(pid, RIC_FLUSH_TLB);
-	else
-		_tlbie_pid(pid, RIC_FLUSH_TLB);
+	} else {
+		if (mm_needs_flush_escalation(mm))
+			_tlbie_pid(pid, RIC_FLUSH_ALL);
+		else
+			_tlbie_pid(pid, RIC_FLUSH_TLB);
+	}
 } else {
 	bool hflush = false;
 	unsigned long hstart, hend;
@@ -465,6 +512,7 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 if (hflush)
 __tlbie_va_range(hstart, hend, pid,
 HPAGE_PMD_SIZE, MMU_PAGE_2M);
+fixup_tlbie();
 asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
@@ -548,6 +596,9 @@ static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
 if (full) {
+	if (!local && mm_needs_flush_escalation(mm))
+		also_pwc = true;
+
 	if (local)
 		_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
 	else
@@ -603,46 +654,6 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

-void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
-			      unsigned long page_size)
-{
-	unsigned long rb,rs,prs,r;
-	unsigned long ap;
-	unsigned long ric = RIC_FLUSH_TLB;
-
-	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
-	rb = gpa & ~(PPC_BITMASK(52, 63));
-	rb |= ap << PPC_BITLSHIFT(58);
-	rs = lpid & ((1UL << 32) - 1);
-	prs = 0; /* process scoped */
-	r = 1;   /* raidx format */
-
-	asm volatile("ptesync": : :"memory");
-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
-	asm volatile("eieio; tlbsync; ptesync": : :"memory");
-	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
-}
-EXPORT_SYMBOL(radix__flush_tlb_lpid_va);
-
-void radix__flush_tlb_lpid(unsigned long lpid)
-{
-	unsigned long rb,rs,prs,r;
-	unsigned long ric = RIC_FLUSH_ALL;
-
-	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
-	rs = lpid & ((1UL << 32) - 1);
-	prs = 0; /* partition scoped */
-	r = 1;   /* raidx format */
-
-	asm volatile("ptesync": : :"memory");
-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
-		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
-	asm volatile("eieio; tlbsync; ptesync": : :"memory");
-	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
-}
-EXPORT_SYMBOL(radix__flush_tlb_lpid);
-
 void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
 unsigned long start, unsigned long end)
 {

@@ -315,19 +315,6 @@ config X86_L1_CACHE_SHIFT
 default "4" if MELAN || M486 || MGEODEGX1
 default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX

-config X86_PPRO_FENCE
-	bool "PentiumPro memory ordering errata workaround"
-	depends on M686 || M586MMX || M586TSC || M586 || M486 || MGEODEGX1
-	---help---
-	  Old PentiumPro multiprocessor systems had errata that could cause
-	  memory operations to violate the x86 ordering standard in rare cases.
-	  Enabling this option will attempt to work around some (but not all)
-	  occurrences of this problem, at the cost of much heavier spinlock and
-	  memory barrier operations.
-
-	  If unsure, say n here. Even distro kernels should think twice before
-	  enabling this: there are few systems, and an unlikely bug.
-
 config X86_F00F_BUG
 def_bool y
 depends on M586MMX || M586TSC || M586 || M486

@@ -223,6 +223,15 @@ KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr)
 LDFLAGS := -m elf_$(UTS_MACHINE)

+#
+# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to
+# the linker to force 2MB page size regardless of the default page size used
+# by the linker.
+#
+ifdef CONFIG_X86_64
+LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
+endif
+
 # Speed up the build
 KBUILD_CFLAGS += -pipe
 # Workaround for a gcc prelease that unfortunately was shipped in a suse release

@@ -309,6 +309,10 @@ static void parse_elf(void *output)
 switch (phdr->p_type) {
 case PT_LOAD:
+#ifdef CONFIG_X86_64
+		if ((phdr->p_align % 0x200000) != 0)
+			error("Alignment of LOAD segment isn't multiple of 2MB");
+#endif
 #ifdef CONFIG_RELOCATABLE
 dest = output;
 dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);

@@ -1138,7 +1138,7 @@ apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \
 #endif /* CONFIG_HYPERV */

 idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
-idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
+idtentry int3 do_int3 has_error_code=0
 idtentry stack_segment do_stack_segment has_error_code=1

 #ifdef CONFIG_XEN

@@ -5,8 +5,6 @@
 #undef CONFIG_OPTIMIZE_INLINING
 #endif

-#undef CONFIG_X86_PPRO_FENCE
-
 #ifdef CONFIG_X86_64

@@ -347,7 +347,7 @@ void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
 set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
 p4d = p4d_offset(pgd, VSYSCALL_ADDR);
 #if CONFIG_PGTABLE_LEVELS >= 5
-p4d->p4d |= _PAGE_USER;
+set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
 #endif
 pud = pud_offset(p4d, VSYSCALL_ADDR);
 set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));

@@ -2118,7 +2118,8 @@ static int x86_pmu_event_init(struct perf_event *event)
 event->destroy(event);
 }

-if (READ_ONCE(x86_pmu.attr_rdpmc))
+if (READ_ONCE(x86_pmu.attr_rdpmc) &&
+    !(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
 event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;

 return err;

@@ -2952,9 +2952,9 @@ static void intel_pebs_aliases_skl(struct perf_event *event)
 return intel_pebs_aliases_precdist(event);
 }

-static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
+static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
 {
-unsigned long flags = x86_pmu.free_running_flags;
+unsigned long flags = x86_pmu.large_pebs_flags;

 if (event->attr.use_clockid)
 flags &= ~PERF_SAMPLE_TIME;
@@ -2976,8 +2976,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
 if (!event->attr.freq) {
 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
 if (!(event->attr.sample_type &
-~intel_pmu_free_running_flags(event)))
-event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
+~intel_pmu_large_pebs_flags(event)))
+event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
 }
 if (x86_pmu.pebs_aliases)
 x86_pmu.pebs_aliases(event);
@@ -3194,7 +3194,7 @@ static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
 if (left < 128)
 left = 128;
-left &= ~0x3fu;
+left &= ~0x3fULL;
 }
 return left;
@@ -3460,7 +3460,7 @@ static __initconst const struct x86_pmu core_pmu = {
 .apic = 1,
-.free_running_flags = PEBS_FREERUNNING_FLAGS,
+.large_pebs_flags = LARGE_PEBS_FLAGS,
@@ -3502,7 +3502,7 @@ static __initconst const struct x86_pmu intel_pmu = {
 .apic = 1,
-.free_running_flags = PEBS_FREERUNNING_FLAGS,
+.large_pebs_flags = LARGE_PEBS_FLAGS,

@@ -935,7 +935,7 @@ void intel_pmu_pebs_add(struct perf_event *event)
 cpuc->n_pebs++;
-if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
+if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
 cpuc->n_large_pebs++;
@@ -975,7 +975,7 @@ void intel_pmu_pebs_del(struct perf_event *event)
 cpuc->n_pebs--;
-if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
+if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
 cpuc->n_large_pebs--;
@@ -1530,7 +1530,7 @@ void __init intel_ds_init(void)
 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
-x86_pmu.free_running_flags |= PERF_SAMPLE_TIME;
+x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
 break;

@@ -3343,6 +3343,7 @@ static struct extra_reg skx_uncore_cha_extra_regs[] = {
 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
+SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
 EVENT_EXTRA_END
 };
@@ -3562,24 +3563,27 @@ static struct intel_uncore_type *skx_msr_uncores[] = {
 NULL,
 };

+/*
+ * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
+ * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
+ */
+#define SKX_CAPID6		0x9c
+#define SKX_CHA_BIT_MASK	GENMASK(27, 0)
+
 static int skx_count_chabox(void)
 {
-	struct pci_dev *chabox_dev = NULL;
-	int bus, count = 0;
+	struct pci_dev *dev = NULL;
+	u32 val = 0;

-	while (1) {
-		chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev);
-		if (!chabox_dev)
-			break;
-		if (count == 0)
-			bus = chabox_dev->bus->number;
-		if (bus != chabox_dev->bus->number)
-			break;
-		count++;
-	}
+	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
+	if (!dev)
+		goto out;

-	pci_dev_put(chabox_dev);
-	return count;
+	pci_read_config_dword(dev, SKX_CAPID6, &val);
+	val &= SKX_CHA_BIT_MASK;
+out:
+	pci_dev_put(dev);
+	return hweight32(val);
 }

 void skx_uncore_cpu_init(void)

@@ -69,7 +69,7 @@ struct event_constraint {
 #define PERF_X86_EVENT_AUTO_RELOAD 0x0400 /* use PEBS auto-reload */
-#define PERF_X86_EVENT_FREERUNNING 0x0800 /* use freerunning PEBS */
+#define PERF_X86_EVENT_LARGE_PEBS 0x0800 /* use large PEBS */
@@ -88,7 +88,7 @@ struct amd_nb {
 * REGS_USER can be handled for events limited to ring 3.
 */
-#define PEBS_FREERUNNING_FLAGS \
+#define LARGE_PEBS_FLAGS \
 (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
@@ -608,7 +608,7 @@ struct x86_pmu {
 int max_pebs_events;
-unsigned long free_running_flags;
+unsigned long large_pebs_flags;

@ -52,11 +52,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
|
|||
#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
|
||||
"lfence", X86_FEATURE_LFENCE_RDTSC)
|
||||
|
||||
#ifdef CONFIG_X86_PPRO_FENCE
|
||||
#define dma_rmb() rmb()
|
||||
#else
|
||||
#define dma_rmb() barrier()
|
||||
#endif
|
||||
#define dma_wmb() barrier()
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
|
@@ -68,30 +64,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 #define __smp_wmb() barrier()
 #define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

-#if defined(CONFIG_X86_PPRO_FENCE)
-
-/*
- * For this option x86 doesn't have a strong TSO memory
- * model and we should fall back to full barriers.
- */
-
-#define __smp_store_release(p, v) \
-do { \
-compiletime_assert_atomic_type(*p); \
-__smp_mb(); \
-WRITE_ONCE(*p, v); \
-} while (0)
-
-#define __smp_load_acquire(p) \
-({ \
-typeof(*p) ___p1 = READ_ONCE(*p); \
-compiletime_assert_atomic_type(*p); \
-__smp_mb(); \
-___p1; \
-})
-
-#else /* regular x86 TSO memory ordering */
-
 #define __smp_store_release(p, v) \
 do { \
 compiletime_assert_atomic_type(*p); \

@@ -107,8 +79,6 @@ do { \
 ___p1; \
 })

-#endif
-
 /* Atomic operations are already serializing on x86 */
 #define __smp_mb__before_atomic() barrier()
 #define __smp_mb__after_atomic() barrier()

@@ -232,21 +232,6 @@ extern void set_iounmap_nonlazy(void);
 */
 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))

-/*
- * Cache management
- *
- * This needed for two cases
- * 1. Out of order aware processors
- * 2. Accidentally out of order processors (PPro errata #51)
- */
-
-static inline void flush_write_buffers(void)
-{
-#if defined(CONFIG_X86_PPRO_FENCE)
-asm volatile("lock; addl $0,0(%%esp)": : :"memory");
-#endif
-}
-
 #endif /* __KERNEL__ */

 extern void native_io_delay(void);

@@ -160,7 +160,6 @@ static const __initconst struct idt_data early_pf_idts[] = {
 */
 static const __initconst struct idt_data dbg_idts[] = {
 INTG(X86_TRAP_DB, debug),
-INTG(X86_TRAP_BP, int3),
 };
 #endif

@@ -183,7 +182,6 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
 static const __initconst struct idt_data ist_idts[] = {
 ISTG(X86_TRAP_DB, debug, DEBUG_STACK),
 ISTG(X86_TRAP_NMI, nmi, NMI_STACK),
-SISTG(X86_TRAP_BP, int3, DEBUG_STACK),
 ISTG(X86_TRAP_DF, double_fault, DOUBLEFAULT_STACK),
 #ifdef CONFIG_X86_MCE
 ISTG(X86_TRAP_MC, &machine_check, MCE_STACK),

@@ -546,7 +546,7 @@ static void __init kvm_guest_init(void)
 }

 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
+kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
 pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;

 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))

@@ -635,7 +635,7 @@ static __init int kvm_setup_pv_tlb_flush(void)
 int cpu;

 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
 for_each_possible_cpu(cpu) {
 zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
 GFP_KERNEL, cpu_to_node(cpu));

@@ -37,7 +37,6 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 WARN_ON(size == 0);
 if (!check_addr("map_single", dev, bus, size))
 return NOMMU_MAPPING_ERROR;
-flush_write_buffers();
 return bus;
 }

@@ -72,25 +71,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 return 0;
 s->dma_length = s->length;
 }
-flush_write_buffers();
 return nents;
 }

-static void nommu_sync_single_for_device(struct device *dev,
-dma_addr_t addr, size_t size,
-enum dma_data_direction dir)
-{
-flush_write_buffers();
-}
-
-
-static void nommu_sync_sg_for_device(struct device *dev,
-struct scatterlist *sg, int nelems,
-enum dma_data_direction dir)
-{
-flush_write_buffers();
-}
-
 static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 return dma_addr == NOMMU_MAPPING_ERROR;

@@ -101,8 +84,6 @@ const struct dma_map_ops nommu_dma_ops = {
 .free = dma_generic_free_coherent,
 .map_sg = nommu_map_sg,
 .map_page = nommu_map_page,
-.sync_single_for_device = nommu_sync_single_for_device,
-.sync_sg_for_device = nommu_sync_sg_for_device,
 .is_phys = 1,
 .mapping_error = nommu_mapping_error,
 .dma_supported = x86_dma_supported,

@@ -577,7 +577,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
 }
 NOKPROBE_SYMBOL(do_general_protection);

-/* May run on IST stack. */
 dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE

@@ -592,6 +591,13 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 if (poke_int3_handler(regs))
 return;

+/*
+ * Use ist_enter despite the fact that we don't use an IST stack.
+ * We can be called from a kprobe in non-CONTEXT_KERNEL kernel
+ * mode or even during context tracking state changes.
+ *
+ * This means that we can't schedule. That's okay.
+ */
 ist_enter(regs);
 RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP

@@ -609,15 +615,10 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 SIGTRAP) == NOTIFY_STOP)
 goto exit;

-/*
- * Let others (NMI) know that the debug stack is in use
- * as we may switch to the interrupt stack.
- */
-debug_stack_usage_inc();
 cond_local_irq_enable(regs);
 do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
 cond_local_irq_disable(regs);
-debug_stack_usage_dec();

 exit:
 ist_exit(regs);
 }

@@ -10711,6 +10711,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 struct vcpu_vmx *vmx = to_vmx(vcpu);
 u32 exec_control, vmcs12_exec_ctrl;

+if (vmx->nested.dirty_vmcs12) {
+prepare_vmcs02_full(vcpu, vmcs12, from_vmentry);
+vmx->nested.dirty_vmcs12 = false;
+}
+
 /*
  * First, the fields that are shadowed. This must be kept in sync
  * with vmx_shadow_fields.h.

@@ -10948,11 +10953,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
 vmx_set_efer(vcpu, vcpu->arch.efer);

-if (vmx->nested.dirty_vmcs12) {
-prepare_vmcs02_full(vcpu, vmcs12, from_vmentry);
-vmx->nested.dirty_vmcs12 = false;
-}
-
 /* Shadow page tables on either EPT or shadow page tables. */
 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
 entry_failure_code))

@@ -227,7 +227,7 @@ int __init efi_alloc_page_tables(void)
 if (!pud) {
 if (CONFIG_PGTABLE_LEVELS > 4)
 free_page((unsigned long) pgd_page_vaddr(*pgd));
-free_page((unsigned long)efi_pgd);
+free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
 return -ENOMEM;
 }

@@ -30,11 +30,7 @@

 #endif /* CONFIG_X86_32 */

-#ifdef CONFIG_X86_PPRO_FENCE
-#define dma_rmb() rmb()
-#else /* CONFIG_X86_PPRO_FENCE */
 #define dma_rmb() barrier()
-#endif /* CONFIG_X86_PPRO_FENCE */
 #define dma_wmb() barrier()

 #include <asm-generic/barrier.h>

@@ -3147,7 +3147,7 @@ static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
 " Size of Tx Buffer : %u\n"
 " Number of Rx Buffer: %u\n"
 " Size of Rx Buffer : %u\n"
-" Packets Receiverd : %u\n"
+" Packets Received : %u\n"
 " Packets Transmitted: %u\n"
 " Cells Received : %u\n"
 " Cells Transmitted : %u\n"

@@ -118,14 +118,15 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
 spin_lock_irqsave(&dmamux->lock, flags);
 mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
 dmamux->dma_requests);
-set_bit(mux->chan_id, dmamux->dma_inuse);
-spin_unlock_irqrestore(&dmamux->lock, flags);

 if (mux->chan_id == dmamux->dma_requests) {
+spin_unlock_irqrestore(&dmamux->lock, flags);
 dev_err(&pdev->dev, "Run out of free DMA requests\n");
 ret = -ENOMEM;
-goto error;
+goto error_chan_id;
 }
+set_bit(mux->chan_id, dmamux->dma_inuse);
+spin_unlock_irqrestore(&dmamux->lock, flags);

 /* Look for DMA Master */
 for (i = 1, min = 0, max = dmamux->dma_reqs[i];

@@ -173,6 +174,8 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,

 error:
+clear_bit(mux->chan_id, dmamux->dma_inuse);
+
+error_chan_id:
 kfree(mux);
 return ERR_PTR(ret);
 }

@ -821,13 +821,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
|
|||
pr_warn("Can't create new usermode queue because %d queues were already created\n",
|
||||
dqm->total_queue_count);
|
||||
retval = -EPERM;
|
||||
goto out;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
|
||||
retval = allocate_sdma_queue(dqm, &q->sdma_id);
|
||||
if (retval)
|
||||
goto out;
|
||||
goto out_unlock;
|
||||
q->properties.sdma_queue_id =
|
||||
q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
|
||||
q->properties.sdma_engine_id =
|
||||
|
@ -838,7 +838,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
|
|||
|
||||
if (!mqd) {
|
||||
retval = -ENOMEM;
|
||||
goto out;
|
||||
goto out_deallocate_sdma_queue;
|
||||
}
|
||||
|
||||
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
|
||||
|
@ -848,7 +848,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
|
|||
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
|
||||
&q->gart_mqd_addr, &q->properties);
|
||||
if (retval)
|
||||
goto out;
|
||||
goto out_deallocate_sdma_queue;
|
||||
|
||||
list_add(&q->list, &qpd->queues_list);
|
||||
qpd->queue_count++;
|
||||
|
@ -869,7 +869,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
|
|||
pr_debug("Total of %d queues are accountable so far\n",
|
||||
dqm->total_queue_count);
|
||||
|
||||
out:
|
||||
mutex_unlock(&dqm->lock);
|
||||
return retval;
|
||||
|
||||
out_deallocate_sdma_queue:
|
||||
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
|
||||
deallocate_sdma_queue(dqm, q->sdma_id);
|
||||
out_unlock:
|
||||
mutex_unlock(&dqm->lock);
|
||||
return retval;
|
||||
}
|
||||
|
@ -1188,8 +1194,10 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
|
|||
|
||||
/* Clear all user mode queues */
|
||||
list_for_each_entry(q, &qpd->queues_list, list) {
|
||||
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
|
||||
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
|
||||
dqm->sdma_queue_count--;
|
||||
deallocate_sdma_queue(dqm, q->sdma_id);
|
||||
}
|
||||
|
||||
if (q->properties.is_active)
|
||||
dqm->queue_count--;
|
||||
|
|
|
@ -188,8 +188,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
|
|||
packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
|
||||
packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
|
||||
|
||||
/* TODO: scratch support */
|
||||
packet->sh_hidden_private_base_vmid = 0;
|
||||
packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
|
||||
|
||||
packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
|
||||
packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
|
||||
|
|
|
@@ -2009,9 +2009,9 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
 .coupled_pm = false,
 .has_nvdisplay = false,
 .num_primary_formats = ARRAY_SIZE(tegra124_primary_formats),
-.primary_formats = tegra114_primary_formats,
+.primary_formats = tegra124_primary_formats,
 .num_overlay_formats = ARRAY_SIZE(tegra124_overlay_formats),
-.overlay_formats = tegra114_overlay_formats,
+.overlay_formats = tegra124_overlay_formats,
 };

 static const struct tegra_dc_soc_info tegra210_dc_soc_info = {

@@ -2160,7 +2160,7 @@ static int tegra_dc_couple(struct tegra_dc *dc)
 struct device_link *link;
 struct device *partner;

-partner = driver_find_device(dc->dev->driver, NULL, 0,
+partner = driver_find_device(dc->dev->driver, NULL, NULL,
 tegra_dc_match_by_pipe);
 if (!partner)
 return -EPROBE_DEFER;

@ -417,13 +417,24 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
|
||||
|
||||
/* How many bytes were read in this iterator cycle */
|
||||
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
|
||||
u32 start_read_index)
|
||||
{
|
||||
if (rbi->priv_read_index >= start_read_index)
|
||||
return rbi->priv_read_index - start_read_index;
|
||||
else
|
||||
return rbi->ring_datasize - start_read_index +
|
||||
rbi->priv_read_index;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update host ring buffer after iterating over packets.
|
||||
*/
|
||||
void hv_pkt_iter_close(struct vmbus_channel *channel)
|
||||
{
|
||||
struct hv_ring_buffer_info *rbi = &channel->inbound;
|
||||
u32 orig_write_sz = hv_get_bytes_to_write(rbi);
|
||||
u32 curr_write_sz, pending_sz, bytes_read, start_read_index;
|
||||
|
||||
/*
|
||||
* Make sure all reads are done before we update the read index since
|
||||
|
@ -431,8 +442,12 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
|
|||
* is updated.
|
||||
*/
|
||||
virt_rmb();
|
||||
start_read_index = rbi->ring_buffer->read_index;
|
||||
rbi->ring_buffer->read_index = rbi->priv_read_index;
|
||||
|
||||
if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Issue a full memory barrier before making the signaling decision.
|
||||
* Here is the reason for having this barrier:
|
||||
|
@ -446,26 +461,29 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
|
|||
*/
|
||||
virt_mb();
|
||||
|
||||
/* If host has disabled notifications then skip */
|
||||
if (rbi->ring_buffer->interrupt_mask)
|
||||
pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
|
||||
if (!pending_sz)
|
||||
return;
|
||||
|
||||
if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) {
|
||||
u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
|
||||
/*
|
||||
* Ensure the read of write_index in hv_get_bytes_to_write()
|
||||
* happens after the read of pending_send_sz.
|
||||
*/
|
||||
virt_rmb();
|
||||
curr_write_sz = hv_get_bytes_to_write(rbi);
|
||||
bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);
|
||||
|
||||
/*
|
||||
* If there was space before we began iteration,
|
||||
* then host was not blocked. Also handles case where
|
||||
* pending_sz is zero then host has nothing pending
|
||||
* and does not need to be signaled.
|
||||
*/
|
||||
if (orig_write_sz > pending_sz)
|
||||
return;
|
||||
/*
|
||||
* If there was space before we began iteration,
|
||||
* then host was not blocked.
|
||||
*/
|
||||
|
||||
/* If pending write will not fit, don't give false hope. */
|
||||
if (hv_get_bytes_to_write(rbi) < pending_sz)
|
||||
return;
|
||||
}
|
||||
if (curr_write_sz - bytes_read > pending_sz)
|
||||
return;
|
||||
|
||||
/* If pending write will not fit, don't give false hope. */
|
||||
if (curr_write_sz <= pending_sz)
|
||||
return;
|
||||
|
||||
vmbus_setevent(channel);
|
||||
}
|
||||
|
|
|
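A note on the hunk above: hv_pkt_iter_bytes_read() has to account for the private read index wrapping past the end of the ring buffer. A minimal user-space sketch of the same calculation (the buffer sizes below are made up for the test; this is not driver code):

#include <assert.h>
#include <stdint.h>

/* Bytes consumed between start_read_index and priv_read_index in a circular
 * buffer of ring_datasize bytes; mirrors hv_pkt_iter_bytes_read(). */
static uint32_t bytes_read(uint32_t ring_datasize, uint32_t start_read_index,
			   uint32_t priv_read_index)
{
	if (priv_read_index >= start_read_index)
		return priv_read_index - start_read_index;

	/* the read index wrapped around the end of the ring */
	return ring_datasize - start_read_index + priv_read_index;
}

int main(void)
{
	/* no wrap: the read index moved from 100 to 300 in a 4096-byte ring */
	assert(bytes_read(4096, 100, 300) == 200);
	/* wrap: the read index moved from 4000 past the end to offset 50 */
	assert(bytes_read(4096, 4000, 50) == 146);
	return 0;
}
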
@@ -888,6 +888,11 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
 }

 setup = of_device_get_match_data(&pdev->dev);
+if (!setup) {
+dev_err(&pdev->dev, "Can't get device data\n");
+ret = -ENODEV;
+goto clk_free;
+}
 i2c_dev->setup = *setup;

 ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",

@ -920,6 +920,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
|
|||
int st_accel_common_probe(struct iio_dev *indio_dev)
|
||||
{
|
||||
struct st_sensor_data *adata = iio_priv(indio_dev);
|
||||
struct st_sensors_platform_data *pdata =
|
||||
(struct st_sensors_platform_data *)adata->dev->platform_data;
|
||||
int irq = adata->get_irq_data_ready(indio_dev);
|
||||
int err;
|
||||
|
||||
|
@ -946,7 +948,10 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
|
|||
&adata->sensor_settings->fs.fs_avl[0];
|
||||
adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
|
||||
|
||||
err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
|
||||
if (!pdata)
|
||||
pdata = (struct st_sensors_platform_data *)&default_accel_pdata;
|
||||
|
||||
err = st_sensors_init_sensor(indio_dev, pdata);
|
||||
if (err < 0)
|
||||
goto st_accel_power_off;
|
||||
|
||||
|
|
|
@@ -462,8 +462,10 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev)
 regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val);
 } while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--);

-if (timeout < 0)
+if (timeout < 0) {
+mutex_unlock(&indio_dev->mlock);
 return -ETIMEDOUT;
+}
 }

 return 0;

@ -54,7 +54,6 @@ struct stm32_dfsdm_adc {
|
|||
struct stm32_dfsdm *dfsdm;
|
||||
const struct stm32_dfsdm_dev_data *dev_data;
|
||||
unsigned int fl_id;
|
||||
unsigned int ch_id;
|
||||
|
||||
/* ADC specific */
|
||||
unsigned int oversamp;
|
||||
|
@ -384,7 +383,7 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
|
|||
{
|
||||
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
|
||||
struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
|
||||
struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
|
||||
struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
|
||||
unsigned int sample_freq = adc->sample_freq;
|
||||
unsigned int spi_freq;
|
||||
int ret;
|
||||
|
@ -419,18 +418,20 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
|
|||
return len;
|
||||
}
|
||||
|
||||
static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, bool dma)
|
||||
static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc,
|
||||
const struct iio_chan_spec *chan,
|
||||
bool dma)
|
||||
{
|
||||
struct regmap *regmap = adc->dfsdm->regmap;
|
||||
int ret;
|
||||
unsigned int dma_en = 0, cont_en = 0;
|
||||
|
||||
ret = stm32_dfsdm_start_channel(adc->dfsdm, adc->ch_id);
|
||||
ret = stm32_dfsdm_start_channel(adc->dfsdm, chan->channel);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id,
|
||||
adc->ch_id);
|
||||
chan->channel);
|
||||
if (ret < 0)
|
||||
goto stop_channels;
|
||||
|
||||
|
@ -464,12 +465,13 @@ stop_channels:
|
|||
|
||||
regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
|
||||
DFSDM_CR1_RCONT_MASK, 0);
|
||||
stm32_dfsdm_stop_channel(adc->dfsdm, adc->fl_id);
|
||||
stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)
|
||||
static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc,
|
||||
const struct iio_chan_spec *chan)
|
||||
{
|
||||
struct regmap *regmap = adc->dfsdm->regmap;
|
||||
|
||||
|
@ -482,7 +484,7 @@ static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)
|
|||
regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
|
||||
DFSDM_CR1_RCONT_MASK, 0);
|
||||
|
||||
stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
|
||||
stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel);
|
||||
}
|
||||
|
||||
static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev,
|
||||
|
@ -609,6 +611,7 @@ static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
|
|||
static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
|
||||
{
|
||||
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
|
||||
const struct iio_chan_spec *chan = &indio_dev->channels[0];
|
||||
int ret;
|
||||
|
||||
/* Reset adc buffer index */
|
||||
|
@ -618,7 +621,7 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = stm32_dfsdm_start_conv(adc, true);
|
||||
ret = stm32_dfsdm_start_conv(adc, chan, true);
|
||||
if (ret) {
|
||||
dev_err(&indio_dev->dev, "Can't start conversion\n");
|
||||
goto stop_dfsdm;
|
||||
|
@ -635,7 +638,7 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
|
|||
return 0;
|
||||
|
||||
err_stop_conv:
|
||||
stm32_dfsdm_stop_conv(adc);
|
||||
stm32_dfsdm_stop_conv(adc, chan);
|
||||
stop_dfsdm:
|
||||
stm32_dfsdm_stop_dfsdm(adc->dfsdm);
|
||||
|
||||
|
@ -645,11 +648,12 @@ stop_dfsdm:
|
|||
static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
|
||||
{
|
||||
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
|
||||
const struct iio_chan_spec *chan = &indio_dev->channels[0];
|
||||
|
||||
if (adc->dma_chan)
|
||||
dmaengine_terminate_all(adc->dma_chan);
|
||||
|
||||
stm32_dfsdm_stop_conv(adc);
|
||||
stm32_dfsdm_stop_conv(adc, chan);
|
||||
|
||||
stm32_dfsdm_stop_dfsdm(adc->dfsdm);
|
||||
|
||||
|
@ -730,7 +734,7 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
|
|||
if (ret < 0)
|
||||
goto stop_dfsdm;
|
||||
|
||||
ret = stm32_dfsdm_start_conv(adc, false);
|
||||
ret = stm32_dfsdm_start_conv(adc, chan, false);
|
||||
if (ret < 0) {
|
||||
regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
|
||||
DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
|
||||
|
@ -751,7 +755,7 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
|
|||
else
|
||||
ret = IIO_VAL_INT;
|
||||
|
||||
stm32_dfsdm_stop_conv(adc);
|
||||
stm32_dfsdm_stop_conv(adc, chan);
|
||||
|
||||
stop_dfsdm:
|
||||
stm32_dfsdm_stop_dfsdm(adc->dfsdm);
|
||||
|
@ -765,7 +769,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
|
|||
{
|
||||
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
|
||||
struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
|
||||
struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
|
||||
struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
|
||||
unsigned int spi_freq = adc->spi_freq;
|
||||
int ret = -EINVAL;
|
||||
|
||||
|
@ -972,7 +976,6 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
|
|||
}
|
||||
ch->scan_type.realbits = 24;
|
||||
ch->scan_type.storagebits = 32;
|
||||
adc->ch_id = ch->channel;
|
||||
|
||||
return stm32_dfsdm_chan_configure(adc->dfsdm,
|
||||
&adc->dfsdm->ch_list[ch->channel]);
|
||||
|
@ -1001,7 +1004,7 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
|
|||
}
|
||||
ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ);
|
||||
|
||||
d_ch = &adc->dfsdm->ch_list[adc->ch_id];
|
||||
d_ch = &adc->dfsdm->ch_list[ch->channel];
|
||||
if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
|
||||
adc->spi_freq = adc->dfsdm->spi_master_freq;
|
||||
|
||||
|
@ -1042,8 +1045,8 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
|
|||
return -ENOMEM;
|
||||
|
||||
for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
|
||||
ch->scan_index = chan_idx;
|
||||
ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
|
||||
ch[chan_idx].scan_index = chan_idx;
|
||||
ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &ch[chan_idx]);
|
||||
if (ret < 0) {
|
||||
dev_err(&indio_dev->dev, "Channels init failed\n");
|
||||
return ret;
|
||||
|
|
|
@ -83,7 +83,7 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
|
|||
{
|
||||
struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
|
||||
struct device *dev = &priv->pdev->dev;
|
||||
unsigned int clk_div = priv->spi_clk_out_div;
|
||||
unsigned int clk_div = priv->spi_clk_out_div, clk_src;
|
||||
int ret;
|
||||
|
||||
if (atomic_inc_return(&priv->n_active_ch) == 1) {
|
||||
|
@ -100,6 +100,14 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
|
|||
}
|
||||
}
|
||||
|
||||
/* select clock source, e.g. 0 for "dfsdm" or 1 for "audio" */
|
||||
clk_src = priv->aclk ? 1 : 0;
|
||||
ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
|
||||
DFSDM_CHCFGR1_CKOUTSRC_MASK,
|
||||
DFSDM_CHCFGR1_CKOUTSRC(clk_src));
|
||||
if (ret < 0)
|
||||
goto disable_aclk;
|
||||
|
||||
/* Output the SPI CLKOUT (if clk_div == 0 clock if OFF) */
|
||||
ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
|
||||
DFSDM_CHCFGR1_CKOUTDIV_MASK,
|
||||
|
@ -274,7 +282,7 @@ static int stm32_dfsdm_probe(struct platform_device *pdev)
|
|||
|
||||
dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm",
|
||||
dfsdm->base,
|
||||
&stm32h7_dfsdm_regmap_cfg);
|
||||
dev_data->regmap_cfg);
|
||||
if (IS_ERR(dfsdm->regmap)) {
|
||||
ret = PTR_ERR(dfsdm->regmap);
|
||||
dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
|
||||
|
|
|
@@ -133,6 +133,9 @@ static int ccs811_start_sensor_application(struct i2c_client *client)
 if (ret < 0)
 return ret;

+if ((ret & CCS811_STATUS_FW_MODE_APPLICATION))
+return 0;
+
 if ((ret & CCS811_STATUS_APP_VALID_MASK) !=
 CCS811_STATUS_APP_VALID_LOADED)
 return -EIO;

@@ -640,7 +640,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
 press_data->sensor_settings->drdy_irq.int2.addr))
 pdata = (struct st_sensors_platform_data *)&default_press_pdata;

-err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
+err = st_sensors_init_sensor(indio_dev, pdata);
 if (err < 0)
 goto st_press_power_off;

@@ -207,6 +207,22 @@ int rdma_addr_size(struct sockaddr *addr)
 }
 EXPORT_SYMBOL(rdma_addr_size);

+int rdma_addr_size_in6(struct sockaddr_in6 *addr)
+{
+int ret = rdma_addr_size((struct sockaddr *) addr);
+
+return ret <= sizeof(*addr) ? ret : 0;
+}
+EXPORT_SYMBOL(rdma_addr_size_in6);
+
+int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
+{
+int ret = rdma_addr_size((struct sockaddr *) addr);
+
+return ret <= sizeof(*addr) ? ret : 0;
+}
+EXPORT_SYMBOL(rdma_addr_size_kss);
+
 static struct rdma_addr_client self;

 void rdma_addr_register_client(struct rdma_addr_client *client)

@@ -586,6 +602,15 @@ static void process_one_req(struct work_struct *_work)
 list_del(&req->list);
 mutex_unlock(&lock);

+/*
+ * Although the work will normally have been canceled by the
+ * workqueue, it can still be requeued as long as it is on the
+ * req_list, so it could have been requeued before we grabbed &lock.
+ * We need to cancel it after it is removed from req_list to really be
+ * sure it is safe to free.
+ */
+cancel_delayed_work(&req->work);
+
 req->callback(req->status, (struct sockaddr *)&req->src_addr,
 req->addr, req->context);
 put_client(req->client);

@ -290,6 +290,7 @@ void ib_dealloc_device(struct ib_device *device)
|
|||
{
|
||||
WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
|
||||
device->reg_state != IB_DEV_UNINITIALIZED);
|
||||
rdma_restrack_clean(&device->res);
|
||||
put_device(&device->dev);
|
||||
}
|
||||
EXPORT_SYMBOL(ib_dealloc_device);
|
||||
|
@ -600,8 +601,6 @@ void ib_unregister_device(struct ib_device *device)
|
|||
}
|
||||
up_read(&lists_rwsem);
|
||||
|
||||
rdma_restrack_clean(&device->res);
|
||||
|
||||
ib_device_unregister_rdmacg(device);
|
||||
ib_device_unregister_sysfs(device);
|
||||
|
||||
|
|
|
@ -632,6 +632,9 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
|
|||
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
|
||||
return -EFAULT;
|
||||
|
||||
if (!rdma_addr_size_in6(&cmd.addr))
|
||||
return -EINVAL;
|
||||
|
||||
ctx = ucma_get_ctx(file, cmd.id);
|
||||
if (IS_ERR(ctx))
|
||||
return PTR_ERR(ctx);
|
||||
|
@ -645,22 +648,21 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
|
|||
int in_len, int out_len)
|
||||
{
|
||||
struct rdma_ucm_bind cmd;
|
||||
struct sockaddr *addr;
|
||||
struct ucma_context *ctx;
|
||||
int ret;
|
||||
|
||||
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
|
||||
return -EFAULT;
|
||||
|
||||
addr = (struct sockaddr *) &cmd.addr;
|
||||
if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
|
||||
if (cmd.reserved || !cmd.addr_size ||
|
||||
cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
|
||||
return -EINVAL;
|
||||
|
||||
ctx = ucma_get_ctx(file, cmd.id);
|
||||
if (IS_ERR(ctx))
|
||||
return PTR_ERR(ctx);
|
||||
|
||||
ret = rdma_bind_addr(ctx->cm_id, addr);
|
||||
ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
|
||||
ucma_put_ctx(ctx);
|
||||
return ret;
|
||||
}
|
||||
|
@ -670,23 +672,22 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
|
|||
int in_len, int out_len)
|
||||
{
|
||||
struct rdma_ucm_resolve_ip cmd;
|
||||
struct sockaddr *src, *dst;
|
||||
struct ucma_context *ctx;
|
||||
int ret;
|
||||
|
||||
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
|
||||
return -EFAULT;
|
||||
|
||||
src = (struct sockaddr *) &cmd.src_addr;
|
||||
dst = (struct sockaddr *) &cmd.dst_addr;
|
||||
if (!rdma_addr_size(src) || !rdma_addr_size(dst))
|
||||
if (!rdma_addr_size_in6(&cmd.src_addr) ||
|
||||
!rdma_addr_size_in6(&cmd.dst_addr))
|
||||
return -EINVAL;
|
||||
|
||||
ctx = ucma_get_ctx(file, cmd.id);
|
||||
if (IS_ERR(ctx))
|
||||
return PTR_ERR(ctx);
|
||||
|
||||
ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
|
||||
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
|
||||
(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
|
||||
ucma_put_ctx(ctx);
|
||||
return ret;
|
||||
}
|
||||
|
@ -696,24 +697,23 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
|
|||
int in_len, int out_len)
|
||||
{
|
||||
struct rdma_ucm_resolve_addr cmd;
|
||||
struct sockaddr *src, *dst;
|
||||
struct ucma_context *ctx;
|
||||
int ret;
|
||||
|
||||
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
|
||||
return -EFAULT;
|
||||
|
||||
src = (struct sockaddr *) &cmd.src_addr;
|
||||
dst = (struct sockaddr *) &cmd.dst_addr;
|
||||
if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
|
||||
!cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
|
||||
if (cmd.reserved ||
|
||||
(cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
|
||||
!cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
|
||||
return -EINVAL;
|
||||
|
||||
ctx = ucma_get_ctx(file, cmd.id);
|
||||
if (IS_ERR(ctx))
|
||||
return PTR_ERR(ctx);
|
||||
|
||||
ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
|
||||
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
|
||||
(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
|
||||
ucma_put_ctx(ctx);
|
||||
return ret;
|
||||
}
|
||||
|
@ -1166,6 +1166,11 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
|
|||
if (IS_ERR(ctx))
|
||||
return PTR_ERR(ctx);
|
||||
|
||||
if (!ctx->cm_id->device) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
resp.qp_attr_mask = 0;
|
||||
memset(&qp_attr, 0, sizeof qp_attr);
|
||||
qp_attr.qp_state = cmd.qp_state;
|
||||
|
@ -1307,7 +1312,7 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
|
|||
if (IS_ERR(ctx))
|
||||
return PTR_ERR(ctx);
|
||||
|
||||
if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
|
||||
if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
|
||||
return -EINVAL;
|
||||
|
||||
optval = memdup_user((void __user *) (unsigned long) cmd.optval,
|
||||
|
@ -1331,7 +1336,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
|
|||
{
|
||||
struct rdma_ucm_notify cmd;
|
||||
struct ucma_context *ctx;
|
||||
int ret;
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
|
||||
return -EFAULT;
|
||||
|
@ -1340,7 +1345,9 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
|
|||
if (IS_ERR(ctx))
|
||||
return PTR_ERR(ctx);
|
||||
|
||||
ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
|
||||
if (ctx->cm_id->device)
|
||||
ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
|
||||
|
||||
ucma_put_ctx(ctx);
|
||||
return ret;
|
||||
}
|
||||
|
@ -1426,7 +1433,7 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
|
|||
join_cmd.response = cmd.response;
|
||||
join_cmd.uid = cmd.uid;
|
||||
join_cmd.id = cmd.id;
|
||||
join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
|
||||
join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
|
||||
if (!join_cmd.addr_size)
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -1445,7 +1452,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
|
|||
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
|
||||
return -EFAULT;
|
||||
|
||||
if (!rdma_addr_size((struct sockaddr *)&cmd.addr))
|
||||
if (!rdma_addr_size_kss(&cmd.addr))
|
||||
return -EINVAL;
|
||||
|
||||
return ucma_process_join(file, &cmd, out_len);
|
||||
|
|
|
@@ -4383,7 +4383,7 @@ err_dma_alloc_buf:
 eq->l0_dma = 0;

 if (mhop_num == 1)
-for (i -= i; i >= 0; i--)
+for (i -= 1; i >= 0; i--)
 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
 eq->buf_dma[i]);
 else if (mhop_num == 2) {

@ -3482,9 +3482,12 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev)
|
|||
if (err)
|
||||
mlx5_ib_warn(dev, "mr cache cleanup failed\n");
|
||||
|
||||
mlx5_ib_destroy_qp(dev->umrc.qp);
|
||||
ib_free_cq(dev->umrc.cq);
|
||||
ib_dealloc_pd(dev->umrc.pd);
|
||||
if (dev->umrc.qp)
|
||||
mlx5_ib_destroy_qp(dev->umrc.qp);
|
||||
if (dev->umrc.cq)
|
||||
ib_free_cq(dev->umrc.cq);
|
||||
if (dev->umrc.pd)
|
||||
ib_dealloc_pd(dev->umrc.pd);
|
||||
}
|
||||
|
||||
enum {
|
||||
|
@ -3586,12 +3589,15 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
|
|||
|
||||
error_4:
|
||||
mlx5_ib_destroy_qp(qp);
|
||||
dev->umrc.qp = NULL;
|
||||
|
||||
error_3:
|
||||
ib_free_cq(cq);
|
||||
dev->umrc.cq = NULL;
|
||||
|
||||
error_2:
|
||||
ib_dealloc_pd(pd);
|
||||
dev->umrc.pd = NULL;
|
||||
|
||||
error_0:
|
||||
kfree(attr);
|
||||
|
|
|
@ -740,6 +740,9 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
|
|||
{
|
||||
int i;
|
||||
|
||||
if (!dev->cache.wq)
|
||||
return 0;
|
||||
|
||||
dev->cache.stopped = 1;
|
||||
flush_workqueue(dev->cache.wq);
|
||||
|
||||
|
|
|
@ -833,7 +833,8 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
|
|||
|
||||
dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
|
||||
if (!dev->num_cnq) {
|
||||
DP_ERR(dev, "not enough CNQ resources.\n");
|
||||
DP_ERR(dev, "Failed. At least one CNQ is required.\n");
|
||||
rc = -ENOMEM;
|
||||
goto init_err;
|
||||
}
|
||||
|
||||
|
|
|
@ -1841,14 +1841,15 @@ static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
|
|||
|
||||
static int qedr_update_qp_state(struct qedr_dev *dev,
|
||||
struct qedr_qp *qp,
|
||||
enum qed_roce_qp_state cur_state,
|
||||
enum qed_roce_qp_state new_state)
|
||||
{
|
||||
int status = 0;
|
||||
|
||||
if (new_state == qp->state)
|
||||
if (new_state == cur_state)
|
||||
return 0;
|
||||
|
||||
switch (qp->state) {
|
||||
switch (cur_state) {
|
||||
case QED_ROCE_QP_STATE_RESET:
|
||||
switch (new_state) {
|
||||
case QED_ROCE_QP_STATE_INIT:
|
||||
|
@ -1955,6 +1956,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
|||
struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
|
||||
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
|
||||
enum ib_qp_state old_qp_state, new_qp_state;
|
||||
enum qed_roce_qp_state cur_state;
|
||||
int rc = 0;
|
||||
|
||||
DP_DEBUG(dev, QEDR_MSG_QP,
|
||||
|
@ -2086,18 +2088,23 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
|||
SET_FIELD(qp_params.modify_flags,
|
||||
QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
|
||||
|
||||
qp_params.ack_timeout = attr->timeout;
|
||||
if (attr->timeout) {
|
||||
u32 temp;
|
||||
|
||||
temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
|
||||
/* FW requires [msec] */
|
||||
qp_params.ack_timeout = temp;
|
||||
} else {
|
||||
/* Infinite */
|
||||
/* The received timeout value is an exponent used like this:
|
||||
* "12.7.34 LOCAL ACK TIMEOUT
|
||||
* Value representing the transport (ACK) timeout for use by
|
||||
* the remote, expressed as: 4.096 * 2^timeout [usec]"
|
||||
* The FW expects timeout in msec so we need to divide the usec
|
||||
* result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
|
||||
* so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
|
||||
* The value of zero means infinite so we use a 'max_t' to make
|
||||
* sure that sub 1 msec values will be configured as 1 msec.
|
||||
*/
|
||||
if (attr->timeout)
|
||||
qp_params.ack_timeout =
|
||||
1 << max_t(int, attr->timeout - 8, 0);
|
||||
else
|
||||
qp_params.ack_timeout = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (attr_mask & IB_QP_RETRY_CNT) {
|
||||
SET_FIELD(qp_params.modify_flags,
|
||||
QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
|
||||
|
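The comment in the hunk above approximates the IB local ACK timeout, 4.096 usec * 2^timeout, as 2^(timeout - 8) msec (4.096 ~ 2^2 and 1000 ~ 2^10), clamped to at least 1 msec. A small standalone program to compare the exact value with what the patch programs (illustration only, not driver code):

#include <stdio.h>

int main(void)
{
	/* IB spec: local ACK timeout = 4.096 us * 2^timeout; the FW wants msec */
	for (int timeout = 1; timeout <= 16; timeout++) {
		double exact_ms = 4.096 * (double)(1UL << timeout) / 1000.0;
		/* patch: 1 << max(timeout - 8, 0), i.e. never below 1 msec */
		unsigned int programmed_ms =
			1u << (timeout - 8 > 0 ? timeout - 8 : 0);

		printf("timeout=%2d exact=%10.3f ms programmed=%u ms\n",
		       timeout, exact_ms, programmed_ms);
	}
	return 0;
}
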
@ -2170,13 +2177,25 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
|||
qp->dest_qp_num = attr->dest_qp_num;
|
||||
}
|
||||
|
||||
cur_state = qp->state;
|
||||
|
||||
/* Update the QP state before the actual ramrod to prevent a race with
|
||||
* fast path. Modifying the QP state to error will cause the device to
|
||||
* flush the CQEs and while polling the flushed CQEs will considered as
|
||||
* a potential issue if the QP isn't in error state.
|
||||
*/
|
||||
if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
|
||||
!udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
|
||||
qp->state = QED_ROCE_QP_STATE_ERR;
|
||||
|
||||
if (qp->qp_type != IB_QPT_GSI)
|
||||
rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
|
||||
qp->qed_qp, &qp_params);
|
||||
|
||||
if (attr_mask & IB_QP_STATE) {
|
||||
if ((qp->qp_type != IB_QPT_GSI) && (!udata))
|
||||
rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
|
||||
rc = qedr_update_qp_state(dev, qp, cur_state,
|
||||
qp_params.new_state);
|
||||
qp->state = qp_params.new_state;
|
||||
}
|
||||
|
||||
|
|
|
@@ -887,7 +887,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps

 q = bdev_get_queue(p->path.dev->bdev);
 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
-if (attached_handler_name) {
+if (attached_handler_name || m->hw_handler_name) {
 INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
 r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
 if (r) {

@ -466,7 +466,7 @@ static int dm_get_bdev_for_ioctl(struct mapped_device *md,
|
|||
{
|
||||
struct dm_target *tgt;
|
||||
struct dm_table *map;
|
||||
int srcu_idx, r;
|
||||
int srcu_idx, r, r2;
|
||||
|
||||
retry:
|
||||
r = -ENOTTY;
|
||||
|
@ -492,9 +492,11 @@ retry:
|
|||
goto out;
|
||||
|
||||
bdgrab(*bdev);
|
||||
r = blkdev_get(*bdev, *mode, _dm_claim_ptr);
|
||||
if (r < 0)
|
||||
r2 = blkdev_get(*bdev, *mode, _dm_claim_ptr);
|
||||
if (r2 < 0) {
|
||||
r = r2;
|
||||
goto out;
|
||||
}
|
||||
|
||||
dm_put_live_table(md, srcu_idx);
|
||||
return r;
|
||||
|
|
|
@@ -151,7 +151,7 @@ config DVB_MMAP
 select VIDEOBUF2_VMALLOC
 default n
 help
-This option enables DVB experimental memory-mapped API, with
+This option enables DVB experimental memory-mapped API, which
 reduces the number of context switches to read DVB buffers, as
 the buffers can use mmap() syscalls.

@ -172,16 +172,13 @@ static irqreturn_t tegra_cec_irq_handler(int irq, void *data)
|
|||
}
|
||||
}
|
||||
|
||||
if (status & (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN |
|
||||
TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED |
|
||||
TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED |
|
||||
TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)) {
|
||||
if (status & TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED) {
|
||||
cec_write(cec, TEGRA_CEC_INT_STAT,
|
||||
(TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN |
|
||||
TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED |
|
||||
TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED |
|
||||
TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED));
|
||||
} else if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) {
|
||||
TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED);
|
||||
cec->rx_done = false;
|
||||
cec->rx_buf_cnt = 0;
|
||||
}
|
||||
if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) {
|
||||
u32 v;
|
||||
|
||||
cec_write(cec, TEGRA_CEC_INT_STAT,
|
||||
|
@ -255,7 +252,7 @@ static int tegra_cec_adap_enable(struct cec_adapter *adap, bool enable)
|
|||
TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED |
|
||||
TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED |
|
||||
TEGRA_CEC_INT_MASK_RX_REGISTER_FULL |
|
||||
TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN);
|
||||
TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED);
|
||||
|
||||
cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE);
|
||||
return 0;
|
||||
|
|
|
@ -1889,6 +1889,8 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
|
|||
do {
|
||||
uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
|
||||
mask = (1 << (cfi->device_type * 8)) - 1;
|
||||
if (ofs >= map->size)
|
||||
return 0;
|
||||
result = map_read(map, base + ofs);
|
||||
bank++;
|
||||
} while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
|
||||
|
|
|
@ -479,7 +479,7 @@ static int shrink_ecclayout(struct mtd_info *mtd,
|
|||
for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
|
||||
u32 eccpos;
|
||||
|
||||
ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
|
||||
ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
|
||||
if (ret < 0) {
|
||||
if (ret != -ERANGE)
|
||||
return ret;
|
||||
|
@ -526,7 +526,7 @@ static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
|
|||
for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
|
||||
u32 eccpos;
|
||||
|
||||
ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
|
||||
ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
|
||||
if (ret < 0) {
|
||||
if (ret != -ERANGE)
|
||||
return ret;
|
||||
|
|
|
@ -426,7 +426,7 @@ static int get_strength(struct atmel_pmecc_user *user)
|
|||
|
||||
static int get_sectorsize(struct atmel_pmecc_user *user)
|
||||
{
|
||||
return user->cache.cfg & PMECC_LOOKUP_TABLE_SIZE_1024 ? 1024 : 512;
|
||||
return user->cache.cfg & PMECC_CFG_SECTOR1024 ? 1024 : 512;
|
||||
}
|
||||
|
||||
static void atmel_pmecc_gen_syndrome(struct atmel_pmecc_user *user, int sector)
|
||||
|
|
|
@ -173,14 +173,9 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
|
|||
|
||||
/* returns nonzero if entire page is blank */
|
||||
static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
|
||||
u32 *eccstat, unsigned int bufnum)
|
||||
u32 eccstat, unsigned int bufnum)
|
||||
{
|
||||
u32 reg = eccstat[bufnum / 4];
|
||||
int errors;
|
||||
|
||||
errors = (reg >> ((3 - bufnum % 4) * 8)) & 15;
|
||||
|
||||
return errors;
|
||||
return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -193,7 +188,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
|
|||
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
|
||||
struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
|
||||
struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
|
||||
u32 eccstat[4];
|
||||
u32 eccstat;
|
||||
int i;
|
||||
|
||||
/* set the chip select for NAND Transaction */
|
||||
|
@ -228,19 +223,17 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
|
|||
if (nctrl->eccread) {
|
||||
int errors;
|
||||
int bufnum = nctrl->page & priv->bufnum_mask;
|
||||
int sector = bufnum * chip->ecc.steps;
|
||||
int sector_end = sector + chip->ecc.steps - 1;
|
||||
int sector_start = bufnum * chip->ecc.steps;
|
||||
int sector_end = sector_start + chip->ecc.steps - 1;
|
||||
__be32 *eccstat_regs;
|
||||
|
||||
if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
|
||||
eccstat_regs = ifc->ifc_nand.v2_nand_eccstat;
|
||||
else
|
||||
eccstat_regs = ifc->ifc_nand.v1_nand_eccstat;
|
||||
eccstat_regs = ifc->ifc_nand.nand_eccstat;
|
||||
eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);
|
||||
|
||||
for (i = sector / 4; i <= sector_end / 4; i++)
|
||||
eccstat[i] = ifc_in32(&eccstat_regs[i]);
|
||||
for (i = sector_start; i <= sector_end; i++) {
|
||||
if (i != sector_start && !(i % 4))
|
||||
eccstat = ifc_in32(&eccstat_regs[i / 4]);
|
||||
|
||||
for (i = sector; i <= sector_end; i++) {
|
||||
errors = check_read_ecc(mtd, ctrl, eccstat, i);
|
||||
|
||||
if (errors == 15) {
|
||||
|
@ -626,6 +619,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
|
|||
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
|
||||
struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
|
||||
u32 nand_fsr;
|
||||
int status;
|
||||
|
||||
/* Use READ_STATUS command, but wait for the device to be ready */
|
||||
ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
|
||||
|
@ -640,12 +634,12 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
|
|||
fsl_ifc_run_command(mtd);
|
||||
|
||||
nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
|
||||
|
||||
status = nand_fsr >> 24;
|
||||
/*
|
||||
* The chip always seems to report that it is
|
||||
* write-protected, even when it is not.
|
||||
*/
|
||||
return nand_fsr | NAND_STATUS_WP;
|
||||
return status | NAND_STATUS_WP;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -1528,39 +1528,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
|
|||
goto err_close;
|
||||
}
|
||||
|
||||
/* If the mode uses primary, then the following is handled by
|
||||
* bond_change_active_slave().
|
||||
*/
|
||||
if (!bond_uses_primary(bond)) {
|
||||
/* set promiscuity level to new slave */
|
||||
if (bond_dev->flags & IFF_PROMISC) {
|
||||
res = dev_set_promiscuity(slave_dev, 1);
|
||||
if (res)
|
||||
goto err_close;
|
||||
}
|
||||
|
||||
/* set allmulti level to new slave */
|
||||
if (bond_dev->flags & IFF_ALLMULTI) {
|
||||
res = dev_set_allmulti(slave_dev, 1);
|
||||
if (res)
|
||||
goto err_close;
|
||||
}
|
||||
|
||||
netif_addr_lock_bh(bond_dev);
|
||||
|
||||
dev_mc_sync_multiple(slave_dev, bond_dev);
|
||||
dev_uc_sync_multiple(slave_dev, bond_dev);
|
||||
|
||||
netif_addr_unlock_bh(bond_dev);
|
||||
}
|
||||
|
||||
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
|
||||
/* add lacpdu mc addr to mc list */
|
||||
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
|
||||
|
||||
dev_mc_add(slave_dev, lacpdu_multicast);
|
||||
}
|
||||
|
||||
res = vlan_vids_add_by_dev(slave_dev, bond_dev);
|
||||
if (res) {
|
||||
netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
|
||||
|
@ -1725,6 +1692,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
|
|||
goto err_upper_unlink;
|
||||
}
|
||||
|
||||
/* If the mode uses primary, then the following is handled by
|
||||
* bond_change_active_slave().
|
||||
*/
|
||||
if (!bond_uses_primary(bond)) {
|
||||
/* set promiscuity level to new slave */
|
||||
if (bond_dev->flags & IFF_PROMISC) {
|
||||
res = dev_set_promiscuity(slave_dev, 1);
|
||||
if (res)
|
||||
goto err_sysfs_del;
|
||||
}
|
||||
|
||||
/* set allmulti level to new slave */
|
||||
if (bond_dev->flags & IFF_ALLMULTI) {
|
||||
res = dev_set_allmulti(slave_dev, 1);
|
||||
if (res) {
|
||||
if (bond_dev->flags & IFF_PROMISC)
|
||||
dev_set_promiscuity(slave_dev, -1);
|
||||
goto err_sysfs_del;
|
||||
}
|
||||
}
|
||||
|
||||
netif_addr_lock_bh(bond_dev);
|
||||
dev_mc_sync_multiple(slave_dev, bond_dev);
|
||||
dev_uc_sync_multiple(slave_dev, bond_dev);
|
||||
netif_addr_unlock_bh(bond_dev);
|
||||
|
||||
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
|
||||
/* add lacpdu mc addr to mc list */
|
||||
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
|
||||
|
||||
dev_mc_add(slave_dev, lacpdu_multicast);
|
||||
}
|
||||
}
|
||||
|
||||
bond->slave_cnt++;
|
||||
bond_compute_features(bond);
|
||||
bond_set_carrier(bond);
|
||||
|
@ -1748,6 +1749,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
|
|||
return 0;
|
||||
|
||||
/* Undo stages on error */
|
||||
err_sysfs_del:
|
||||
bond_sysfs_slave_del(new_slave);
|
||||
|
||||
err_upper_unlink:
|
||||
bond_upper_dev_unlink(bond, new_slave);
|
||||
|
||||
|
@ -1755,9 +1759,6 @@ err_unregister:
|
|||
netdev_rx_handler_unregister(slave_dev);
|
||||
|
||||
err_detach:
|
||||
if (!bond_uses_primary(bond))
|
||||
bond_hw_addr_flush(bond_dev, slave_dev);
|
||||
|
||||
vlan_vids_del_by_dev(slave_dev, bond_dev);
|
||||
if (rcu_access_pointer(bond->primary_slave) == new_slave)
|
||||
RCU_INIT_POINTER(bond->primary_slave, NULL);
|
||||
|
|
|
@@ -1409,6 +1409,7 @@ static const struct of_device_id mt7530_of_match[] = {
 { .compatible = "mediatek,mt7530" },
 { /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, mt7530_of_match);

 static struct mdio_driver mt7530_mdio_driver = {
 .probe  = mt7530_probe,

@@ -1424,4 +1425,3 @@ mdio_module_driver(mt7530_mdio_driver);
 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
 MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:mediatek-mt7530");

@@ -1132,6 +1132,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
 }
 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

+q_map = 0;
 /* Enable all initialized RXQs. */
 for (queue = 0; queue < rxq_number; queue++) {
 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

@ -156,57 +156,63 @@ static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
|
|||
static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
|
||||
{
|
||||
struct mlx4_en_priv *priv = netdev_priv(netdev);
|
||||
struct mlx4_en_port_profile *prof = priv->prof;
|
||||
struct mlx4_en_dev *mdev = priv->mdev;
|
||||
u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
|
||||
|
||||
if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
|
||||
return 1;
|
||||
|
||||
if (priv->cee_config.pfc_state) {
|
||||
int tc;
|
||||
rx_ppp = prof->rx_ppp;
|
||||
tx_ppp = prof->tx_ppp;
|
||||
|
||||
priv->prof->rx_pause = 0;
|
||||
priv->prof->tx_pause = 0;
|
||||
for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
|
||||
u8 tc_mask = 1 << tc;
|
||||
|
||||
switch (priv->cee_config.dcb_pfc[tc]) {
|
||||
case pfc_disabled:
|
||||
priv->prof->tx_ppp &= ~tc_mask;
|
||||
priv->prof->rx_ppp &= ~tc_mask;
|
||||
tx_ppp &= ~tc_mask;
|
||||
rx_ppp &= ~tc_mask;
|
||||
break;
|
||||
case pfc_enabled_full:
|
||||
priv->prof->tx_ppp |= tc_mask;
|
||||
priv->prof->rx_ppp |= tc_mask;
|
||||
tx_ppp |= tc_mask;
|
||||
rx_ppp |= tc_mask;
|
||||
break;
|
||||
case pfc_enabled_tx:
|
||||
priv->prof->tx_ppp |= tc_mask;
|
||||
priv->prof->rx_ppp &= ~tc_mask;
|
||||
tx_ppp |= tc_mask;
|
||||
rx_ppp &= ~tc_mask;
|
||||
break;
|
||||
case pfc_enabled_rx:
|
||||
priv->prof->tx_ppp &= ~tc_mask;
|
||||
priv->prof->rx_ppp |= tc_mask;
|
||||
tx_ppp &= ~tc_mask;
|
||||
rx_ppp |= tc_mask;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
en_dbg(DRV, priv, "Set pfc on\n");
|
||||
rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
|
||||
tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
|
||||
} else {
|
||||
priv->prof->rx_pause = 1;
|
||||
priv->prof->tx_pause = 1;
|
||||
en_dbg(DRV, priv, "Set pfc off\n");
|
||||
rx_ppp = 0;
|
||||
tx_ppp = 0;
|
||||
rx_pause = prof->rx_pause;
|
||||
tx_pause = prof->tx_pause;
|
||||
}
|
||||
|
||||
if (mlx4_SET_PORT_general(mdev->dev, priv->port,
|
||||
priv->rx_skb_size + ETH_FCS_LEN,
|
||||
priv->prof->tx_pause,
|
||||
priv->prof->tx_ppp,
|
||||
priv->prof->rx_pause,
|
||||
priv->prof->rx_ppp)) {
|
||||
tx_pause, tx_ppp, rx_pause, rx_ppp)) {
|
||||
en_err(priv, "Failed setting pause params\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
prof->tx_ppp = tx_ppp;
|
||||
prof->rx_ppp = rx_ppp;
|
||||
prof->tx_pause = tx_pause;
|
||||
prof->rx_pause = rx_pause;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -408,6 +414,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
|
|||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct mlx4_en_port_profile *prof = priv->prof;
|
||||
struct mlx4_en_dev *mdev = priv->mdev;
|
||||
u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
|
||||
int err;
|
||||
|
||||
en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
|
||||
|
@ -416,23 +423,26 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
|
|||
pfc->mbc,
|
||||
pfc->delay);
|
||||
|
||||
prof->rx_pause = !pfc->pfc_en;
|
||||
prof->tx_pause = !pfc->pfc_en;
|
||||
prof->rx_ppp = pfc->pfc_en;
|
||||
prof->tx_ppp = pfc->pfc_en;
|
||||
rx_pause = prof->rx_pause && !pfc->pfc_en;
|
||||
tx_pause = prof->tx_pause && !pfc->pfc_en;
|
||||
rx_ppp = pfc->pfc_en;
|
||||
tx_ppp = pfc->pfc_en;
|
||||
|
||||
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
|
||||
priv->rx_skb_size + ETH_FCS_LEN,
|
||||
prof->tx_pause,
|
||||
prof->tx_ppp,
|
||||
prof->rx_pause,
|
||||
prof->rx_ppp);
|
||||
if (err)
|
||||
tx_pause, tx_ppp, rx_pause, rx_ppp);
|
||||
if (err) {
|
||||
en_err(priv, "Failed setting pause params\n");
|
||||
else
|
||||
mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
|
||||
prof->rx_ppp, prof->rx_pause,
|
||||
prof->tx_ppp, prof->tx_pause);
|
||||
return err;
|
||||
}
|
||||
|
||||
mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
|
||||
rx_ppp, rx_pause, tx_ppp, tx_pause);
|
||||
|
||||
prof->tx_ppp = tx_ppp;
|
||||
prof->rx_ppp = rx_ppp;
|
||||
prof->rx_pause = rx_pause;
|
||||
prof->tx_pause = tx_pause;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -1060,27 +1060,32 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
|
|||
{
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct mlx4_en_dev *mdev = priv->mdev;
|
||||
u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
|
||||
int err;
|
||||
|
||||
if (pause->autoneg)
|
||||
return -EINVAL;
|
||||
|
||||
priv->prof->tx_pause = pause->tx_pause != 0;
|
||||
priv->prof->rx_pause = pause->rx_pause != 0;
|
||||
tx_pause = !!(pause->tx_pause);
|
||||
rx_pause = !!(pause->rx_pause);
|
||||
rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
|
||||
tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
|
||||
|
||||
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
|
||||
priv->rx_skb_size + ETH_FCS_LEN,
|
||||
priv->prof->tx_pause,
|
||||
priv->prof->tx_ppp,
|
||||
priv->prof->rx_pause,
|
||||
priv->prof->rx_ppp);
|
||||
if (err)
|
||||
en_err(priv, "Failed setting pause params\n");
|
||||
else
|
||||
mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
|
||||
priv->prof->rx_ppp,
|
||||
priv->prof->rx_pause,
|
||||
priv->prof->tx_ppp,
|
||||
priv->prof->tx_pause);
|
||||
tx_pause, tx_ppp, rx_pause, rx_ppp);
|
||||
if (err) {
|
||||
en_err(priv, "Failed setting pause params, err = %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
|
||||
rx_ppp, rx_pause, tx_ppp, tx_pause);
|
||||
|
||||
priv->prof->tx_pause = tx_pause;
|
||||
priv->prof->rx_pause = rx_pause;
|
||||
priv->prof->tx_ppp = tx_ppp;
|
||||
priv->prof->rx_ppp = rx_ppp;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -163,9 +163,9 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
|
|||
params->udp_rss = 0;
|
||||
}
|
||||
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
|
||||
params->prof[i].rx_pause = 1;
|
||||
params->prof[i].rx_pause = !(pfcrx || pfctx);
|
||||
params->prof[i].rx_ppp = pfcrx;
|
||||
params->prof[i].tx_pause = 1;
|
||||
params->prof[i].tx_pause = !(pfcrx || pfctx);
|
||||
params->prof[i].tx_ppp = pfctx;
|
||||
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
|
||||
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
|
||||
|
|
|
@@ -5088,6 +5088,7 @@ static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
 &tracker->res_tree[RES_FS_RULE]);
 list_del(&fs_rule->com.list);
 spin_unlock_irq(mlx4_tlock(dev));
+kfree(fs_rule->mirr_mbox);
 kfree(fs_rule);
 state = 0;
 break;

@@ -46,7 +46,7 @@ config MLX5_MPFS

 config MLX5_ESWITCH
 bool "Mellanox Technologies MLX5 SRIOV E-Switch support"
-depends on MLX5_CORE_EN
+depends on MLX5_CORE_EN && NET_SWITCHDEV
 default y
 ---help---
 Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC.