Linux 4.18-rc5
-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAltLpVUeHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGWisH/ikONMwV7OrSk36Y
5rxzTFUoBk0Qffct88gtSNuRVCxaVb1ofCndvFJE6A6HfJkWpbBzH6eq90aakmJi
f7uFcu4YmsQpeQaf9lpftWmY2vDf2fIadVTV0RnSMXks57wMax1cpBe7LJGpz13e
f+g5XRVs1MdlZVtr6tG2SU3Y5AqVVVsYe/0DBPonEqeh9/JJbPFCuNkFOxxzAqPu
VTnjyoOqG8qtZzjklNtR5rZn0Gv592tWX36eiWTQdThNmVFkGEAJwsHCQlY4OQYK
61QN4UhOHiu8e1ZuGDNEDhNVRnKtaaYUPFeWL1wLRW73ul4P3ZkpvpS8QTMwcFJI
JjzNOkI=
=ckcO
-----END PGP SIGNATURE-----

Merge tag 'v4.18-rc5' into x86/mm, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -4846,3 +4846,8 @@
xirc2ps_cs= [NET,PCMCIA]
Format:
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]

xhci-hcd.quirks [USB,KNL]
A hex value specifying bitmask with supplemental xhci
host controller quirks. Meaning of each bit can be
consulted in header drivers/usb/host/xhci.h.
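Such a quirks bitmask would be passed on the kernel command line at boot, for example (the bit value below is only a hypothetical illustration, not one taken from this patch):

    xhci-hcd.quirks=0x40
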
@@ -50,6 +50,11 @@ LDFLAGS_MODULE
--------------------------------------------------
Additional options used for $(LD) when linking modules.

KBUILD_KCONFIG
--------------------------------------------------
Set the top-level Kconfig file to the value of this environment
variable. The default name is "Kconfig".

KBUILD_VERBOSE
--------------------------------------------------
Set the kbuild verbosity. Can be assigned same values as "V=...".
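A minimal usage sketch for the variables above (the alternate Kconfig file name is made up for illustration):

    make KBUILD_KCONFIG=Kconfig.custom oldconfig
    make KBUILD_VERBOSE=1
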
@@ -88,7 +93,8 @@ In most cases the name of the architecture is the same as the
directory name found in the arch/ directory.
But some architectures such as x86 and sparc have aliases.
x86: i386 for 32 bit, x86_64 for 64 bit
sparc: sparc for 32 bit, sparc64 for 64 bit
sh: sh for 32 bit, sh64 for 64 bit
sparc: sparc32 for 32 bit, sparc64 for 64 bit
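As an illustration of the aliases above, a 32-bit x86 configuration can be selected with:

    make ARCH=i386 defconfig
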
CROSS_COMPILE
--------------------------------------------------
@@ -148,15 +154,6 @@ stripped after they are installed. If INSTALL_MOD_STRIP is '1', then
the default option --strip-debug will be used. Otherwise,
INSTALL_MOD_STRIP value will be used as the options to the strip command.
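A minimal usage sketch for these install-time variables (the staging path is made up for illustration):

    make INSTALL_MOD_STRIP=1 INSTALL_MOD_PATH=/tmp/staging modules_install
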
INSTALL_FW_PATH
--------------------------------------------------
INSTALL_FW_PATH specifies where to install the firmware blobs.
The default value is:

    $(INSTALL_MOD_PATH)/lib/firmware

The value can be overridden in which case the default value is ignored.

INSTALL_HDR_PATH
--------------------------------------------------
INSTALL_HDR_PATH specifies where to install user space headers when
@@ -2,9 +2,9 @@ This file contains some assistance for using "make *config".

Use "make help" to list all of the possible configuration targets.

The xconfig ('qconf') and menuconfig ('mconf') programs also
have embedded help text. Be sure to check it for navigation,
search, and other general help text.
The xconfig ('qconf'), menuconfig ('mconf'), and nconfig ('nconf')
programs also have embedded help text. Be sure to check that for
navigation, search, and other general help text.

======================================================================
General
@@ -17,13 +17,16 @@ this happens, using a previously working .config file and running
for you, so you may find that you need to see what NEW kernel
symbols have been introduced.

To see a list of new config symbols when using "make oldconfig", use
To see a list of new config symbols, use

    cp user/some/old.config .config
    make listnewconfig

and the config program will list any new symbols, one per line.

Alternatively, you can use the brute force method:

    make oldconfig
    scripts/diffconfig .config.old .config | less

______________________________________________________________________
@@ -160,7 +163,7 @@ Searching in menuconfig:
This lists all config symbols that contain "hotplug",
e.g., HOTPLUG_CPU, MEMORY_HOTPLUG.

For search help, enter / followed TAB-TAB-TAB (to highlight
For search help, enter / followed by TAB-TAB (to highlight
<Help>) and Enter. This will tell you that you can also use
regular expressions (regexes) in the search string, so if you
are not interested in MEMORY_HOTPLUG, you could try
@@ -202,6 +205,39 @@ Example:
    make MENUCONFIG_MODE=single_menu menuconfig


======================================================================
nconfig
--------------------------------------------------

nconfig is an alternate text-based configurator. It lists function
keys across the bottom of the terminal (window) that execute commands.
You can also just use the corresponding numeric key to execute the
commands unless you are in a data entry window. E.g., instead of F6
for Save, you can just press 6.

Use F1 for Global help or F3 for the Short help menu.

Searching in nconfig:

You can search either in the menu entry "prompt" strings
or in the configuration symbols.

Use / to begin a search through the menu entries. This does
not support regular expressions. Use <Down> or <Up> for
Next hit and Previous hit, respectively. Use <Esc> to
terminate the search mode.

F8 (SymSearch) searches the configuration symbols for the
given string or regular expression (regex).

NCONFIG_MODE
--------------------------------------------------
This mode shows all sub-menus in one large tree.

Example:
    make NCONFIG_MODE=single_menu nconfig


======================================================================
xconfig
--------------------------------------------------
@@ -230,8 +266,7 @@ gconfig

Searching in gconfig:

None (gconfig isn't maintained as well as xconfig or menuconfig);
however, gconfig does have a few more viewing choices than
xconfig does.
There is no search command in gconfig. However, gconfig does
have several different viewing choices, modes, and options.

###
MAINTAINERS
@@ -581,7 +581,7 @@ W: https://www.infradead.org/~dhowells/kafs/

AGPGART DRIVER
M: David Airlie <airlied@linux.ie>
T: git git://people.freedesktop.org/~airlied/linux (part of drm maint)
T: git git://anongit.freedesktop.org/drm/drm
S: Maintained
F: drivers/char/agp/
F: include/linux/agp*
@@ -4460,6 +4460,7 @@ F: Documentation/blockdev/drbd/

DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
R: "Rafael J. Wysocki" <rafael@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
S: Supported
F: Documentation/kobject.txt
@@ -4630,7 +4631,7 @@ F: include/uapi/drm/vmwgfx_drm.h
DRM DRIVERS
M: David Airlie <airlied@linux.ie>
L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~airlied/linux
T: git git://anongit.freedesktop.org/drm/drm
B: https://bugs.freedesktop.org/
C: irc://chat.freenode.net/dri-devel
S: Maintained
@@ -10213,11 +10214,13 @@ F: sound/soc/codecs/sgtl5000*

NXP TDA998X DRM DRIVER
M: Russell King <linux@armlinux.org.uk>
S: Supported
S: Maintained
T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
F: drivers/gpu/drm/i2c/tda998x_drv.c
F: include/drm/i2c/tda998x.h
F: include/dt-bindings/display/tda998x.h
K: "nxp,tda998x"

NXP TFA9879 DRIVER
M: Peter Rosin <peda@axentia.se>
@@ -11835,7 +11838,7 @@ S: Supported
F: arch/hexagon/

QUALCOMM HIDMA DRIVER
M: Sinan Kaya <okaya@codeaurora.org>
M: Sinan Kaya <okaya@kernel.org>
L: linux-arm-kernel@lists.infradead.org
L: linux-arm-msm@vger.kernel.org
L: dmaengine@vger.kernel.org
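As a usage sketch (not part of this patch), entries like these are normally looked up with the in-tree helper script, e.g. for the TDA998x driver listed above:

    ./scripts/get_maintainer.pl -f drivers/gpu/drm/i2c/tda998x_drv.c
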
Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 18
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc5
NAME = Merciless Moray

# *DOCUMENTATION*
@@ -353,9 +353,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
else if [ -x /bin/bash ]; then echo /bin/bash; \
else echo sh; fi ; fi)

HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS)
HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS)
HOST_LFS_LIBS := $(shell getconf LFS_LIBS)
HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)

HOSTCC = gcc
HOSTCXX = g++
@@ -507,11 +507,6 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA
KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif

ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y)
CC_CAN_LINK := y
export CC_CAN_LINK
endif

# The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
# Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
# CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
@@ -1717,6 +1712,6 @@ endif # skip-makefile
PHONY += FORCE
FORCE:

# Declare the contents of the .PHONY variable as phony. We keep that
# Declare the contents of the PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
.PHONY: $(PHONY)
@@ -168,7 +168,6 @@
AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */
AM33XX_IOPAD(0x9a0, PIN_INPUT | MUX_MODE4) /* mcasp0_aclkr.mmc0_sdwp */
>;
};
@@ -39,6 +39,8 @@
ti,davinci-ctrl-ram-size = <0x2000>;
ti,davinci-rmii-en = /bits/ 8 <1>;
local-mac-address = [ 00 00 00 00 00 00 ];
clocks = <&emac_ick>;
clock-names = "ick";
};

davinci_mdio: ethernet@5c030000 {

@@ -49,6 +51,8 @@
bus_freq = <1000000>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&emac_fck>;
clock-names = "fck";
};

uart4: serial@4809e000 {

@@ -87,6 +91,11 @@
};
};

/* Table Table 5-79 of the TRM shows 480ab000 is reserved */
&usb_otg_hs {
status = "disabled";
};

&iva {
status = "disabled";
};
@@ -610,6 +610,8 @@

touchscreen-size-x = <480>;
touchscreen-size-y = <272>;

wakeup-source;
};

tlv320aic3106: tlv320aic3106@1b {
@@ -547,7 +547,7 @@

thermal: thermal@e8078 {
compatible = "marvell,armada380-thermal";
reg = <0xe4078 0x4>, <0xe4074 0x4>;
reg = <0xe4078 0x4>, <0xe4070 0x8>;
status = "okay";
};
@@ -1580,7 +1580,6 @@
dr_mode = "otg";
snps,dis_u3_susphy_quirk;
snps,dis_u2_susphy_quirk;
snps,dis_metastability_quirk;
};
};

@@ -1608,6 +1607,7 @@
dr_mode = "otg";
snps,dis_u3_susphy_quirk;
snps,dis_u2_susphy_quirk;
snps,dis_metastability_quirk;
};
};
@@ -770,7 +770,7 @@

pinctrl_ts: tsgrp {
fsl,pins = <
MX51_PAD_CSI1_D8__GPIO3_12 0x85
MX51_PAD_CSI1_D8__GPIO3_12 0x04
MX51_PAD_CSI1_D9__GPIO3_13 0x85
>;
};
@@ -141,9 +141,11 @@ CONFIG_USB_STORAGE=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
CONFIG_USB_CHIPIDEA_ULPI=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_GADGET=y
CONFIG_USB_ETH=m
CONFIG_USB_ULPI_BUS=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y

@@ -302,6 +302,7 @@ CONFIG_USB_STORAGE=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
CONFIG_USB_CHIPIDEA_ULPI=y
CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_FTDI_SIO=m

@@ -338,6 +339,7 @@ CONFIG_USB_GADGETFS=m
CONFIG_USB_FUNCTIONFS=m
CONFIG_USB_MASS_STORAGE=m
CONFIG_USB_G_SERIAL=m
CONFIG_USB_ULPI_BUS=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
@ -272,9 +272,11 @@
|
|||
* Allocate stack space to store 128 bytes worth of tweaks. For
|
||||
* performance, this space is aligned to a 16-byte boundary so that we
|
||||
* can use the load/store instructions that declare 16-byte alignment.
|
||||
* For Thumb2 compatibility, don't do the 'bic' directly on 'sp'.
|
||||
*/
|
||||
sub sp, #128
|
||||
bic sp, #0xf
|
||||
sub r12, sp, #128
|
||||
bic r12, #0xf
|
||||
mov sp, r12
|
||||
|
||||
.if \n == 64
|
||||
// Load first tweak
|
||||
|
|
|
@ -1 +1,4 @@
|
|||
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
|
||||
|
||||
# tf_generic_smc() fails to build with -fsanitize-coverage=trace-pc
|
||||
KCOV_INSTRUMENT := n
|
||||
|
|
|
@ -177,7 +177,7 @@ M_CLASS(streq r3, [r12, #PMSAv8_MAIR1])
|
|||
bic r0, r0, #CR_I
|
||||
#endif
|
||||
mcr p15, 0, r0, c1, c0, 0 @ write control reg
|
||||
isb
|
||||
instr_sync
|
||||
#elif defined (CONFIG_CPU_V7M)
|
||||
#ifdef CONFIG_ARM_MPU
|
||||
ldreq r3, [r12, MPU_CTRL]
|
||||
|
|
|
@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void)
|
|||
static inline void omap5_erratum_workaround_801819(void) { }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
|
||||
/*
|
||||
* Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
|
||||
* ICIALLU) to activate the workaround for secondary Core.
|
||||
* NOTE: it is assumed that the primary core's configuration is done
|
||||
* by the boot loader (kernel will detect a misconfiguration and complain
|
||||
* if this is not done).
|
||||
*
|
||||
* In General Purpose(GP) devices, ACR bit settings can only be done
|
||||
* by ROM code in "secure world" using the smc call and there is no
|
||||
* option to update the "firmware" on such devices. This also works for
|
||||
* High security(HS) devices, as a backup option in case the
|
||||
* "update" is not done in the "security firmware".
|
||||
*/
|
||||
static void omap5_secondary_harden_predictor(void)
|
||||
{
|
||||
u32 acr, acr_mask;
|
||||
|
||||
asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
|
||||
|
||||
/*
|
||||
* ACTLR[0] (Enable invalidates of BTB with ICIALLU)
|
||||
*/
|
||||
acr_mask = BIT(0);
|
||||
|
||||
/* Do we already have it done.. if yes, skip expensive smc */
|
||||
if ((acr & acr_mask) == acr_mask)
|
||||
return;
|
||||
|
||||
acr |= acr_mask;
|
||||
omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
|
||||
|
||||
pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
|
||||
__func__, smp_processor_id());
|
||||
}
|
||||
#else
|
||||
static inline void omap5_secondary_harden_predictor(void) { }
|
||||
#endif
|
||||
|
||||
static void omap4_secondary_init(unsigned int cpu)
|
||||
{
|
||||
/*
|
||||
|
@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu)
|
|||
set_cntfreq();
|
||||
/* Configure ACR to disable streaming WA for 801819 */
|
||||
omap5_erratum_workaround_801819();
|
||||
/* Enable ACR to allow for ICUALLU workaround */
|
||||
omap5_secondary_harden_predictor();
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -185,7 +185,7 @@ static int pxa_irq_suspend(void)
|
|||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
|
||||
for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
|
||||
void __iomem *base = irq_base(i);
|
||||
|
||||
saved_icmr[i] = __raw_readl(base + ICMR);
|
||||
|
@ -204,7 +204,7 @@ static void pxa_irq_resume(void)
|
|||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
|
||||
for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
|
||||
void __iomem *base = irq_base(i);
|
||||
|
||||
__raw_writel(saved_icmr[i], base + ICMR);
|
||||
|
|
|
@ -736,20 +736,29 @@ static int __mark_rodata_ro(void *unused)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int kernel_set_to_readonly __read_mostly;
|
||||
|
||||
void mark_rodata_ro(void)
|
||||
{
|
||||
kernel_set_to_readonly = 1;
|
||||
stop_machine(__mark_rodata_ro, NULL, NULL);
|
||||
debug_checkwx();
|
||||
}
|
||||
|
||||
void set_kernel_text_rw(void)
|
||||
{
|
||||
if (!kernel_set_to_readonly)
|
||||
return;
|
||||
|
||||
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
|
||||
current->active_mm);
|
||||
}
|
||||
|
||||
void set_kernel_text_ro(void)
|
||||
{
|
||||
if (!kernel_set_to_readonly)
|
||||
return;
|
||||
|
||||
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
|
||||
current->active_mm);
|
||||
}
|
||||
|
|
|
@ -1844,7 +1844,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
/* there are 2 passes here */
|
||||
bpf_jit_dump(prog->len, image_size, 2, ctx.target);
|
||||
|
||||
set_memory_ro((unsigned long)header, header->pages);
|
||||
bpf_jit_binary_lock_ro(header);
|
||||
prog->bpf_func = (void *)ctx.target;
|
||||
prog->jited = 1;
|
||||
prog->jited_len = image_size;
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
#
|
||||
# Copyright (C) 1995-2001 by Russell King
|
||||
|
||||
LDFLAGS_vmlinux :=-p --no-undefined -X
|
||||
LDFLAGS_vmlinux :=--no-undefined -X
|
||||
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
|
||||
GZFLAGS :=-9
|
||||
|
||||
|
@ -60,15 +60,15 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
|
|||
KBUILD_CPPFLAGS += -mbig-endian
|
||||
CHECKFLAGS += -D__AARCH64EB__
|
||||
AS += -EB
|
||||
LD += -EB
|
||||
LDFLAGS += -maarch64linuxb
|
||||
# We must use the linux target here, since distributions don't tend to package
|
||||
# the ELF linker scripts with binutils, and this results in a build failure.
|
||||
LDFLAGS += -EB -maarch64linuxb
|
||||
UTS_MACHINE := aarch64_be
|
||||
else
|
||||
KBUILD_CPPFLAGS += -mlittle-endian
|
||||
CHECKFLAGS += -D__AARCH64EL__
|
||||
AS += -EL
|
||||
LD += -EL
|
||||
LDFLAGS += -maarch64linux
|
||||
LDFLAGS += -EL -maarch64linux # See comment above
|
||||
UTS_MACHINE := aarch64
|
||||
endif
|
||||
|
||||
|
|
|
@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
|
|||
static __must_check inline bool may_use_simd(void)
|
||||
{
|
||||
/*
|
||||
* The raw_cpu_read() is racy if called with preemption enabled.
|
||||
* This is not a bug: kernel_neon_busy is only set when
|
||||
* preemption is disabled, so we cannot migrate to another CPU
|
||||
* while it is set, nor can we migrate to a CPU where it is set.
|
||||
* So, if we find it clear on some CPU then we're guaranteed to
|
||||
* find it clear on any CPU we could migrate to.
|
||||
*
|
||||
* If we are in between kernel_neon_begin()...kernel_neon_end(),
|
||||
* the flag will be set, but preemption is also disabled, so we
|
||||
* can't migrate to another CPU and spuriously see it become
|
||||
* false.
|
||||
* kernel_neon_busy is only set while preemption is disabled,
|
||||
* and is clear whenever preemption is enabled. Since
|
||||
* this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
|
||||
* cannot change under our feet -- if it's set we cannot be
|
||||
* migrated, and if it's clear we cannot be migrated to a CPU
|
||||
* where it is set.
|
||||
*/
|
||||
return !in_irq() && !irqs_disabled() && !in_nmi() &&
|
||||
!raw_cpu_read(kernel_neon_busy);
|
||||
!this_cpu_read(kernel_neon_busy);
|
||||
}
|
||||
|
||||
#else /* ! CONFIG_KERNEL_MODE_NEON */
|
||||
|
|
|
@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
|
|||
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
|
||||
unsigned long address)
|
||||
{
|
||||
pgtable_page_dtor(page);
|
||||
__free_page(page);
|
||||
}
|
||||
|
||||
|
@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
|
|||
return page;
|
||||
}
|
||||
|
||||
extern inline void pte_free(struct mm_struct *mm, struct page *page)
|
||||
static inline void pte_free(struct mm_struct *mm, struct page *page)
|
||||
{
|
||||
pgtable_page_dtor(page);
|
||||
__free_page(page);
|
||||
}
|
||||
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include <linux/kallsyms.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/prctl.h>
|
||||
#include <linux/nmi.h>
|
||||
|
||||
#include <asm/asm.h>
|
||||
#include <asm/bootinfo.h>
|
||||
|
@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp)
|
|||
return sp & ALMASK;
|
||||
}
|
||||
|
||||
static void arch_dump_stack(void *info)
|
||||
static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
|
||||
static struct cpumask backtrace_csd_busy;
|
||||
|
||||
static void handle_backtrace(void *info)
|
||||
{
|
||||
struct pt_regs *regs;
|
||||
nmi_cpu_backtrace(get_irq_regs());
|
||||
cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
|
||||
}
|
||||
|
||||
regs = get_irq_regs();
|
||||
static void raise_backtrace(cpumask_t *mask)
|
||||
{
|
||||
call_single_data_t *csd;
|
||||
int cpu;
|
||||
|
||||
if (regs)
|
||||
show_regs(regs);
|
||||
for_each_cpu(cpu, mask) {
|
||||
/*
|
||||
* If we previously sent an IPI to the target CPU & it hasn't
|
||||
* cleared its bit in the busy cpumask then it didn't handle
|
||||
* our previous IPI & it's not safe for us to reuse the
|
||||
* call_single_data_t.
|
||||
*/
|
||||
if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
|
||||
pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
|
||||
cpu);
|
||||
continue;
|
||||
}
|
||||
|
||||
dump_stack();
|
||||
csd = &per_cpu(backtrace_csd, cpu);
|
||||
csd->func = handle_backtrace;
|
||||
smp_call_function_single_async(cpu, csd);
|
||||
}
|
||||
}
|
||||
|
||||
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
|
||||
{
|
||||
long this_cpu = get_cpu();
|
||||
|
||||
if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
|
||||
dump_stack();
|
||||
|
||||
smp_call_function_many(mask, arch_dump_stack, NULL, 1);
|
||||
|
||||
put_cpu();
|
||||
nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
|
||||
}
|
||||
|
||||
int mips_get_process_fp_mode(struct task_struct *task)
|
||||
|
|
|
@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
|
|||
void show_regs(struct pt_regs *regs)
|
||||
{
|
||||
__show_regs((struct pt_regs *)regs);
|
||||
dump_stack();
|
||||
}
|
||||
|
||||
void show_registers(struct pt_regs *regs)
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
#include <linux/export.h>
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
|
|||
return error;
|
||||
}
|
||||
|
||||
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
|
||||
void *arg)
|
||||
{
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
if (pfn_valid(start_pfn + i) &&
|
||||
!PageReserved(pfn_to_page(start_pfn + i)))
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Generic mapping function (not visible outside):
|
||||
*/
|
||||
|
@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
|
|||
|
||||
void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
|
||||
{
|
||||
unsigned long offset, pfn, last_pfn;
|
||||
struct vm_struct * area;
|
||||
unsigned long offset;
|
||||
phys_addr_t last_addr;
|
||||
void * addr;
|
||||
|
||||
|
@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
|
|||
return (void __iomem *) CKSEG1ADDR(phys_addr);
|
||||
|
||||
/*
|
||||
* Don't allow anybody to remap normal RAM that we're using..
|
||||
* Don't allow anybody to remap RAM that may be allocated by the page
|
||||
* allocator, since that could lead to races & data clobbering.
|
||||
*/
|
||||
if (phys_addr < virt_to_phys(high_memory)) {
|
||||
char *t_addr, *t_end;
|
||||
struct page *page;
|
||||
|
||||
t_addr = __va(phys_addr);
|
||||
t_end = t_addr + (size - 1);
|
||||
|
||||
for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
|
||||
if(!PageReserved(page))
|
||||
return NULL;
|
||||
pfn = PFN_DOWN(phys_addr);
|
||||
last_pfn = PFN_DOWN(last_addr);
|
||||
if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
|
||||
__ioremap_check_ram) == 1) {
|
||||
WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
|
||||
&phys_addr, &last_addr);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -98,8 +98,12 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
|
|||
__free_page(pte);
|
||||
}
|
||||
|
||||
#define __pte_free_tlb(tlb, pte, addr) \
|
||||
do { \
|
||||
pgtable_page_dtor(pte); \
|
||||
tlb_remove_page((tlb), (pte)); \
|
||||
} while (0)
|
||||
|
||||
#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
|
||||
#define pmd_pgtable(pmd) pmd_page(pmd)
|
||||
|
||||
#define check_pgt_cache() do { } while (0)
|
||||
|
|
|
@ -277,12 +277,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
|
|||
l.addi r3,r1,0 // pt_regs
|
||||
/* r4 set be EXCEPTION_HANDLE */ // effective address of fault
|
||||
|
||||
/*
|
||||
* __PHX__: TODO
|
||||
*
|
||||
* all this can be written much simpler. look at
|
||||
* DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
|
||||
*/
|
||||
#ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
|
||||
l.lwz r6,PT_PC(r3) // address of an offending insn
|
||||
l.lwz r6,0(r6) // instruction that caused pf
|
||||
|
@ -314,7 +308,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
|
|||
|
||||
#else
|
||||
|
||||
l.lwz r6,PT_SR(r3) // SR
|
||||
l.mfspr r6,r0,SPR_SR // SR
|
||||
l.andi r6,r6,SPR_SR_DSX // check for delay slot exception
|
||||
l.sfne r6,r0 // exception happened in delay slot
|
||||
l.bnf 7f
|
||||
|
|
|
@ -210,8 +210,7 @@
|
|||
* r4 - EEAR exception EA
|
||||
* r10 - current pointing to current_thread_info struct
|
||||
* r12 - syscall 0, since we didn't come from syscall
|
||||
* r13 - temp it actually contains new SR, not needed anymore
|
||||
* r31 - handler address of the handler we'll jump to
|
||||
* r30 - handler address of the handler we'll jump to
|
||||
*
|
||||
* handler has to save remaining registers to the exception
|
||||
* ksp frame *before* tainting them!
|
||||
|
@ -244,6 +243,7 @@
|
|||
/* r1 is KSP, r30 is __pa(KSP) */ ;\
|
||||
tophys (r30,r1) ;\
|
||||
l.sw PT_GPR12(r30),r12 ;\
|
||||
/* r4 use for tmp before EA */ ;\
|
||||
l.mfspr r12,r0,SPR_EPCR_BASE ;\
|
||||
l.sw PT_PC(r30),r12 ;\
|
||||
l.mfspr r12,r0,SPR_ESR_BASE ;\
|
||||
|
@ -263,7 +263,10 @@
|
|||
/* r12 == 1 if we come from syscall */ ;\
|
||||
CLEAR_GPR(r12) ;\
|
||||
/* ----- turn on MMU ----- */ ;\
|
||||
l.ori r30,r0,(EXCEPTION_SR) ;\
|
||||
/* Carry DSX into exception SR */ ;\
|
||||
l.mfspr r30,r0,SPR_SR ;\
|
||||
l.andi r30,r30,SPR_SR_DSX ;\
|
||||
l.ori r30,r30,(EXCEPTION_SR) ;\
|
||||
l.mtspr r0,r30,SPR_ESR_BASE ;\
|
||||
/* r30: EA address of handler */ ;\
|
||||
LOAD_SYMBOL_2_GPR(r30,handler) ;\
|
||||
|
|
|
@ -300,7 +300,7 @@ static inline int in_delay_slot(struct pt_regs *regs)
|
|||
return 0;
|
||||
}
|
||||
#else
|
||||
return regs->sr & SPR_SR_DSX;
|
||||
return mfspr(SPR_SR) & SPR_SR_DSX;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
|
@ -107,6 +107,7 @@ config ARCH_RV32I
|
|||
select GENERIC_LIB_ASHLDI3
|
||||
select GENERIC_LIB_ASHRDI3
|
||||
select GENERIC_LIB_LSHRDI3
|
||||
select GENERIC_LIB_UCMPDI2
|
||||
|
||||
config ARCH_RV64I
|
||||
bool "RV64I"
|
||||
|
|
|
@ -21,8 +21,13 @@ typedef struct user_regs_struct elf_gregset_t;
|
|||
|
||||
typedef union __riscv_fp_state elf_fpregset_t;
|
||||
|
||||
#define ELF_RISCV_R_SYM(r_info) ((r_info) >> 32)
|
||||
#define ELF_RISCV_R_TYPE(r_info) ((r_info) & 0xffffffff)
|
||||
#if __riscv_xlen == 64
|
||||
#define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info)
|
||||
#define ELF_RISCV_R_TYPE(r_info) ELF64_R_TYPE(r_info)
|
||||
#else
|
||||
#define ELF_RISCV_R_SYM(r_info) ELF32_R_SYM(r_info)
|
||||
#define ELF_RISCV_R_TYPE(r_info) ELF32_R_TYPE(r_info)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* RISC-V relocation types
|
||||
|
|
|
@ -16,10 +16,6 @@
|
|||
#include <linux/irqchip.h>
|
||||
#include <linux/irqdomain.h>
|
||||
|
||||
#ifdef CONFIG_RISCV_INTC
|
||||
#include <linux/irqchip/irq-riscv-intc.h>
|
||||
#endif
|
||||
|
||||
void __init init_IRQ(void)
|
||||
{
|
||||
irqchip_init();
|
||||
|
|
|
@ -37,7 +37,7 @@ static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
|
|||
static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
|
||||
Elf_Addr v)
|
||||
{
|
||||
s64 offset = (void *)v - (void *)location;
|
||||
ptrdiff_t offset = (void *)v - (void *)location;
|
||||
u32 imm12 = (offset & 0x1000) << (31 - 12);
|
||||
u32 imm11 = (offset & 0x800) >> (11 - 7);
|
||||
u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
|
||||
|
@ -50,7 +50,7 @@ static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
|
|||
static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
|
||||
Elf_Addr v)
|
||||
{
|
||||
s64 offset = (void *)v - (void *)location;
|
||||
ptrdiff_t offset = (void *)v - (void *)location;
|
||||
u32 imm20 = (offset & 0x100000) << (31 - 20);
|
||||
u32 imm19_12 = (offset & 0xff000);
|
||||
u32 imm11 = (offset & 0x800) << (20 - 11);
|
||||
|
@ -63,7 +63,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
|
|||
static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
|
||||
Elf_Addr v)
|
||||
{
|
||||
s64 offset = (void *)v - (void *)location;
|
||||
ptrdiff_t offset = (void *)v - (void *)location;
|
||||
u16 imm8 = (offset & 0x100) << (12 - 8);
|
||||
u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
|
||||
u16 imm5 = (offset & 0x20) >> (5 - 2);
|
||||
|
@ -78,7 +78,7 @@ static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
|
|||
static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
|
||||
Elf_Addr v)
|
||||
{
|
||||
s64 offset = (void *)v - (void *)location;
|
||||
ptrdiff_t offset = (void *)v - (void *)location;
|
||||
u16 imm11 = (offset & 0x800) << (12 - 11);
|
||||
u16 imm10 = (offset & 0x400) >> (10 - 8);
|
||||
u16 imm9_8 = (offset & 0x300) << (12 - 11);
|
||||
|
@ -96,7 +96,7 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
|
|||
static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
|
||||
Elf_Addr v)
|
||||
{
|
||||
s64 offset = (void *)v - (void *)location;
|
||||
ptrdiff_t offset = (void *)v - (void *)location;
|
||||
s32 hi20;
|
||||
|
||||
if (offset != (s32)offset) {
|
||||
|
@ -178,7 +178,7 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
|
|||
static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
|
||||
Elf_Addr v)
|
||||
{
|
||||
s64 offset = (void *)v - (void *)location;
|
||||
ptrdiff_t offset = (void *)v - (void *)location;
|
||||
s32 hi20;
|
||||
|
||||
/* Always emit the got entry */
|
||||
|
@ -200,7 +200,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
|
|||
static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
|
||||
Elf_Addr v)
|
||||
{
|
||||
s64 offset = (void *)v - (void *)location;
|
||||
ptrdiff_t offset = (void *)v - (void *)location;
|
||||
s32 fill_v = offset;
|
||||
u32 hi20, lo12;
|
||||
|
||||
|
@ -227,7 +227,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
|
|||
static int apply_r_riscv_call_rela(struct module *me, u32 *location,
|
||||
Elf_Addr v)
|
||||
{
|
||||
s64 offset = (void *)v - (void *)location;
|
||||
ptrdiff_t offset = (void *)v - (void *)location;
|
||||
s32 fill_v = offset;
|
||||
u32 hi20, lo12;
|
||||
|
||||
|
@ -263,14 +263,14 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location,
|
|||
static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
|
||||
Elf_Addr v)
|
||||
{
|
||||
*(u32 *)location += (*(u32 *)v);
|
||||
*(u32 *)location += (u32)v;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
|
||||
Elf_Addr v)
|
||||
{
|
||||
*(u32 *)location -= (*(u32 *)v);
|
||||
*(u32 *)location -= (u32)v;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -347,7 +347,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
|
|||
unsigned int j;
|
||||
|
||||
for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
|
||||
u64 hi20_loc =
|
||||
unsigned long hi20_loc =
|
||||
sechdrs[sechdrs[relsec].sh_info].sh_addr
|
||||
+ rel[j].r_offset;
|
||||
u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);
|
||||
|
@ -360,12 +360,12 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
|
|||
Elf_Sym *hi20_sym =
|
||||
(Elf_Sym *)sechdrs[symindex].sh_addr
|
||||
+ ELF_RISCV_R_SYM(rel[j].r_info);
|
||||
u64 hi20_sym_val =
|
||||
unsigned long hi20_sym_val =
|
||||
hi20_sym->st_value
|
||||
+ rel[j].r_addend;
|
||||
|
||||
/* Calculate lo12 */
|
||||
u64 offset = hi20_sym_val - hi20_loc;
|
||||
size_t offset = hi20_sym_val - hi20_loc;
|
||||
if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
|
||||
&& hi20_type == R_RISCV_GOT_HI20) {
|
||||
offset = module_emit_got_entry(
|
||||
|
|
|
@ -50,7 +50,7 @@ static int riscv_gpr_set(struct task_struct *target,
|
|||
struct pt_regs *regs;
|
||||
|
||||
regs = task_pt_regs(target);
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®s, 0, -1);
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -220,8 +220,3 @@ void __init setup_arch(char **cmdline_p)
|
|||
riscv_fill_hwcap();
|
||||
}
|
||||
|
||||
static int __init riscv_device_init(void)
|
||||
{
|
||||
return of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
|
||||
}
|
||||
subsys_initcall_sync(riscv_device_init);
|
||||
|
|
|
@ -28,7 +28,9 @@ static void __init zone_sizes_init(void)
|
|||
{
|
||||
unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
|
||||
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
|
||||
#endif
|
||||
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
|
||||
|
||||
free_area_init_nodes(max_zone_pfns);
|
||||
|
|
|
@ -160,6 +160,7 @@ config S390
|
|||
select HAVE_OPROFILE
|
||||
select HAVE_PERF_EVENTS
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
select HAVE_RSEQ
|
||||
select HAVE_SYSCALL_TRACEPOINTS
|
||||
select HAVE_VIRT_CPU_ACCOUNTING
|
||||
select MODULES_USE_ELF_RELA
|
||||
|
|
|
@ -183,3 +183,4 @@ COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
|
|||
COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
|
||||
COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags);
|
||||
COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags)
|
||||
COMPAT_SYSCALL_WRAP4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig)
|
||||
|
|
|
@ -357,6 +357,10 @@ ENTRY(system_call)
|
|||
stg %r2,__PT_R2(%r11) # store return value
|
||||
|
||||
.Lsysc_return:
|
||||
#ifdef CONFIG_DEBUG_RSEQ
|
||||
lgr %r2,%r11
|
||||
brasl %r14,rseq_syscall
|
||||
#endif
|
||||
LOCKDEP_SYS_EXIT
|
||||
.Lsysc_tif:
|
||||
TSTMSK __PT_FLAGS(%r11),_PIF_WORK
|
||||
|
@ -1265,7 +1269,7 @@ cleanup_critical:
|
|||
jl 0f
|
||||
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
|
||||
jl .Lcleanup_load_fpu_regs
|
||||
0: BR_EX %r14
|
||||
0: BR_EX %r14,%r11
|
||||
|
||||
.align 8
|
||||
.Lcleanup_table:
|
||||
|
@ -1301,7 +1305,7 @@ cleanup_critical:
|
|||
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
|
||||
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
|
||||
larl %r9,sie_exit # skip forward to sie_exit
|
||||
BR_EX %r14
|
||||
BR_EX %r14,%r11
|
||||
#endif
|
||||
|
||||
.Lcleanup_system_call:
|
||||
|
|
|
@ -498,7 +498,7 @@ void do_signal(struct pt_regs *regs)
|
|||
}
|
||||
/* No longer in a system call */
|
||||
clear_pt_regs_flag(regs, PIF_SYSCALL);
|
||||
|
||||
rseq_signal_deliver(&ksig, regs);
|
||||
if (is_compat_task())
|
||||
handle_signal32(&ksig, oldset, regs);
|
||||
else
|
||||
|
@ -537,4 +537,5 @@ void do_notify_resume(struct pt_regs *regs)
|
|||
{
|
||||
clear_thread_flag(TIF_NOTIFY_RESUME);
|
||||
tracehook_notify_resume(regs);
|
||||
rseq_handle_notify_resume(NULL, regs);
|
||||
}
|
||||
|
|
|
@ -389,3 +389,5 @@
|
|||
379 common statx sys_statx compat_sys_statx
|
||||
380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi
|
||||
381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load
|
||||
382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
|
||||
383 common rseq sys_rseq compat_sys_rseq
|
||||
|
|
|
@ -252,6 +252,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
|
|||
spin_unlock_bh(&mm->context.lock);
|
||||
if (mask != 0)
|
||||
return;
|
||||
} else {
|
||||
atomic_xor_bits(&page->_refcount, 3U << 24);
|
||||
}
|
||||
|
||||
pgtable_page_dtor(page);
|
||||
|
@ -304,6 +306,8 @@ static void __tlb_remove_table(void *_table)
|
|||
break;
|
||||
/* fallthrough */
|
||||
case 3: /* 4K page table with pgstes */
|
||||
if (mask & 3)
|
||||
atomic_xor_bits(&page->_refcount, 3 << 24);
|
||||
pgtable_page_dtor(page);
|
||||
__free_page(page);
|
||||
break;
|
||||
|
|
|
@ -1286,6 +1286,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
goto free_addrs;
|
||||
}
|
||||
if (bpf_jit_prog(&jit, fp)) {
|
||||
bpf_jit_binary_free(header);
|
||||
fp = orig_fp;
|
||||
goto free_addrs;
|
||||
}
|
||||
|
|
|
@ -114,18 +114,12 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
|
|||
struct pci_setup_rom *rom = NULL;
|
||||
efi_status_t status;
|
||||
unsigned long size;
|
||||
uint64_t attributes, romsize;
|
||||
uint64_t romsize;
|
||||
void *romimage;
|
||||
|
||||
status = efi_call_proto(efi_pci_io_protocol, attributes, pci,
|
||||
EfiPciIoAttributeOperationGet, 0ULL,
|
||||
&attributes);
|
||||
if (status != EFI_SUCCESS)
|
||||
return status;
|
||||
|
||||
/*
|
||||
* Some firmware images contain EFI function pointers at the place where the
|
||||
* romimage and romsize fields are supposed to be. Typically the EFI
|
||||
* Some firmware images contain EFI function pointers at the place where
|
||||
* the romimage and romsize fields are supposed to be. Typically the EFI
|
||||
* code is mapped at high addresses, translating to an unrealistically
|
||||
* large romsize. The UEFI spec limits the size of option ROMs to 16
|
||||
* MiB so we reject any ROMs over 16 MiB in size to catch this.
|
||||
|
|
|
@ -535,6 +535,7 @@ ENTRY(crypto_aegis128_aesni_enc_tail)
|
|||
movdqu STATE3, 0x40(STATEP)
|
||||
|
||||
FRAME_END
|
||||
ret
|
||||
ENDPROC(crypto_aegis128_aesni_enc_tail)
|
||||
|
||||
.macro decrypt_block a s0 s1 s2 s3 s4 i
|
||||
|
|
|
@ -645,6 +645,7 @@ ENTRY(crypto_aegis128l_aesni_enc_tail)
|
|||
state_store0
|
||||
|
||||
FRAME_END
|
||||
ret
|
||||
ENDPROC(crypto_aegis128l_aesni_enc_tail)
|
||||
|
||||
/*
|
||||
|
|
|
@ -543,6 +543,7 @@ ENTRY(crypto_aegis256_aesni_enc_tail)
|
|||
state_store0
|
||||
|
||||
FRAME_END
|
||||
ret
|
||||
ENDPROC(crypto_aegis256_aesni_enc_tail)
|
||||
|
||||
/*
|
||||
|
|
|
@ -453,6 +453,7 @@ ENTRY(crypto_morus1280_avx2_enc_tail)
|
|||
vmovdqu STATE4, (4 * 32)(%rdi)
|
||||
|
||||
FRAME_END
|
||||
ret
|
||||
ENDPROC(crypto_morus1280_avx2_enc_tail)
|
||||
|
||||
/*
|
||||
|
|
|
@ -652,6 +652,7 @@ ENTRY(crypto_morus1280_sse2_enc_tail)
|
|||
movdqu STATE4_HI, (9 * 16)(%rdi)
|
||||
|
||||
FRAME_END
|
||||
ret
|
||||
ENDPROC(crypto_morus1280_sse2_enc_tail)
|
||||
|
||||
/*
|
||||
|
|
|
@ -437,6 +437,7 @@ ENTRY(crypto_morus640_sse2_enc_tail)
|
|||
movdqu STATE4, (4 * 16)(%rdi)
|
||||
|
||||
FRAME_END
|
||||
ret
|
||||
ENDPROC(crypto_morus640_sse2_enc_tail)
|
||||
|
||||
/*
|
||||
|
|
|
@ -114,6 +114,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
|
|||
ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
|
||||
nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
|
||||
}
|
||||
if (nr_bank < 0)
|
||||
goto ipi_mask_ex_done;
|
||||
if (!nr_bank)
|
||||
ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
|
||||
|
||||
|
@ -158,6 +160,9 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
|
|||
|
||||
for_each_cpu(cur_cpu, mask) {
|
||||
vcpu = hv_cpu_number_to_vp_number(cur_cpu);
|
||||
if (vcpu == VP_INVAL)
|
||||
goto ipi_mask_done;
|
||||
|
||||
/*
|
||||
* This particular version of the IPI hypercall can
|
||||
* only target upto 64 CPUs.
|
||||
|
|
|
@ -265,7 +265,7 @@ void __init hyperv_init(void)
|
|||
{
|
||||
u64 guest_id, required_msrs;
|
||||
union hv_x64_msr_hypercall_contents hypercall_msr;
|
||||
int cpuhp;
|
||||
int cpuhp, i;
|
||||
|
||||
if (x86_hyper_type != X86_HYPER_MS_HYPERV)
|
||||
return;
|
||||
|
@ -293,6 +293,9 @@ void __init hyperv_init(void)
|
|||
if (!hv_vp_index)
|
||||
return;
|
||||
|
||||
for (i = 0; i < num_possible_cpus(); i++)
|
||||
hv_vp_index[i] = VP_INVAL;
|
||||
|
||||
hv_vp_assist_page = kcalloc(num_possible_cpus(),
|
||||
sizeof(*hv_vp_assist_page), GFP_KERNEL);
|
||||
if (!hv_vp_assist_page) {
|
||||
|
|
|
@ -46,6 +46,65 @@
|
|||
#define _ASM_SI __ASM_REG(si)
|
||||
#define _ASM_DI __ASM_REG(di)
|
||||
|
||||
#ifndef __x86_64__
|
||||
/* 32 bit */
|
||||
|
||||
#define _ASM_ARG1 _ASM_AX
|
||||
#define _ASM_ARG2 _ASM_DX
|
||||
#define _ASM_ARG3 _ASM_CX
|
||||
|
||||
#define _ASM_ARG1L eax
|
||||
#define _ASM_ARG2L edx
|
||||
#define _ASM_ARG3L ecx
|
||||
|
||||
#define _ASM_ARG1W ax
|
||||
#define _ASM_ARG2W dx
|
||||
#define _ASM_ARG3W cx
|
||||
|
||||
#define _ASM_ARG1B al
|
||||
#define _ASM_ARG2B dl
|
||||
#define _ASM_ARG3B cl
|
||||
|
||||
#else
|
||||
/* 64 bit */
|
||||
|
||||
#define _ASM_ARG1 _ASM_DI
|
||||
#define _ASM_ARG2 _ASM_SI
|
||||
#define _ASM_ARG3 _ASM_DX
|
||||
#define _ASM_ARG4 _ASM_CX
|
||||
#define _ASM_ARG5 r8
|
||||
#define _ASM_ARG6 r9
|
||||
|
||||
#define _ASM_ARG1Q rdi
|
||||
#define _ASM_ARG2Q rsi
|
||||
#define _ASM_ARG3Q rdx
|
||||
#define _ASM_ARG4Q rcx
|
||||
#define _ASM_ARG5Q r8
|
||||
#define _ASM_ARG6Q r9
|
||||
|
||||
#define _ASM_ARG1L edi
|
||||
#define _ASM_ARG2L esi
|
||||
#define _ASM_ARG3L edx
|
||||
#define _ASM_ARG4L ecx
|
||||
#define _ASM_ARG5L r8d
|
||||
#define _ASM_ARG6L r9d
|
||||
|
||||
#define _ASM_ARG1W di
|
||||
#define _ASM_ARG2W si
|
||||
#define _ASM_ARG3W dx
|
||||
#define _ASM_ARG4W cx
|
||||
#define _ASM_ARG5W r8w
|
||||
#define _ASM_ARG6W r9w
|
||||
|
||||
#define _ASM_ARG1B dil
|
||||
#define _ASM_ARG2B sil
|
||||
#define _ASM_ARG3B dl
|
||||
#define _ASM_ARG4B cl
|
||||
#define _ASM_ARG5B r8b
|
||||
#define _ASM_ARG6B r9b
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Macros to generate condition code outputs from inline assembly,
|
||||
* The output operand must be type "bool".
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
* Interrupt control:
|
||||
*/
|
||||
|
||||
static inline unsigned long native_save_fl(void)
|
||||
extern inline unsigned long native_save_fl(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
|
|
|
@ -9,6 +9,8 @@
|
|||
#include <asm/hyperv-tlfs.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
#define VP_INVAL U32_MAX
|
||||
|
||||
struct ms_hyperv_info {
|
||||
u32 features;
|
||||
u32 misc_features;
|
||||
|
@ -20,7 +22,6 @@ struct ms_hyperv_info {
|
|||
|
||||
extern struct ms_hyperv_info ms_hyperv;
|
||||
|
||||
|
||||
/*
|
||||
* Generate the guest ID.
|
||||
*/
|
||||
|
@ -281,6 +282,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
|
|||
*/
|
||||
for_each_cpu(cpu, cpus) {
|
||||
vcpu = hv_cpu_number_to_vp_number(cpu);
|
||||
if (vcpu == VP_INVAL)
|
||||
return -1;
|
||||
vcpu_bank = vcpu / 64;
|
||||
vcpu_offset = vcpu % 64;
|
||||
__set_bit(vcpu_offset, (unsigned long *)
|
||||
|
|
|
@ -61,6 +61,7 @@ obj-y += alternative.o i8253.o hw_breakpoint.o
|
|||
obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
|
||||
obj-y += pci-iommu_table.o
|
||||
obj-y += resource.o
|
||||
obj-y += irqflags.o
|
||||
|
||||
obj-y += process.o
|
||||
obj-y += fpu/
|
||||
|
|
|
@ -543,7 +543,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
|
|||
nodes_per_socket = ((value >> 3) & 7) + 1;
|
||||
}
|
||||
|
||||
if (c->x86 >= 0x15 && c->x86 <= 0x17) {
|
||||
if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
|
||||
!boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
|
||||
c->x86 >= 0x15 && c->x86 <= 0x17) {
|
||||
unsigned int bit;
|
||||
|
||||
switch (c->x86) {
|
||||
|
|
|
@ -155,7 +155,8 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
|
|||
guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
|
||||
|
||||
/* SSBD controlled in MSR_SPEC_CTRL */
|
||||
if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
|
||||
if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
|
||||
static_cpu_has(X86_FEATURE_AMD_SSBD))
|
||||
hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
|
||||
|
||||
if (hostval != guestval) {
|
||||
|
@ -533,9 +534,10 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
|
|||
* Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
|
||||
* use a completely different MSR and bit dependent on family.
|
||||
*/
|
||||
if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
|
||||
if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
|
||||
!static_cpu_has(X86_FEATURE_AMD_SSBD)) {
|
||||
x86_amd_ssb_disable();
|
||||
else {
|
||||
} else {
|
||||
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
|
||||
x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
|
||||
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
|
||||
|
|
|
@ -106,7 +106,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
|
|||
|
||||
memset(line, 0, LINE_SIZE);
|
||||
|
||||
length = strncpy_from_user(line, buf, LINE_SIZE - 1);
|
||||
len = min_t(size_t, len, LINE_SIZE - 1);
|
||||
length = strncpy_from_user(line, buf, len);
|
||||
if (length < 0)
|
||||
return length;
|
||||
|
||||
|
|
|
@ -0,0 +1,26 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
|
||||
#include <asm/asm.h>
|
||||
#include <asm/export.h>
|
||||
#include <linux/linkage.h>
|
||||
|
||||
/*
|
||||
* unsigned long native_save_fl(void)
|
||||
*/
|
||||
ENTRY(native_save_fl)
|
||||
pushf
|
||||
pop %_ASM_AX
|
||||
ret
|
||||
ENDPROC(native_save_fl)
|
||||
EXPORT_SYMBOL(native_save_fl)
|
||||
|
||||
/*
|
||||
* void native_restore_fl(unsigned long flags)
|
||||
* %eax/%rdi: flags
|
||||
*/
|
||||
ENTRY(native_restore_fl)
|
||||
push %_ASM_ARG1
|
||||
popf
|
||||
ret
|
||||
ENDPROC(native_restore_fl)
|
||||
EXPORT_SYMBOL(native_restore_fl)
|
|
@ -221,6 +221,11 @@ static void notrace start_secondary(void *unused)
|
|||
#ifdef CONFIG_X86_32
|
||||
/* switch away from the initial page table */
|
||||
load_cr3(swapper_pg_dir);
|
||||
/*
|
||||
* Initialize the CR4 shadow before doing anything that could
|
||||
* try to read it.
|
||||
*/
|
||||
cr4_init_shadow();
|
||||
__flush_tlb_all();
|
||||
#endif
|
||||
load_current_idt();
|
||||
|
|
|
@ -6,7 +6,7 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
|
|||
targets += $(purgatory-y)
|
||||
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
|
||||
|
||||
$(obj)/sha256.o: $(srctree)/lib/sha256.c
|
||||
$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
|
||||
$(call if_changed_rule,cc_o_c)
|
||||
|
||||
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
|
||||
|
|
|
@ -1207,12 +1207,20 @@ asmlinkage __visible void __init xen_start_kernel(void)
|
|||
|
||||
xen_setup_features();
|
||||
|
||||
xen_setup_machphys_mapping();
|
||||
|
||||
/* Install Xen paravirt ops */
|
||||
pv_info = xen_info;
|
||||
pv_init_ops.patch = paravirt_patch_default;
|
||||
pv_cpu_ops = xen_cpu_ops;
|
||||
xen_init_irq_ops();
|
||||
|
||||
/*
|
||||
* Setup xen_vcpu early because it is needed for
|
||||
* local_irq_disable(), irqs_disabled(), e.g. in printk().
|
||||
*
|
||||
* Don't do the full vcpu_info placement stuff until we have
|
||||
* the cpu_possible_mask and a non-dummy shared_info.
|
||||
*/
|
||||
xen_vcpu_info_reset(0);
|
||||
|
||||
x86_platform.get_nmi_reason = xen_get_nmi_reason;
|
||||
|
||||
|
@ -1225,10 +1233,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
|
|||
* Set up some pagetable state before starting to set any ptes.
|
||||
*/
|
||||
|
||||
xen_setup_machphys_mapping();
|
||||
xen_init_mmu_ops();
|
||||
|
||||
/* Prevent unwanted bits from being set in PTEs. */
|
||||
__supported_pte_mask &= ~_PAGE_GLOBAL;
|
||||
__default_kernel_pte_mask &= ~_PAGE_GLOBAL;
|
||||
|
||||
/*
|
||||
* Prevent page tables from being allocated in highmem, even
|
||||
|
@ -1249,20 +1259,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
|
|||
get_cpu_cap(&boot_cpu_data);
|
||||
x86_configure_nx();
|
||||
|
||||
xen_init_irq_ops();
|
||||
|
||||
/* Let's presume PV guests always boot on vCPU with id 0. */
|
||||
per_cpu(xen_vcpu_id, 0) = 0;
|
||||
|
||||
/*
|
||||
* Setup xen_vcpu early because idt_setup_early_handler needs it for
|
||||
* local_irq_disable(), irqs_disabled().
|
||||
*
|
||||
* Don't do the full vcpu_info placement stuff until we have
|
||||
* the cpu_possible_mask and a non-dummy shared_info.
|
||||
*/
|
||||
xen_vcpu_info_reset(0);
|
||||
|
||||
idt_setup_early_handler();
|
||||
|
||||
xen_init_capabilities();
|
||||
|
|
|
@ -128,8 +128,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
|
|||
|
||||
void __init xen_init_irq_ops(void)
|
||||
{
|
||||
/* For PVH we use default pv_irq_ops settings. */
|
||||
if (!xen_feature(XENFEAT_hvm_callback_vector))
|
||||
pv_irq_ops = xen_irq_ops;
|
||||
pv_irq_ops = xen_irq_ops;
|
||||
x86_init.irqs.intr_init = xen_init_IRQ;
|
||||
}
|
||||
|
|
|
@ -267,8 +267,6 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
|
|||
} else if (hdr->din_xfer_len) {
|
||||
ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
|
||||
hdr->din_xfer_len, GFP_KERNEL);
|
||||
} else {
|
||||
ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL);
|
||||
}
|
||||
|
||||
if (ret)
|
||||
|
|
|
@ -51,16 +51,23 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
|
|||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*
|
||||
* 1) Disable all GPEs
|
||||
* 2) Enable all wakeup GPEs
|
||||
*/
|
||||
/* Disable all GPEs */
|
||||
status = acpi_hw_disable_all_gpes();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
/*
|
||||
* If the target sleep state is S5, clear all GPEs and fixed events too
|
||||
*/
|
||||
if (sleep_state == ACPI_STATE_S5) {
|
||||
status = acpi_hw_clear_acpi_status();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
}
|
||||
acpi_gbl_system_awake_and_running = FALSE;
|
||||
|
||||
/* Enable all wakeup GPEs */
|
||||
status = acpi_hw_enable_all_wakeup_gpes();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
|
|
|
@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
|
|||
switch (lookup_status) {
|
||||
case AE_ALREADY_EXISTS:
|
||||
|
||||
acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
|
||||
acpi_os_printf(ACPI_MSG_BIOS_ERROR);
|
||||
message = "Failure creating";
|
||||
break;
|
||||
|
||||
case AE_NOT_FOUND:
|
||||
|
||||
acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
|
||||
acpi_os_printf(ACPI_MSG_BIOS_ERROR);
|
||||
message = "Could not resolve";
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
acpi_os_printf("\n" ACPI_MSG_ERROR);
|
||||
acpi_os_printf(ACPI_MSG_ERROR);
|
||||
message = "Failure resolving";
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -717,10 +717,11 @@ void battery_hook_register(struct acpi_battery_hook *hook)
|
|||
*/
|
||||
pr_err("extension failed to load: %s", hook->name);
|
||||
__battery_hook_unregister(hook, 0);
|
||||
return;
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
pr_info("new extension: %s\n", hook->name);
|
||||
end:
|
||||
mutex_unlock(&hook_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(battery_hook_register);
|
||||
|
@ -732,7 +733,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register);
|
|||
*/
|
||||
static void battery_hook_add_battery(struct acpi_battery *battery)
|
||||
{
|
||||
struct acpi_battery_hook *hook_node;
|
||||
struct acpi_battery_hook *hook_node, *tmp;
|
||||
|
||||
mutex_lock(&hook_mutex);
|
||||
INIT_LIST_HEAD(&battery->list);
|
||||
|
@ -744,15 +745,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
|
|||
* when a battery gets hotplugged or initialized
|
||||
* during the battery module initialization.
|
||||
*/
|
||||
list_for_each_entry(hook_node, &battery_hook_list, list) {
|
||||
list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
|
||||
if (hook_node->add_battery(battery->bat)) {
|
||||
/*
|
||||
* The notification of the extensions has failed, to
|
||||
* prevent further errors we will unload the extension.
|
||||
*/
|
||||
__battery_hook_unregister(hook_node, 0);
|
||||
pr_err("error in extension, unloading: %s",
|
||||
hook_node->name);
|
||||
__battery_hook_unregister(hook_node, 0);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&hook_mutex);
|
||||
|
|
|
@@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	const guid_t *guid;
 	int rc, i;
 
+	if (cmd_rc)
+		*cmd_rc = -EINVAL;
 	func = cmd;
 	if (cmd == ND_CMD_CALL) {
 		call_pkg = buf;
@@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		 * If we return an error (like elsewhere) then caller wouldn't
 		 * be able to rely upon data returned to make calculation.
 		 */
+		if (cmd_rc)
+			*cmd_rc = 0;
 		return 0;
 	}
 
@@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev,
 
 		mutex_lock(&acpi_desc->init_mutex);
 		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
-				work_busy(&acpi_desc->dwork.work)
+				acpi_desc->scrub_busy
 				&& !acpi_desc->cancel ? "+\n" : "\n");
 		mutex_unlock(&acpi_desc->init_mutex);
 	}
@@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
 	return 0;
 }
 
+static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
+{
+	lockdep_assert_held(&acpi_desc->init_mutex);
+
+	acpi_desc->scrub_busy = 1;
+	/* note this should only be set from within the workqueue */
+	if (tmo)
+		acpi_desc->scrub_tmo = tmo;
+	queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
+}
+
+static void sched_ars(struct acpi_nfit_desc *acpi_desc)
+{
+	__sched_ars(acpi_desc, 0);
+}
+
+static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
+{
+	lockdep_assert_held(&acpi_desc->init_mutex);
+
+	acpi_desc->scrub_busy = 0;
+	acpi_desc->scrub_count++;
+	if (acpi_desc->scrub_count_state)
+		sysfs_notify_dirent(acpi_desc->scrub_count_state);
+}
+
 static void acpi_nfit_scrub(struct work_struct *work)
 {
 	struct acpi_nfit_desc *acpi_desc;
@@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work)
 	mutex_lock(&acpi_desc->init_mutex);
 	query_rc = acpi_nfit_query_poison(acpi_desc);
 	tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
-	if (tmo) {
-		queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
-		acpi_desc->scrub_tmo = tmo;
-	} else {
-		acpi_desc->scrub_count++;
-		if (acpi_desc->scrub_count_state)
-			sysfs_notify_dirent(acpi_desc->scrub_count_state);
-	}
+	if (tmo)
+		__sched_ars(acpi_desc, tmo);
+	else
+		notify_ars_done(acpi_desc);
 	memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
 	mutex_unlock(&acpi_desc->init_mutex);
 }
@@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
 			break;
 	}
 
-	queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+	sched_ars(acpi_desc);
 	return 0;
 }
 
@@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
 		}
 	}
 	if (scheduled) {
-		queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+		sched_ars(acpi_desc);
 		dev_dbg(dev, "ars_scan triggered\n");
 	}
 	mutex_unlock(&acpi_desc->init_mutex);

@@ -203,6 +203,7 @@ struct acpi_nfit_desc {
 	unsigned int max_ars;
 	unsigned int scrub_count;
 	unsigned int scrub_mode;
+	unsigned int scrub_busy:1;
 	unsigned int cancel:1;
 	unsigned long dimm_cmd_force_en;
 	unsigned long bus_cmd_force_en;

@@ -481,8 +481,14 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
 	if (cpu_node) {
 		cpu_node = acpi_find_processor_package_id(table, cpu_node,
 							  level, flag);
-		/* Only the first level has a guaranteed id */
-		if (level == 0)
+		/*
+		 * As per specification if the processor structure represents
+		 * an actual processor, then ACPI processor ID must be valid.
+		 * For processor containers ACPI_PPTT_ACPI_PROCESSOR_ID_VALID
+		 * should be set if the UID is valid
+		 */
+		if (level == 0 ||
+		    cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
 			return cpu_node->acpi_processor_id;
 		return ACPI_PTR_DIFF(cpu_node, table);
 	}

@@ -398,7 +398,6 @@ config SATA_DWC_VDEBUG
 
 config SATA_HIGHBANK
 	tristate "Calxeda Highbank SATA support"
-	depends on HAS_DMA
 	depends on ARCH_HIGHBANK || COMPILE_TEST
 	help
 	  This option enables support for the Calxeda Highbank SoC's
@@ -408,7 +407,6 @@ config SATA_HIGHBANK
 
 config SATA_MV
 	tristate "Marvell SATA support"
-	depends on HAS_DMA
 	depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
 		   ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
 	select GENERIC_PHY

@@ -400,6 +400,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
 	{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
 	{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
+	{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1280,6 +1281,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
 	return strcmp(buf, dmi->driver_data) < 0;
 }
 
+static bool ahci_broken_lpm(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		/* Various Lenovo 50 series have LPM issues with older BIOSen */
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
+			},
+			.driver_data = "20180406", /* 1.31 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
+			},
+			.driver_data = "20180420", /* 1.28 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
+			},
+			.driver_data = "20180315", /* 1.33 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
+			},
+			/*
+			 * Note date based on release notes, 2.35 has been
+			 * reported to be good, but I've been unable to get
+			 * a hold of the reporter to get the DMI BIOS date.
+			 * TODO: fix this.
+			 */
+			.driver_data = "20180310", /* 2.35 */
+		},
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(sysids);
+	int year, month, date;
+	char buf[9];
+
+	if (!dmi)
+		return false;
+
+	dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
+	return strcmp(buf, dmi->driver_data) < 0;
+}
+
 static bool ahci_broken_online(struct pci_dev *pdev)
 {
 #define ENCODE_BUSDEVFN(bus, slot, func) \
@@ -1694,6 +1748,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			"quirky BIOS, skipping spindown on poweroff\n");
 	}
 
+	if (ahci_broken_lpm(pdev)) {
+		pi.flags |= ATA_FLAG_NO_LPM;
+		dev_warn(&pdev->dev,
+			 "BIOS update required for Link Power Management support\n");
+	}
+
 	if (ahci_broken_suspend(pdev)) {
 		hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
 		dev_warn(&pdev->dev,

@@ -82,7 +82,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
  *
  * Return: 0 on success; Error code otherwise.
  */
-int ahci_mvebu_stop_engine(struct ata_port *ap)
+static int ahci_mvebu_stop_engine(struct ata_port *ap)
 {
 	void __iomem *port_mmio = ahci_port_base(ap);
 	u32 tmp, port_fbs;

@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -1146,10 +1147,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
 
 	/* get the slot number from the message */
 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
-	if (pmp < EM_MAX_SLOTS)
+	if (pmp < EM_MAX_SLOTS) {
+		pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
 		emp = &pp->em_priv[pmp];
-	else
+	} else {
 		return -EINVAL;
+	}
 
 	/* mask off the activity bits if we are in sw_activity
 	 * mode, user should turn off sw_activity before setting

@@ -2493,6 +2493,9 @@ int ata_dev_configure(struct ata_device *dev)
 	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
 		dev->horkage |= ATA_HORKAGE_NOLPM;
 
+	if (ap->flags & ATA_FLAG_NO_LPM)
+		dev->horkage |= ATA_HORKAGE_NOLPM;
+
 	if (dev->horkage & ATA_HORKAGE_NOLPM) {
 		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
 		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;

@@ -614,8 +614,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
 		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
 			struct ata_queued_cmd *qc;
 
-			for (i = 0; i < ATA_MAX_QUEUE; i++) {
-				qc = __ata_qc_from_tag(ap, i);
+			ata_qc_for_each_raw(ap, qc, i) {
 				if (qc->flags & ATA_QCFLAG_ACTIVE &&
 				    qc->scsicmd == scmd)
 					break;
@@ -818,14 +817,13 @@ EXPORT_SYMBOL_GPL(ata_port_wait_eh);
 
 static int ata_eh_nr_in_flight(struct ata_port *ap)
 {
+	struct ata_queued_cmd *qc;
 	unsigned int tag;
 	int nr = 0;
 
 	/* count only non-internal commands */
-	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-		if (ata_tag_internal(tag))
-			continue;
-		if (ata_qc_from_tag(ap, tag))
+	ata_qc_for_each(ap, qc, tag) {
+		if (qc)
 			nr++;
 	}
 
@@ -847,13 +845,13 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
 		goto out_unlock;
 
 	if (cnt == ap->fastdrain_cnt) {
+		struct ata_queued_cmd *qc;
 		unsigned int tag;
 
 		/* No progress during the last interval, tag all
 		 * in-flight qcs as timed out and freeze the port.
 		 */
-		for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+		ata_qc_for_each(ap, qc, tag) {
 			if (qc)
 				qc->err_mask |= AC_ERR_TIMEOUT;
 		}
@@ -999,6 +997,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
 
 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
 {
+	struct ata_queued_cmd *qc;
 	int tag, nr_aborted = 0;
 
 	WARN_ON(!ap->ops->error_handler);
@@ -1007,9 +1006,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
 	ata_eh_set_pending(ap, 0);
 
 	/* include internal tag in iteration */
-	for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) {
-		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
-
+	ata_qc_for_each_with_internal(ap, qc, tag) {
 		if (qc && (!link || qc->dev->link == link)) {
 			qc->flags |= ATA_QCFLAG_FAILED;
 			ata_qc_complete(qc);
@@ -1712,9 +1709,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
 		return;
 
 	/* has LLDD analyzed already? */
-	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-		qc = __ata_qc_from_tag(ap, tag);
-
+	ata_qc_for_each_raw(ap, qc, tag) {
 		if (!(qc->flags & ATA_QCFLAG_FAILED))
 			continue;
 
@@ -2136,6 +2131,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 {
 	struct ata_port *ap = link->ap;
 	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_queued_cmd *qc;
 	struct ata_device *dev;
 	unsigned int all_err_mask = 0, eflags = 0;
 	int tag, nr_failed = 0, nr_quiet = 0;
@@ -2168,9 +2164,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 
 	all_err_mask |= ehc->i.err_mask;
 
-	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+	ata_qc_for_each_raw(ap, qc, tag) {
 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
 		    ata_dev_phys_link(qc->dev) != link)
 			continue;
@@ -2436,6 +2430,7 @@ static void ata_eh_link_report(struct ata_link *link)
 {
 	struct ata_port *ap = link->ap;
 	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_queued_cmd *qc;
 	const char *frozen, *desc;
 	char tries_buf[6] = "";
 	int tag, nr_failed = 0;
@@ -2447,9 +2442,7 @@ static void ata_eh_link_report(struct ata_link *link)
 	if (ehc->i.desc[0] != '\0')
 		desc = ehc->i.desc;
 
-	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+	ata_qc_for_each_raw(ap, qc, tag) {
 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
 		    ata_dev_phys_link(qc->dev) != link ||
 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
@@ -2511,8 +2504,7 @@ static void ata_eh_link_report(struct ata_link *link)
 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
 #endif
 
-	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+	ata_qc_for_each_raw(ap, qc, tag) {
 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
 		char data_buf[20] = "";
 		char cdb_buf[70] = "";
@@ -3992,12 +3984,11 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 */
 void ata_eh_finish(struct ata_port *ap)
 {
+	struct ata_queued_cmd *qc;
 	int tag;
 
 	/* retry or finish qcs */
-	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+	ata_qc_for_each_raw(ap, qc, tag) {
 		if (!(qc->flags & ATA_QCFLAG_FAILED))
 			continue;
 

@@ -3805,10 +3805,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
 		 */
 		goto invalid_param_len;
 	}
-	if (block > dev->n_sectors)
-		goto out_of_range;
 
 	all = cdb[14] & 0x1;
+	if (all) {
+		/*
+		 * Ignore the block address (zone ID) as defined by ZBC.
+		 */
+		block = 0;
+	} else if (block >= dev->n_sectors) {
+		/*
+		 * Block must be a valid zone ID (a zone start LBA).
+		 */
+		fp = 2;
+		goto invalid_fld;
+	}
 
 	if (ata_ncq_enabled(qc->dev) &&
 	    ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
@@ -3837,10 +3847,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
 invalid_fld:
 	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
 	return 1;
-out_of_range:
-	/* "Logical Block Address out of range" */
-	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00);
-	return 1;
 invalid_param_len:
 	/* "Parameter list length error" */
 	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);

@@ -395,12 +395,6 @@ static inline unsigned int sata_fsl_tag(unsigned int tag,
 {
 	/* We let libATA core do actual (queue) tag allocation */
 
-	/* all non NCQ/queued commands should have tag#0 */
-	if (ata_tag_internal(tag)) {
-		DPRINTK("mapping internal cmds to tag#0\n");
-		return 0;
-	}
-
 	if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
 		DPRINTK("tag %d invalid : out of range\n", tag);
 		return 0;
@@ -1229,8 +1223,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
 
 	/* Workaround for data length mismatch errata */
 	if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) {
-		for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-			qc = ata_qc_from_tag(ap, tag);
+		ata_qc_for_each_with_internal(ap, qc, tag) {
 			if (qc && ata_is_atapi(qc->tf.protocol)) {
 				u32 hcontrol;
 				/* Set HControl[27] to clear error registers */

@@ -675,7 +675,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 	struct ata_port *ap = ata_shost_to_port(sdev->host);
 	struct nv_adma_port_priv *pp = ap->private_data;
 	struct nv_adma_port_priv *port0, *port1;
-	struct scsi_device *sdev0, *sdev1;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	unsigned long segment_boundary, flags;
 	unsigned short sg_tablesize;
@@ -736,8 +735,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 
 	port0 = ap->host->ports[0]->private_data;
 	port1 = ap->host->ports[1]->private_data;
-	sdev0 = ap->host->ports[0]->link.device[0].sdev;
-	sdev1 = ap->host->ports[1]->link.device[0].sdev;
 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
 		/*

@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
 	skb_queue_head_init(&iadev->rx_dma_q);
 	iadev->rx_free_desc_qhead = NULL;
 
-	iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
+	iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
 	if (!iadev->rx_open) {
 		printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
 		       dev->number);

@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 					return -EFAULT;
 				if (pool < 0 || pool > ZATM_LAST_POOL)
 					return -EINVAL;
+				pool = array_index_nospec(pool,
+							  ZATM_LAST_POOL + 1);
 				if (copy_from_user(&info,
 				    &((struct zatm_pool_req __user *) arg)->info,
 				    sizeof(info))) return -EFAULT;

@@ -2235,7 +2235,7 @@ static void genpd_dev_pm_sync(struct device *dev)
 }
 
 static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
-				 unsigned int index)
+				 unsigned int index, bool power_on)
 {
 	struct of_phandle_args pd_args;
 	struct generic_pm_domain *pd;
@@ -2271,9 +2271,11 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
 	dev->pm_domain->detach = genpd_dev_pm_detach;
 	dev->pm_domain->sync = genpd_dev_pm_sync;
 
-	genpd_lock(pd);
-	ret = genpd_power_on(pd, 0);
-	genpd_unlock(pd);
+	if (power_on) {
+		genpd_lock(pd);
+		ret = genpd_power_on(pd, 0);
+		genpd_unlock(pd);
+	}
 
 	if (ret)
 		genpd_remove_device(pd, dev);
@@ -2307,7 +2309,7 @@ int genpd_dev_pm_attach(struct device *dev)
 				       "#power-domain-cells") != 1)
 		return 0;
 
-	return __genpd_dev_pm_attach(dev, dev->of_node, 0);
+	return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
 }
 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
 
@@ -2359,14 +2361,14 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
 	}
 
 	/* Try to attach the device to the PM domain at the specified index. */
-	ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index);
+	ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
 	if (ret < 1) {
 		device_unregister(genpd_dev);
 		return ret ? ERR_PTR(ret) : NULL;
 	}
 
-	pm_runtime_set_active(genpd_dev);
 	pm_runtime_enable(genpd_dev);
+	genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
 
 	return genpd_dev;
 }

@@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio)
 		what = COMPLETED_OK;
 	}
 
-	bio_put(req->private_bio);
 	req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
+	bio_put(bio);
 
 	/* not req_mod(), we need irqsave here! */
 	spin_lock_irqsave(&device->resource->req_lock, flags);

@@ -1613,6 +1613,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
 		arg = (unsigned long) compat_ptr(arg);
 	case LOOP_SET_FD:
 	case LOOP_CHANGE_FD:
+	case LOOP_SET_BLOCK_SIZE:
 		err = lo_ioctl(bdev, mode, cmd, arg);
 		break;
 	default:

@@ -169,9 +169,9 @@ static int sysc_get_clocks(struct sysc *ddata)
 	const char *name;
 	int nr_fck = 0, nr_ick = 0, i, error = 0;
 
-	ddata->clock_roles = devm_kzalloc(ddata->dev,
-					  sizeof(*ddata->clock_roles) *
+	ddata->clock_roles = devm_kcalloc(ddata->dev,
 					  SYSC_MAX_CLOCKS,
+					  sizeof(*ddata->clock_roles),
 					  GFP_KERNEL);
 	if (!ddata->clock_roles)
 		return -ENOMEM;
@@ -200,8 +200,8 @@ static int sysc_get_clocks(struct sysc *ddata)
 		return -EINVAL;
 	}
 
-	ddata->clocks = devm_kzalloc(ddata->dev,
-				     sizeof(*ddata->clocks) * ddata->nr_clocks,
+	ddata->clocks = devm_kcalloc(ddata->dev,
+				     ddata->nr_clocks, sizeof(*ddata->clocks),
 				     GFP_KERNEL);
 	if (!ddata->clocks)
 		return -ENOMEM;

@@ -2088,8 +2088,10 @@ static int try_smi_init(struct smi_info *new_smi)
 	return 0;
 
 out_err:
-	ipmi_unregister_smi(new_smi->intf);
-	new_smi->intf = NULL;
+	if (new_smi->intf) {
+		ipmi_unregister_smi(new_smi->intf);
+		new_smi->intf = NULL;
+	}
 
 	kfree(init_name);
 

@@ -210,34 +210,23 @@ static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
 int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret = -ENODATA;
 	u8 status;
 
 	spin_lock_irqsave(&kcs_bmc->lock, flags);
 
-	if (!kcs_bmc->running) {
-		kcs_force_abort(kcs_bmc);
-		ret = -ENODEV;
-		goto out_unlock;
+	status = read_status(kcs_bmc);
+	if (status & KCS_STATUS_IBF) {
+		if (!kcs_bmc->running)
+			kcs_force_abort(kcs_bmc);
+		else if (status & KCS_STATUS_CMD_DAT)
+			kcs_bmc_handle_cmd(kcs_bmc);
+		else
+			kcs_bmc_handle_data(kcs_bmc);
+
+		ret = 0;
 	}
 
-	status = read_status(kcs_bmc) & (KCS_STATUS_IBF | KCS_STATUS_CMD_DAT);
-
-	switch (status) {
-	case KCS_STATUS_IBF | KCS_STATUS_CMD_DAT:
-		kcs_bmc_handle_cmd(kcs_bmc);
-		break;
-
-	case KCS_STATUS_IBF:
-		kcs_bmc_handle_data(kcs_bmc);
-		break;
-
-	default:
-		ret = -ENODATA;
-		break;
-	}
-
-out_unlock:
 	spin_unlock_irqrestore(&kcs_bmc->lock, flags);
 
 	return ret;

@@ -96,7 +96,7 @@ obj-$(CONFIG_ARCH_SPRD) += sprd/
 obj-$(CONFIG_ARCH_STI) += st/
 obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
 obj-$(CONFIG_ARCH_SUNXI) += sunxi/
-obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/
+obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/
 obj-$(CONFIG_ARCH_TEGRA) += tegra/
 obj-y += ti/
 obj-$(CONFIG_CLK_UNIPHIER) += uniphier/

@@ -672,7 +672,7 @@ static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap)
 
 	usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap);
 	if (IS_ERR(usb1)) {
-		if (PTR_ERR(usb0) == -EPROBE_DEFER)
+		if (PTR_ERR(usb1) == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 
 		dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n",

@@ -107,7 +107,7 @@ extern const struct davinci_psc_init_data of_da850_psc1_init_data;
 #ifdef CONFIG_ARCH_DAVINCI_DM355
 extern const struct davinci_psc_init_data dm355_psc_init_data;
 #endif
-#ifdef CONFIG_ARCH_DAVINCI_DM356
+#ifdef CONFIG_ARCH_DAVINCI_DM365
 extern const struct davinci_psc_init_data dm365_psc_init_data;
 #endif
 #ifdef CONFIG_ARCH_DAVINCI_DM644x

@@ -1,24 +1,24 @@
 # SPDX-License-Identifier: GPL-2.0
 # Common objects
-lib-$(CONFIG_SUNXI_CCU) += ccu_common.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mmc_timing.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_reset.o
+obj-y += ccu_common.o
+obj-y += ccu_mmc_timing.o
+obj-y += ccu_reset.o
 
 # Base clock types
-lib-$(CONFIG_SUNXI_CCU) += ccu_div.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_frac.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_gate.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mux.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mult.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_phase.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_sdm.o
+obj-y += ccu_div.o
+obj-y += ccu_frac.o
+obj-y += ccu_gate.o
+obj-y += ccu_mux.o
+obj-y += ccu_mult.o
+obj-y += ccu_phase.o
+obj-y += ccu_sdm.o
 
 # Multi-factor clocks
-lib-$(CONFIG_SUNXI_CCU) += ccu_nk.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_nkm.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_nkmp.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_nm.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o
+obj-y += ccu_nk.o
+obj-y += ccu_nkm.o
+obj-y += ccu_nkmp.o
+obj-y += ccu_nm.o
+obj-y += ccu_mp.o
 
 # SoC support
 obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
@@ -38,12 +38,3 @@ obj-$(CONFIG_SUN8I_R40_CCU) += ccu-sun8i-r40.o
 obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o
 obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o
 obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o
-
-# The lib-y file goals is supposed to work only in arch/*/lib or lib/. In our
-# case, we want to use that goal, but even though lib.a will be properly
-# generated, it will not be linked in, eventually resulting in a linker error
-# for missing symbols.
-#
-# We can work around that by explicitly adding lib.a to the obj-y goal. This is
-# an undocumented behaviour, but works well for now.
-obj-$(CONFIG_SUNXI_CCU) += lib.a

@@ -735,7 +735,7 @@ static void __arch_timer_setup(unsigned type,
 		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
 		clk->name = "arch_mem_timer";
 		clk->rating = 400;
-		clk->cpumask = cpu_all_mask;
+		clk->cpumask = cpu_possible_mask;
 		if (arch_timer_mem_use_virtual) {
 			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
 			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;

@@ -189,14 +189,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
 
 	/* prevent private mappings from being established */
 	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
-		dev_info(dev, "%s: %s: fail, attempted private mapping\n",
+		dev_info_ratelimited(dev,
+				"%s: %s: fail, attempted private mapping\n",
 				current->comm, func);
 		return -EINVAL;
 	}
 
 	mask = dax_region->align - 1;
 	if (vma->vm_start & mask || vma->vm_end & mask) {
-		dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
+		dev_info_ratelimited(dev,
+				"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
 				current->comm, func, vma->vm_start, vma->vm_end,
 				mask);
 		return -EINVAL;
@@ -204,13 +206,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
 
 	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
 			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
-		dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
+		dev_info_ratelimited(dev,
+				"%s: %s: fail, dax range requires MADV_DONTFORK\n",
 				current->comm, func);
 		return -EINVAL;
 	}
 
 	if (!vma_is_dax(vma)) {
-		dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
+		dev_info_ratelimited(dev,
+				"%s: %s: fail, vma is not DAX capable\n",
 				current->comm, func);
 		return -EINVAL;
 	}

@@ -794,7 +794,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
 	struct k3_dma_dev *d = ofdma->of_dma_data;
 	unsigned int request = dma_spec->args[0];
 
-	if (request > d->dma_requests)
+	if (request >= d->dma_requests)
 		return NULL;
 
 	return dma_get_slave_channel(&(d->chans[request].vc.chan));

@@ -3033,7 +3033,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
 	pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
 	pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
 			1 : PL330_MAX_BURST);
 

@@ -1485,7 +1485,11 @@ static int omap_dma_probe(struct platform_device *pdev)
 	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
 	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
 	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	if (__dma_omap15xx(od->plat->dma_attr))
+		od->ddev.residue_granularity =
+				DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	else
+		od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);

@@ -455,8 +455,10 @@ static int altera_cvp_probe(struct pci_dev *pdev,
 
 	mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name,
 			      &altera_cvp_ops, conf);
-	if (!mgr)
-		return -ENOMEM;
+	if (!mgr) {
+		ret = -ENOMEM;
+		goto err_unmap;
+	}
 
 	pci_set_drvdata(pdev, mgr);
 

@@ -190,6 +190,7 @@ struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
 struct amdgpu_bo_va_mapping;
+struct amdgpu_atif;
 
 enum amdgpu_cp_irq {
 	AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -1269,43 +1270,6 @@ struct amdgpu_vram_scratch {
 /*
  * ACPI
  */
-struct amdgpu_atif_notification_cfg {
-	bool enabled;
-	int command_code;
-};
-
-struct amdgpu_atif_notifications {
-	bool display_switch;
-	bool expansion_mode_change;
-	bool thermal_state;
-	bool forced_power_state;
-	bool system_power_state;
-	bool display_conf_change;
-	bool px_gfx_switch;
-	bool brightness_change;
-	bool dgpu_display_event;
-};
-
-struct amdgpu_atif_functions {
-	bool system_params;
-	bool sbios_requests;
-	bool select_active_disp;
-	bool lid_state;
-	bool get_tv_standard;
-	bool set_tv_standard;
-	bool get_panel_expansion_mode;
-	bool set_panel_expansion_mode;
-	bool temperature_change;
-	bool graphics_device_types;
-};
-
-struct amdgpu_atif {
-	struct amdgpu_atif_notifications notifications;
-	struct amdgpu_atif_functions functions;
-	struct amdgpu_atif_notification_cfg notification_cfg;
-	struct amdgpu_encoder *encoder_for_bl;
-};
-
 struct amdgpu_atcs_functions {
 	bool get_ext_state;
 	bool pcie_perf_req;
@@ -1466,7 +1430,7 @@ struct amdgpu_device {
 #if defined(CONFIG_DEBUG_FS)
 	struct dentry			*debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
 #endif
-	struct amdgpu_atif		atif;
+	struct amdgpu_atif		*atif;
 	struct amdgpu_atcs		atcs;
 	struct mutex			srbm_mutex;
 	/* GRBM index mutex. Protects concurrent access to GRBM index */
@@ -1894,6 +1858,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false;
 static inline bool amdgpu_has_atpx(void) { return false; }
 #endif
 
+#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void);
+#else
+static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
+#endif
+
 /*
  * KMS
 */

Some files were not shown because too many files changed in this diff.