Merge 4.3-rc7 into staging-next

We want the other staging patches in this branch as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Commit cf516d08ec

 .mailmap | 1 +
diff --git a/.mailmap b/.mailmap
--- a/.mailmap
+++ b/.mailmap
@@ -59,6 +59,7 @@ James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
 James Ketrenos <jketreno@io.(none)>
+<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
 Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
 Jens Axboe <axboe@suse.de>

@@ -0,0 +1,7 @@
+This file contains documentation for running mainline
+kernel on omaps.
+
+KERNEL		NEW DEPENDENCIES
+v4.3+		Update is needed for custom .config files to make sure
+		CONFIG_REGULATOR_PBIAS is enabled for MMC1 to work
+		properly.
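
A minimal .config fragment matching the new documentation above (a sketch;
CONFIG_REGULATOR_PBIAS is the only symbol named by the note, and enabling it
built-in is one way to satisfy the dependency):

    CONFIG_REGULATOR_PBIAS=y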

 MAINTAINERS | 31 ++++++++++++++++++++++++++++---

diff --git a/MAINTAINERS b/MAINTAINERS
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -894,11 +894,12 @@ M:	Lennert Buytenhek <kernel@wantstofly.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
-ARM/Allwinner A1X SoC support
+ARM/Allwinner sunXi SoC support
 M:	Maxime Ripard <maxime.ripard@free-electrons.com>
+M:	Chen-Yu Tsai <wens@csie.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
-N:	sun[x4567]i
+N:	sun[x456789]i
 
 ARM/Allwinner SoC Clock Support
 M:	Emilio López <emilio@elopez.com.ar>
@@ -3591,6 +3592,13 @@ F:	drivers/gpu/drm/i915/
 F:	include/drm/i915*
 F:	include/uapi/drm/i915*
 
+DRM DRIVERS FOR ATMEL HLCDC
+M:	Boris Brezillon <boris.brezillon@free-electrons.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Supported
+F:	drivers/gpu/drm/atmel-hlcdc/
+F:	Documentation/devicetree/bindings/drm/atmel/
+
 DRM DRIVERS FOR EXYNOS
 M:	Inki Dae <inki.dae@samsung.com>
 M:	Joonyoung Shim <jy0922.shim@samsung.com>
@@ -3619,6 +3627,14 @@ S:	Maintained
 F:	drivers/gpu/drm/imx/
 F:	Documentation/devicetree/bindings/drm/imx/
 
+DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
+M:	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+L:	dri-devel@lists.freedesktop.org
+T:	git git://github.com/patjak/drm-gma500
+S:	Maintained
+F:	drivers/gpu/drm/gma500
+F:	include/drm/gma500*
+
 DRM DRIVERS FOR NVIDIA TEGRA
 M:	Thierry Reding <thierry.reding@gmail.com>
 M:	Terje Bergström <tbergstrom@nvidia.com>
@@ -6784,7 +6800,6 @@ F:	drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
-M:	Amir Vadai <amirv@mellanox.com>
 M:	Ido Shamay <idos@mellanox.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
@@ -9114,6 +9129,15 @@ S:	Supported
 F:	Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
 F:	drivers/net/ethernet/synopsys/dwc_eth_qos.c
 
+SYNOPSYS DESIGNWARE I2C DRIVER
+M:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+M:	Jarkko Nikula <jarkko.nikula@linux.intel.com>
+M:	Mika Westerberg <mika.westerberg@linux.intel.com>
+L:	linux-i2c@vger.kernel.org
+S:	Maintained
+F:	drivers/i2c/busses/i2c-designware-*
+F:	include/linux/platform_data/i2c-designware.h
+
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
 M:	Seungwon Jeon <tgih.jun@samsung.com>
 M:	Jaehoon Chung <jh80.chung@samsung.com>
@@ -11666,6 +11690,7 @@ F:	drivers/tty/serial/zs.*
 ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
 M:	Minchan Kim <minchan@kernel.org>
 M:	Nitin Gupta <ngupta@vflare.org>
+R:	Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zsmalloc.c

 Makefile | 2 +-

diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*

@@ -402,11 +402,12 @@
 			/* SMPS9 unused */
 
 			ldo1_reg: ldo1 {
-				/* VDD_SD */
+				/* VDD_SD / VDDSHV8 */
 				regulator-name = "ldo1";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <3300000>;
 				regulator-boot-on;
+				regulator-always-on;
 			};
 
 			ldo2_reg: ldo2 {

@@ -46,7 +46,7 @@
 
 / {
 	model = "Marvell Armada 385 Access Point Development Board";
-	compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada38x";
+	compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada380";
 
 	chosen {
 		stdout-path = "serial1:115200n8";

@@ -152,7 +152,7 @@
 		};
 
 		usb_phy2: phy@a2f400 {
-			compatible = "marvell,berlin2-usb-phy";
+			compatible = "marvell,berlin2cd-usb-phy";
 			reg = <0xa2f400 0x128>;
 			#phy-cells = <0>;
 			resets = <&chip_rst 0x104 14>;
@@ -170,7 +170,7 @@
 		};
 
 		usb_phy0: phy@b74000 {
-			compatible = "marvell,berlin2-usb-phy";
+			compatible = "marvell,berlin2cd-usb-phy";
 			reg = <0xb74000 0x128>;
 			#phy-cells = <0>;
 			resets = <&chip_rst 0x104 12>;
@@ -178,7 +178,7 @@
 		};
 
 		usb_phy1: phy@b78000 {
-			compatible = "marvell,berlin2-usb-phy";
+			compatible = "marvell,berlin2cd-usb-phy";
 			reg = <0xb78000 0x128>;
 			#phy-cells = <0>;
 			resets = <&chip_rst 0x104 13>;

@@ -915,6 +915,11 @@
 	};
 };
 
+&pmu_system_controller {
+	assigned-clocks = <&pmu_system_controller 0>;
+	assigned-clock-parents = <&clock CLK_FIN_PLL>;
+};
+
 &rtc {
 	status = "okay";
 	clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>;

@@ -878,6 +878,11 @@
 	};
 };
 
+&pmu_system_controller {
+	assigned-clocks = <&pmu_system_controller 0>;
+	assigned-clock-parents = <&clock CLK_FIN_PLL>;
+};
+
 &rtc {
 	status = "okay";
 	clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>;

@@ -588,10 +588,10 @@
 				status = "disabled";
 			};
 
-			uart2: serial@30870000 {
+			uart2: serial@30890000 {
 				compatible = "fsl,imx7d-uart",
 					     "fsl,imx6q-uart";
-				reg = <0x30870000 0x10000>;
+				reg = <0x30890000 0x10000>;
 				interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX7D_UART2_ROOT_CLK>,
 					<&clks IMX7D_UART2_ROOT_CLK>;

@@ -12,7 +12,7 @@
 
 / {
 	model = "LogicPD Zoom DM3730 Torpedo Development Kit";
-	compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap36xx";
+	compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3630", "ti,omap3";
 
 	gpio_keys {
 		compatible = "gpio-keys";

@@ -67,7 +67,7 @@
 
 	timer@c1109940 {
 		compatible = "amlogic,meson6-timer";
-		reg = <0xc1109940 0x14>;
+		reg = <0xc1109940 0x18>;
 		interrupts = <0 10 1>;
 	};

@@ -80,36 +80,37 @@
 	wdt: watchdog@c1109900 {
 		compatible = "amlogic,meson6-wdt";
 		reg = <0xc1109900 0x8>;
 		interrupts = <0 0 1>;
 	};
 
 	uart_AO: serial@c81004c0 {
 		compatible = "amlogic,meson-uart";
-		reg = <0xc81004c0 0x14>;
+		reg = <0xc81004c0 0x18>;
 		interrupts = <0 90 1>;
 		clocks = <&clk81>;
 		status = "disabled";
 	};
 
-	uart_A: serial@c81084c0 {
+	uart_A: serial@c11084c0 {
 		compatible = "amlogic,meson-uart";
-		reg = <0xc81084c0 0x14>;
-		interrupts = <0 90 1>;
+		reg = <0xc11084c0 0x18>;
+		interrupts = <0 26 1>;
 		clocks = <&clk81>;
 		status = "disabled";
 	};
 
-	uart_B: serial@c81084dc {
+	uart_B: serial@c11084dc {
 		compatible = "amlogic,meson-uart";
-		reg = <0xc81084dc 0x14>;
-		interrupts = <0 90 1>;
+		reg = <0xc11084dc 0x18>;
+		interrupts = <0 75 1>;
 		clocks = <&clk81>;
 		status = "disabled";
 	};
 
-	uart_C: serial@c8108700 {
+	uart_C: serial@c1108700 {
 		compatible = "amlogic,meson-uart";
-		reg = <0xc8108700 0x14>;
-		interrupts = <0 90 1>;
+		reg = <0xc1108700 0x18>;
+		interrupts = <0 93 1>;
 		clocks = <&clk81>;
 		status = "disabled";
 	};

@@ -13,7 +13,7 @@
 
 / {
 	model = "TI OMAP37XX EVM (TMDSEVM3730)";
-	compatible = "ti,omap3-evm-37xx", "ti,omap36xx";
+	compatible = "ti,omap3-evm-37xx", "ti,omap3630", "ti,omap3";
 
 	memory {
 		device_type = "memory";

@@ -56,7 +56,7 @@
 			/* VMMCI level-shifter enable */
 			default_hrefv60_cfg2 {
 				pins = "GPIO169_D22";
-				ste,config = <&gpio_out_lo>;
+				ste,config = <&gpio_out_hi>;
 			};
 			/* VMMCI level-shifter voltage select */
 			default_hrefv60_cfg3 {

@@ -234,7 +234,9 @@
 		gpio-controller;
 		#interrupt-cells = <2>;
 		interrupt-controller;
+		/*
 		gpio-ranges = <&pinmux 0 0 246>;
+		*/
 	};
 
 	apbmisc@70000800 {

@@ -258,7 +258,9 @@
 		gpio-controller;
 		#interrupt-cells = <2>;
 		interrupt-controller;
+		/*
 		gpio-ranges = <&pinmux 0 0 251>;
+		*/
 	};
 
 	apbdma: dma@0,60020000 {

@@ -244,7 +244,9 @@
 		gpio-controller;
 		#interrupt-cells = <2>;
 		interrupt-controller;
+		/*
 		gpio-ranges = <&pinmux 0 0 224>;
+		*/
 	};
 
 	apbmisc@70000800 {

@@ -349,7 +349,9 @@
 		gpio-controller;
 		#interrupt-cells = <2>;
 		interrupt-controller;
+		/*
 		gpio-ranges = <&pinmux 0 0 248>;
+		*/
 	};
 
 	apbmisc@70000800 {

@@ -85,7 +85,7 @@
 };
 
 &ethsc {
-	interrupts = <0 50 4>;
+	interrupts = <0 52 4>;
 };
 
 &serial0 {

@@ -21,6 +21,7 @@ config KVM
 	depends on MMU && OF
 	select PREEMPT_NOTIFIERS
+	select ANON_INODES
 	select ARM_GIC
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select HAVE_KVM_ARCH_TLB_FLUSH_ALL
 	select KVM_MMIO

@@ -1080,7 +1080,7 @@ static int init_hyp_mode(void)
 	 */
 	err = kvm_timer_hyp_init();
 	if (err)
-		goto out_free_mappings;
+		goto out_free_context;
 
 #ifndef CONFIG_HOTPLUG_CPU
 	free_boot_hyp_pgd();

@@ -200,15 +200,15 @@ no_clk:
 		args.args_count = 0;
 		child_domain = of_genpd_get_from_provider(&args);
 		if (IS_ERR(child_domain))
-			goto next_pd;
+			continue;
 
 		if (of_parse_phandle_with_args(np, "power-domains",
 					 "#power-domain-cells", 0, &args) != 0)
-			goto next_pd;
+			continue;
 
 		parent_domain = of_genpd_get_from_provider(&args);
 		if (IS_ERR(parent_domain))
-			goto next_pd;
+			continue;
 
 		if (pm_genpd_add_subdomain(parent_domain, child_domain))
 			pr_warn("%s failed to add subdomain: %s\n",
@@ -216,8 +216,6 @@ no_clk:
 		else
 			pr_info("%s has as child subdomain: %s.\n",
 				parent_domain->name, child_domain->name);
-next_pd:
-		of_node_put(np);
 	}
 
 	return 0;

@@ -49,6 +49,7 @@ config SOC_OMAP5
 	select OMAP_INTERCONNECT
 	select OMAP_INTERCONNECT_BARRIER
 	select PM_OPP if PM
+	select ZONE_DMA if ARM_LPAE
 
 config SOC_AM33XX
 	bool "TI AM33XX"
@@ -78,6 +79,7 @@ config SOC_DRA7XX
 	select OMAP_INTERCONNECT
 	select OMAP_INTERCONNECT_BARRIER
 	select PM_OPP if PM
+	select ZONE_DMA if ARM_LPAE
 
 config ARCH_OMAP2PLUS
 	bool

@@ -106,6 +106,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
 MACHINE_END
 
 static const char *const omap36xx_boards_compat[] __initconst = {
+	"ti,omap3630",
 	"ti,omap36xx",
 	NULL,
 };
@@ -243,6 +244,9 @@ static const char *const omap5_boards_compat[] __initconst = {
 };
 
 DT_MACHINE_START(OMAP5_DT, "Generic OMAP5 (Flattened Device Tree)")
+#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
+	.dma_zone_size	= SZ_2G,
+#endif
 	.reserve	= omap_reserve,
 	.smp		= smp_ops(omap4_smp_ops),
 	.map_io		= omap5_map_io,
@@ -288,6 +292,9 @@ static const char *const dra74x_boards_compat[] __initconst = {
 };
 
 DT_MACHINE_START(DRA74X_DT, "Generic DRA74X (Flattened Device Tree)")
+#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
+	.dma_zone_size	= SZ_2G,
+#endif
 	.reserve	= omap_reserve,
 	.smp		= smp_ops(omap4_smp_ops),
 	.map_io		= dra7xx_map_io,
@@ -308,6 +315,9 @@ static const char *const dra72x_boards_compat[] __initconst = {
 };
 
 DT_MACHINE_START(DRA72X_DT, "Generic DRA72X (Flattened Device Tree)")
+#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
+	.dma_zone_size	= SZ_2G,
+#endif
 	.reserve	= omap_reserve,
 	.map_io		= dra7xx_map_io,
 	.init_early	= dra7xx_init_early,

@@ -559,7 +559,14 @@ static void pdata_quirks_check(struct pdata_init *quirks)
 
 void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table)
 {
-	omap_sdrc_init(NULL, NULL);
+	/*
+	 * We still need this for omap2420 and omap3 PM to work, others are
+	 * using drivers/misc/sram.c already.
+	 */
+	if (of_machine_is_compatible("ti,omap2420") ||
+	    of_machine_is_compatible("ti,omap3"))
+		omap_sdrc_init(NULL, NULL);
 
 	pdata_quirks_check(auxdata_quirks);
 	of_platform_populate(NULL, omap_dt_match_table,
 			     omap_auxdata_lookup, NULL);

@@ -42,10 +42,6 @@
 #define PECR_IS(n)	((1 << ((n) * 2)) << 29)
 
 extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
-#ifdef CONFIG_PM
-
-#define ISRAM_START	0x5c000000
-#define ISRAM_SIZE	SZ_256K
 
 /*
  * NAND NFC: DFI bus arbitration subset
@@ -54,6 +50,11 @@ extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
 #define NDCR_ND_ARB_EN		(1 << 12)
 #define NDCR_ND_ARB_CNTL	(1 << 19)
 
+#ifdef CONFIG_PM
+
+#define ISRAM_START	0x5c000000
+#define ISRAM_SIZE	SZ_256K
+
 static void __iomem *sram;
 static unsigned long wakeup_src;

@@ -614,6 +614,7 @@ load_common:
 		case BPF_LD | BPF_B | BPF_IND:
 			load_order = 0;
 load_ind:
+			update_on_xread(ctx);
 			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
 			goto load_common;
 		case BPF_LDX | BPF_IMM:

@@ -495,7 +495,7 @@ void __init orion_ge00_switch_init(struct dsa_platform_data *d, int irq)
 
 	d->netdev = &orion_ge00.dev;
 	for (i = 0; i < d->nr_chips; i++)
-		d->chip[i].host_dev = &orion_ge00_shared.dev;
+		d->chip[i].host_dev = &orion_ge_mvmdio.dev;
 	orion_switch_device.dev.platform_data = d;
 
 	platform_device_register(&orion_switch_device);

@@ -42,7 +42,7 @@ endif
 CHECKFLAGS	+= -D__aarch64__
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
-CFLAGS_MODULE	+= -mcmodel=large
+KBUILD_CFLAGS_MODULE	+= -mcmodel=large
 endif
 
 # Default value

@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls		388
+#define __NR_compat_syscalls		390
 #endif
 
 #define __ARCH_WANT_SYS_CLONE

@@ -797,3 +797,12 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create)
 __SYSCALL(__NR_bpf, sys_bpf)
 #define __NR_execveat 387
 __SYSCALL(__NR_execveat, compat_sys_execveat)
+#define __NR_userfaultfd 388
+__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
+#define __NR_membarrier 389
+__SYSCALL(__NR_membarrier, sys_membarrier)
+
+/*
+ * Please add new compat syscalls above this comment and update
+ * __NR_compat_syscalls in asm/unistd.h.
+ */

@@ -19,6 +19,9 @@
 /* Required for AArch32 compatibility. */
 #define SA_RESTORER	0x04000000
 
+#define MINSIGSTKSZ 5120
+#define SIGSTKSZ    16384
+
 #include <asm-generic/signal.h>
 
 #endif

@@ -111,7 +111,7 @@ CONFIG_SCSI_QLA_FC=m
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_LPFC=m
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_ATA=y

@@ -114,7 +114,7 @@ CONFIG_SCSI_QLA_FC=m
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_LPFC=m
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_ATA=y

@@ -3,7 +3,6 @@
 
 #ifdef __KERNEL__
 
-#include <asm/reg.h>
 
 /* bytes per L1 cache line */
 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -40,12 +39,6 @@ struct ppc64_caches {
 };
 
 extern struct ppc64_caches ppc64_caches;
-
-static inline void logmpp(u64 x)
-{
-	asm volatile(PPC_LOGMPP(R1) : : "r" (x));
-}
 
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */
 
 #if defined(__ASSEMBLY__)

@@ -297,8 +297,6 @@ struct kvmppc_vcore {
 	u32 arch_compat;
 	ulong pcr;
 	ulong dpdes;		/* doorbell state (POWER8) */
-	void *mpp_buffer;	/* Micro Partition Prefetch buffer */
-	bool mpp_buffer_is_valid;
 	ulong conferring_threads;
 };

@@ -61,8 +61,13 @@ struct machdep_calls {
 				       unsigned long addr,
 				       unsigned char *hpte_slot_array,
 				       int psize, int ssize, int local);
-	/* special for kexec, to be called in real mode, linear mapping is
-	 * destroyed as well */
+	/*
+	 * Special for kexec.
+	 * To be called in real mode with interrupts disabled. No locks are
+	 * taken as such, concurrent access on pre POWER5 hardware could result
+	 * in a deadlock.
+	 * The linear mapping is destroyed as well.
+	 */
 	void		(*hpte_clear_all)(void);
 
 	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,

@@ -141,7 +141,6 @@
 #define PPC_INST_ISEL			0x7c00001e
 #define PPC_INST_ISEL_MASK		0xfc00003e
 #define PPC_INST_LDARX			0x7c0000a8
-#define PPC_INST_LOGMPP			0x7c0007e4
 #define PPC_INST_LSWI			0x7c0004aa
 #define PPC_INST_LSWX			0x7c00042a
 #define PPC_INST_LWARX			0x7c000028
@@ -285,20 +284,6 @@
 #define __PPC_EH(eh)	0
 #endif
 
-/* POWER8 Micro Partition Prefetch (MPP) parameters */
-/* Address mask is common for LOGMPP instruction and MPPR SPR */
-#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000ULL
-
-/* Bits 60 and 61 of MPP SPR should be set to one of the following */
-/* Aborting the fetch is indeed setting 00 in the table size bits */
-#define PPC_MPPR_FETCH_ABORT (0x0ULL << 60)
-#define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60)
-
-/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
-#define PPC_LOGMPP_LOG_L2 (0x02ULL << 54)
-#define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54)
-#define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54)
-
 /* Deal with instructions that older assemblers aren't aware of */
 #define	PPC_DCBAL(a, b)		stringify_in_c(.long PPC_INST_DCBAL | \
 					__PPC_RA(a) | __PPC_RB(b))
@@ -307,8 +292,6 @@
 #define PPC_LDARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LDARX | \
 					___PPC_RT(t) | ___PPC_RA(a) | \
 					___PPC_RB(b) | __PPC_EH(eh))
-#define PPC_LOGMPP(b)		stringify_in_c(.long PPC_INST_LOGMPP | \
-					__PPC_RB(b))
 #define PPC_LWARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LWARX | \
 					___PPC_RT(t) | ___PPC_RA(a) | \
 					___PPC_RB(b) | __PPC_EH(eh))

@@ -226,7 +226,6 @@
 #define   CTRL_TE	0x00c00000	/* thread enable */
 #define   CTRL_RUNLATCH	0x1
 #define SPRN_DAWR	0xB4
-#define SPRN_MPPR	0xB8	/* Micro Partition Prefetch Register */
 #define SPRN_RPR	0xBA	/* Relative Priority Register */
 #define SPRN_CIABR	0xBB
 #define   CIABR_PRIV		0x3

@@ -1043,6 +1043,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
+	if (!rtas.entry)
+		return -EINVAL;
+
 	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
 		return -EFAULT;

@@ -36,7 +36,6 @@
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
-#include <asm/cache.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -75,12 +74,6 @@
 
 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 
-#if defined(CONFIG_PPC_64K_PAGES)
-#define MPP_BUFFER_ORDER	0
-#elif defined(CONFIG_PPC_4K_PAGES)
-#define MPP_BUFFER_ORDER	3
-#endif
-
 static int dynamic_mt_modes = 6;
 module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
@@ -1455,13 +1448,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
 	vcore->kvm = kvm;
 	INIT_LIST_HEAD(&vcore->preempt_list);
 
-	vcore->mpp_buffer_is_valid = false;
-
-	if (cpu_has_feature(CPU_FTR_ARCH_207S))
-		vcore->mpp_buffer = (void *)__get_free_pages(
-						GFP_KERNEL|__GFP_ZERO,
-						MPP_BUFFER_ORDER);
-
 	return vcore;
 }
 
@@ -1894,33 +1880,6 @@ static int on_primary_thread(void)
 	return 1;
 }
 
-static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
-{
-	phys_addr_t phy_addr, mpp_addr;
-
-	phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
-	mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
-	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
-	logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
-
-	vc->mpp_buffer_is_valid = true;
-}
-
-static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
-{
-	phys_addr_t phy_addr, mpp_addr;
-
-	phy_addr = virt_to_phys(vc->mpp_buffer);
-	mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
-	/* We must abort any in-progress save operations to ensure
-	 * the table is valid so that prefetch engine knows when to
-	 * stop prefetching. */
-	logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
-	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
-}
-
 /*
  * A list of virtual cores for each physical CPU.
  * These are vcores that could run but their runner VCPU tasks are
@@ -2471,14 +2430,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 
 	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
-	if (vc->mpp_buffer_is_valid)
-		kvmppc_start_restoring_l2_cache(vc);
-
 	__kvmppc_vcore_entry();
 
-	if (vc->mpp_buffer)
-		kvmppc_start_saving_l2_cache(vc);
-
 	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
 	spin_lock(&vc->lock);
@@ -3073,14 +3026,8 @@ static void kvmppc_free_vcores(struct kvm *kvm)
 {
 	long int i;
 
-	for (i = 0; i < KVM_MAX_VCORES; ++i) {
-		if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
-			struct kvmppc_vcore *vc = kvm->arch.vcores[i];
-			free_pages((unsigned long)vc->mpp_buffer,
-				   MPP_BUFFER_ORDER);
-		}
+	for (i = 0; i < KVM_MAX_VCORES; ++i)
 		kfree(kvm->arch.vcores[i]);
-	}
 	kvm->arch.online_vcores = 0;
 }

@@ -582,13 +582,21 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
+ * This must be called with interrupts disabled.
+ *
+ * Taking the native_tlbie_lock is unsafe here due to the possibility of
+ * lockdep being on. On pre POWER5 hardware, not taking the lock could
+ * cause deadlock. POWER5 and newer not taking the lock is fine. This only
+ * gets called during boot before secondary CPUs have come up and during
+ * crashdump and all bets are off anyway.
+ *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * athough there is the control page available...
 */
 static void native_hpte_clear(void)
 {
 	unsigned long vpn = 0;
-	unsigned long slot, slots, flags;
+	unsigned long slot, slots;
 	struct hash_pte *hptep = htab_address;
 	unsigned long hpte_v;
 	unsigned long pteg_count;
@@ -596,13 +604,6 @@ static void native_hpte_clear(void)
 
 	pteg_count = htab_hash_mask + 1;
 
-	local_irq_save(flags);
-
-	/* we take the tlbie lock and hold it.  Some hardware will
-	 * deadlock if we try to tlbie from two processors at once.
-	 */
-	raw_spin_lock(&native_tlbie_lock);
-
 	slots = pteg_count * HPTES_PER_GROUP;
 
 	for (slot = 0; slot < slots; slot++, hptep++) {
@@ -614,8 +615,8 @@ static void native_hpte_clear(void)
 		hpte_v = be64_to_cpu(hptep->v);
 
 		/*
-		 * Call __tlbie() here rather than tlbie() since we
-		 * already hold the native_tlbie_lock.
+		 * Call __tlbie() here rather than tlbie() since we can't take the
+		 * native_tlbie_lock.
 		 */
 		if (hpte_v & HPTE_V_VALID) {
 			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
@@ -625,8 +626,6 @@ static void native_hpte_clear(void)
 	}
 
 	asm volatile("eieio; tlbsync; ptesync":::"memory");
-	raw_spin_unlock(&native_tlbie_lock);
-	local_irq_restore(flags);
 }
 
 /*

@@ -487,9 +487,12 @@ int opal_machine_check(struct pt_regs *regs)
 	 * PRD component would have already got notified about this
 	 * error through other channels.
 	 *
-	 * In any case, let us just fall through. We anyway heading
-	 * down to panic path.
+	 * If hardware marked this as an unrecoverable MCE, we are
+	 * going to panic anyway. Even if it didn't, it's not safe to
+	 * continue at this point, so we should explicitly panic.
 	 */
 
+	panic("PowerNV Unrecovered Machine Check");
 	return 0;
 }

@@ -171,7 +171,26 @@ static void pnv_smp_cpu_kill_self(void)
 	 * so clear LPCR:PECE1. We keep PECE2 enabled.
 	 */
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
 
+	/*
+	 * Hard-disable interrupts, and then clear irq_happened flags
+	 * that we can safely ignore while off-line, since they
+	 * are for things for which we do no processing when off-line
+	 * (or in the case of HMI, all the processing we need to do
+	 * is done in lower-level real-mode code).
+	 */
+	hard_irq_disable();
+	local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);
+
 	while (!generic_check_cpu_restart(cpu)) {
+		/*
+		 * Clear IPI flag, since we don't handle IPIs while
+		 * offline, except for those when changing micro-threading
+		 * mode, which are handled explicitly below, and those
+		 * for coming online, which are handled via
+		 * generic_check_cpu_restart() calls.
+		 */
+		kvmppc_set_host_ipi(cpu, 0);
+
 		ppc64_runlatch_off();
@@ -196,20 +215,20 @@ static void pnv_smp_cpu_kill_self(void)
 		 * having finished executing in a KVM guest, then srr1
 		 * contains 0.
 		 */
-		if ((srr1 & wmask) == SRR1_WAKEEE) {
+		if (((srr1 & wmask) == SRR1_WAKEEE) ||
+		    (local_paca->irq_happened & PACA_IRQ_EE)) {
 			icp_native_flush_interrupt();
-			local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
-			smp_mb();
 		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
 			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 			asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
-			kvmppc_set_host_ipi(cpu, 0);
 		}
+		local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
+		smp_mb();
 
 		if (cpu_core_split_required())
 			continue;
 
-		if (!generic_check_cpu_restart(cpu))
+		if (srr1 && !generic_check_cpu_restart(cpu))
 			DBG("CPU%d Unexpected exit while offline !\n", cpu);
 	}
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);

@@ -194,11 +194,6 @@ static const struct os_area_db_id os_area_db_id_rtc_diff = {
 	.key = OS_AREA_DB_KEY_RTC_DIFF
 };
 
-static const struct os_area_db_id os_area_db_id_video_mode = {
-	.owner = OS_AREA_DB_OWNER_LINUX,
-	.key = OS_AREA_DB_KEY_VIDEO_MODE
-};
-
 #define SECONDS_FROM_1970_TO_2000 946684800LL
 
 /**

@@ -59,6 +59,7 @@ pages_do_alias(unsigned long addr1, unsigned long addr2)
 
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, void *from);
+#define copy_user_page(to, from, vaddr, pg)  __copy_user(to, from, PAGE_SIZE)
 
 struct page;
 struct vm_area_struct;

@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
 	.blkcipher	= {
 			.min_keysize	= AES_MIN_KEY_SIZE,
 			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
 			.setkey		= aes_set_key,
 			.encrypt	= cbc_encrypt,
 			.decrypt	= cbc_decrypt,
@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
 	.blkcipher	= {
 			.min_keysize	= AES_MIN_KEY_SIZE,
 			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
 			.setkey		= aes_set_key,
 			.encrypt	= ctr_crypt,
 			.decrypt	= ctr_crypt,

@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
 	.blkcipher	= {
 			.min_keysize	= CAMELLIA_MIN_KEY_SIZE,
 			.max_keysize	= CAMELLIA_MAX_KEY_SIZE,
+			.ivsize		= CAMELLIA_BLOCK_SIZE,
 			.setkey		= camellia_set_key,
 			.encrypt	= cbc_encrypt,
 			.decrypt	= cbc_decrypt,

@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
 	.blkcipher	= {
 			.min_keysize	= DES_KEY_SIZE,
 			.max_keysize	= DES_KEY_SIZE,
+			.ivsize		= DES_BLOCK_SIZE,
 			.setkey		= des_set_key,
 			.encrypt	= cbc_encrypt,
 			.decrypt	= cbc_decrypt,
@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
 	.blkcipher	= {
 			.min_keysize	= DES3_EDE_KEY_SIZE,
 			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.ivsize		= DES3_EDE_BLOCK_SIZE,
 			.setkey		= des3_ede_set_key,
 			.encrypt	= cbc3_encrypt,
 			.decrypt	= cbc3_decrypt,

@@ -70,8 +70,8 @@ KBUILD_AFLAGS += $(ARCH_INCLUDE)
 
 USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
 		$(ARCH_INCLUDE) $(MODE_INCLUDE) $(filter -I%,$(CFLAGS)) \
-		-D_FILE_OFFSET_BITS=64 -idirafter include \
-		-D__KERNEL__ -D__UM_HOST__
+		-D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \
+		-idirafter $(obj)/include -D__KERNEL__ -D__UM_HOST__
 
 #This will adjust *FLAGS accordingly to the platform.
 include $(ARCH_DIR)/Makefile-os-$(OS)

@@ -220,7 +220,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
 		show_regs(container_of(regs, struct pt_regs, regs));
 		panic("Segfault with no mm");
 	}
-	else if (!is_user && address < TASK_SIZE) {
+	else if (!is_user && address > PAGE_SIZE && address < TASK_SIZE) {
 		show_regs(container_of(regs, struct pt_regs, regs));
 		panic("Kernel tried to access user memory at addr 0x%lx, ip 0x%lx",
 		       address, ip);

@@ -96,7 +96,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
 		       "ret = %d\n", -n);
 		ret = n;
 	}
-	CATCH_EINTR(waitpid(pid, NULL, __WCLONE));
+	CATCH_EINTR(waitpid(pid, NULL, __WALL));
 }
 
 out_free2:
@@ -129,7 +129,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
 		return err;
 	}
 	if (stack_out == NULL) {
-		CATCH_EINTR(pid = waitpid(pid, &status, __WCLONE));
+		CATCH_EINTR(pid = waitpid(pid, &status, __WALL));
 		if (pid < 0) {
 			err = -errno;
 			printk(UM_KERN_ERR "run_helper_thread - wait failed, "
@@ -148,7 +148,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
 int helper_wait(int pid)
 {
 	int ret, status;
-	int wflags = __WCLONE;
+	int wflags = __WALL;
 
 	CATCH_EINTR(ret = waitpid(pid, &status, wflags));
 	if (ret < 0) {

@@ -667,6 +667,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
 		bool conout_found = false;
 		void *dummy = NULL;
 		u32 h = handles[i];
+		u32 current_fb_base;
 
 		status = efi_call_early(handle_protocol, h,
 					proto, (void **)&gop32);
@@ -678,7 +679,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
 		if (status == EFI_SUCCESS)
 			conout_found = true;
 
-		status = __gop_query32(gop32, &info, &size, &fb_base);
+		status = __gop_query32(gop32, &info, &size, &current_fb_base);
 		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
 			/*
 			 * Systems that use the UEFI Console Splitter may
@@ -692,6 +693,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
 			pixel_format = info->pixel_format;
 			pixel_info = info->pixel_information;
 			pixels_per_scan_line = info->pixels_per_scan_line;
+			fb_base = current_fb_base;
 
 			/*
 			 * Once we've found a GOP supporting ConOut,
@@ -770,6 +772,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
 		bool conout_found = false;
 		void *dummy = NULL;
 		u64 h = handles[i];
+		u32 current_fb_base;
 
 		status = efi_call_early(handle_protocol, h,
 					proto, (void **)&gop64);
@@ -781,7 +784,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
 		if (status == EFI_SUCCESS)
 			conout_found = true;
 
-		status = __gop_query64(gop64, &info, &size, &fb_base);
+		status = __gop_query64(gop64, &info, &size, &current_fb_base);
 		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
 			/*
 			 * Systems that use the UEFI Console Splitter may
@@ -795,6 +798,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
 			pixel_format = info->pixel_format;
 			pixel_info = info->pixel_information;
 			pixels_per_scan_line = info->pixels_per_scan_line;
+			fb_base = current_fb_base;
 
 			/*
 			 * Once we've found a GOP supporting ConOut,

@@ -554,6 +554,11 @@ static int __init camellia_aesni_init(void)
 {
 	const char *feature_name;
 
+	if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
+		pr_info("AVX or AES-NI instructions are not detected.\n");
+		return -ENODEV;
+	}
+
 	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
 		pr_info("CPU feature '%s' is not supported.\n", feature_name);
 		return -ENODEV;

@@ -1226,10 +1226,8 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
 int kvm_is_in_guest(void);
 
-int __x86_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem);
-int x86_set_memory_region(struct kvm *kvm,
-			  const struct kvm_userspace_memory_region *mem);
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

@@ -27,12 +27,11 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
    function. */
 
 #define __HAVE_ARCH_MEMCPY 1
+extern void *memcpy(void *to, const void *from, size_t len);
 extern void *__memcpy(void *to, const void *from, size_t len);
 
 #ifndef CONFIG_KMEMCHECK
-#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
-extern void *memcpy(void *to, const void *from, size_t len);
-#else
+#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
 #define memcpy(dst, src, len)					\
 ({								\
 	size_t __len = (len);					\

@@ -2907,6 +2907,7 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
 	struct irq_data *irq_data;
 	struct mp_chip_data *data;
 	struct irq_alloc_info *info = arg;
+	unsigned long flags;
 
 	if (!info || nr_irqs > 1)
 		return -EINVAL;
@@ -2939,11 +2940,14 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
 
 	cfg = irqd_cfg(irq_data);
 	add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);
+
+	local_irq_save(flags);
 	if (info->ioapic_entry)
 		mp_setup_entry(cfg, data, info->ioapic_entry);
 	mp_register_handler(virq, data->trigger);
 	if (virq < nr_legacy_irqs())
 		legacy_pic->mask(virq);
+	local_irq_restore(flags);
 
 	apic_printk(APIC_VERBOSE, KERN_DEBUG
 		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n",

@@ -550,14 +550,14 @@ unsigned long get_wchan(struct task_struct *p)
 	if (sp < bottom || sp > top)
 		return 0;
 
-	fp = READ_ONCE(*(unsigned long *)sp);
+	fp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
 	do {
 		if (fp < bottom || fp > top)
 			return 0;
-		ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
+		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
 		if (!in_sched_functions(ip))
 			return ip;
-		fp = READ_ONCE(*(unsigned long *)fp);
+		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
 	} while (count++ < 16 && p->state != TASK_RUNNING);
 	return 0;
 }

@@ -1173,6 +1173,14 @@ void __init setup_arch(char **cmdline_p)
 	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
 			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
 			KERNEL_PGD_PTRS);
+
+	/*
+	 * sync back low identity map too.  It is used for example
+	 * in the 32-bit EFI stub.
+	 */
+	clone_pgd_range(initial_page_table,
+			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+			KERNEL_PGD_PTRS);
 #endif
 
 	tboot_probe();

@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
 */
 #define UDELAY_10MS_DEFAULT 10000
 
-static unsigned int init_udelay = UDELAY_10MS_DEFAULT;
+static unsigned int init_udelay = INT_MAX;
 
 static int __init cpu_init_udelay(char *str)
 {
@@ -522,13 +522,16 @@ early_param("cpu_init_udelay", cpu_init_udelay);
 static void __init smp_quirk_init_udelay(void)
 {
 	/* if cmdline changed it from default, leave it alone */
-	if (init_udelay != UDELAY_10MS_DEFAULT)
+	if (init_udelay != INT_MAX)
 		return;
 
 	/* if modern processor, use no delay */
 	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
-	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
+	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
 		init_udelay = 0;
+		return;
+	}
+	/* else, use legacy delay */
+	init_udelay = UDELAY_10MS_DEFAULT;
 }
 
 /*
@@ -657,7 +660,9 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	/*
 	 * Give the other CPU some time to accept the IPI.
 	 */
-	if (init_udelay)
+	if (init_udelay == 0)
+		udelay(10);
+	else
 		udelay(300);
 
 	pr_debug("Startup point 1\n");
@@ -668,7 +673,9 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	/*
 	 * Give the other CPU some time to accept the IPI.
 	 */
-	if (init_udelay)
+	if (init_udelay == 0)
+		udelay(10);
+	else
 		udelay(200);
 
 	if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */

@@ -2418,7 +2418,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	u64 val, cr0, cr4;
 	u32 base3;
 	u16 selector;
-	int i;
+	int i, r;
 
 	for (i = 0; i < 16; i++)
 		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
@@ -2460,13 +2460,17 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
 	ctxt->ops->set_gdt(ctxt, &dt);
 
+	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+	if (r != X86EMUL_CONTINUE)
+		return r;
+
 	for (i = 0; i < 6; i++) {
-		int r = rsm_load_seg_64(ctxt, smbase, i);
+		r = rsm_load_seg_64(ctxt, smbase, i);
 		if (r != X86EMUL_CONTINUE)
 			return r;
 	}
 
-	return rsm_enter_protected_mode(ctxt, cr0, cr4);
+	return X86EMUL_CONTINUE;
 }
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)

@@ -4105,17 +4105,13 @@ static void seg_setup(int seg)
 static int alloc_apic_access_page(struct kvm *kvm)
 {
 	struct page *page;
-	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
 	mutex_lock(&kvm->slots_lock);
 	if (kvm->arch.apic_access_page_done)
 		goto out;
-	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-	kvm_userspace_mem.flags = 0;
-	kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
-	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+				    APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
 	if (r)
 		goto out;
 
@@ -4140,17 +4136,12 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 {
 	/* Called with kvm->slots_lock held. */
 
-	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
 	BUG_ON(kvm->arch.ept_identity_pagetable_done);
 
-	kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-	kvm_userspace_mem.flags = 0;
-	kvm_userspace_mem.guest_phys_addr =
-		kvm->arch.ept_identity_map_addr;
-	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+				    kvm->arch.ept_identity_map_addr, PAGE_SIZE);
 
 	return r;
 }
@@ -4949,14 +4940,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
 	int ret;
-	struct kvm_userspace_memory_region tss_mem = {
-		.slot = TSS_PRIVATE_MEMSLOT,
-		.guest_phys_addr = addr,
-		.memory_size = PAGE_SIZE * 3,
-		.flags = 0,
-	};
 
-	ret = x86_set_memory_region(kvm, &tss_mem);
+	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+				    PAGE_SIZE * 3);
 	if (ret)
 		return ret;
 	kvm->arch.tss_addr = addr;

@@ -6453,6 +6453,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		!vcpu->arch.apf.halted);
+}
+
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int r;
@@ -6461,8 +6467,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
 	for (;;) {
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		    !vcpu->arch.apf.halted)
+		if (kvm_vcpu_running(vcpu))
 			r = vcpu_enter_guest(vcpu);
 		else
 			r = vcpu_block(kvm, vcpu);
@@ -7474,34 +7479,66 @@ void kvm_arch_sync_events(struct kvm *kvm)
 	kvm_free_pit(kvm);
 }
 
-int __x86_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem)
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
 	int i, r;
+	unsigned long hva;
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *slot, old;
 
 	/* Called with kvm->slots_lock held.  */
-	BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+	if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
+		return -EINVAL;
 
+	slot = id_to_memslot(slots, id);
+	if (size) {
+		if (WARN_ON(slot->npages))
+			return -EEXIST;
+
+		/*
+		 * MAP_SHARED to prevent internal slot pages from being moved
+		 * by fork()/COW.
+		 */
+		hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
+			      MAP_SHARED | MAP_ANONYMOUS, 0);
+		if (IS_ERR((void *)hva))
+			return PTR_ERR((void *)hva);
+	} else {
+		if (!slot->npages)
+			return 0;
+
+		hva = 0;
+	}
+
+	old = *slot;
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-		struct kvm_userspace_memory_region m = *mem;
+		struct kvm_userspace_memory_region m;
 
-		m.slot |= i << 16;
+		m.slot = id | (i << 16);
+		m.flags = 0;
+		m.guest_phys_addr = gpa;
+		m.userspace_addr = hva;
+		m.memory_size = size;
 		r = __kvm_set_memory_region(kvm, &m);
 		if (r < 0)
 			return r;
 	}
 
+	if (!size) {
+		r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+		WARN_ON(r < 0);
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
-int x86_set_memory_region(struct kvm *kvm,
-			  const struct kvm_userspace_memory_region *mem)
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
 	int r;
 
 	mutex_lock(&kvm->slots_lock);
-	r = __x86_set_memory_region(kvm, mem);
+	r = __x86_set_memory_region(kvm, id, gpa, size);
 	mutex_unlock(&kvm->slots_lock);
 
 	return r;
@@ -7516,16 +7553,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		 * unless the the memory map has changed due to process exit
 		 * or fd copying.
 		 */
-		struct kvm_userspace_memory_region mem;
-		memset(&mem, 0, sizeof(mem));
-		mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
-
-		mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
-
-		mem.slot = TSS_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
+		x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
+		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
+		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
 	}
 	kvm_iommu_unmap_guest(kvm);
 	kfree(kvm->arch.vpic);
@@ -7628,27 +7658,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				const struct kvm_userspace_memory_region *mem,
 				enum kvm_mr_change change)
 {
-	/*
-	 * Only private memory slots need to be mapped here since
-	 * KVM_SET_MEMORY_REGION ioctl is no longer supported.
-	 */
-	if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
-		unsigned long userspace_addr;
-
-		/*
-		 * MAP_SHARED to prevent internal slot pages from being moved
-		 * by fork()/COW.
-		 */
-		userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
-					 PROT_READ | PROT_WRITE,
-					 MAP_SHARED | MAP_ANONYMOUS, 0);
-
-		if (IS_ERR((void *)userspace_addr))
-			return PTR_ERR((void *)userspace_addr);
-
-		memslot->userspace_addr = userspace_addr;
-	}
-
 	return 0;
 }
 
@@ -7710,17 +7719,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 {
 	int nr_mmu_pages = 0;
 
-	if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) {
-		int ret;
-
-		ret = vm_munmap(old->userspace_addr,
-				old->npages * PAGE_SIZE);
-		if (ret < 0)
-			printk(KERN_WARNING
-			       "kvm_vm_ioctl_set_memory_region: "
-			       "failed to munmap memory\n");
-	}
-
 	if (!kvm->arch.n_requested_mmu_pages)
 		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 
@@ -7769,19 +7767,36 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return true;
+
+	if (kvm_apic_has_events(vcpu))
+		return true;
+
+	if (vcpu->arch.pv.pv_unhalted)
+		return true;
+
+	if (atomic_read(&vcpu->arch.nmi_queued))
+		return true;
+
+	if (test_bit(KVM_REQ_SMI, &vcpu->requests))
+		return true;
+
+	if (kvm_arch_interrupt_allowed(vcpu) &&
+	    kvm_cpu_has_interrupt(vcpu))
+		return true;
+
+	return false;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
 		kvm_x86_ops->check_nested_events(vcpu, false);
 
-	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		!vcpu->arch.apf.halted)
-		|| !list_empty_careful(&vcpu->async_pf.done)
-		|| kvm_apic_has_events(vcpu)
-		|| vcpu->arch.pv.pv_unhalted
-		|| atomic_read(&vcpu->arch.nmi_queued) ||
-		(kvm_arch_interrupt_allowed(vcpu) &&
-		 kvm_cpu_has_interrupt(vcpu));
+	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)

@@ -12,7 +12,10 @@
 #include <skas.h>
 #include <sysdep/tls.h>
 
-extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
+static inline int modify_ldt(int func, void *ptr, unsigned long bytecount)
+{
+	return syscall(__NR_modify_ldt, func, ptr, bytecount);
+}
 
 static long write_ldt_entry(struct mm_id *mm_idp, int func,
 		     struct user_desc *desc, void **addr, int done)

@@ -576,7 +576,7 @@ void blk_cleanup_queue(struct request_queue *q)
 	q->queue_lock = &q->__queue_lock;
 	spin_unlock_irq(lock);
 
-	bdi_destroy(&q->backing_dev_info);
+	bdi_unregister(&q->backing_dev_info);
 
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);

@@ -641,6 +641,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
 {
 	bt_free(&tags->bitmap_tags);
 	bt_free(&tags->breserved_tags);
+	free_cpumask_var(tags->cpumask);
 	kfree(tags);
 }

@@ -2296,10 +2296,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 	int i;
 
 	for (i = 0; i < set->nr_hw_queues; i++) {
-		if (set->tags[i]) {
+		if (set->tags[i])
 			blk_mq_free_rq_map(set, set->tags[i], i);
-			free_cpumask_var(set->tags[i]->cpumask);
-		}
 	}
 
 	kfree(set->tags);

@@ -540,6 +540,7 @@ static void blk_release_queue(struct kobject *kobj)
 	struct request_queue *q =
 		container_of(kobj, struct request_queue, kobj);
 
+	bdi_exit(&q->backing_dev_info);
 	blkcg_exit_queue(q);
 
 	if (q->elevator) {

@@ -544,7 +544,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
 	struct crypto_alg *base = &alg->halg.base;
 
 	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
-	    alg->halg.statesize > PAGE_SIZE / 8)
+	    alg->halg.statesize > PAGE_SIZE / 8 ||
+	    alg->halg.statesize == 0)
 		return -EINVAL;
 
 	base->cra_type = &crypto_ahash_type;

@@ -61,6 +61,7 @@ ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX);
 
 #if (!ACPI_REDUCED_HARDWARE)
 ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);

@@ -85,7 +85,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded);
 /*
  * tbfadt - FADT parse/convert/validate
  */
-void acpi_tb_parse_fadt(u32 table_index);
+void acpi_tb_parse_fadt(void);
 
 void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length);
 
@@ -138,8 +138,6 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id);
 */
 acpi_status acpi_tb_initialize_facs(void);
 
-u8 acpi_tb_tables_loaded(void);
-
 void
 acpi_tb_print_table_header(acpi_physical_address address,
 			   struct acpi_table_header *header);

@@ -71,7 +71,7 @@ acpi_status acpi_enable(void)
 
 	/* ACPI tables must be present */
 
-	if (!acpi_tb_tables_loaded()) {
+	if (acpi_gbl_fadt_index == ACPI_INVALID_TABLE_INDEX) {
 		return_ACPI_STATUS(AE_NO_ACPI_TABLES);
 	}
 

@@ -298,7 +298,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64)
 *
 * FUNCTION:    acpi_tb_parse_fadt
 *
- * PARAMETERS:  table_index         - Index for the FADT
+ * PARAMETERS:  None
 *
 * RETURN:      None
 *
@@ -307,7 +307,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64)
 *
 ******************************************************************************/
 
-void acpi_tb_parse_fadt(u32 table_index)
+void acpi_tb_parse_fadt(void)
 {
 	u32 length;
 	struct acpi_table_header *table;
@@ -319,11 +319,11 @@ void acpi_tb_parse_fadt(void)
 	 * Get a local copy of the FADT and convert it to a common format
 	 * Map entire FADT, assumed to be smaller than one page.
 	 */
-	length = acpi_gbl_root_table_list.tables[table_index].length;
+	length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length;
 
 	table =
-	    acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index].
-			       address, length);
+	    acpi_os_map_memory(acpi_gbl_root_table_list.
+			       tables[acpi_gbl_fadt_index].address, length);
 	if (!table) {
 		return;
 	}
@@ -97,29 +97,6 @@ acpi_status acpi_tb_initialize_facs(void)
 }
 #endif                          /* !ACPI_REDUCED_HARDWARE */
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_tables_loaded
- *
- * PARAMETERS:  None
- *
- * RETURN:      TRUE if required ACPI tables are loaded
- *
- * DESCRIPTION: Determine if the minimum required ACPI tables are present
- *              (FADT, FACS, DSDT)
- *
- ******************************************************************************/
-
-u8 acpi_tb_tables_loaded(void)
-{
-
-        if (acpi_gbl_root_table_list.current_table_count >= 4) {
-                return (TRUE);
-        }
-
-        return (FALSE);
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_tb_check_dsdt_header
@@ -392,7 +369,8 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
                     ACPI_COMPARE_NAME(&acpi_gbl_root_table_list.
                                       tables[table_index].signature,
                                       ACPI_SIG_FADT)) {
-                        acpi_tb_parse_fadt(table_index);
+                        acpi_gbl_fadt_index = table_index;
+                        acpi_tb_parse_fadt();
                 }
 
 next_table:

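Taken together, the ACPICA hunks above replace a table index that had to be threaded through the call chain with a global index (acpi_gbl_fadt_index) recorded once while the root table list is scanned, then consulted by later callers. A minimal standalone sketch of that idiom follows; every name in it is invented for the example and is not ACPICA API:

#include <stdio.h>

#define INVALID_INDEX ((unsigned int)-1)

/* Cache the index of a special table when the root list is scanned once,
 * so later consumers don't need the index passed down the call chain. */
static unsigned int special_index = INVALID_INDEX;

static void scan_tables(unsigned int count)
{
        for (unsigned int i = 0; i < count; i++) {
                if (i == 2)             /* pretend entry 2 is the "FADT" */
                        special_index = i;
        }
}

static int use_special_table(void)
{
        if (special_index == INVALID_INDEX)
                return -1;              /* table was never found */
        printf("using table %u\n", special_index);
        return 0;
}

int main(void)
{
        scan_tables(4);
        return use_special_table();
}
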
@@ -187,7 +187,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
  * global one. Requires architecture specific dev_get_cma_area() helper
  * function.
  */
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
                                        unsigned int align)
 {
         if (align > CONFIG_CMA_ALIGNMENT)

@@ -77,13 +77,16 @@ static bool default_stop_ok(struct device *dev)
                                         dev_update_qos_constraint);
 
         if (constraint_ns > 0) {
-                constraint_ns -= td->start_latency_ns;
+                constraint_ns -= td->save_state_latency_ns +
+                                td->stop_latency_ns +
+                                td->start_latency_ns +
+                                td->restore_state_latency_ns;
                 if (constraint_ns == 0)
                         return false;
         }
         td->effective_constraint_ns = constraint_ns;
-        td->cached_stop_ok = constraint_ns > td->stop_latency_ns ||
-                                constraint_ns == 0;
+        td->cached_stop_ok = constraint_ns >= 0;
 
         /*
          * The children have been suspended already, so we don't need to take
          * their stop latencies into account here.
@@ -126,18 +129,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 
         off_on_time_ns = genpd->power_off_latency_ns +
                 genpd->power_on_latency_ns;
-        /*
-         * It doesn't make sense to remove power from the domain if saving
-         * the state of all devices in it and the power off/power on operations
-         * take too much time.
-         *
-         * All devices in this domain have been stopped already at this point.
-         */
-        list_for_each_entry(pdd, &genpd->dev_list, list_node) {
-                if (pdd->dev->driver)
-                        off_on_time_ns +=
-                                to_gpd_data(pdd)->td.save_state_latency_ns;
-        }
 
         min_off_time_ns = -1;
         /*
@@ -193,7 +184,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
                  * constraint_ns cannot be negative here, because the device has
                  * been suspended.
                  */
-                constraint_ns -= td->restore_state_latency_ns;
                 if (constraint_ns <= off_on_time_ns)
                         return false;
 

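The governor fix above charges the entire save-state/stop/start/restore cycle against the device's QoS budget up front, and then treats any non-negative remainder as permission to stop. Illustrative arithmetic with made-up latencies (these are not measured values):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
        /* Invented numbers, just to show the accounting. */
        long long constraint_ns = 100000;       /* 100 us QoS budget */
        long long save_state_ns = 20000, stop_ns = 10000;
        long long start_ns = 10000, restore_state_ns = 20000;

        /* The fixed governor subtracts the whole stop/start cycle: */
        constraint_ns -= save_state_ns + stop_ns + start_ns + restore_state_ns;

        bool stop_ok = constraint_ns >= 0;      /* 40000 >= 0 -> true */
        printf("remaining budget: %lld ns, stop_ok=%d\n",
               constraint_ns, (int)stop_ok);
        return 0;
}
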
@@ -60,6 +60,7 @@ struct nbd_device {
         bool disconnect; /* a disconnect has been requested by user */
 
         struct timer_list timeout_timer;
+        spinlock_t tasks_lock;
         struct task_struct *task_recv;
         struct task_struct *task_send;
 
@@ -140,21 +141,23 @@ static void sock_shutdown(struct nbd_device *nbd)
 static void nbd_xmit_timeout(unsigned long arg)
 {
         struct nbd_device *nbd = (struct nbd_device *)arg;
-        struct task_struct *task;
+        unsigned long flags;
 
         if (list_empty(&nbd->queue_head))
                 return;
 
         nbd->disconnect = true;
 
-        task = READ_ONCE(nbd->task_recv);
-        if (task)
-                force_sig(SIGKILL, task);
+        spin_lock_irqsave(&nbd->tasks_lock, flags);
 
-        task = READ_ONCE(nbd->task_send);
-        if (task)
+        if (nbd->task_recv)
+                force_sig(SIGKILL, nbd->task_recv);
+
+        if (nbd->task_send)
                 force_sig(SIGKILL, nbd->task_send);
 
+        spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
         dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
 }
 
@@ -403,17 +406,24 @@ static int nbd_thread_recv(struct nbd_device *nbd)
 {
         struct request *req;
         int ret;
+        unsigned long flags;
 
         BUG_ON(nbd->magic != NBD_MAGIC);
 
         sk_set_memalloc(nbd->sock->sk);
 
+        spin_lock_irqsave(&nbd->tasks_lock, flags);
         nbd->task_recv = current;
+        spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
         ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
         if (ret) {
                 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
+
+                spin_lock_irqsave(&nbd->tasks_lock, flags);
                 nbd->task_recv = NULL;
+                spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
                 return ret;
         }
 
@@ -429,7 +439,9 @@ static int nbd_thread_recv(struct nbd_device *nbd)
 
         device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
 
+        spin_lock_irqsave(&nbd->tasks_lock, flags);
         nbd->task_recv = NULL;
+        spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
         if (signal_pending(current)) {
                 siginfo_t info;
@@ -534,8 +546,11 @@ static int nbd_thread_send(void *data)
 {
         struct nbd_device *nbd = data;
         struct request *req;
+        unsigned long flags;
 
+        spin_lock_irqsave(&nbd->tasks_lock, flags);
         nbd->task_send = current;
+        spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
         set_user_nice(current, MIN_NICE);
         while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
@@ -572,7 +587,15 @@ static int nbd_thread_send(void *data)
                 nbd_handle_req(nbd, req);
         }
 
+        spin_lock_irqsave(&nbd->tasks_lock, flags);
         nbd->task_send = NULL;
+        spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
+        /* Clear maybe pending signals */
+        if (signal_pending(current)) {
+                siginfo_t info;
+                dequeue_signal_lock(current, &current->blocked, &info);
+        }
 
         return 0;
 }
@@ -1052,6 +1075,7 @@ static int __init nbd_init(void)
                 nbd_dev[i].magic = NBD_MAGIC;
                 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
                 spin_lock_init(&nbd_dev[i].queue_lock);
+                spin_lock_init(&nbd_dev[i].tasks_lock);
                 INIT_LIST_HEAD(&nbd_dev[i].queue_head);
                 mutex_init(&nbd_dev[i].tx_lock);
                 init_timer(&nbd_dev[i].timeout_timer);

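The nbd hunks close a race: the timeout handler could send SIGKILL to task_recv or task_send while the corresponding thread was exiting and clearing the pointer. Publishing and clearing the task pointers under tasks_lock, and signalling under the same lock, rules that out. Below is a userspace analogue of the pattern; all names are invented for the sketch, and pthreads stand in for kernel tasks:

#include <pthread.h>
#include <signal.h>
#include <stddef.h>

/* Publish/clear the worker's identity under a lock so a watchdog never
 * signals a thread that has already exited. */
static pthread_mutex_t tasks_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t worker;
static int worker_valid;

static void *worker_fn(void *arg)
{
        pthread_mutex_lock(&tasks_lock);
        worker = pthread_self();
        worker_valid = 1;
        pthread_mutex_unlock(&tasks_lock);

        /* ... do work ... */

        pthread_mutex_lock(&tasks_lock);
        worker_valid = 0;
        pthread_mutex_unlock(&tasks_lock);
        return arg;
}

static void watchdog_fire(void)
{
        pthread_mutex_lock(&tasks_lock);
        if (worker_valid)
                pthread_kill(worker, SIGUSR1); /* kernel: force_sig(SIGKILL, ...) */
        pthread_mutex_unlock(&tasks_lock);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker_fn, NULL);
        pthread_join(t, NULL);
        watchdog_fire();   /* safe: worker_valid was cleared under the lock */
        return 0;
}
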
@@ -603,27 +603,31 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
         struct nvme_iod *iod = ctx;
         struct request *req = iod_get_private(iod);
         struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
         u16 status = le16_to_cpup(&cqe->status) >> 1;
+        bool requeue = false;
+        int error = 0;
 
         if (unlikely(status)) {
                 if (!(status & NVME_SC_DNR || blk_noretry_request(req))
                     && (jiffies - req->start_time) < req->timeout) {
                         unsigned long flags;
 
+                        requeue = true;
                         blk_mq_requeue_request(req);
                         spin_lock_irqsave(req->q->queue_lock, flags);
                         if (!blk_queue_stopped(req->q))
                                 blk_mq_kick_requeue_list(req->q);
                         spin_unlock_irqrestore(req->q->queue_lock, flags);
-                        return;
+                        goto release_iod;
                 }
+
                 if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
                         if (cmd_rq->ctx == CMD_CTX_CANCELLED)
-                                status = -EINTR;
+                                error = -EINTR;
+                        else
+                                error = status;
                 } else {
-                        status = nvme_error_status(status);
+                        error = nvme_error_status(status);
                 }
         }
@@ -635,8 +639,9 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
         if (cmd_rq->aborted)
                 dev_warn(nvmeq->dev->dev,
                         "completing aborted command with status:%04x\n",
-                        status);
+                        error);
 
+release_iod:
         if (iod->nents) {
                 dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
                         rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -649,7 +654,8 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
         }
         nvme_free_iod(nvmeq->dev, iod);
 
-        blk_mq_complete_request(req, status);
+        if (likely(!requeue))
+                blk_mq_complete_request(req, error);
 }
 
 /* length is in bytes. gfp flags indicates whether we may sleep. */
@@ -1804,7 +1810,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 
         length = (io.nblocks + 1) << ns->lba_shift;
         meta_len = (io.nblocks + 1) * ns->ms;
-        metadata = (void __user *)(unsigned long)io.metadata;
+        metadata = (void __user *)(uintptr_t)io.metadata;
         write = io.opcode & 1;
 
         if (ns->ext) {
@@ -1844,7 +1850,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
         c.rw.metadata = cpu_to_le64(meta_dma);
 
         status = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
-                        (void __user *)io.addr, length, NULL, 0);
+                        (void __user *)(uintptr_t)io.addr, length, NULL, 0);
 unmap:
         if (meta) {
                 if (status == NVME_SC_SUCCESS && !write) {
@@ -1886,7 +1892,7 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
                 timeout = msecs_to_jiffies(cmd.timeout_ms);
 
         status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c,
-                        NULL, (void __user *)cmd.addr, cmd.data_len,
+                        NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
                         &cmd.result, timeout);
         if (status >= 0) {
                 if (put_user(cmd.result, &ucmd->result))

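The req_completion() rework fixes a leak: on the retry path the function used to return before unmapping the iod's scatterlist. Routing every exit through the release_iod label frees resources unconditionally, while the requeue flag keeps blk_mq_complete_request() from completing a request that has just been requeued. The shape of that control flow, sketched with invented names (not the driver's real API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct io { void *dma_buf; };

static void complete_io(struct io *io, int error)
{
        (void)io;
        printf("completed with error %d\n", error);
}

static void finish(struct io *io, bool transient_failure)
{
        bool requeue = false;
        int error = 0;

        if (transient_failure) {
                requeue = true;         /* resubmit later ...              */
                goto release;           /* ... but free resources now      */
        }
        error = -5;                     /* e.g. a translated status code   */

release:
        free(io->dma_buf);              /* stands in for dma_unmap/free    */
        io->dma_buf = NULL;
        if (!requeue)
                complete_io(io, error); /* never complete a requeued I/O   */
}

int main(void)
{
        struct io a = { malloc(16) }, b = { malloc(16) };

        finish(&a, true);               /* requeued: freed, not completed  */
        finish(&b, false);              /* failed:   freed and completed   */
        return 0;
}
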
@@ -96,6 +96,8 @@ static int atomic_dec_return_safe(atomic_t *v)
 #define RBD_MINORS_PER_MAJOR            256
 #define RBD_SINGLE_MAJOR_PART_SHIFT     4
 
+#define RBD_MAX_PARENT_CHAIN_LEN        16
+
 #define RBD_SNAP_DEV_NAME_PREFIX        "snap_"
 #define RBD_MAX_SNAP_NAME_LEN \
                         (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
@@ -426,7 +428,7 @@ static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
                                     size_t count);
 static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
                                        size_t count);
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
 static void rbd_spec_put(struct rbd_spec *spec);
 
 static int rbd_dev_id_to_minor(int dev_id)
@@ -1863,9 +1865,11 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                 rbd_osd_read_callback(obj_request);
                 break;
         case CEPH_OSD_OP_SETALLOCHINT:
-                rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
+                rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
+                           osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
                 /* fall through */
         case CEPH_OSD_OP_WRITE:
+        case CEPH_OSD_OP_WRITEFULL:
                 rbd_osd_write_callback(obj_request);
                 break;
         case CEPH_OSD_OP_STAT:
@@ -2401,7 +2405,10 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
                         opcode = CEPH_OSD_OP_ZERO;
                 }
         } else if (op_type == OBJ_OP_WRITE) {
-                opcode = CEPH_OSD_OP_WRITE;
+                if (!offset && length == object_size)
+                        opcode = CEPH_OSD_OP_WRITEFULL;
+                else
+                        opcode = CEPH_OSD_OP_WRITE;
                 osd_req_op_alloc_hint_init(osd_request, num_ops,
                                         object_size, object_size);
                 num_ops++;
@@ -3760,6 +3767,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
         /* set io sizes to object size */
         segment_size = rbd_obj_bytes(&rbd_dev->header);
         blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
+        q->limits.max_sectors = queue_max_hw_sectors(q);
         blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
         blk_queue_max_segment_size(q, segment_size);
         blk_queue_io_min(q, segment_size);
@@ -5125,44 +5133,51 @@ out_err:
         return ret;
 }
 
-static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
+/*
+ * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
+ * rbd_dev_image_probe() recursion depth, which means it's also the
+ * length of the already discovered part of the parent chain.
+ */
+static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
 {
         struct rbd_device *parent = NULL;
-        struct rbd_spec *parent_spec;
-        struct rbd_client *rbdc;
         int ret;
 
         if (!rbd_dev->parent_spec)
                 return 0;
-        /*
-         * We need to pass a reference to the client and the parent
-         * spec when creating the parent rbd_dev.  Images related by
-         * parent/child relationships always share both.
-         */
-        parent_spec = rbd_spec_get(rbd_dev->parent_spec);
-        rbdc = __rbd_get_client(rbd_dev->rbd_client);
 
-        ret = -ENOMEM;
-        parent = rbd_dev_create(rbdc, parent_spec, NULL);
-        if (!parent)
+        if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
+                pr_info("parent chain is too long (%d)\n", depth);
+                ret = -EINVAL;
                 goto out_err;
+        }
 
-        ret = rbd_dev_image_probe(parent, false);
+        parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
+                                NULL);
+        if (!parent) {
+                ret = -ENOMEM;
+                goto out_err;
+        }
+
+        /*
+         * Images related by parent/child relationships always share
+         * rbd_client and spec/parent_spec, so bump their refcounts.
+         */
+        __rbd_get_client(rbd_dev->rbd_client);
+        rbd_spec_get(rbd_dev->parent_spec);
+
+        ret = rbd_dev_image_probe(parent, depth);
         if (ret < 0)
                 goto out_err;
+
         rbd_dev->parent = parent;
         atomic_set(&rbd_dev->parent_ref, 1);
-
         return 0;
+
 out_err:
-        if (parent) {
-                rbd_dev_unparent(rbd_dev);
-                rbd_dev_destroy(parent);
-        } else {
-                rbd_put_client(rbdc);
-                rbd_spec_put(parent_spec);
-        }
-
+        rbd_dev_unparent(rbd_dev);
+        if (parent)
+                rbd_dev_destroy(parent);
         return ret;
 }
@@ -5280,7 +5295,7 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
  * parent), initiate a watch on its header object before using that
  * object to get detailed information about the rbd image.
  */
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
 {
         int ret;
 
@@ -5298,7 +5313,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
         if (ret)
                 goto err_out_format;
 
-        if (mapping) {
+        if (!depth) {
                 ret = rbd_dev_header_watch_sync(rbd_dev);
                 if (ret) {
                         if (ret == -ENOENT)
@@ -5319,7 +5334,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
          * Otherwise this is a parent image, identified by pool, image
          * and snap ids - need to fill in names for those ids.
          */
-        if (mapping)
+        if (!depth)
                 ret = rbd_spec_fill_snap_id(rbd_dev);
         else
                 ret = rbd_spec_fill_names(rbd_dev);
@@ -5341,12 +5356,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
                  * Need to warn users if this image is the one being
                  * mapped and has a parent.
                  */
-                if (mapping && rbd_dev->parent_spec)
+                if (!depth && rbd_dev->parent_spec)
                         rbd_warn(rbd_dev,
                                  "WARNING: kernel layering is EXPERIMENTAL!");
         }
 
-        ret = rbd_dev_probe_parent(rbd_dev);
+        ret = rbd_dev_probe_parent(rbd_dev, depth);
         if (ret)
                 goto err_out_probe;
 
@@ -5357,7 +5372,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
 err_out_probe:
         rbd_dev_unprobe(rbd_dev);
 err_out_watch:
-        if (mapping)
+        if (!depth)
                 rbd_dev_header_unwatch_sync(rbd_dev);
 out_header_name:
         kfree(rbd_dev->header_name);
@@ -5420,7 +5435,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
         spec = NULL;            /* rbd_dev now owns this */
         rbd_opts = NULL;        /* rbd_dev now owns this */
 
-        rc = rbd_dev_image_probe(rbd_dev, true);
+        rc = rbd_dev_image_probe(rbd_dev, 0);
         if (rc < 0)
                 goto err_out_rbd_dev;
 

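The rbd change converts the boolean "mapping" flag into a recursion depth so the parent-chain walk can be capped at RBD_MAX_PARENT_CHAIN_LEN: each level of rbd_dev_image_probe() -> rbd_dev_probe_parent() consumes kernel stack, so an unbounded chain could overflow it, and depth zero still identifies the image actually being mapped. A self-contained sketch of the bound (types and names invented, refcounting and cleanup omitted):

#include <stdio.h>

#define MAX_CHAIN_LEN 16   /* mirrors RBD_MAX_PARENT_CHAIN_LEN */

struct image { struct image *parent; };

/* Probe an image, then its parent, bounding the recursion so a corrupt
 * or malicious parent chain cannot blow the fixed-size stack. */
static int probe(struct image *img, int depth)
{
        if (!img->parent)
                return 0;
        if (++depth > MAX_CHAIN_LEN) {
                fprintf(stderr, "parent chain is too long (%d)\n", depth);
                return -1;
        }
        return probe(img->parent, depth);
}

int main(void)
{
        struct image chain[20] = {{ NULL }};

        for (int i = 0; i < 19; i++)
                chain[i].parent = &chain[i + 1];
        return probe(&chain[0], 0) ? 1 : 0;   /* 19 links > 16: rejected */
}
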
@@ -1956,7 +1956,8 @@ static void blkback_changed(struct xenbus_device *dev,
                         break;
                 /* Missed the backend's Closing state -- fallthrough */
         case XenbusStateClosing:
-                blkfront_closing(info);
+                if (info)
+                        blkfront_closing(info);
                 break;
         }
 }

@@ -1184,11 +1184,12 @@ static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb,
                 if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
                         break;
                 target = cpumask_any_but(cpu_online_mask, cpu);
-                if (target < 0)
+                if (target >= nr_cpu_ids)
                         break;
                 perf_pmu_migrate_context(&dt->pmu, cpu, target);
                 cpumask_set_cpu(target, &dt->cpu);
-                WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0);
+                if (ccn->irq)
+                        WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0);
         default:
                 break;
         }

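The arm-ccn fix works because cpumask_any_but() returns an unsigned index and signals "no CPU found" with a value >= nr_cpu_ids; comparing the result against < 0 is always false. A userspace model of the wrong check and the corrected one (pick_other() is invented for the example):

#include <stdio.h>

#define NR_CPUS 4u

/* "No CPU found" is reported as a value >= nr_cpus, never as -1,
 * because the return type is unsigned. */
static unsigned int pick_other(unsigned int online_mask, unsigned int cpu,
                               unsigned int nr_cpus)
{
        for (unsigned int i = 0; i < nr_cpus; i++)
                if (i != cpu && (online_mask & (1u << i)))
                        return i;
        return nr_cpus;                 /* nothing available */
}

int main(void)
{
        unsigned int target = pick_other(0x1, 0, NR_CPUS); /* only CPU0 online */

        if (target < 0)                 /* always false for unsigned: the bug */
                printf("unreachable\n");
        if (target >= NR_CPUS)          /* the corrected check */
                printf("no migration target\n");
        return 0;
}
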
@@ -197,6 +197,7 @@ static void __init of_cpu_clk_setup(struct device_node *node)
         for_each_node_by_type(dn, "cpu") {
                 struct clk_init_data init;
                 struct clk *clk;
+                struct clk *parent_clk;
                 char *clk_name = kzalloc(5, GFP_KERNEL);
                 int cpu, err;
 
@@ -208,8 +209,9 @@ static void __init of_cpu_clk_setup(struct device_node *node)
                         goto bail_out;
 
                 sprintf(clk_name, "cpu%d", cpu);
+                parent_clk = of_clk_get(node, 0);
 
-                cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
+                cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
                 cpuclk[cpu].clk_name = clk_name;
                 cpuclk[cpu].cpu = cpu;
                 cpuclk[cpu].reg_base = clock_complex_base;

@@ -776,6 +776,11 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
         local_irq_save(flags);
         rdmsrl(MSR_IA32_APERF, aperf);
         rdmsrl(MSR_IA32_MPERF, mperf);
+        if (cpu->prev_mperf == mperf) {
+                local_irq_restore(flags);
+                return;
+        }
+
         tsc = rdtsc();
         local_irq_restore(flags);
 

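The intel_pstate guard drops a sample when MPERF has not advanced: the later core-busy calculation divides by the MPERF delta, and an unchanged counter would make that delta zero. A standalone model of the guard (the counter values are invented):

#include <stdint.h>
#include <stdio.h>

static uint64_t prev_aperf, prev_mperf;

/* Skip a sample when the MPERF counter has not moved, so delta_mperf
 * can never be zero in the ratio below. */
static void sample(uint64_t aperf, uint64_t mperf)
{
        if (prev_mperf == mperf)
                return;                 /* nothing new; avoid a 0 delta */

        uint64_t delta_aperf = aperf - prev_aperf;
        uint64_t delta_mperf = mperf - prev_mperf;

        printf("busy ratio: %.2f\n", (double)delta_aperf / delta_mperf);
        prev_aperf = aperf;
        prev_mperf = mperf;
}

int main(void)
{
        sample(100, 100);   /* (100-0)/(100-0) = 1.00 */
        sample(150, 200);   /* (150-100)/(200-100) = 0.50 */
        sample(170, 200);   /* unchanged MPERF: skipped, no divide by zero */
        return 0;
}
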
@@ -672,8 +672,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
         /* disp clock */
         adev->clock.default_dispclk =
                 le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
-        if (adev->clock.default_dispclk == 0)
-                adev->clock.default_dispclk = 54000; /* 540 Mhz */
+        /* set a reasonable default for DP */
+        if (adev->clock.default_dispclk < 53900) {
+                DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
+                         adev->clock.default_dispclk / 100);
+                adev->clock.default_dispclk = 60000;
+        }
         adev->clock.dp_extclk =
                 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
         adev->clock.current_dispclk = adev->clock.default_dispclk;

@@ -177,7 +177,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
         /* get chunks */
         INIT_LIST_HEAD(&p->validated);
-        chunk_array_user = (uint64_t __user *)(cs->in.chunks);
+        chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
         if (copy_from_user(chunk_array, chunk_array_user,
                            sizeof(uint64_t)*cs->in.num_chunks)) {
                 ret = -EFAULT;
@@ -197,7 +197,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                 struct drm_amdgpu_cs_chunk user_chunk;
                 uint32_t __user *cdata;
 
-                chunk_ptr = (void __user *)chunk_array[i];
+                chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
                 if (copy_from_user(&user_chunk, chunk_ptr,
                                        sizeof(struct drm_amdgpu_cs_chunk))) {
                         ret = -EFAULT;
@@ -208,7 +208,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                 p->chunks[i].length_dw = user_chunk.length_dw;
 
                 size = p->chunks[i].length_dw;
-                cdata = (void __user *)user_chunk.chunk_data;
+                cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
                 p->chunks[i].user_ptr = cdata;
 
                 p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));

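The amdgpu_cs casts follow the usual rule for 64-bit handles in ioctl structures: a u64 coming from userspace is narrowed through unsigned long (or uintptr_t) before becoming a pointer, which keeps 32-bit builds free of "cast to pointer from integer of different size" warnings while leaving 64-bit builds unchanged. A minimal demonstration of the idiom (the struct name is invented):

#include <stdint.h>
#include <stdio.h>

/* ioctl payloads carry user pointers as u64 so the struct layout is
 * identical on 32- and 64-bit kernels. */
struct ioctl_args { uint64_t chunks; };

int main(void)
{
        int object = 42;
        struct ioctl_args args = { (uint64_t)(uintptr_t)&object };

        /* portable read-back: u64 -> uintptr_t -> pointer */
        int *p = (int *)(uintptr_t)args.chunks;
        printf("%d\n", *p);
        return 0;
}
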
@@ -85,8 +85,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
         /* We borrow the event spin lock for protecting flip_status */
         spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-        /* set the proper interrupt */
-        amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id);
         /* do the flip (mmio) */
         adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
         /* set the flip status */

@@ -242,11 +242,11 @@ static struct pci_device_id pciidlist[] = {
         {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
         /* topaz */
-        {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-        {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-        {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-        {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-        {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+        {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+        {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+        {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+        {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+        {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
         /* tonga */
         {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
         {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},

@@ -402,3 +402,19 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
                 return true;
         return false;
 }
+
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
+{
+        struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+        struct drm_fb_helper *fb_helper;
+        int ret;
+
+        if (!afbdev)
+                return;
+
+        fb_helper = &afbdev->helper;
+
+        ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+        if (ret)
+                DRM_DEBUG("failed to restore crtc mode\n");
+}

@@ -485,7 +485,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  * Outdated mess for old drm with Xorg being in charge (void function now).
  */
 /**
- * amdgpu_driver_firstopen_kms - drm callback for last close
+ * amdgpu_driver_lastclose_kms - drm callback for last close
  *
  * @dev: drm dev pointer
  *
@@ -493,6 +493,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  */
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 {
+        struct amdgpu_device *adev = dev->dev_private;
+
+        amdgpu_fbdev_restore_mode(adev);
         vga_switcheroo_process_delayed_switch();
 }
 

@@ -567,6 +567,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev);
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
 bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev);
 
 void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
 

@@ -294,10 +294,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
         struct amdgpu_device *adev = dev_get_drvdata(dev);
         umode_t effective_mode = attr->mode;
 
-        /* Skip limit attributes if DPM is not enabled */
+        /* Skip attributes if DPM is not enabled */
         if (!adev->pm.dpm_enabled &&
             (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
-             attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
+             attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
+             attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
                 return 0;
 
         /* Skip fan attributes if fan is not present */

@@ -455,8 +455,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                 return -ENOMEM;
 
         r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
-        if (r)
+        if (r) {
+                kfree(ib);
                 return r;
+        }
         ib->length_dw = 0;
 
         /* walk over the address space and update the page directory */

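The amdgpu_vm hunk is a classic error-path leak fix: the ib allocated just before amdgpu_ib_get() was not freed when that call failed. The shape of the fix as a standalone sketch (all names invented):

#include <stdlib.h>

struct ib { int length_dw; };

static int get_ib(struct ib *ib) { (void)ib; return -12; /* pretend -ENOMEM */ }

static int update_page_directory(void)
{
        struct ib *ib = calloc(1, sizeof(*ib));
        if (!ib)
                return -12;

        int r = get_ib(ib);
        if (r) {
                free(ib);       /* previously leaked on this path */
                return r;
        }
        ib->length_dw = 0;
        /* ... */
        free(ib);
        return 0;
}

int main(void) { return update_page_directory() ? 1 : 0; }
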
@@ -6185,6 +6185,11 @@ static int ci_dpm_late_init(void *handle)
         if (!amdgpu_dpm)
                 return 0;
 
+        /* init the sysfs and debugfs files late */
+        ret = amdgpu_pm_sysfs_init(adev);
+        if (ret)
+                return ret;
+
         ret = ci_set_temperature_range(adev);
         if (ret)
                 return ret;
@@ -6232,9 +6237,6 @@ static int ci_dpm_sw_init(void *handle)
         adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
         if (amdgpu_dpm == 1)
                 amdgpu_pm_print_power_states(adev);
-        ret = amdgpu_pm_sysfs_init(adev);
-        if (ret)
-                goto dpm_failed;
         mutex_unlock(&adev->pm.mutex);
         DRM_INFO("amdgpu: dpm initialized\n");
 

@@ -1567,6 +1567,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
         int ret, i;
         u16 tmp16;
 
+        if (pci_is_root_bus(adev->pdev->bus))
+                return;
+
         if (amdgpu_pcie_gen2 == 0)
                 return;
 

@@ -596,6 +596,12 @@ static int cz_dpm_late_init(void *handle)
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
         if (amdgpu_dpm) {
+                int ret;
+                /* init the sysfs and debugfs files late */
+                ret = amdgpu_pm_sysfs_init(adev);
+                if (ret)
+                        return ret;
+
                 /* powerdown unused blocks for now */
                 cz_dpm_powergate_uvd(adev, true);
                 cz_dpm_powergate_vce(adev, true);
@@ -632,10 +638,6 @@ static int cz_dpm_sw_init(void *handle)
         if (amdgpu_dpm == 1)
                 amdgpu_pm_print_power_states(adev);
 
-        ret = amdgpu_pm_sysfs_init(adev);
-        if (ret)
-                goto dpm_init_failed;
-
         mutex_unlock(&adev->pm.mutex);
         DRM_INFO("amdgpu: dpm initialized\n");
 

@@ -255,6 +255,24 @@ static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
                 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+        unsigned i;
+
+        /* Enable pflip interrupts */
+        for (i = 0; i < adev->mode_info.num_crtc; i++)
+                amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+        unsigned i;
+
+        /* Disable pflip interrupts */
+        for (i = 0; i < adev->mode_info.num_crtc; i++)
+                amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v10_0_page_flip - pageflip callback.
  *
@@ -2663,9 +2681,10 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                 dce_v10_0_vga_enable(crtc, true);
                 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                 dce_v10_0_vga_enable(crtc, false);
-                /* Make sure VBLANK interrupt is still enabled */
+                /* Make sure VBLANK and PFLIP interrupts are still enabled */
                 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                 amdgpu_irq_update(adev, &adev->crtc_irq, type);
+                amdgpu_irq_update(adev, &adev->pageflip_irq, type);
                 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                 dce_v10_0_crtc_load_lut(crtc);
                 break;
@@ -3025,6 +3044,8 @@ static int dce_v10_0_hw_init(void *handle)
                 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
         }
 
+        dce_v10_0_pageflip_interrupt_init(adev);
+
         return 0;
 }
 
@@ -3039,6 +3060,8 @@ static int dce_v10_0_hw_fini(void *handle)
                 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
         }
 
+        dce_v10_0_pageflip_interrupt_fini(adev);
+
         return 0;
 }
 
@@ -3050,6 +3073,8 @@ static int dce_v10_0_suspend(void *handle)
 
         dce_v10_0_hpd_fini(adev);
 
+        dce_v10_0_pageflip_interrupt_fini(adev);
+
         return 0;
 }
 
@@ -3075,6 +3100,8 @@ static int dce_v10_0_resume(void *handle)
         /* initialize hpd */
         dce_v10_0_hpd_init(adev);
 
+        dce_v10_0_pageflip_interrupt_init(adev);
+
         return 0;
 }
 
@@ -3369,7 +3396,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
         drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-        amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
         queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
         return 0;

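This DCE v10 rebalancing (repeated below for DCE v11 and v8) stops taking a pageflip-interrupt reference per flip in the flip worker and dropping it in the interrupt handler; instead the interrupts are enabled once per CRTC at hw_init/resume and disabled at hw_fini/suspend, so the get/put pairs can no longer fall out of balance across flips. A toy refcount model of the before/after shape (simplified, not the driver's API):

#include <stdio.h>

static int irq_refcount[3];

static void irq_get(int crtc)
{
        if (irq_refcount[crtc]++ == 0)
                printf("crtc %d irq on\n", crtc);
}

static void irq_put(int crtc)
{
        if (--irq_refcount[crtc] == 0)
                printf("crtc %d irq off\n", crtc);
}

static void hw_init(void) { for (int i = 0; i < 3; i++) irq_get(i); }
static void hw_fini(void) { for (int i = 0; i < 3; i++) irq_put(i); }

int main(void)
{
        hw_init();   /* pflip irqs stay enabled for the device's lifetime */
        /* ... any number of page flips, with no get/put per flip ... */
        hw_fini();
        return 0;
}
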
@@ -233,6 +233,24 @@ static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
                 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+        unsigned i;
+
+        /* Enable pflip interrupts */
+        for (i = 0; i < adev->mode_info.num_crtc; i++)
+                amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+        unsigned i;
+
+        /* Disable pflip interrupts */
+        for (i = 0; i < adev->mode_info.num_crtc; i++)
+                amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v11_0_page_flip - pageflip callback.
  *
@@ -2640,9 +2658,10 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                 dce_v11_0_vga_enable(crtc, true);
                 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                 dce_v11_0_vga_enable(crtc, false);
-                /* Make sure VBLANK interrupt is still enabled */
+                /* Make sure VBLANK and PFLIP interrupts are still enabled */
                 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                 amdgpu_irq_update(adev, &adev->crtc_irq, type);
+                amdgpu_irq_update(adev, &adev->pageflip_irq, type);
                 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                 dce_v11_0_crtc_load_lut(crtc);
                 break;
@@ -2888,7 +2907,7 @@ static int dce_v11_0_early_init(void *handle)
 
         switch (adev->asic_type) {
         case CHIP_CARRIZO:
-                adev->mode_info.num_crtc = 4;
+                adev->mode_info.num_crtc = 3;
                 adev->mode_info.num_hpd = 6;
                 adev->mode_info.num_dig = 9;
                 break;
@@ -3000,6 +3019,8 @@ static int dce_v11_0_hw_init(void *handle)
                 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
         }
 
+        dce_v11_0_pageflip_interrupt_init(adev);
+
         return 0;
 }
 
@@ -3014,6 +3035,8 @@ static int dce_v11_0_hw_fini(void *handle)
                 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
         }
 
+        dce_v11_0_pageflip_interrupt_fini(adev);
+
         return 0;
 }
 
@@ -3025,6 +3048,8 @@ static int dce_v11_0_suspend(void *handle)
 
         dce_v11_0_hpd_fini(adev);
 
+        dce_v11_0_pageflip_interrupt_fini(adev);
+
         return 0;
 }
 
@@ -3051,6 +3076,8 @@ static int dce_v11_0_resume(void *handle)
         /* initialize hpd */
         dce_v11_0_hpd_init(adev);
 
+        dce_v11_0_pageflip_interrupt_init(adev);
+
         return 0;
 }
 
@@ -3345,7 +3372,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
         drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-        amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
         queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
         return 0;

@@ -204,6 +204,24 @@ static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
                 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+        unsigned i;
+
+        /* Enable pflip interrupts */
+        for (i = 0; i < adev->mode_info.num_crtc; i++)
+                amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+        unsigned i;
+
+        /* Disable pflip interrupts */
+        for (i = 0; i < adev->mode_info.num_crtc; i++)
+                amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v8_0_page_flip - pageflip callback.
  *
@@ -2575,9 +2593,10 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                 dce_v8_0_vga_enable(crtc, true);
                 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                 dce_v8_0_vga_enable(crtc, false);
-                /* Make sure VBLANK interrupt is still enabled */
+                /* Make sure VBLANK and PFLIP interrupts are still enabled */
                 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                 amdgpu_irq_update(adev, &adev->crtc_irq, type);
+                amdgpu_irq_update(adev, &adev->pageflip_irq, type);
                 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                 dce_v8_0_crtc_load_lut(crtc);
                 break;
@@ -2933,6 +2952,8 @@ static int dce_v8_0_hw_init(void *handle)
                 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
         }
 
+        dce_v8_0_pageflip_interrupt_init(adev);
+
         return 0;
 }
 
@@ -2947,6 +2968,8 @@ static int dce_v8_0_hw_fini(void *handle)
                 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
         }
 
+        dce_v8_0_pageflip_interrupt_fini(adev);
+
         return 0;
 }
 
@@ -2958,6 +2981,8 @@ static int dce_v8_0_suspend(void *handle)
 
         dce_v8_0_hpd_fini(adev);
 
+        dce_v8_0_pageflip_interrupt_fini(adev);
+
         return 0;
 }
 
@@ -2981,6 +3006,8 @@ static int dce_v8_0_resume(void *handle)
         /* initialize hpd */
         dce_v8_0_hpd_init(adev);
 
+        dce_v8_0_pageflip_interrupt_init(adev);
+
         return 0;
 }
 
@@ -3376,7 +3403,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
         drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-        amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
         queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
         return 0;

@@ -2995,6 +2995,15 @@ static int kv_dpm_late_init(void *handle)
 {
         /* powerdown unused blocks for now */
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+        int ret;
+
+        if (!amdgpu_dpm)
+                return 0;
+
+        /* init the sysfs and debugfs files late */
+        ret = amdgpu_pm_sysfs_init(adev);
+        if (ret)
+                return ret;
 
         kv_dpm_powergate_acp(adev, true);
         kv_dpm_powergate_samu(adev, true);
@@ -3038,9 +3047,6 @@ static int kv_dpm_sw_init(void *handle)
         adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
         if (amdgpu_dpm == 1)
                 amdgpu_pm_print_power_states(adev);
-        ret = amdgpu_pm_sysfs_init(adev);
-        if (ret)
-                goto dpm_failed;
         mutex_unlock(&adev->pm.mutex);
         DRM_INFO("amdgpu: dpm initialized\n");
 

@@ -1005,6 +1005,9 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
         u32 mask;
         int ret;
 
+        if (pci_is_root_bus(adev->pdev->bus))
+                return;
+
         if (amdgpu_pcie_gen2 == 0)
                 return;
 