-----BEGIN PGP SIGNATURE-----
 
 iQEcBAABAgAGBQJY881cAAoJEHm+PkMAQRiGG4UH+wa2z6Qet36Uc4nXFZuSMYrO
 ErUWs1QpTDDv4a+LE4fgyMvM3j9XqtpfQLy1n70jfD14IqPBhHe4gytasAf+8lg1
 YvddFx0Yl3sygVu3dDBNigWeVDbfwepW59coN0vI5nrMo+wrei8aVIWcFKOxdMuO
 n72u9vuhrkEnLJuQk7SF+t4OQob9McXE3s7QgyRopmlKhKo7mh8On7K2BRI5uluL
 t0j5kZM0a43EUT5rq9xR8f5pgtyfTMG/FO2MuzZn43MJcZcyfmnOP/cTSIvAKA5U
 1i12lxlokYhURNUe+S6jm8A47TrqSRSJxaQJZRlfGJksZ0LJa8eUaLDCviBQEoE=
 =6QWZ
 -----END PGP SIGNATURE-----

Merge tag 'v4.11-rc7' into drm-next

Backmerge Linux 4.11-rc7 from Linus tree, to fix some
conflicts that were causing problems with the rerere cache
in drm-tip.
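
A backmerge such as this is normally produced by merging the upstream release
tag into the feature branch and resolving the conflicts by hand. A minimal
sketch of the operation (the remote name "linus" is an assumption, not part of
this commit):

    git checkout drm-next
    git fetch linus --tags      # fetch Linus' tree, including the v4.11-rc7 tag
    git merge v4.11-rc7         # resolve conflicts; rerere records the resolutions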
Dave Airlie 2017-04-19 11:07:14 +10:00
Parents a6a5c983b3 4f7d029b9b
Commit 856ee92e86
178 changed files: 2006 additions and 1093 deletions

View file

@@ -99,6 +99,8 @@ Linas Vepstas <linas@austin.ibm.com>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Mark Brown <broonie@sirena.org.uk>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Matthieu CASTET <castet.matthieu@free.fr>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>

View file

@@ -13331,7 +13331,7 @@ F: drivers/virtio/
F: tools/virtio/
F: drivers/net/virtio_net.c
F: drivers/block/virtio_blk.c
F: include/linux/virtio_*.h
F: include/linux/virtio*.h
F: include/uapi/linux/virtio_*.h
F: drivers/crypto/virtio/

View file

@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 11
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Fearless Coyote
# *DOCUMENTATION*

View file

@@ -371,6 +371,8 @@
phy1: ethernet-phy@1 {
reg = <7>;
eee-broken-100tx;
eee-broken-1000t;
};
};

View file

@@ -672,6 +672,7 @@
ti,non-removable;
bus-width = <4>;
cap-power-off-card;
keep-power-in-suspend;
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;

View file

@@ -283,6 +283,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
linux,pci-domain = <0>;
@@ -319,6 +320,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
linux,pci-domain = <1>;

View file

@@ -121,7 +121,7 @@
&i2c3 {
clock-frequency = <400000>;
at24@50 {
compatible = "at24,24c02";
compatible = "atmel,24c64";
readonly;
reg = <0x50>;
};

View file

@@ -66,12 +66,6 @@
opp-microvolt = <1200000>;
clock-latency-ns = <244144>; /* 8 32k periods */
};
opp@1200000000 {
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <1320000>;
clock-latency-ns = <244144>; /* 8 32k periods */
};
};
cpus {
@@ -81,16 +75,22 @@
operating-points-v2 = <&cpu0_opp_table>;
};
cpu@1 {
operating-points-v2 = <&cpu0_opp_table>;
};
cpu@2 {
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <2>;
operating-points-v2 = <&cpu0_opp_table>;
};
cpu@3 {
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <3>;
operating-points-v2 = <&cpu0_opp_table>;
};
};

View file

@@ -270,6 +270,7 @@ extern const struct smp_operations omap4_smp_ops;
extern int omap4_mpuss_init(void);
extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
extern u32 omap4_get_cpu1_ns_pa_addr(void);
#else
static inline int omap4_enter_lowpower(unsigned int cpu,
unsigned int power_state)

View file

@@ -50,7 +50,7 @@ void omap4_cpu_die(unsigned int cpu)
omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
if (omap_secure_apis_support())
boot_cpu = omap_read_auxcoreboot0();
boot_cpu = omap_read_auxcoreboot0() >> 9;
else
boot_cpu =
readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5;

View file

@@ -64,6 +64,7 @@
#include "prm-regbits-44xx.h"
static void __iomem *sar_base;
static u32 old_cpu1_ns_pa_addr;
#if defined(CONFIG_PM) && defined(CONFIG_SMP)
@@ -212,6 +213,11 @@ static void __init save_l2x0_context(void)
{}
#endif
u32 omap4_get_cpu1_ns_pa_addr(void)
{
return old_cpu1_ns_pa_addr;
}
/**
* omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
* The purpose of this function is to manage low power programming
@@ -460,22 +466,30 @@ int __init omap4_mpuss_init(void)
void __init omap4_mpuss_early_init(void)
{
unsigned long startup_pa;
void __iomem *ns_pa_addr;
if (!(cpu_is_omap44xx() || soc_is_omap54xx()))
if (!(soc_is_omap44xx() || soc_is_omap54xx()))
return;
sar_base = omap4_get_sar_ram_base();
if (cpu_is_omap443x())
/* Save old NS_PA_ADDR for validity checks later on */
if (soc_is_omap44xx())
ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
else
ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
if (soc_is_omap443x())
startup_pa = __pa_symbol(omap4_secondary_startup);
else if (cpu_is_omap446x())
else if (soc_is_omap446x())
startup_pa = __pa_symbol(omap4460_secondary_startup);
else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
else
startup_pa = __pa_symbol(omap5_secondary_startup);
if (cpu_is_omap44xx())
if (soc_is_omap44xx())
writel_relaxed(startup_pa, sar_base +
CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
else

View file

@@ -94,6 +94,5 @@ ENTRY(omap_read_auxcoreboot0)
ldr r12, =0x103
dsb
smc #0
mov r0, r0, lsr #9
ldmfd sp!, {r2-r12, pc}
ENDPROC(omap_read_auxcoreboot0)

View file

@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>
#include <asm/sections.h>
#include <asm/smp_scu.h>
#include <asm/virt.h>
@@ -40,10 +41,14 @@
#define OMAP5_CORE_COUNT 0x2
#define AUX_CORE_BOOT0_GP_RELEASE 0x020
#define AUX_CORE_BOOT0_HS_RELEASE 0x200
struct omap_smp_config {
unsigned long cpu1_rstctrl_pa;
void __iomem *cpu1_rstctrl_va;
void __iomem *scu_base;
void __iomem *wakeupgen_base;
void *startup_addr;
};
@@ -140,7 +145,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
static struct clockdomain *cpu1_clkdm;
static bool booted;
static struct powerdomain *cpu1_pwrdm;
void __iomem *base = omap_get_wakeupgen_base();
/*
* Set synchronisation state between this boot processor
@@ -155,9 +159,11 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
* A barrier is added to ensure that write buffer is drained
*/
if (omap_secure_apis_support())
omap_modify_auxcoreboot0(0x200, 0xfffffdff);
omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
0xfffffdff);
else
writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0);
writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
if (!cpu1_clkdm && !cpu1_pwrdm) {
cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
@@ -261,9 +267,72 @@ static void __init omap4_smp_init_cpus(void)
set_cpu_possible(i, true);
}
/*
* For now, just make sure the start-up address is not within the booting
* kernel space as that means we just overwrote whatever secondary_startup()
* code there was.
*/
static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
{
if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
return false;
return true;
}
/*
* We may need to reset CPU1 before configuring, otherwise kexec boot can end
* up trying to use old kernel startup address or suspend-resume will
* occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
* idle states.
*/
static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
{
unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
bool needs_reset = false;
u32 released;
if (omap_secure_apis_support())
released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
else
released = readl_relaxed(cfg.wakeupgen_base +
OMAP_AUX_CORE_BOOT_0) &
AUX_CORE_BOOT0_GP_RELEASE;
if (released) {
pr_warn("smp: CPU1 not parked?\n");
return;
}
cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
OMAP_AUX_CORE_BOOT_1);
cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
/* Did the configured secondary_startup() get overwritten? */
if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
needs_reset = true;
/*
* If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
* deeper idle state in WFI and will wake to an invalid address.
*/
if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
!omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
needs_reset = true;
if (!needs_reset || !c->cpu1_rstctrl_va)
return;
pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
cpu1_startup_pa, cpu1_ns_pa_addr);
writel_relaxed(1, c->cpu1_rstctrl_va);
readl_relaxed(c->cpu1_rstctrl_va);
writel_relaxed(0, c->cpu1_rstctrl_va);
}
static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
{
void __iomem *base = omap_get_wakeupgen_base();
const struct omap_smp_config *c = NULL;
if (soc_is_omap443x())
@@ -281,6 +350,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
/* Must preserve cfg.scu_base set earlier */
cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
cfg.startup_addr = c->startup_addr;
cfg.wakeupgen_base = omap_get_wakeupgen_base();
if (soc_is_dra74x() || soc_is_omap54xx()) {
if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
@@ -299,15 +369,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
if (cfg.scu_base)
scu_enable(cfg.scu_base);
/*
* Reset CPU1 before configuring, otherwise kexec will
* end up trying to use old kernel startup address.
*/
if (cfg.cpu1_rstctrl_va) {
writel_relaxed(1, cfg.cpu1_rstctrl_va);
readl_relaxed(cfg.cpu1_rstctrl_va);
writel_relaxed(0, cfg.cpu1_rstctrl_va);
}
omap4_smp_maybe_reset_cpu1(&cfg);
/*
* Write the address of secondary startup routine into the
@@ -319,7 +381,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
else
writel_relaxed(__pa_symbol(cfg.startup_addr),
base + OMAP_AUX_CORE_BOOT_1);
cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
}
const struct smp_operations omap4_smp_ops __initconst = {

View file

@@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
dev_err(dev, "failed to idle\n");
}
break;
case BUS_NOTIFY_BIND_DRIVER:
od = to_omap_device(pdev);
if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
pm_runtime_status_suspended(dev)) {
od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
pm_runtime_set_active(dev);
}
break;
case BUS_NOTIFY_ADD_DEVICE:
if (pdev->dev.of_node)
omap_device_build_from_dt(pdev);

View file

@@ -6,6 +6,7 @@ menuconfig ARCH_ORION5X
select GPIOLIB
select MVEBU_MBUS
select PCI
select PHYLIB if NETDEVICES
select PLAT_ORION_LEGACY
help
Support for the following Marvell Orion 5x series SoCs:

View file

@@ -468,6 +468,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
eth_data, &orion_ge11);
}
#ifdef CONFIG_ARCH_ORION5X
/*****************************************************************************
* Ethernet switch
****************************************************************************/
@@ -480,6 +481,9 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
struct mdio_board_info *bd;
unsigned int i;
if (!IS_BUILTIN(CONFIG_PHYLIB))
return;
for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
if (!strcmp(d->port_names[i], "cpu"))
break;
@@ -493,6 +497,7 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
}
#endif
/*****************************************************************************
* I2C

View file

@@ -179,8 +179,10 @@
usbphy: phy@01c19400 {
compatible = "allwinner,sun50i-a64-usb-phy";
reg = <0x01c19400 0x14>,
<0x01c1a800 0x4>,
<0x01c1b800 0x4>;
reg-names = "phy_ctrl",
"pmu0",
"pmu1";
clocks = <&ccu CLK_USB_PHY0>,
<&ccu CLK_USB_PHY1>;

View file

@@ -0,0 +1,29 @@
#ifndef _ASM_IA64_ASM_PROTOTYPES_H
#define _ASM_IA64_ASM_PROTOTYPES_H
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <asm/esi.h>
#include <asm/ftrace.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/string.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#include <asm/xor.h>
extern const char ia64_ivt[];
signed int __divsi3(signed int, unsigned int);
signed int __modsi3(signed int, unsigned int);
signed long long __divdi3(signed long long, unsigned long long);
signed long long __moddi3(signed long long, unsigned long long);
unsigned int __udivsi3(unsigned int, unsigned int);
unsigned int __umodsi3(unsigned int, unsigned int);
unsigned long long __udivdi3(unsigned long long, unsigned long long);
unsigned long long __umoddi3(unsigned long long, unsigned long long);
#endif /* _ASM_IA64_ASM_PROTOTYPES_H */

View file

@@ -24,25 +24,25 @@ AFLAGS___modsi3.o = -DMODULO
AFLAGS___umodsi3.o = -DUNSIGNED -DMODULO
$(obj)/__divdi3.o: $(src)/idiv64.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__udivdi3.o: $(src)/idiv64.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__moddi3.o: $(src)/idiv64.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__umoddi3.o: $(src)/idiv64.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__divsi3.o: $(src)/idiv32.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__udivsi3.o: $(src)/idiv32.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__modsi3.o: $(src)/idiv32.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__umodsi3.o: $(src)/idiv32.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)

View file

@@ -201,7 +201,7 @@ ENTRY_CFI(pa_memcpy)
add dst,len,end
/* short copy with less than 16 bytes? */
cmpib,>>=,n 15,len,.Lbyte_loop
cmpib,COND(>>=),n 15,len,.Lbyte_loop
/* same alignment? */
xor src,dst,t0
@@ -216,7 +216,7 @@ ENTRY_CFI(pa_memcpy)
/* loop until we are 64-bit aligned */
.Lalign_loop64:
extru dst,31,3,t1
cmpib,=,n 0,t1,.Lcopy_loop_16
cmpib,=,n 0,t1,.Lcopy_loop_16_start
20: ldb,ma 1(srcspc,src),t1
21: stb,ma t1,1(dstspc,dst)
b .Lalign_loop64
@@ -225,6 +225,7 @@ ENTRY_CFI(pa_memcpy)
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
.Lcopy_loop_16_start:
ldi 31,t0
.Lcopy_loop_16:
cmpb,COND(>>=),n t0,len,.Lword_loop
@@ -267,7 +268,7 @@ ENTRY_CFI(pa_memcpy)
/* loop until we are 32-bit aligned */
.Lalign_loop32:
extru dst,31,2,t1
cmpib,=,n 0,t1,.Lcopy_loop_4
cmpib,=,n 0,t1,.Lcopy_loop_8
20: ldb,ma 1(srcspc,src),t1
21: stb,ma t1,1(dstspc,dst)
b .Lalign_loop32
@@ -277,7 +278,7 @@ ENTRY_CFI(pa_memcpy)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
.Lcopy_loop_4:
.Lcopy_loop_8:
cmpib,COND(>>=),n 15,len,.Lbyte_loop
10: ldw 0(srcspc,src),t1
@@ -299,7 +300,7 @@ ENTRY_CFI(pa_memcpy)
ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
b .Lcopy_loop_4
b .Lcopy_loop_8
ldo -16(len),len
.Lbyte_loop:
@@ -324,7 +325,7 @@ ENTRY_CFI(pa_memcpy)
.Lunaligned_copy:
/* align until dst is 32bit-word-aligned */
extru dst,31,2,t1
cmpib,COND(=),n 0,t1,.Lcopy_dstaligned
cmpib,=,n 0,t1,.Lcopy_dstaligned
20: ldb 0(srcspc,src),t1
ldo 1(src),src
21: stb,ma t1,1(dstspc,dst)
@@ -362,7 +363,7 @@ ENTRY_CFI(pa_memcpy)
cmpiclr,<> 1,t0,%r0
b,n .Lcase1
.Lcase0:
cmpb,= %r0,len,.Lcda_finish
cmpb,COND(=) %r0,len,.Lcda_finish
nop
1: ldw,ma 4(srcspc,src), a3
@@ -376,7 +377,7 @@ ENTRY_CFI(pa_memcpy)
1: ldw,ma 4(srcspc,src), a3
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
ldo -1(len),len
cmpb,=,n %r0,len,.Ldo0
cmpb,COND(=),n %r0,len,.Ldo0
.Ldo4:
1: ldw,ma 4(srcspc,src), a0
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
@@ -402,7 +403,7 @@ ENTRY_CFI(pa_memcpy)
1: stw,ma t0, 4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
ldo -4(len),len
cmpb,<> %r0,len,.Ldo4
cmpb,COND(<>) %r0,len,.Ldo4
nop
.Ldo0:
shrpw a2, a3, %sar, t0
@@ -436,14 +437,14 @@ ENTRY_CFI(pa_memcpy)
/* fault exception fixup handlers: */
#ifdef CONFIG_64BIT
.Lcopy16_fault:
10: b .Lcopy_done
std,ma t1,8(dstspc,dst)
b .Lcopy_done
10: std,ma t1,8(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
#endif
.Lcopy8_fault:
10: b .Lcopy_done
stw,ma t1,4(dstspc,dst)
b .Lcopy_done
10: stw,ma t1,4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
.exit

View file

@@ -30,8 +30,10 @@ static int __init vdso32_setup(char *s)
{
vdso32_enabled = simple_strtoul(s, NULL, 0);
if (vdso32_enabled > 1)
if (vdso32_enabled > 1) {
pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
vdso32_enabled = 0;
}
return 1;
}
@@ -62,13 +64,18 @@ subsys_initcall(sysenter_setup);
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>
static const int zero;
static const int one = 1;
static struct ctl_table abi_table2[] = {
{
.procname = "vsyscall32",
.data = &vdso32_enabled,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
.proc_handler = proc_dointvec_minmax,
.extra1 = (int *)&zero,
.extra2 = (int *)&one,
},
{}
};

View file

@@ -507,6 +507,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
cpuc->lbr_entries[i].to = msr_lastbranch.to;
cpuc->lbr_entries[i].mispred = 0;
cpuc->lbr_entries[i].predicted = 0;
cpuc->lbr_entries[i].in_tx = 0;
cpuc->lbr_entries[i].abort = 0;
cpuc->lbr_entries[i].cycles = 0;
cpuc->lbr_entries[i].reserved = 0;
}
cpuc->lbr_stack.nr = i;

View file

@@ -287,7 +287,7 @@ struct task_struct;
#define ARCH_DLINFO_IA32 \
do { \
if (vdso32_enabled) { \
if (VDSO_CURRENT_BASE) { \
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
} \

View file

@@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
* instruction.
* instruction. Note that @size is internally rounded up to be cache
* line size aligned.
*/
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
@@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
clwb(p);
}
/*
* copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
* iterators, so for other types (bvec & kvec) we must do a cache write-back.
*/
static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
{
return iter_is_iovec(i) == false;
}
/**
* arch_copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
@@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
/* TODO: skip the write-back by always using non-temporal stores */
len = copy_from_iter_nocache(addr, bytes, i);
if (__iter_needs_pmem_wb(i))
/*
* In the iovec case on x86_64 copy_from_iter_nocache() uses
* non-temporal stores for the bulk of the transfer, but we need
* to manually flush if the transfer is unaligned. A cached
* memory copy is used when destination or size is not naturally
* aligned. That is:
* - Require 8-byte alignment when size is 8 bytes or larger.
* - Require 4-byte alignment when size is 4 bytes.
*
* In the non-iovec case the entire destination needs to be
* flushed.
*/
if (iter_is_iovec(i)) {
unsigned long flushed, dest = (unsigned long) addr;
if (bytes < 8) {
if (!IS_ALIGNED(dest, 4) || (bytes != 4))
arch_wb_cache_pmem(addr, 1);
} else {
if (!IS_ALIGNED(dest, 8)) {
dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
arch_wb_cache_pmem(addr, 1);
}
flushed = dest - (unsigned long) addr;
if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
arch_wb_cache_pmem(addr + bytes - 1, 1);
}
} else
arch_wb_cache_pmem(addr, bytes);
return len;

View file

@@ -200,11 +200,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
}
out:
rdtgroup_kn_unlock(of->kn);
for_each_enabled_rdt_resource(r) {
kfree(r->tmp_cbms);
r->tmp_cbms = NULL;
}
rdtgroup_kn_unlock(of->kn);
return ret ?: nbytes;
}

View file

@@ -846,7 +846,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
me->comm, me->pid, where, frame,
regs->ip, regs->sp, regs->orig_ax);
print_vma_addr(" in ", regs->ip);
print_vma_addr(KERN_CONT " in ", regs->ip);
pr_cont("\n");
}

View file

@@ -151,8 +151,8 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from,
if (from->si_signo == SIGSEGV) {
if (from->si_code == SEGV_BNDERR) {
compat_uptr_t lower = (unsigned long)&to->si_lower;
compat_uptr_t upper = (unsigned long)&to->si_upper;
compat_uptr_t lower = (unsigned long)from->si_lower;
compat_uptr_t upper = (unsigned long)from->si_upper;
put_user_ex(lower, &to->si_lower);
put_user_ex(upper, &to->si_upper);
}

View file

@@ -255,7 +255,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
tsk->comm, tsk->pid, str,
regs->ip, regs->sp, error_code);
print_vma_addr(" in ", regs->ip);
print_vma_addr(KERN_CONT " in ", regs->ip);
pr_cont("\n");
}
@@ -519,7 +519,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
tsk->comm, task_pid_nr(tsk),
regs->ip, regs->sp, error_code);
print_vma_addr(" in ", regs->ip);
print_vma_addr(KERN_CONT " in ", regs->ip);
pr_cont("\n");
}

View file

@@ -643,21 +643,40 @@ void __init init_mem_mapping(void)
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
* is valid. The argument is a physical page number.
*
*
* On x86, access has to be given to the first megabyte of ram because that area
* contains BIOS code and data regions used by X and dosemu and similar apps.
* Access has to be given to non-kernel-ram areas as well, these contain the PCI
* mmio resources as well as potential bios/acpi data regions.
* On x86, access has to be given to the first megabyte of RAM because that
* area traditionally contains BIOS code and data regions used by X, dosemu,
* and similar apps. Since they map the entire memory range, the whole range
* must be allowed (for mapping), but any areas that would otherwise be
* disallowed are flagged as being "zero filled" instead of rejected.
* Access has to be given to non-kernel-ram areas as well, these contain the
* PCI mmio resources as well as potential bios/acpi data regions.
*/
int devmem_is_allowed(unsigned long pagenr)
{
if (pagenr < 256)
return 1;
if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
if (page_is_ram(pagenr)) {
/*
* For disallowed memory regions in the low 1MB range,
* request that the page be shown as all zeros.
*/
if (pagenr < 256)
return 2;
return 0;
if (!page_is_ram(pagenr))
return 1;
return 0;
}
/*
* This must follow RAM test, since System RAM is considered a
* restricted resource under CONFIG_STRICT_IOMEM.
*/
if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
/* Low 1MB bypasses iomem restrictions. */
if (pagenr < 256)
return 1;
return 0;
}
return 1;
}
void free_init_pages(char *what, unsigned long begin, unsigned long end)

View file

@@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
return;
}
/* No need to reserve regions that will never be freed. */
if (md.attribute & EFI_MEMORY_RUNTIME)
return;
size += addr % EFI_PAGE_SIZE;
size = round_up(size, EFI_PAGE_SIZE);
addr = round_down(addr, EFI_PAGE_SIZE);

View file

@@ -421,10 +421,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
/*
* The absolute minimum resource template is one end_tag descriptor.
* However, we will treat a lone end_tag as just a simple buffer.
*/
/* The absolute minimum resource template is one end_tag descriptor */
if (aml_length < sizeof(struct aml_resource_end_tag)) {
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
@@ -456,8 +454,9 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
/* Invoke the user function */
if (user_function) {
status = user_function(aml, length, offset,
resource_index, context);
status =
user_function(aml, length, offset, resource_index,
context);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -481,12 +480,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
*context = aml;
}
/* Check if buffer is defined to be longer than the resource length */
if (aml_length > (offset + length)) {
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/* Normal exit */
return_ACPI_STATUS(AE_OK);

View file

@@ -1617,7 +1617,11 @@ static int cmp_map(const void *m0, const void *m1)
const struct nfit_set_info_map *map0 = m0;
const struct nfit_set_info_map *map1 = m1;
return map0->region_offset - map1->region_offset;
if (map0->region_offset < map1->region_offset)
return -1;
else if (map0->region_offset > map1->region_offset)
return 1;
return 0;
}
/* Retrieve the nth entry referencing this spa */

View file

@@ -1857,15 +1857,20 @@ static void acpi_bus_attach(struct acpi_device *device)
return;
device->flags.match_driver = true;
if (!ret) {
ret = device_attach(&device->dev);
if (ret < 0)
return;
if (!ret && device->pnp.type.platform_id)
acpi_default_enumeration(device);
if (ret > 0) {
acpi_device_set_enumerated(device);
goto ok;
}
ret = device_attach(&device->dev);
if (ret < 0)
return;
if (ret > 0 || !device->pnp.type.platform_id)
acpi_device_set_enumerated(device);
else
acpi_default_enumeration(device);
ok:
list_for_each_entry(child, &device->children, node)
acpi_bus_attach(child);

View file

@@ -278,11 +278,6 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { &info, &info };
/* SB600/700 don't have secondary port wired */
if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) ||
(pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE))
ppi[1] = &ata_dummy_port_info;
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
}

View file

@@ -644,14 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id,
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
/* enable IRQ on hotplug */
pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
dev_dbg(&pdev->dev,
"enabling SATA hotplug (0x%x)\n",
(int) tmp8);
tmp8 |= SATA_HOTPLUG;
pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
if (board_id == vt6421) {
/* enable IRQ on hotplug */
pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
dev_dbg(&pdev->dev,
"enabling SATA hotplug (0x%x)\n",
(int) tmp8);
tmp8 |= SATA_HOTPLUG;
pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
}
}
/*

View file

@@ -523,7 +523,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
copy_page(mem, cmem);
memcpy(mem, cmem, PAGE_SIZE);
} else {
struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
@@ -717,7 +717,7 @@ compress_again:
if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
src = kmap_atomic(page);
copy_page(cmem, src);
memcpy(cmem, src, PAGE_SIZE);
kunmap_atomic(src);
} else {
memcpy(cmem, src, clen);
@@ -928,7 +928,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
}
index = sector >> SECTORS_PER_PAGE_SHIFT;
offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
bv.bv_page = page;
bv.bv_len = PAGE_SIZE;

View file

@@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
#endif
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
return 1;
@@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
while (count > 0) {
unsigned long remaining;
int allowed;
sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, count))
allowed = page_is_allowed(p >> PAGE_SHIFT);
if (!allowed)
return -EPERM;
if (allowed == 2) {
/* Show zeros for restricted memory. */
remaining = clear_user(buf, sz);
} else {
/*
* On ia64 if a page has been mapped somewhere as
* uncached, then it must also be accessed uncached
* by the kernel or data corruption may occur.
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr)
return -EFAULT;
/*
* On ia64 if a page has been mapped somewhere as uncached, then
* it must also be accessed uncached by the kernel or data
* corruption may occur.
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr)
return -EFAULT;
remaining = copy_to_user(buf, ptr, sz);
unxlate_dev_mem_ptr(p, ptr);
}
remaining = copy_to_user(buf, ptr, sz);
unxlate_dev_mem_ptr(p, ptr);
if (remaining)
return -EFAULT;
@@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
#endif
while (count > 0) {
int allowed;
sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, sz))
allowed = page_is_allowed(p >> PAGE_SHIFT);
if (!allowed)
return -EPERM;
/*
* On ia64 if a page has been mapped somewhere as uncached, then
* it must also be accessed uncached by the kernel or data
* corruption may occur.
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr) {
if (written)
break;
return -EFAULT;
}
/* Skip actual writing when a page is marked as restricted. */
if (allowed == 1) {
/*
* On ia64 if a page has been mapped somewhere as
* uncached, then it must also be accessed uncached
* by the kernel or data corruption may occur.
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr) {
if (written)
break;
return -EFAULT;
}
copied = copy_from_user(ptr, buf, sz);
unxlate_dev_mem_ptr(p, ptr);
if (copied) {
written += sz - copied;
if (written)
break;
return -EFAULT;
copied = copy_from_user(ptr, buf, sz);
unxlate_dev_mem_ptr(p, ptr);
if (copied) {
written += sz - copied;
if (written)
break;
return -EFAULT;
}
}
buf += sz;

View file

@@ -2202,14 +2202,16 @@ static int virtcons_freeze(struct virtio_device *vdev)
vdev->config->reset(vdev);
virtqueue_disable_cb(portdev->c_ivq);
if (use_multiport(portdev))
virtqueue_disable_cb(portdev->c_ivq);
cancel_work_sync(&portdev->control_work);
cancel_work_sync(&portdev->config_work);
/*
* Once more: if control_work_handler() was running, it would
* enable the cb as the last step.
*/
virtqueue_disable_cb(portdev->c_ivq);
if (use_multiport(portdev))
virtqueue_disable_cb(portdev->c_ivq);
remove_controlq_data(portdev);
list_for_each_entry(port, &portdev->ports, list) {

View file

@@ -2398,6 +2398,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
*********************************************************************/
static enum cpuhp_state hp_online;
static int cpuhp_cpufreq_online(unsigned int cpu)
{
cpufreq_online(cpu);
return 0;
}
static int cpuhp_cpufreq_offline(unsigned int cpu)
{
cpufreq_offline(cpu);
return 0;
}
/**
* cpufreq_register_driver - register a CPU Frequency driver
* @driver_data: A struct cpufreq_driver containing the values#
@@ -2460,8 +2474,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
}
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
cpufreq_online,
cpufreq_offline);
cpuhp_cpufreq_online,
cpuhp_cpufreq_offline);
if (ret < 0)
goto err_if_unreg;
hp_online = ret;

View file

@@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
ctx->dev = caam_jr_alloc();
if (IS_ERR(ctx->dev)) {
dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(ctx->dev);
}

View file

@@ -281,7 +281,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
/* Try to run it through DECO0 */
ret = run_descriptor_deco0(ctrldev, desc, &status);
if (ret || status) {
if (ret ||
(status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
dev_err(ctrldev,
"Failed to deinstantiate RNG4 SH%d\n",
sh_idx);
@@ -301,15 +302,13 @@ static int caam_remove(struct platform_device *pdev)
struct device *ctrldev;
struct caam_drv_private *ctrlpriv;
struct caam_ctrl __iomem *ctrl;
int ring;
ctrldev = &pdev->dev;
ctrlpriv = dev_get_drvdata(ctrldev);
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
/* Remove platform devices for JobRs */
for (ring = 0; ring < ctrlpriv->total_jobrs; ring++)
of_device_unregister(ctrlpriv->jrpdev[ring]);
/* Remove platform devices under the crypto node */
of_platform_depopulate(ctrldev);
/* De-initialize RNG state handles initialized by this driver. */
if (ctrlpriv->rng4_sh_init)
@@ -418,10 +417,21 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
#endif
static const struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
},
{
.compatible = "fsl,sec4.0",
},
{},
};
MODULE_DEVICE_TABLE(of, caam_match);
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
int ret, ring, ridx, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
u64 caam_id;
struct device *dev;
struct device_node *nprop, *np;
@@ -597,47 +607,24 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
/*
* Detect and enable JobRs
* First, find out how many ring spec'ed, allocate references
* for all, then go probe each one.
*/
rspec = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
rspec++;
ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
if (ctrlpriv->jrpdev == NULL) {
ret = -ENOMEM;
ret = of_platform_populate(nprop, caam_match, NULL, dev);
if (ret) {
dev_err(dev, "JR platform devices creation error\n");
goto iounmap_ctrl;
}
ring = 0;
ridx = 0;
ctrlpriv->total_jobrs = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
ctrlpriv->jrpdev[ring] =
of_platform_device_create(np, NULL, dev);
if (!ctrlpriv->jrpdev[ring]) {
pr_warn("JR physical index %d: Platform device creation error\n",
ridx);
ridx++;
continue;
}
ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
((__force uint8_t *)ctrl +
(ridx + JR_BLOCK_NUMBER) *
(ring + JR_BLOCK_NUMBER) *
BLOCK_OFFSET
);
ctrlpriv->total_jobrs++;
ring++;
ridx++;
}
}
/* Check to see if QI present. If so, enable */
ctrlpriv->qi_present =
@@ -847,17 +834,6 @@ disable_caam_ipg:
return ret;
}
static struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
},
{
.compatible = "fsl,sec4.0",
},
{},
};
MODULE_DEVICE_TABLE(of, caam_match);
static struct platform_driver caam_driver = {
.driver = {
.name = "caam",

View file

@@ -66,7 +66,6 @@ struct caam_drv_private_jr {
struct caam_drv_private {
struct device *dev;
struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
struct platform_device *pdev;
/* Physical-presence section */

View file

@@ -2,6 +2,7 @@ menuconfig DEV_DAX
tristate "DAX: direct access to differentiated memory"
default m if NVDIMM_DAX
depends on TRANSPARENT_HUGEPAGE
select SRCU
help
Support raw access to differentiated (persistence, bandwidth,
latency...) memory via an mmap(2) capable character

View file

@@ -25,6 +25,7 @@
#include "dax.h"
static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
@@ -60,7 +61,7 @@ struct dax_region {
* @region - parent region
* @dev - device backing the character device
* @cdev - core chardev data
* @alive - !alive + rcu grace period == no new mappings can be established
* @alive - !alive + srcu grace period == no new mappings can be established
* @id - child id in the region
* @num_resources - number of physical address extents in this device
* @res - array of physical address ranges
@@ -569,7 +570,7 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
static int dax_dev_huge_fault(struct vm_fault *vmf,
enum page_entry_size pe_size)
{
int rc;
int rc, id;
struct file *filp = vmf->vma->vm_file;
struct dax_dev *dax_dev = filp->private_data;
@@ -578,7 +579,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
? "write" : "read",
vmf->vma->vm_start, vmf->vma->vm_end);
rcu_read_lock();
id = srcu_read_lock(&dax_srcu);
switch (pe_size) {
case PE_SIZE_PTE:
rc = __dax_dev_pte_fault(dax_dev, vmf);
@@ -592,7 +593,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
default:
return VM_FAULT_FALLBACK;
}
rcu_read_unlock();
srcu_read_unlock(&dax_srcu, id);
return rc;
}
@@ -713,11 +714,11 @@ static void unregister_dax_dev(void *dev)
* Note, rcu is not protecting the liveness of dax_dev, rcu is
* ensuring that any fault handlers that might have seen
* dax_dev->alive == true, have completed. Any fault handlers
* that start after synchronize_rcu() has started will abort
* that start after synchronize_srcu() has started will abort
* upon seeing dax_dev->alive == false.
*/
dax_dev->alive = false;
synchronize_rcu();
synchronize_srcu(&dax_srcu);
unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
cdev_del(cdev);
device_unregister(dev);

View file

@@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
status = __gop_query32(sys_table_arg, gop32, &info, &size,
&current_fb_base);
if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
* provide multiple GOP devices, not all of which are
@@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
status = __gop_query64(sys_table_arg, gop64, &info, &size,
&current_fb_base);
if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
* provide multiple GOP devices, not all of which are

View file

@@ -1333,7 +1333,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
if (!fence) {
event_free(gpu, event);
ret = -ENOMEM;
goto out_pm_put;
goto out_unlock;
}
gpu->event[event].fence = fence;
@@ -1373,6 +1373,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
hangcheck_timer_reset(gpu);
ret = 0;
out_unlock:
mutex_unlock(&gpu->lock);
out_pm_put:

View file

@@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
{
int ret;
if (vgpu->failsafe)
return 0;
if (WARN_ON(bytes > 4))
return -EINVAL;

View file

@@ -776,7 +776,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
_EL_OFFSET_STATUS_PTR);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
ctx_status_ptr.read_ptr = 0;
ctx_status_ptr.write_ptr = 0x7;
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}

View file

@@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size;
unsigned long size, crc32_start;
int i;
int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
firmware = vzalloc(size);
if (!firmware)
return -ENOMEM;
@@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
memcpy(gvt->firmware.mmio, p, info->mmio_size);
crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
firmware_attr.size = size;
firmware_attr.private = firmware;
@@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
firmware->mmio = mem;
sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
pdev->revision);

View file

@@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_create = intel_gvt_create_vgpu,
.vgpu_destroy = intel_gvt_destroy_vgpu,
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
};
/**

View file

@@ -395,7 +395,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -462,6 +463,8 @@ struct intel_gvt_ops {
struct intel_vgpu_type *);
void (*vgpu_destroy)(struct intel_vgpu *);
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
};

View file

@@ -546,6 +546,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
if (ret)
goto undo_group;
intel_gvt_ops->vgpu_activate(vgpu);
atomic_set(&vgpu->vdev.released, 0);
return ret;
@@ -571,6 +573,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
return;
intel_gvt_ops->vgpu_deactivate(vgpu);
ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
&vgpu->vdev.iommu_notifier);
WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

View file

@@ -195,6 +195,47 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
}
}
/**
* intel_gvt_active_vgpu - activate a virtual GPU
* @vgpu: virtual GPU
*
* This function is called when user wants to activate a virtual GPU.
*
*/
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
mutex_lock(&vgpu->gvt->lock);
vgpu->active = true;
mutex_unlock(&vgpu->gvt->lock);
}
/**
* intel_gvt_deactive_vgpu - deactivate a virtual GPU
* @vgpu: virtual GPU
*
* This function is called when user wants to deactivate a virtual GPU.
* All virtual GPU runtime information will be destroyed.
*
*/
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
mutex_lock(&gvt->lock);
vgpu->active = false;
if (atomic_read(&vgpu->running_workload_num)) {
mutex_unlock(&gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&gvt->lock);
}
intel_vgpu_stop_schedule(vgpu);
mutex_unlock(&gvt->lock);
}
/**
* intel_gvt_destroy_vgpu - destroy a virtual GPU
* @vgpu: virtual GPU
@@ -208,16 +249,9 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
mutex_lock(&gvt->lock);
vgpu->active = false;
WARN(vgpu->active, "vGPU is still active!\n");
idr_remove(&gvt->vgpu_idr, vgpu->id);
if (atomic_read(&vgpu->running_workload_num)) {
mutex_unlock(&gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&gvt->lock);
}
intel_vgpu_stop_schedule(vgpu);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_gvt_context(vgpu);
intel_vgpu_clean_execlist(vgpu);
@@ -349,7 +383,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_shadow_ctx;
vgpu->active = true;
mutex_unlock(&gvt->lock);
return vgpu;

View file

@@ -1469,8 +1469,6 @@ static int i915_drm_suspend(struct drm_device *dev)
goto out;
}
intel_guc_suspend(dev_priv);
intel_display_suspend(dev);
intel_dp_mst_suspend(dev);

View file

@@ -4456,6 +4456,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
intel_guc_suspend(dev_priv);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->gt.retire_work);

View file

@@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
BUG();
}
static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
{
if (!unlock)
return;
mutex_unlock(&dev->struct_mutex);
/* expedite the RCU grace period to free some request slabs */
synchronize_rcu_expedited();
}
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
@@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
intel_runtime_pm_put(dev_priv);
i915_gem_retire_requests(dev_priv);
if (unlock)
mutex_unlock(&dev_priv->drm.struct_mutex);
/* expedite the RCU grace period to free some request slabs */
synchronize_rcu_expedited();
i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
return count;
}
@@ -296,8 +304,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
count += obj->base.size >> PAGE_SHIFT;
}
if (unlock)
mutex_unlock(&dev->struct_mutex);
i915_gem_shrinker_unlock(dev, unlock);
return count;
}
@@ -324,8 +331,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
sc->nr_to_scan - freed,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
if (unlock)
mutex_unlock(&dev->struct_mutex);
i915_gem_shrinker_unlock(dev, unlock);
return freed;
}
@@ -367,8 +374,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
struct shrinker_lock_uninterruptible *slu)
{
dev_priv->mm.interruptible = slu->was_interruptible;
if (slu->unlock)
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
}
static int

View file

@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/prefetch.h>
#include <asm/unaligned.h>
#include <drm/drmP.h>
#include "udl_drv.h"
@@ -163,7 +164,7 @@ static void udl_compress_hline16(
const u8 *const start = pixel;
const uint16_t repeating_pixel_val16 = pixel_val16;
*(uint16_t *)cmd = cpu_to_be16(pixel_val16);
put_unaligned_be16(pixel_val16, cmd);
cmd += 2;
pixel += bpp;

View file

@@ -819,8 +819,7 @@ static int hid_scan_report(struct hid_device *hid)
hid->group = HID_GROUP_WACOM;
break;
case USB_VENDOR_ID_SYNAPTICS:
if (hid->group == HID_GROUP_GENERIC ||
hid->group == HID_GROUP_MULTITOUCH_WIN_8)
if (hid->group == HID_GROUP_GENERIC)
if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
&& (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
/*
@@ -2096,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },

View file

@@ -1028,6 +1028,9 @@
#define USB_DEVICE_ID_UGEE_TABLET_45 0x0045
#define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d
#define USB_VENDOR_ID_UGEE 0x28bd
#define USB_DEVICE_ID_UGEE_TABLET_EX07S 0x0071
#define USB_VENDOR_ID_UNITEC 0x227d
#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19

View file

@@ -977,6 +977,7 @@ static int uclogic_probe(struct hid_device *hdev,
}
break;
case USB_DEVICE_ID_UGTIZER_TABLET_GP0610:
case USB_DEVICE_ID_UGEE_TABLET_EX07S:
/* If this is the pen interface */
if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
rc = uclogic_tablet_enable(hdev);
@@ -1069,6 +1070,7 @@ static const struct hid_device_id uclogic_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
{ }
};
MODULE_DEVICE_TABLE(hid, uclogic_devices);

View file

@@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
rx_desc->in_use = false;
}
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
@@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
struct ib_recv_wr *rx_wr_failed, rx_wr;
int ret;
if (!rx_desc->in_use) {
/*
* if the descriptor is not in-use we already reposted it
* for recv, so just silently return
*/
return 0;
}
rx_desc->in_use = false;
rx_wr.wr_cqe = &rx_desc->rx_cqe;
rx_wr.sg_list = &rx_desc->rx_sg;
rx_wr.num_sge = 1;
@@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
rx_desc->in_use = true;
ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
@@ -1659,10 +1671,23 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
if (ret)
transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
else
isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
if (ret) {
/*
* transport_generic_request_failure() expects to have
* plus two references to handle queue-full, so re-add
* one here as target-core will have already dropped
* it after the first isert_put_datain() callback.
*/
kref_get(&cmd->cmd_kref);
transport_generic_request_failure(cmd, cmd->pi_err);
} else {
/*
* XXX: isert_put_response() failure is not retried.
*/
ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
if (ret)
pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
}
}
static void
@@ -1699,13 +1724,15 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
spin_unlock_bh(&cmd->istate_lock);
if (ret) {
target_put_sess_cmd(se_cmd);
transport_send_check_condition_and_sense(se_cmd,
se_cmd->pi_err, 0);
} else {
/*
* transport_generic_request_failure() will drop the extra
* se_cmd->cmd_kref reference after T10-PI error, and handle
* any non-zero ->queue_status() callback error retries.
*/
if (ret)
transport_generic_request_failure(se_cmd, se_cmd->pi_err);
else
target_execute_cmd(se_cmd);
}
}
static void
@@ -2171,26 +2198,28 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
chain_wr = &isert_cmd->tx_desc.send_wr;
}
isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
return 1;
rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
isert_cmd, rc);
return rc;
}
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
int ret;
isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
isert_rdma_rw_ctx_post(isert_cmd, conn->context,
&isert_cmd->tx_desc.tx_cqe, NULL);
ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
&isert_cmd->tx_desc.tx_cqe, NULL);
isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
isert_cmd);
return 0;
isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
isert_cmd, ret);
return ret;
}
static int

View file

@@ -60,7 +60,7 @@
#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
sizeof(struct ib_cqe)))
sizeof(struct ib_cqe) + sizeof(bool)))
#define ISCSI_ISER_SG_TABLESIZE 256
@@ -85,6 +85,7 @@ struct iser_rx_desc {
u64 dma_addr;
struct ib_sge rx_sg;
struct ib_cqe rx_cqe;
bool in_use;
char pad[ISER_RX_PAD_SIZE];
} __packed;

View file

@@ -202,6 +202,7 @@ static const struct xpad_device {
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
@@ -326,6 +327,7 @@ static struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */
XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */

View file

@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
return -ENOMEM;
}
raw_spin_lock_init(&cd->rlock);
cd->gpc_base = of_iomap(node, 0);
if (!cd->gpc_base) {
pr_err("fsl-gpcv2: unable to map gpc registers\n");

View file

@@ -557,7 +557,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
int work_done = 0;
u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
/* Handle bus state changes */

View file

@@ -826,8 +826,7 @@ static int rcar_can_probe(struct platform_device *pdev)
devm_can_led_init(ndev);
dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
priv->regs, ndev->irq);
dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
return 0;
fail_candev:

View file

@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
static void ___team_compute_features(struct team *team)
static void __team_compute_features(struct team *team)
{
struct team_port *port;
u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1023,16 +1023,10 @@ static void ___team_compute_features(struct team *team)
team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}
static void __team_compute_features(struct team *team)
{
___team_compute_features(team);
netdev_change_features(team->dev);
}
static void team_compute_features(struct team *team)
{
mutex_lock(&team->lock);
___team_compute_features(team);
__team_compute_features(team);
mutex_unlock(&team->lock);
netdev_change_features(team->dev);
}
@@ -1641,6 +1635,7 @@ static void team_uninit(struct net_device *dev)
team_notify_peers_fini(team);
team_queue_override_fini(team);
mutex_unlock(&team->lock);
netdev_change_features(dev);
}
static void team_destructor(struct net_device *dev)
@@ -1928,6 +1923,10 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
mutex_lock(&team->lock);
err = team_port_add(team, port_dev);
mutex_unlock(&team->lock);
if (!err)
netdev_change_features(dev);
return err;
}
@@ -1939,6 +1938,10 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
mutex_lock(&team->lock);
err = team_port_del(team, port_dev);
mutex_unlock(&team->lock);
if (!err)
netdev_change_features(dev);
return err;
}

View file

@@ -908,7 +908,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
{QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
{QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */

View file

@@ -1929,7 +1929,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
" value=0x%04x index=0x%04x size=%d\n",
cmd, reqtype, value, index, size);
if (data) {
if (size) {
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
goto out;
@@ -1938,8 +1938,13 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
cmd, reqtype, value, index, buf, size,
USB_CTRL_GET_TIMEOUT);
if (err > 0 && err <= size)
memcpy(data, buf, err);
if (err > 0 && err <= size) {
if (data)
memcpy(data, buf, err);
else
netdev_dbg(dev->net,
"Huh? Data requested but thrown away.\n");
}
kfree(buf);
out:
return err;
@@ -1960,7 +1965,13 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
buf = kmemdup(data, size, GFP_KERNEL);
if (!buf)
goto out;
}
} else {
if (size) {
WARN_ON_ONCE(1);
err = -EINVAL;
goto out;
}
}
err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
cmd, reqtype, value, index, buf, size,
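The two usbnet hunks above change the bounce-buffer rule from "caller passed data" to "caller passed a non-zero size": reads allocate a kmalloc'd buffer whenever size != 0 (discarding the payload if data is NULL), and writes with size but no data now fail with -EINVAL. A hypothetical minidriver helper on top of the exported usbnet_read_cmd() wrapper shows the caller's side (the vendor command 0x01 and register layout are made up):

static int mydrv_read_reg16(struct usbnet *dev, u16 reg, u16 *val)
{
	__le16 tmp = 0;
	int ret;

	/* usbnet_read_cmd() bounces the transfer through its own
	 * buffer, so a stack variable is fine here.
	 */
	ret = usbnet_read_cmd(dev, 0x01 /* made-up vendor command */,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, reg, &tmp, sizeof(tmp));
	if (ret < 0)
		return ret;

	*val = le16_to_cpu(tmp);
	return 0;
}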

View file

@@ -2230,14 +2230,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
#define MIN_MTU ETH_MIN_MTU
#define MAX_MTU ETH_MAX_MTU
static int virtnet_probe(struct virtio_device *vdev)
static int virtnet_validate(struct virtio_device *vdev)
{
int i, err;
struct net_device *dev;
struct virtnet_info *vi;
u16 max_queue_pairs;
int mtu;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
__func__);
@@ -2247,6 +2241,25 @@ static int virtnet_probe(struct virtio_device *vdev)
if (!virtnet_validate_features(vdev))
return -EINVAL;
if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
int mtu = virtio_cread16(vdev,
offsetof(struct virtio_net_config,
mtu));
if (mtu < MIN_MTU)
__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
}
return 0;
}
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err;
struct net_device *dev;
struct virtnet_info *vi;
u16 max_queue_pairs;
int mtu;
/* Find if host supports multiqueue virtio_net device */
err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
struct virtio_net_config,
@@ -2362,11 +2375,20 @@ static int virtnet_probe(struct virtio_device *vdev)
offsetof(struct virtio_net_config,
mtu));
if (mtu < dev->min_mtu) {
__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
} else {
dev->mtu = mtu;
dev->max_mtu = mtu;
/* Should never trigger: MTU was previously validated
* in virtnet_validate.
*/
dev_err(&vdev->dev, "device MTU appears to have changed "
"it is now %d < %d", mtu, dev->min_mtu);
goto free_stats;
}
dev->mtu = mtu;
dev->max_mtu = mtu;
/* TODO: size buffers correctly in this case. */
if (dev->mtu > ETH_DATA_LEN)
vi->big_packets = true;
}
if (vi->any_header_sg)
@@ -2544,6 +2566,7 @@ static struct virtio_driver virtio_net_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.validate = virtnet_validate,
.probe = virtnet_probe,
.remove = virtnet_remove,
.config_changed = virtnet_config_changed,
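The virtio-net hunks above split config validation out of probe into the then-new .validate hook, which runs after the device's features are read but before they are finalized; a driver can trim feature bits there (here, dropping VIRTIO_NET_F_MTU when the advertised MTU is below the minimum) instead of failing probe after allocating resources. A minimal sketch of such a hook for an illustrative driver:

static int my_validate(struct virtio_device *vdev)
{
	/* Only config-space checks and feature trimming belong here;
	 * allocations stay in probe().
	 */
	if (!vdev->config->get)
		return -EINVAL;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU) &&
	    virtio_cread16(vdev, offsetof(struct virtio_net_config, mtu)) < 68)
		__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);	/* 68 == ETH_MIN_MTU */

	return 0;
}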

View file

@@ -934,8 +934,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
if (rc < 0)
goto out_unlock;
nvdimm_bus_unlock(&nvdimm_bus->dev);
if (copy_to_user(p, buf, buf_len))
rc = -EFAULT;
vfree(buf);
return rc;
out_unlock:
nvdimm_bus_unlock(&nvdimm_bus->dev);
out:
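The __nd_ioctl() fix above is an instance of a general rule: copy_to_user() can fault and sleep, so it must not run under nvdimm_bus_lock. As a generic sketch with illustrative names:

static int my_ioctl_copyout(struct mutex *lock, void __user *p,
			    void *buf, size_t buf_len)
{
	int rc = 0;

	mutex_lock(lock);
	/* ... fill buf while the state is stable ... */
	mutex_unlock(lock);

	if (copy_to_user(p, buf, buf_len))	/* may fault and sleep */
		rc = -EFAULT;
	vfree(buf);
	return rc;
}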

View file

@@ -243,7 +243,15 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
}
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) {
/*
* FIXME: nsio_rw_bytes() may be called from atomic
* context in the btt case and nvdimm_clear_poison()
* takes a sleeping lock. Until the locking can be
* reworked this capability requires that the namespace
* is not claimed by btt.
*/
if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
&& (!ndns->claim || !is_nd_btt(ndns->claim))) {
long cleared;
cleared = nvdimm_clear_poison(&ndns->dev, offset, size);

View file

@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create);
int alias_dpa_busy(struct device *dev, void *data)
{
resource_size_t map_end, blk_start, new, busy;
resource_size_t map_end, blk_start, new;
struct blk_alloc_info *info = data;
struct nd_mapping *nd_mapping;
struct nd_region *nd_region;
@@ -436,29 +436,19 @@ int alias_dpa_busy(struct device *dev, void *data)
retry:
/*
* Find the free dpa from the end of the last pmem allocation to
* the end of the interleave-set mapping that is not already
* covered by a blk allocation.
* the end of the interleave-set mapping.
*/
busy = 0;
for_each_dpa_resource(ndd, res) {
if (strncmp(res->name, "pmem", 4) != 0)
continue;
if ((res->start >= blk_start && res->start < map_end)
|| (res->end >= blk_start
&& res->end <= map_end)) {
if (strncmp(res->name, "pmem", 4) == 0) {
new = max(blk_start, min(map_end + 1,
res->end + 1));
if (new != blk_start) {
blk_start = new;
goto retry;
}
} else
busy += min(map_end, res->end)
- max(nd_mapping->start, res->start) + 1;
} else if (nd_mapping->start > res->start
&& map_end < res->end) {
/* total eclipse of the PMEM region mapping */
busy += nd_mapping->size;
break;
new = max(blk_start, min(map_end + 1, res->end + 1));
if (new != blk_start) {
blk_start = new;
goto retry;
}
}
}
@@ -470,52 +460,11 @@ int alias_dpa_busy(struct device *dev, void *data)
return 1;
}
info->available -= blk_start - nd_mapping->start + busy;
info->available -= blk_start - nd_mapping->start;
return 0;
}
static int blk_dpa_busy(struct device *dev, void *data)
{
struct blk_alloc_info *info = data;
struct nd_mapping *nd_mapping;
struct nd_region *nd_region;
resource_size_t map_end;
int i;
if (!is_nd_pmem(dev))
return 0;
nd_region = to_nd_region(dev);
for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_mapping = &nd_region->mapping[i];
if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
break;
}
if (i >= nd_region->ndr_mappings)
return 0;
map_end = nd_mapping->start + nd_mapping->size - 1;
if (info->res->start >= nd_mapping->start
&& info->res->start < map_end) {
if (info->res->end <= map_end) {
info->busy = 0;
return 1;
} else {
info->busy -= info->res->end - map_end;
return 0;
}
} else if (info->res->end >= nd_mapping->start
&& info->res->end <= map_end) {
info->busy -= nd_mapping->start - info->res->start;
return 0;
} else {
info->busy -= nd_mapping->size;
return 0;
}
}
/**
* nd_blk_available_dpa - account the unused dpa of BLK region
* @nd_mapping: container of dpa-resource-root + labels
@@ -545,11 +494,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
for_each_dpa_resource(ndd, res) {
if (strncmp(res->name, "blk", 3) != 0)
continue;
info.res = res;
info.busy = resource_size(res);
device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
info.available -= info.busy;
info.available -= resource_size(res);
}
return info.available;

View file

@@ -2023,7 +2023,7 @@ nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
}
ctrl->ctrl.sqsize =
min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
if (error)
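The same one-line change recurs in the RDMA and loop transports below. Both CAP.MQES and ctrl->ctrl.sqsize are zero-based values: a controller reporting MQES = 31 supports 32-entry queues, and a 32-entry queue is recorded as sqsize = 31. The old min_t(int, MQES + 1, sqsize) could therefore leave sqsize one entry past what the controller accepts; clamping to min_t(int, MQES, sqsize) keeps both values in the same zero-based domain.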

View file

@@ -1606,7 +1606,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
}
ctrl->ctrl.sqsize =
min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
if (error)

View file

@@ -392,7 +392,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
}
ctrl->ctrl.sqsize =
min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
if (error)

View file

@@ -13,6 +13,7 @@
* published by the Free Software Foundation.
*/
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -1524,10 +1525,31 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
/*
* Certain machines seem to hardcode Linux IRQ numbers in their ACPI
* tables. Since we leave GPIOs that are not capable of generating
* interrupts out of the irqdomain the numbering will be different and
cause devices using the hardcoded IRQ numbers to fail. In order not to
* break such machines we will only mask pins from irqdomain if the machine
* is not listed below.
*/
static const struct dmi_system_id chv_no_valid_mask[] = {
{
/* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
.ident = "Acer Chromebook (CYAN)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
},
},
{ }	/* dmi_system_id tables must be terminated by an empty entry */
};
static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
{
const struct chv_gpio_pinrange *range;
struct gpio_chip *chip = &pctrl->chip;
bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
int ret, i, offset;
*chip = chv_gpio_chip;
@@ -1536,7 +1558,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;
chip->base = -1;
chip->irq_need_valid_mask = true;
chip->irq_need_valid_mask = need_valid_mask;
ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
if (ret) {
@@ -1567,7 +1589,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
intsel &= CHV_PADCTRL0_INTSEL_MASK;
intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
if (intsel >= pctrl->community->nirqs)
if (need_valid_mask && intsel >= pctrl->community->nirqs)
clear_bit(i, chip->irq_valid_mask);
}

View file

@@ -1468,82 +1468,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
/* pin banks of exynos5433 pin-controller - ALIVE */
static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
};
/* pin banks of exynos5433 pin-controller - AUD */
static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
};
/* pin banks of exynos5433 pin-controller - CPIF */
static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
};
/* pin banks of exynos5433 pin-controller - eSE */
static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
};
/* pin banks of exynos5433 pin-controller - FINGER */
static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
};
/* pin banks of exynos5433 pin-controller - FSYS */
static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
};
/* pin banks of exynos5433 pin-controller - IMEM */
static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
};
/* pin banks of exynos5433 pin-controller - NFC */
static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
};
/* pin banks of exynos5433 pin-controller - PERIC */
static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
};
/* pin banks of exynos5433 pin-controller - TOUCH */
static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
};
/*

View file

@@ -79,17 +79,6 @@
.name = id \
}
#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
{ \
.type = &bank_type_alive, \
.pctl_offset = reg, \
.nr_pins = pins, \
.eint_type = EINT_TYPE_WKUP, \
.eint_offset = offs, \
.name = id, \
.pctl_res_idx = pctl_idx, \
} \
#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \
{ \
.type = &exynos5433_bank_type_off, \

View file

@@ -36,6 +36,14 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
.clk_rate = 19200000,
.npwm = 4,
.base_unit_bits = 22,
.bypass = true,
};
/* Tangier */
static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
.clk_rate = 19200000,
.npwm = 4,
.base_unit_bits = 22,
};
static int pwm_lpss_probe_pci(struct pci_dev *pdev,
@@ -97,7 +105,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
{ PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
{ PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_tng_info},
{ PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
{ PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},

View file

@@ -37,6 +37,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
.clk_rate = 19200000,
.npwm = 4,
.base_unit_bits = 22,
.bypass = true,
};
static int pwm_lpss_probe_platform(struct platform_device *pdev)

View file

@@ -57,7 +57,7 @@ static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value)
writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM);
}
static int pwm_lpss_update(struct pwm_device *pwm)
static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
{
struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip);
const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM;
@@ -65,8 +65,6 @@ static int pwm_lpss_update(struct pwm_device *pwm)
u32 val;
int err;
pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
/*
* PWM Configuration register has SW_UPDATE bit that is set when a new
* configuration is written to the register. The bit is automatically
@@ -122,6 +120,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
pwm_lpss_write(pwm, ctrl);
}
static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
{
if (cond)
pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
}
static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
@@ -137,18 +141,21 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
ret = pwm_lpss_update(pwm);
pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
ret = pwm_lpss_wait_for_update(pwm);
if (ret) {
pm_runtime_put(chip->dev);
return ret;
}
pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
} else {
ret = pwm_lpss_is_updating(pwm);
if (ret)
return ret;
pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
return pwm_lpss_update(pwm);
pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
return pwm_lpss_wait_for_update(pwm);
}
} else if (pwm_is_enabled(pwm)) {
pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
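The bypass flag threaded through these PWM hunks encodes an ordering quirk: most LPSS parts enable the channel before waiting for SW_UPDATE to clear, while Tangier (the new pwm_lpss_tng_info entry) may only be enabled after the update has latched. The enable path of pwm_lpss_apply() above condenses to this sketch:

static int pwm_lpss_enable_sketch(struct pwm_lpss_chip *lpwm,
				  struct pwm_device *pwm,
				  unsigned int duty, unsigned int period)
{
	int ret;

	pwm_lpss_prepare(lpwm, pwm, duty, period);
	pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);

	/* Most parts: enable while the update is still latching. */
	pwm_lpss_cond_enable(pwm, !lpwm->info->bypass);

	ret = pwm_lpss_wait_for_update(pwm);
	if (ret)
		return ret;

	/* Tangier: enable only once SW_UPDATE has cleared. */
	pwm_lpss_cond_enable(pwm, lpwm->info->bypass);
	return 0;
}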

View file

@@ -22,6 +22,7 @@ struct pwm_lpss_boardinfo {
unsigned long clk_rate;
unsigned int npwm;
unsigned long base_unit_bits;
bool bypass;
};
struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,

View file

@@ -191,6 +191,28 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
static int rockchip_pwm_enable(struct pwm_chip *chip,
struct pwm_device *pwm,
bool enable,
enum pwm_polarity polarity)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
int ret;
if (enable) {
ret = clk_enable(pc->clk);
if (ret)
return ret;
}
pc->data->set_enable(chip, pwm, enable, polarity);
if (!enable)
clk_disable(pc->clk);
return 0;
}
static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
@@ -207,22 +229,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
if (state->polarity != curstate.polarity && enabled) {
pc->data->set_enable(chip, pwm, false, state->polarity);
ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
if (ret)
goto out;
enabled = false;
}
ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (ret) {
if (enabled != curstate.enabled)
pc->data->set_enable(chip, pwm, !enabled,
state->polarity);
rockchip_pwm_enable(chip, pwm, !enabled,
state->polarity);
goto out;
}
if (state->enabled != enabled)
pc->data->set_enable(chip, pwm, state->enabled,
state->polarity);
if (state->enabled != enabled) {
ret = rockchip_pwm_enable(chip, pwm, state->enabled,
state->polarity);
if (ret)
goto out;
}
/*
* Update the state with the real hardware, which can differ a bit

View file

@@ -275,7 +275,7 @@ int reset_control_status(struct reset_control *rstc)
}
EXPORT_SYMBOL_GPL(reset_control_status);
static struct reset_control *__reset_control_get(
static struct reset_control *__reset_control_get_internal(
struct reset_controller_dev *rcdev,
unsigned int index, bool shared)
{
@@ -308,7 +308,7 @@ static struct reset_control *__reset_control_get(
return rstc;
}
static void __reset_control_put(struct reset_control *rstc)
static void __reset_control_put_internal(struct reset_control *rstc)
{
lockdep_assert_held(&reset_list_mutex);
@@ -377,7 +377,7 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
}
/* reset_list_mutex also protects the rcdev's reset_control list */
rstc = __reset_control_get(rcdev, rstc_id, shared);
rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
mutex_unlock(&reset_list_mutex);
@@ -385,6 +385,17 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
}
EXPORT_SYMBOL_GPL(__of_reset_control_get);
struct reset_control *__reset_control_get(struct device *dev, const char *id,
int index, bool shared, bool optional)
{
if (dev->of_node)
return __of_reset_control_get(dev->of_node, id, index, shared,
optional);
return optional ? NULL : ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(__reset_control_get);
/**
* reset_control_put - free the reset controller
* @rstc: reset controller
@@ -396,7 +407,7 @@ void reset_control_put(struct reset_control *rstc)
return;
mutex_lock(&reset_list_mutex);
__reset_control_put(rstc);
__reset_control_put_internal(rstc);
mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_put);
@@ -417,8 +428,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
rstc = __of_reset_control_get(dev ? dev->of_node : NULL,
id, index, shared, optional);
rstc = __reset_control_get(dev, id, index, shared, optional);
if (!IS_ERR(rstc)) {
*ptr = rstc;
devres_add(dev, ptr);
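With the new __reset_control_get() above, the devm lookup no longer needs an of_node to be present on the device: optional lookups without one return NULL, everything else gets -EINVAL. A minimal consumer sketch, assuming a DT-supplied reset line and a made-up pulse width:

static int my_probe(struct platform_device *pdev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	usleep_range(10, 20);		/* made-up minimum reset pulse */
	reset_control_deassert(rst);
	return 0;
}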

View file

@@ -1690,9 +1690,6 @@ struct aac_dev
#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
(dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
#define aac_adapter_check_health(dev) \
(dev)->a_ops.adapter_check_health(dev)
#define aac_adapter_restart(dev, bled, reset_type) \
((dev)->a_ops.adapter_restart(dev, bled, reset_type))
@@ -2615,6 +2612,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
return capacity;
}
static inline int aac_adapter_check_health(struct aac_dev *dev)
{
if (unlikely(pci_channel_offline(dev->pdev)))
return -1;
return (dev)->a_ops.adapter_check_health(dev);
}
/* SCp.phase values */
#define AAC_OWNER_MIDLEVEL 0x101
#define AAC_OWNER_LOWLEVEL 0x102

View file

@@ -1873,7 +1873,8 @@ int aac_check_health(struct aac_dev * aac)
spin_unlock_irqrestore(&aac->fib_lock, flagv);
if (BlinkLED < 0) {
printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
aac->name, BlinkLED);
goto out;
}

View file

@@ -6293,7 +6293,12 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
break;
case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
case IPR_IOASA_IR_DUAL_IOA_DISABLED:
scsi_cmd->result |= (DID_PASSTHROUGH << 16);
/*
* exception: do not set DID_PASSTHROUGH on CHECK CONDITION
* so SCSI mid-layer and upper layers handle it accordingly.
*/
if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
scsi_cmd->result |= (DID_PASSTHROUGH << 16);
break;
case IPR_IOASC_BUS_WAS_RESET:
case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:

View file

@@ -99,7 +99,8 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
qedf_set_vlan_id(qedf, vid);
/* Inform waiter that it's ok to call fcoe_ctlr_link_up() */
complete(&qedf->fipvlan_compl);
if (!completion_done(&qedf->fipvlan_compl))
complete(&qedf->fipvlan_compl);
}
}

View file

@@ -2803,6 +2803,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
atomic_set(&qedf->num_offloads, 0);
qedf->stop_io_on_error = false;
pci_set_drvdata(pdev, qedf);
init_completion(&qedf->fipvlan_compl);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, "

View file

@@ -1160,8 +1160,13 @@ static inline
uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
{
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
if (IS_P3P_TYPE(ha))
return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
else
return ((RD_REG_DWORD(&reg->host_status)) ==
ISP_REG_DISCONNECT);
}
/**************************************************************************

View file

@@ -2102,6 +2102,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
#define READ_CAPACITY_RETRIES_ON_RESET 10
/*
* Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
* and the reported logical block size is bigger than 512 bytes. Note
* that last_sector is a u64 and therefore logical_to_sectors() is not
* applicable.
*/
static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
{
u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
return false;
return true;
}
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
@@ -2167,7 +2183,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
return -ENODEV;
}
if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2256,7 +2272,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
return sector_size;
}
if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2956,7 +2972,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
} else
rw_max = BLK_DEF_MAX_SECTORS;
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
/* Combine with controller limits */
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
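A worked example for sd_addressable_capacity() above: with a 4096-byte logical block size, ilog2(4096) - 9 = 3, so last_sector = (lba + 1) << 3. A device reporting lba = 0x2fffffff (768 Mi blocks, i.e. 3 TiB) passes the old lba >= 0xffffffff test, yet expands to 0x180000000 512-byte sectors, overflowing a 32-bit sector_t on CONFIG_LBDAF=n kernels; the new check rejects exactly this case.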

View file

@@ -836,6 +836,7 @@ static void get_capabilities(struct scsi_cd *cd)
unsigned char *buffer;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
unsigned int ms_len = 128;
int rc, n;
static const char *loadmech[] =
@@ -862,10 +863,11 @@ static void get_capabilities(struct scsi_cd *cd)
scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
/* ask for mode page 0x2a */
rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL);
if (!scsi_status_is_good(rc)) {
if (!scsi_status_is_good(rc) || data.length > ms_len ||
data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1;
cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |

View file

@@ -485,8 +485,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *);
int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
return 0;
return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
}
EXPORT_SYMBOL(iscsit_queue_rsp);

View file

@@ -1398,11 +1398,10 @@ static u32 lio_sess_get_initiator_sid(
static int lio_queue_data_in(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
struct iscsi_conn *conn = cmd->conn;
cmd->i_state = ISTATE_SEND_DATAIN;
cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd);
return 0;
return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
}
static int lio_write_pending(struct se_cmd *se_cmd)
@@ -1431,16 +1430,14 @@ static int lio_write_pending_status(struct se_cmd *se_cmd)
static int lio_queue_status(struct se_cmd *se_cmd)
{
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
struct iscsi_conn *conn = cmd->conn;
cmd->i_state = ISTATE_SEND_STATUS;
if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
return 0;
return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
}
cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
return 0;
return conn->conn_transport->iscsit_queue_status(conn, cmd);
}
static void lio_queue_tm_rsp(struct se_cmd *se_cmd)

View file

@@ -781,22 +781,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
} else if (IS_TYPE_NUMBER(param)) {
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
/*
* The GlobalSAN iSCSI Initiator for MacOSX does
* not respond to MaxBurstLength, FirstBurstLength,
* DefaultTime2Wait or DefaultTime2Retain parameter keys.
* So, we set them to 'reply optional' here, and assume the
defaults from iscsi_parameters.h if the initiator
* is not RFC compliant and the keys are not negotiated.
*/
if (!strcmp(param->name, MAXBURSTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
if (!strcmp(param->name, FIRSTBURSTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
if (!strcmp(param->name, DEFAULTTIME2WAIT))
SET_PSTATE_REPLY_OPTIONAL(param);
if (!strcmp(param->name, DEFAULTTIME2RETAIN))
SET_PSTATE_REPLY_OPTIONAL(param);
/*
* Required for gPXE iSCSI boot client
*/

View file

@@ -567,7 +567,7 @@ static void iscsit_remove_cmd_from_immediate_queue(
}
}
void iscsit_add_cmd_to_response_queue(
int iscsit_add_cmd_to_response_queue(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn,
u8 state)
@@ -578,7 +578,7 @@ void iscsit_add_cmd_to_response_queue(
if (!qr) {
pr_err("Unable to allocate memory for"
" struct iscsi_queue_req\n");
return;
return -ENOMEM;
}
INIT_LIST_HEAD(&qr->qr_list);
qr->cmd = cmd;
@@ -590,6 +590,7 @@ void iscsit_add_cmd_to_response_queue(
spin_unlock_bh(&conn->response_queue_lock);
wake_up(&conn->queues_wq);
return 0;
}
struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
@@ -737,21 +738,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
struct se_cmd *se_cmd = NULL;
int rc;
bool op_scsi = false;
/*
* Determine if a struct se_cmd is associated with
* this struct iscsi_cmd.
*/
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
se_cmd = &cmd->se_cmd;
__iscsit_free_cmd(cmd, true, shutdown);
op_scsi = true;
/*
* Fallthrough
*/
case ISCSI_OP_SCSI_TMFUNC:
rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
__iscsit_free_cmd(cmd, true, shutdown);
se_cmd = &cmd->se_cmd;
__iscsit_free_cmd(cmd, op_scsi, shutdown);
rc = transport_generic_free_cmd(se_cmd, shutdown);
if (!rc && shutdown && se_cmd->se_sess) {
__iscsit_free_cmd(cmd, op_scsi, shutdown);
target_put_sess_cmd(se_cmd);
}
break;

View file

@@ -31,7 +31,7 @@ extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd
struct iscsi_conn_recovery **, itt_t);
extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);

View file

@@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
/*
* Set the ASYMMETRIC ACCESS State
*/
buf[off++] |= (atomic_read(
&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
/*
* Set supported ASYMMETRIC ACCESS State bits
*/
@@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd)
spin_lock(&lun->lun_tg_pt_gp_lock);
tg_pt_gp = lun->lun_tg_pt_gp;
out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
// XXX: keeps using tg_pt_gp without reference after unlock
@@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata(
}
/*
* Called with tg_pt_gp->tg_pt_gp_md_mutex held
* Called with tg_pt_gp->tg_pt_gp_transition_mutex held
*/
static int core_alua_update_tpg_primary_metadata(
struct t10_alua_tg_pt_gp *tg_pt_gp)
@@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata(
"alua_access_state=0x%02x\n"
"alua_access_status=0x%02x\n",
tg_pt_gp->tg_pt_gp_id,
tg_pt_gp->tg_pt_gp_alua_pending_state,
tg_pt_gp->tg_pt_gp_alua_access_state,
tg_pt_gp->tg_pt_gp_alua_access_status);
snprintf(path, ALUA_METADATA_PATH_LEN,
@@ -1013,13 +1012,52 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
static int core_alua_do_transition_tg_pt(
struct t10_alua_tg_pt_gp *tg_pt_gp,
int new_state,
int explicit)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
int prev_state;
mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
/* Nothing to be done here */
if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return 0;
}
if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return -EAGAIN;
}
/*
* Save the old primary ALUA access state, and set the current state
* to ALUA_ACCESS_STATE_TRANSITION.
*/
prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
core_alua_queue_state_change_ua(tg_pt_gp);
if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return 0;
}
/*
* Check for the optional ALUA primary state transition delay
*/
if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
/*
* Set the current primary ALUA access state to the requested new state
*/
tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
/*
* Update the ALUA metadata buf that has been allocated in
@@ -1034,93 +1072,19 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
* struct file does NOT affect the actual ALUA transition.
*/
if (tg_pt_gp->tg_pt_gp_write_metadata) {
mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
core_alua_update_tpg_primary_metadata(tg_pt_gp);
mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
}
/*
* Set the current primary ALUA access state to the requested new state
*/
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
tg_pt_gp->tg_pt_gp_alua_pending_state);
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" from primary access state %s to %s\n", (explicit) ? "explicit" :
"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id,
core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
core_alua_dump_state(prev_state),
core_alua_dump_state(new_state));
core_alua_queue_state_change_ua(tg_pt_gp);
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
if (tg_pt_gp->tg_pt_gp_transition_complete)
complete(tg_pt_gp->tg_pt_gp_transition_complete);
}
static int core_alua_do_transition_tg_pt(
struct t10_alua_tg_pt_gp *tg_pt_gp,
int new_state,
int explicit)
{
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
DECLARE_COMPLETION_ONSTACK(wait);
/* Nothing to be done here */
if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
return 0;
if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
return -EAGAIN;
/*
* Flush any pending transitions
*/
if (!explicit)
flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
/*
* Save the old primary ALUA access state, and set the current state
* to ALUA_ACCESS_STATE_TRANSITION.
*/
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_TRANSITION);
tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
core_alua_queue_state_change_ua(tg_pt_gp);
if (new_state == ALUA_ACCESS_STATE_TRANSITION)
return 0;
tg_pt_gp->tg_pt_gp_alua_previous_state =
atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
/*
* Check for the optional ALUA primary state transition delay
*/
if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
/*
* Take a reference for workqueue item
*/
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
if (explicit) {
tg_pt_gp->tg_pt_gp_transition_complete = &wait;
wait_for_completion(&wait);
tg_pt_gp->tg_pt_gp_transition_complete = NULL;
}
mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return 0;
}
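The net effect of the rework: the primary ALUA state is no longer an atomic_t poked from a work item, but a plain int whose every transition happens under the new tg_pt_gp_transition_mutex, which is why the atomic_read()/atomic_set() calls and the previous/pending state fields disappear throughout the rest of this diff. The serialization reduces to this skeleton:

mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
	mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
	return 0;			/* nothing to do */
}
/* ... mark TRANSITION, optional delay, metadata update ... */
tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return 0;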
@@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
}
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
core_alua_do_transition_tg_pt_work);
tg_pt_gp->tg_pt_gp_dev = dev;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
tg_pt_gp->tg_pt_gp_alua_access_state =
ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
/*
* Enable both explicit and implicit ALUA support by default
*/
@@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp(
dev->t10_alua.alua_tg_pt_gps_counter--;
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
* core_alua_get_tg_pt_gp_by_name() in
@@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
"Primary Access Status: %s\nTG Port Secondary Access"
" State: %s\nTG Port Secondary Access Status: %s\n",
config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
core_alua_dump_state(atomic_read(
&tg_pt_gp->tg_pt_gp_alua_access_state)),
core_alua_dump_state(
tg_pt_gp->tg_pt_gp_alua_access_state),
core_alua_dump_status(
tg_pt_gp->tg_pt_gp_alua_access_status),
atomic_read(&lun->lun_tg_pt_secondary_offline) ?

View file

@@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
char *page)
{
return sprintf(page, "%d\n",
atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state));
to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
}
static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,

Some files were not shown because too many files changed in this diff.